code stringlengths 51 2.38k | docstring stringlengths 4 15.2k |
|---|---|
def _n_onset_midi(patterns):
return len([o_m for pat in patterns for occ in pat for o_m in occ]) | Computes the number of onset_midi objects in a pattern
Parameters
----------
patterns :
A list of patterns using the format returned by
:func:`mir_eval.io.load_patterns()`
Returns
-------
n_onsets : int
Number of onsets within the pattern. |
def __try_read_byte_prev(self, address):
if address not in self.__memory_prev:
return False, None
return True, self.__memory_prev[address] | Read previous value for memory location.
Return a tuple (True, Byte) in case of successful read,
(False, None) otherwise. |
def is_active(self, name):
    """Report whether the named plugin exists and is active.

    :param name: plugin name
    :return: the plugin's ``active`` flag if the plugin exists, else None
    """
    # Bug fix: the original indexed the literal string "name"
    # (self._plugins["name"]) instead of the `name` argument, so it raised
    # KeyError for any registered plugin not literally called "name".
    if name in self._plugins:
        return self._plugins[name].active
    return None
def t_insert_dict_if_new(self, tblname, d, PKfields, fields=None):
SQL, values = self._insert_dict_if_new_inner(tblname, d, PKfields, fields=fields)
if SQL != False:
self.execute_select(SQL, parameters=values, locked=True)
return True, d
return False, values | A version of insertDictIfNew for transactions. This does not call commit. |
def _put_bucket_lifecycle(self):
status = 'deleted'
if self.s3props['lifecycle']['enabled']:
lifecycle_config = {
'Rules': self.s3props['lifecycle']['lifecycle_rules']
}
LOG.debug('Lifecycle Config: %s', lifecycle_config)
_response = self.s3client.put_bucket_lifecycle_configuration(Bucket=self.bucket,
LifecycleConfiguration=lifecycle_config)
status = 'applied'
else:
_response = self.s3client.delete_bucket_lifecycle(Bucket=self.bucket)
LOG.debug('Response setting up S3 lifecycle: %s', _response)
LOG.info('S3 lifecycle configuration %s', status) | Adds bucket lifecycle configuration. |
def push_state(self):
    """Push a shallow copy of the topmost state onto the state stack.

    Returns the new top of the stack (``self.state``).
    """
    self.states.append(dict(self.states[-1]))
    return self.state
def start(self):
setproctitle('oq-zworkerpool %s' % self.ctrl_url[6:])
self.workers = []
for _ in range(self.num_workers):
sock = z.Socket(self.task_out_port, z.zmq.PULL, 'connect')
proc = multiprocessing.Process(target=self.worker, args=(sock,))
proc.start()
sock.pid = proc.pid
self.workers.append(sock)
with z.Socket(self.ctrl_url, z.zmq.REP, 'bind') as ctrlsock:
for cmd in ctrlsock:
if cmd in ('stop', 'kill'):
msg = getattr(self, cmd)()
ctrlsock.send(msg)
break
elif cmd == 'getpid':
ctrlsock.send(self.pid)
elif cmd == 'get_num_workers':
ctrlsock.send(self.num_workers) | Start worker processes and a control loop |
def to_coo(self, fp=None, vartype_header=False):
import dimod.serialization.coo as coo
if fp is None:
return coo.dumps(self, vartype_header)
else:
coo.dump(self, fp, vartype_header) | Serialize the binary quadratic model to a COOrdinate_ format encoding.
.. _COOrdinate: https://en.wikipedia.org/wiki/Sparse_matrix#Coordinate_list_(COO)
Args:
fp (file, optional):
`.write()`-supporting `file object`_ to save the linear and quadratic biases
of a binary quadratic model to. The model is stored as a list of 3-tuples,
(i, j, bias), where :math:`i=j` for linear biases. If not provided,
returns a string.
vartype_header (bool, optional, default=False):
If true, the binary quadratic model's variable type as prepended to the
string or file as a header.
.. _file object: https://docs.python.org/3/glossary.html#term-file-object
.. note:: Variables must use index lables (numeric lables). Binary quadratic
models saved to COOrdinate format encoding do not preserve offsets.
Examples:
This is an example of a binary quadratic model encoded in COOrdinate format.
.. code-block:: none
0 0 0.50000
0 1 0.50000
1 1 -1.50000
The Coordinate format with a header
.. code-block:: none
# vartype=SPIN
0 0 0.50000
0 1 0.50000
1 1 -1.50000
This is an example of writing a binary quadratic model to a COOrdinate-format
file.
>>> bqm = dimod.BinaryQuadraticModel({0: -1.0, 1: 1.0}, {(0, 1): -1.0}, 0.0, dimod.SPIN)
>>> with open('tmp.ising', 'w') as file: # doctest: +SKIP
... bqm.to_coo(file)
This is an example of writing a binary quadratic model to a COOrdinate-format string.
>>> bqm = dimod.BinaryQuadraticModel({0: -1.0, 1: 1.0}, {(0, 1): -1.0}, 0.0, dimod.SPIN)
>>> bqm.to_coo() # doctest: +SKIP
0 0 -1.000000
0 1 -1.000000
1 1 1.000000 |
def glyph_has_ink(font: TTFont, name: Text) -> bool:
if 'glyf' in font:
return ttf_glyph_has_ink(font, name)
elif ('CFF ' in font) or ('CFF2' in font):
return cff_glyph_has_ink(font, name)
else:
raise Exception("Could not find 'glyf', 'CFF ', or 'CFF2' table.") | Checks if specified glyph has any ink.
That is, that it has at least one defined contour associated.
Composites are considered to have ink if any of their components have ink.
Args:
font: the font
glyph_name: The name of the glyph to check for ink.
Returns:
True if the font has at least one contour associated with it. |
def extract_mime(self, mime, def_mime='unk'):
    """Record the bare mimetype extracted from a full Content-Type value.

    Strips charset/parameter suffixes via ``self.MIME_RE`` and stores both
    the cleaned mimetype (``mime`` key) and the original header value
    (``_content_type`` key) on ``self``.
    """
    cleaned = def_mime
    if mime:
        cleaned = self.MIME_RE.split(mime, 1)[0]
    self['mime'] = cleaned
    self['_content_type'] = mime
def run_details(self, run):
run_data = dict(run=run)
req = urllib.request.Request("%s/nglims/api_run_details" % self._base_url,
urllib.parse.urlencode(run_data))
response = urllib.request.urlopen(req)
info = json.loads(response.read())
if "error" in info:
raise ValueError("Problem retrieving info: %s" % info["error"])
else:
return info["details"] | Retrieve sequencing run details as a dictionary. |
def validate(cls, **kwargs):
    # Validate kwargs against this model's declared fields, then run any
    # class-level validation rules; raises the accumulated errors object.
    errors = ValidationErrors()
    obj = cls()
    redis = cls.get_redis()
    # Field-level pass: fillable fields take their value from kwargs,
    # non-fillable fields fall back to their declared default.
    for fieldname, field in obj.proxy:
        if not field.fillable:
            value = field.default
        else:
            try:
                value = field.validate(kwargs.get(fieldname), redis)
            except BadField as e:
                # Collect the error and keep validating remaining fields
                # so the caller sees all problems at once.
                errors.append(e)
                continue
        setattr(
            obj,
            fieldname,
            value
        )
    # Object-level pass: any class attribute tagged _is_validation_rule is
    # invoked with the populated object.
    for fieldname in dir(cls):
        rule = getattr(cls, fieldname)
        if hasattr(rule, '_is_validation_rule') and rule._is_validation_rule:
            try:
                rule(obj)
            except BadField as e:
                errors.append(e)
    if errors.has_errors():
        raise errors
    return obj | Validates the data received as keyword arguments whose name match
this class attributes. |
def get_ref(profile, ref):
resource = "/refs/" + ref
data = api.get_request(profile, resource)
return prepare(data) | Fetch a ref.
Args:
profile
A profile generated from ``simplygithub.authentication.profile``.
Such profiles tell this module (i) the ``repo`` to connect to,
and (ii) the ``token`` to connect with.
ref
The ref to fetch, e.g., ``heads/my-feature-branch``.
Returns
A dict with data about the ref. |
def qsize(self, qname):
    """Return the approximate size of the named queue.

    :param qname: name of a queue registered in ``self._queues``
    :raises ValueError: if `qname` is not a defined queue
    """
    if qname in self._queues:
        return self._queues[qname].qsize()
    # Bug fix: the original passed the format string and the queue name as
    # two separate ValueError arguments (logging style), so "%s" was never
    # interpolated into the message.
    raise ValueError(_("queue %s is not defined") % qname)
def _joint_calling(items):
jointcaller = tz.get_in(("config", "algorithm", "jointcaller"), items[0])
if jointcaller:
assert len(items) == 1, "Can only do joint calling preparation with GATK with single samples"
assert tz.get_in(("metadata", "batch"), items[0]) is not None, \
"Joint calling requires batched samples, %s has no metadata batch." % dd.get_sample_name(items[0])
return jointcaller | Determine if this call feeds downstream into joint calls. |
def run_step(context):
logger.debug("started")
context.assert_key_has_value(key='defaults', caller=__name__)
context.set_defaults(context['defaults'])
logger.info(f"set {len(context['defaults'])} context item defaults.")
logger.debug("done") | Set hierarchy into context with substitutions if it doesn't exist yet.
context is a dictionary or dictionary-like.
context['defaults'] must exist. It's a dictionary.
Will iterate context['defaults'] and add these as new values where
their keys don't already exist. While it's doing so, it will leave
all other values in the existing hierarchy untouched.
List merging is purely additive, with no checks for uniqueness or already
existing list items. E.g context [0,1,2] with contextMerge=[2,3,4]
will result in [0,1,2,2,3,4]
Keep this in mind especially where complex types like
dicts nest inside a list - a merge will always add a new dict list item,
not merge it into whatever dicts might exist on the list already.
For example, say input context is:
key1: value1
key2: value2
key3:
k31: value31
k32: value32
defaults:
key2: 'aaa_{key1}_zzz'
key3:
k33: value33
key4: 'bbb_{key2}_yyy'
This will result in return context:
key1: value1
key2: value2
key3:
k31: value31
k32: value32
k33: value33
key4: bbb_value2_yyy |
def _get_names(dirs):
alphabets = set()
label_names = {}
for d in dirs:
for example in _walk_omniglot_dir(d):
alphabet, alphabet_char_id, label, _ = example
alphabets.add(alphabet)
label_name = "%s_%d" % (alphabet, alphabet_char_id)
if label in label_names:
assert label_names[label] == label_name
else:
label_names[label] = label_name
label_names = [label_names[k] for k in sorted(label_names)]
return alphabets, label_names | Get alphabet and label names, union across all dirs. |
def xread_group(self, group_name, consumer_name, streams, timeout=0,
count=None, latest_ids=None):
args = self._xread(streams, timeout, count, latest_ids)
fut = self.execute(
b'XREADGROUP', b'GROUP', group_name, consumer_name, *args
)
return wait_convert(fut, parse_messages_by_stream) | Perform a blocking read on the given stream as part of a consumer group
:raises ValueError: if the length of streams and latest_ids do
not match |
def adjust_bounding_box(bbox):
    # Adjust a canvas-computed bounding box with user-supplied overrides.
    # NOTE(review): `bounding_box` and `delta_bounding_box` are not defined
    # in this snippet -- presumably enclosing-scope/module mappings of
    # side-index -> absolute value and side-index -> delta; confirm in the
    # full file before relying on this.
    for i in range(0, 4):
        if i in bounding_box:
            # Absolute override wins for this side of the box.
            bbox[i] = bounding_box[i]
        else:
            # Otherwise shift the computed value by the user delta.
            bbox[i] += delta_bounding_box[i]
    return bbox | Adjust the bounding box as specified by user.
Returns the adjusted bounding box.
- bbox: Bounding box computed from the canvas drawings.
It must be a four-tuple of numbers. |
def configure(root_url, **kwargs):
default = kwargs.pop('default', True)
kwargs['client_agent'] = 'example-client/' + __version__
if 'headers' not in kwargs:
kwargs['headers'] = {}
kwargs['headers']['Accept-Type'] = 'application/json'
if default:
default_config.reset(root_url, **kwargs)
else:
Client.config = wac.Config(root_url, **kwargs) | Notice that `configure` can either apply to the default configuration or
`Client.config`, which is the configuration used by the current thread
since `Client` inherits form `threading.local`. |
def _ParseTriggerStartTime(self, parser_mediator, trigger):
    # Assemble a (year, month, day, hours, minutes, seconds) tuple from the
    # job trigger; the trigger format carries no seconds, hence the fixed 0.
    time_elements_tuple = (
        trigger.start_date.year, trigger.start_date.month,
        trigger.start_date.day_of_month, trigger.start_time.hours,
        trigger.start_time.minutes, 0)
    date_time = None
    # An all-zero tuple means the start time is not set in the job data.
    if time_elements_tuple != (0, 0, 0, 0, 0, 0):
        try:
            date_time = dfdatetime_time_elements.TimeElements(
                time_elements_tuple=time_elements_tuple)
            date_time.is_local_time = True
            # NOTE(review): writes a private dfdatetime attribute; prefer a
            # public precision API if dfdatetime exposes one.
            date_time._precision = dfdatetime_definitions.PRECISION_1_MINUTE
        except ValueError:
            # Malformed tuples produce a warning, not a parser failure.
            parser_mediator.ProduceExtractionWarning(
                'invalid trigger start time: {0!s}'.format(time_elements_tuple))
    return date_time | Parses the start time from a trigger.
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
trigger (job_trigger): a trigger.
Returns:
dfdatetime.DateTimeValues: last run date and time or None if not
available. |
def _call_zincrby(self, command, value, *args, **kwargs):
if self.indexable:
self.index([value])
return self._traverse_command(command, value, *args, **kwargs) | This command update a score of a given value. But it can be a new value
of the sorted set, so we index it. |
def find_module(self, fullname, path=None):
    """Return self when `fullname` names a module vendored under
    ``self.root_name``; otherwise return None so other finders are tried.
    """
    prefix, _sep, target = fullname.partition(self.root_name + '.')
    # A non-empty prefix means fullname did not start with root_name.
    if prefix:
        return None
    # Only claim modules whose dotted tail begins with a vendored name.
    if not target.startswith(tuple(self.vendored_names)):
        return None
    return self
def add_repository(self, name, repository_type, repository_class,
aggregate_class, make_default, configuration):
repo_mgr = self.get_registered_utility(IRepositoryManager)
if name is None:
name = REPOSITORY_DOMAINS.ROOT
repo = repo_mgr.new(repository_type, name=name,
make_default=make_default,
repository_class=repository_class,
aggregate_class=aggregate_class,
configuration=configuration)
repo_mgr.set(repo) | Generic method for adding a repository. |
def add_genes(in_file, data, max_distance=10000, work_dir=None):
gene_file = regions.get_sv_bed(data, "exons", out_dir=os.path.dirname(in_file))
if gene_file and utils.file_exists(in_file):
out_file = "%s-annotated.bed" % utils.splitext_plus(in_file)[0]
if work_dir:
out_file = os.path.join(work_dir, os.path.basename(out_file))
if not utils.file_uptodate(out_file, in_file):
fai_file = ref.fasta_idx(dd.get_ref_file(data))
with file_transaction(data, out_file) as tx_out_file:
_add_genes_to_bed(in_file, gene_file, fai_file, tx_out_file, data, max_distance)
return out_file
else:
return in_file | Add gene annotations to a BED file from pre-prepared RNA-seq data.
max_distance -- only keep annotations within this distance of event |
def to_fs_path(uri):
scheme, netloc, path, _params, _query, _fragment = urlparse(uri)
if netloc and path and scheme == 'file':
value = "//{}{}".format(netloc, path)
elif RE_DRIVE_LETTER_PATH.match(path):
value = path[1].lower() + path[2:]
else:
value = path
if IS_WIN:
value = value.replace('/', '\\')
return value | Returns the filesystem path of the given URI.
Will handle UNC paths and normalize windows drive letters to lower-case. Also
uses the platform specific path separator. Will *not* validate the path for
invalid characters and semantics. Will *not* look at the scheme of this URI. |
def from_url(cls, url, db=None, **kwargs):
connection_pool = ConnectionPool.from_url(url, db=db, **kwargs)
return cls(connection_pool=connection_pool) | Return a Redis client object configured from the given URL
For example::
redis://[:password]@localhost:6379/0
rediss://[:password]@localhost:6379/0
unix://[:password]@/path/to/socket.sock?db=0
Three URL schemes are supported:
- ```redis://``
<http://www.iana.org/assignments/uri-schemes/prov/redis>`_ creates a
normal TCP socket connection
- ```rediss://``
<http://www.iana.org/assignments/uri-schemes/prov/rediss>`_ creates a
SSL wrapped TCP socket connection
- ``unix://`` creates a Unix Domain Socket connection
There are several ways to specify a database number. The parse function
will return the first specified option:
1. A ``db`` querystring option, e.g. redis://localhost?db=0
2. If using the redis:// scheme, the path argument of the url, e.g.
redis://localhost/0
3. The ``db`` argument to this function.
If none of these options are specified, db=0 is used.
Any additional querystring arguments and keyword arguments will be
passed along to the ConnectionPool class's initializer. In the case
of conflicting arguments, querystring arguments always win. |
def _post(self, uri, data, headers=None):
if not headers:
headers = self._get_headers()
logging.debug("URI=" + str(uri))
logging.debug("HEADERS=" + str(headers))
logging.debug("BODY=" + str(data))
response = self.session.post(uri, headers=headers,
data=json.dumps(data))
logging.debug("STATUS=" + str(response.status_code))
if response.status_code in [200, 201]:
return response.json()
else:
logging.error(b"ERROR=" + response.content)
response.raise_for_status() | Simple POST request for a given uri path. |
def parse_torrent_properties(table_datas):
output = {'category': table_datas[0].text, 'subcategory': None, 'quality': None, 'language': None}
for i in range(1, len(table_datas)):
td = table_datas[i]
url = td.get('href')
params = Parser.get_params(url)
if Parser.is_subcategory(params) and not output['subcategory']:
output['subcategory'] = td.text
elif Parser.is_quality(params) and not output['quality']:
output['quality'] = td.text
elif Parser.is_language(params) and not output['language']:
output['language'] = td.text
return output | Static method that parses a given list of table data elements and using helper methods
`Parser.is_subcategory`, `Parser.is_quality`, `Parser.is_language`, collects torrent properties.
:param list lxml.HtmlElement table_datas: table_datas to parse
:return: identified category, subcategory, quality and languages.
:rtype: dict |
def is_executable(path):
    """Return truthy if any execute bit (user, group, or other) is set
    on `path`."""
    # Stat once instead of three times; the or-chain preserves the
    # original truthy-int return value.
    mode = os.stat(path)[stat.ST_MODE]
    return (stat.S_IXUSR & mode
            or stat.S_IXGRP & mode
            or stat.S_IXOTH & mode)
def kill(self):
self._killed.set()
if not self.is_alive():
logging.debug('Cannot kill thread that is no longer running.')
return
if not self._is_thread_proc_running():
logging.debug("Thread's _thread_proc function is no longer running, "
'will not kill; letting thread exit gracefully.')
return
self.async_raise(ThreadTerminationError) | Terminates the current thread by raising an error. |
def cache_info(self):
return {
'single_node_repertoire':
self._single_node_repertoire_cache.info(),
'repertoire': self._repertoire_cache.info(),
'mice': self._mice_cache.info()
} | Report repertoire cache statistics. |
def mset_list(item, index, value):
    """Set multiple items via an index that is an int, slice, or list.

    For an int or slice index this is plain item assignment; for a list of
    indices, each position ``index[i]`` is assigned ``value[i]``.
    """
    if isinstance(index, (int, slice)):
        item[index] = value
    else:
        # Bug fix: map() returns a lazy iterator in Python 3, so the
        # original `map(item.__setitem__, index, value)` never executed
        # any assignment.  Iterate explicitly instead.
        for i, v in zip(index, value):
            item[i] = v
def write_packets(self):
    # Drain queued packets to the client, oldest first, while running.
    # NOTE(review): this busy-waits when the queue is empty and
    # list.pop(0) is O(n); a queue.Queue or collections.deque would avoid
    # both -- left unchanged here to preserve threading behavior.
    while self.running:
        if len(self.write_queue) > 0:
            # Send the head packet, then drop it (FIFO order).
            self.write_queue[0].send(self.client)
            self.write_queue.pop(0) | Write packets from the queue |
def getAssociationFilename(self, server_url, handle):
if server_url.find('://') == -1:
raise ValueError('Bad server URL: %r' % server_url)
proto, rest = server_url.split('://', 1)
domain = _filenameEscape(rest.split('/', 1)[0])
url_hash = _safe64(server_url)
if handle:
handle_hash = _safe64(handle)
else:
handle_hash = ''
filename = '%s-%s-%s-%s' % (proto, domain, url_hash, handle_hash)
return os.path.join(self.association_dir, filename) | Create a unique filename for a given server url and
handle. This implementation does not assume anything about the
format of the handle. The filename that is returned will
contain the domain name from the server URL for ease of human
inspection of the data directory.
(str, str) -> str |
def send_faucet_coins(address_to_fund, satoshis, api_key, coin_symbol='bcy'):
assert coin_symbol in ('bcy', 'btc-testnet')
assert is_valid_address_for_coinsymbol(b58_address=address_to_fund, coin_symbol=coin_symbol)
assert satoshis > 0
assert api_key, 'api_key required'
url = make_url(coin_symbol, 'faucet')
data = {
'address': address_to_fund,
'amount': satoshis,
}
params = {'token': api_key}
r = requests.post(url, json=data, params=params, verify=True, timeout=TIMEOUT_IN_SECONDS)
return get_valid_json(r) | Send yourself test coins on the bitcoin or blockcypher testnet
You can see your balance info at:
- https://live.blockcypher.com/bcy/ for BCY
- https://live.blockcypher.com/btc-testnet/ for BTC Testnet |
def is_enabled():
    '''
    See if the jail service is actually enabled on boot.

    CLI Example:

    .. code-block:: bash

        salt '*' jail.is_enabled <jail name>
    '''
    cmd = 'service -e'
    services = __salt__['cmd.run'](cmd, python_shell=False)
    # Bug fix: the original split on the two-character literal backslash-n
    # ('\\n'), so the entire command output remained one string and the
    # per-line scan below was meaningless.  Split on real newlines.
    for service in services.split('\n'):
        if re.search('jail', service):
            return True
    return False
def delete(self):
self.__dmlquery__(self.__class__, self,
batch=self._batch,
timestamp=self._timestamp,
consistency=self.__consistency__,
timeout=self._timeout).delete() | Deletes this instance |
def mark_all_as_read(self, recipient=None):
qset = self.unread(True)
if recipient:
qset = qset.filter(recipient=recipient)
return qset.update(unread=False) | Mark as read any unread messages in the current queryset.
Optionally, filter these by recipient first. |
def trim_core(self):
    """Shrink a previously extracted unsatisfiable core.

    Re-solves under the current core's assumptions at most ``self.trim``
    times, keeping each strictly smaller core.  Stops early at a fixed
    point (when the candidate core stops shrinking).
    """
    for _ in range(self.trim):
        self.oracle.solve(assumptions=self.core)
        candidate = self.oracle.get_core()
        if len(candidate) == len(self.core):
            break
        self.core = candidate
def url_to_text(self, url):
path, headers = urllib.request.urlretrieve(url)
return self.path_to_text(path) | Download PDF file and transform its document to string.
Args:
url: PDF url.
Returns:
string. |
def batch_contains_deleted(self):
    """Check if the current batch contains already-deleted images."""
    if not self._duplicates:
        return False
    batch = self._all_images[:self._batch_size]
    # NOTE(review): only the first two entries of the batch are inspected,
    # matching the original pairwise-duplicate layout -- confirm batches
    # are always duplicate pairs.
    filenames = (batch[0][1], batch[1][1])
    return any(fn in self._deleted_fns for fn in filenames)
def remove_forms(self, form_names):
    """Best-effort removal of each named form from ``self.parentApp``.

    Errors from individual removals are deliberately swallowed so that one
    bad form name does not stop the remaining removals.
    """
    for name in form_names:
        try:
            self.parentApp.removeForm(name)
        except Exception:
            # Intentional best-effort: skip forms that fail to remove.
            pass
def remote_evb_cfgd_uneq_store(self, remote_evb_cfgd):
    """Store the remote EVB config only if it differs from the cached one.

    Returns True when the stored value changed, False otherwise.
    """
    if remote_evb_cfgd == self.remote_evb_cfgd:
        return False
    self.remote_evb_cfgd = remote_evb_cfgd
    return True
def drop(manager: Manager, network_id: Optional[int], yes):
if network_id:
manager.drop_network_by_id(network_id)
elif yes or click.confirm('Drop all networks?'):
manager.drop_networks() | Drop a network by its identifier or drop all networks. |
def start_stress(self, stress_cmd):
with open(os.devnull, 'w') as dev_null:
try:
stress_proc = subprocess.Popen(stress_cmd, stdout=dev_null,
stderr=dev_null)
self.set_stress_process(psutil.Process(stress_proc.pid))
except OSError:
logging.debug("Unable to start stress") | Starts a new stress process with a given cmd |
def _serialize(self):
if self._defcode is None:
raise exceptions.UnboundResponse()
resp = self.response_class(request=self.req, status=self.code,
headerlist=self._headers.items())
if self.result:
resp.content_type = self.content_type
resp.body = self.serializer(self.result)
return resp | Serialize the ResponseObject. Returns a webob `Response`
object. |
def unlock_kinetis(jlink):
if not jlink.connected():
raise ValueError('No target to unlock.')
method = UNLOCK_METHODS.get(jlink.tif, None)
if method is None:
raise NotImplementedError('Unsupported target interface for unlock.')
return method(jlink) | Unlock for Freescale Kinetis K40 or K60 device.
Args:
jlink (JLink): an instance of a J-Link that is connected to a target.
Returns:
``True`` if the device was successfully unlocked, otherwise ``False``.
Raises:
ValueError: if the J-Link is not connected to a target. |
def create():
if request.method == "POST":
title = request.form["title"]
body = request.form["body"]
error = None
if not title:
error = "Title is required."
if error is not None:
flash(error)
else:
db.session.add(Post(title=title, body=body, author=g.user))
db.session.commit()
return redirect(url_for("blog.index"))
return render_template("blog/create.html") | Create a new post for the current user. |
def position_for_index(self, index):
    """Binary-search ``self.elements`` for where `index` lives or belongs.

    The vector stores (index, value) pairs flattened as
    ``[idx0, val0, idx1, val1, ...]``, so returned positions are always
    even.  For a duplicate index, the position of the existing entry is
    returned; the caller decides whether to update or insert there.
    """
    if not self.elements:
        return 0
    lo = 0
    hi = len(self.elements) // 2
    span = hi - lo
    mid = span // 2
    mid_index = self.elements[mid * 2]
    while span > 1:
        if mid_index < index:
            lo = mid
        elif mid_index > index:
            hi = mid
        else:
            # Exact match found; fall through to the final comparison.
            break
        span = hi - lo
        mid = lo + span // 2
        mid_index = self.elements[mid * 2]
    # Equal or greater: insert/update at this pair; smaller: slot after it.
    if mid_index >= index:
        return mid * 2
    return (mid + 1) * 2
def network_traffic_ports(instance):
for key, obj in instance['objects'].items():
if ('type' in obj and obj['type'] == 'network-traffic' and
('src_port' not in obj or 'dst_port' not in obj)):
yield JSONError("The Network Traffic object '%s' should contain "
"both the 'src_port' and 'dst_port' properties."
% key, instance['id'], 'network-traffic-ports') | Ensure network-traffic objects contain both src_port and dst_port. |
def resource_create(resource_id, resource_type, resource_options=None, cibfile=None):
return item_create(item='resource',
item_id=resource_id,
item_type=resource_type,
extra_args=resource_options,
cibfile=cibfile) | Create a resource via pcs command
resource_id
name for the resource
resource_type
resource type (f.e. ocf:heartbeat:IPaddr2 or VirtualIP)
resource_options
additional options for creating the resource
cibfile
use cibfile instead of the live CIB for manipulation
CLI Example:
.. code-block:: bash
salt '*' pcs.resource_create resource_id='galera' resource_type='ocf:heartbeat:galera' resource_options="['wsrep_cluster_address=gcomm://node1.example.org,node2.example.org,node3.example.org', '--master']" cibfile='/tmp/cib_for_galera.cib' |
def format_output(func):
    """Decorator that formats CLI output.

    Prints the wrapped function's return value and exits 0; on any
    exception, prints the error in red to stderr and exits 1.
    """
    # Bug fix: the original began with a stray `return func`, which
    # returned the function undecorated and left the wrapper below as
    # unreachable dead code.
    @wraps(func)
    def wrapper(*args, **kwargs):
        try:
            response = func(*args, **kwargs)
        except Exception as error:
            # NOTE(review): `colored` is assumed to come from termcolor at
            # module scope -- confirm against the file's imports.
            print(colored(error, 'red'), file=sys.stderr)
            sys.exit(1)
        else:
            print(response)
            sys.exit(0)
    return wrapper
def n_chunks(self):
return self._data_source.n_chunks(self.chunksize, stride=self.stride, skip=self.skip) | rough estimate of how many chunks will be processed |
def do_batch(args):
if args.subcommand == 'list':
do_batch_list(args)
if args.subcommand == 'show':
do_batch_show(args)
if args.subcommand == 'status':
do_batch_status(args)
if args.subcommand == 'submit':
do_batch_submit(args) | Runs the batch list, batch show or batch status command, printing output
to the console
Args:
args: The parsed arguments sent to the command at runtime |
def connect(self, listener, pass_signal=False):
info = listenerinfo(listener, pass_signal)
self._listeners.append(info)
_logger.debug("connect %r to %r", str(listener), self._name)
if inspect.ismethod(listener):
listener_object = listener.__self__
if not hasattr(listener_object, "__listeners__"):
listener_object.__listeners__ = collections.defaultdict(list)
listener_object.__listeners__[listener].append(self) | Connect a new listener to this signal
:param listener:
The listener (callable) to add
:param pass_signal:
An optional argument that controls if the signal object is
explicitly passed to this listener when it is being fired.
If enabled, a ``signal=`` keyword argument is passed to the
listener function.
:returns:
None
The listener will be called whenever :meth:`fire()` or
:meth:`__call__()` are called. The listener is appended to the list of
listeners. Duplicates are not checked and if a listener is added twice
it gets called twice. |
def split(expr, frac, seed=None):
if hasattr(expr, '_xflow_split'):
return expr._xflow_split(frac, seed=seed)
else:
return _split(expr, frac, seed=seed) | Split the current column into two column objects with certain ratio.
:param float frac: Split ratio
:return: two split DataFrame objects |
def nla_for_each_attr(head, len_, rem):
pos = head
rem.value = len_
while nla_ok(pos, rem):
yield pos
pos = nla_next(pos, rem) | Iterate over a stream of attributes.
https://github.com/thom311/libnl/blob/libnl3_2_25/include/netlink/attr.h#L262
Positional arguments:
head -- first nlattr with more in its bytearray payload (nlattr class instance).
len_ -- length of attribute stream (integer).
rem -- initialized to len, holds bytes currently remaining in stream (c_int).
Returns:
Generator yielding nlattr instances. |
def register(self, email, username, password, first_name, last_name, birthday="1974-11-20", captcha_result=None):
self.username = username
self.password = password
register_message = sign_up.RegisterRequest(email, username, password, first_name, last_name, birthday, captcha_result,
self.device_id_override, self.android_id_override)
log.info("[+] Sending sign up request (name: {} {}, email: {})...".format(first_name, last_name, email))
return self._send_xmpp_element(register_message) | Sends a register request to sign up a new user to kik with the given details. |
def get(self, alias, target=None):
for target_part in reversed(list(self._get_targets(target))):
options = self._get(target_part, alias)
if options:
return options | Get a dictionary of aliased options.
:param alias: The name of the aliased options.
:param target: Get alias for this specific target (optional).
If no matching alias is found, returns ``None``. |
def footnotemap(self, cache=True):
if self.__footnotemap is not None and cache==True:
return self.__footnotemap
else:
x = self.xml(src='word/footnotes.xml')
d = Dict()
if x is None: return d
for footnote in x.root.xpath("w:footnote", namespaces=self.NS):
id = footnote.get("{%(w)s}id" % self.NS)
typ = footnote.get("{%(w)s}type" % self.NS)
d[id] = Dict(id=id, type=typ, elem=footnote)
if cache==True: self.__footnotemap = d
return d | return the footnotes from the docx, keyed to string id. |
def nth(lst, n):
expect_type(n, (String, Number), unit=None)
if isinstance(n, String):
if n.value.lower() == 'first':
i = 0
elif n.value.lower() == 'last':
i = -1
else:
raise ValueError("Invalid index %r" % (n,))
else:
i = n.to_python_index(len(lst), circular=True)
return lst[i] | Return the nth item in the list. |
def write(self, string):
    """Write `string` to ``self.path``, guaranteeing a trailing newline.

    Ensures the parent directory exists first via ``self.make_dir()``.
    Returns the number of characters written.
    """
    self.make_dir()
    if not string.endswith("\n"):
        string += "\n"
    with open(self.path, "w") as handle:
        return handle.write(string)
def save_pkl(self, filename):
    """Serialize this object to `filename` using pickle (binary mode)."""
    with open(filename, "wb") as handle:
        pickle.dump(self, handle)
def validateDayOfWeek(value, blank=False, strip=None, allowlistRegexes=None, blocklistRegexes=None, dayNames=ENGLISH_DAYS_OF_WEEK, excMsg=None):
    """Raises ValidationException if value is not a day of the week, such as 'Mon' or 'Friday'.
    Returns the titlecased day of the week.

    * value (str): The value being validated as a day of the week.
    * blank (bool): If True, a blank string will be accepted. Defaults to False.
    * strip (bool, str, None): If None, whitespace is stripped from value. If a str, the characters in it are stripped from value. If False, nothing is stripped.
    * allowlistRegexes (Sequence, None): A sequence of regex str that will explicitly pass validation, even if they aren't numbers.
    * blocklistRegexes (Sequence, None): A sequence of regex str or (regex_str, response_str) tuples that, if matched, will explicitly fail validation.
    * dayNames (Mapping): A mapping of uppercase day abbreviations to day names, i.e. {'SUN': 'Sunday', ...} The default provides English day names.
    * excMsg (str): A custom message to use in the raised ValidationException.

    >>> import pysimplevalidate as pysv
    >>> pysv.validateDayOfWeek('mon')
    'Monday'
    >>> pysv.validateDayOfWeek('THURSday')
    'Thursday'
    """
    # Reuse the month-name validator with day names as the vocabulary.
    try:
        # Bug fix: the original ignored the `dayNames` parameter and always
        # passed the ENGLISH_DAYS_OF_WEEK constant, so callers could never
        # supply custom day names.
        return validateMonth(value, blank=blank, strip=strip, allowlistRegexes=allowlistRegexes, blocklistRegexes=blocklistRegexes, monthNames=dayNames)
    except Exception:
        # Narrowed from a bare `except:`; re-raise with a day-specific message.
        _raiseValidationException(_('%r is not a day of the week') % (_errstr(value)), excMsg)
def set_y(self, y):
    """Set the y position and reset x to the left margin.

    A negative `y` is interpreted as an offset back from the page
    height ``self.h``.
    """
    self.x = self.l_margin
    self.y = y if y >= 0 else self.h + y
def _write_cpr(self, f, cType, parameter) -> int:
f.seek(0, 2)
byte_loc = f.tell()
block_size = CDF.CPR_BASE_SIZE64 + 4
section_type = CDF.CPR_
rfuA = 0
pCount = 1
cpr = bytearray(block_size)
cpr[0:8] = struct.pack('>q', block_size)
cpr[8:12] = struct.pack('>i', section_type)
cpr[12:16] = struct.pack('>i', cType)
cpr[16:20] = struct.pack('>i', rfuA)
cpr[20:24] = struct.pack('>i', pCount)
cpr[24:28] = struct.pack('>i', parameter)
f.write(cpr)
return byte_loc | Write compression info to the end of the file in a CPR. |
def leaders(self, current_page, **options):
return self.leaders_in(self.leaderboard_name, current_page, **options) | Retrieve a page of leaders from the leaderboard.
@param current_page [int] Page to retrieve from the leaderboard.
@param options [Hash] Options to be used when retrieving the page from the leaderboard.
@return a page of leaders from the leaderboard. |
def soviet_checksum(code):
    """Compute and append the checksum digit for a 7-digit Soviet-era code.

    Courtesy of Sir Vlad Lavrov.

    :param code: string of (at least) 7 digits
    :return: the code with its check digit appended
    """
    def sum_digits(code, offset=1):
        # Weighted sum of the first seven digits, weights starting at
        # `offset`; the candidate check digit is the remainder mod 11.
        # BUGFIX: the original computed `total - total / 11 * 11`, which is
        # float division under Python 3 and yields a tiny float instead of
        # the integer remainder; `% 11` is the intent.
        total = sum(int(digit) * index
                    for index, digit in enumerate(code[:7], offset))
        return total % 11

    check = sum_digits(code, 1)
    if check == 10:
        # A remainder of 10 is not a single digit: recompute with shifted
        # weights; if the result is still 10, the check digit is 0.
        check = sum_digits(code, 3)
        if check == 10:
            return code + '0'
    return code + str(check)
def _instance_parser(self, plugins):
    """Internal method to parse instances of plugins.

    Determines whether each entry is a class or an object instance and
    dispatches it to the appropriate handler method.
    """
    for candidate in util.return_list(plugins):
        handler = (self._handle_class_instance if inspect.isclass(candidate)
                   else self._handle_object_instance)
        handler(candidate)
def get_connect_redirect_url(self, request, socialaccount):
    """Return the default URL to redirect to after successfully
    connecting a social account.
    """
    # Connecting only makes sense for an already logged-in user.
    assert request.user.is_authenticated
    return reverse('socialaccount_connections')
def _consolidate_classpath(self, targets, classpath_products):
    """Convert loose directories in classpath_products into jars."""
    # Group every classpath entry by its owning target so the work below
    # can be done per-target inside the invalidation block.
    entries_map = defaultdict(list)
    for (cp, target) in classpath_products.get_product_target_mappings_for_targets(targets, True):
        entries_map[target].append(cp)
    with self.invalidated(targets=targets, invalidate_dependents=True) as invalidation:
        for vt in invalidation.all_vts:
            entries = entries_map.get(vt.target, [])
            for index, (conf, entry) in enumerate(entries):
                # Only loose directories are jarred; jar entries pass through.
                if ClasspathUtil.is_dir(entry.path):
                    jarpath = os.path.join(vt.results_dir, 'output-{}.jar'.format(index))
                    # Rebuild the jar only for invalid (changed) targets; valid
                    # targets reuse the jar already present in results_dir.
                    if not vt.valid:
                        with self.open_jar(jarpath, overwrite=True, compressed=False) as jar:
                            jar.write(entry.path)
                    # Swap the directory entry for the jar in the products.
                    classpath_products.remove_for_target(vt.target, [(conf, entry.path)])
                    classpath_products.add_for_target(vt.target, [(conf, jarpath)])
def reportProgress(self, state, action, text=None, tick=None):
    """Forward progress information to the registered callback, if any.

    state: 'prep' reading sources, 'generate' making instances,
           'done' wrapping up, 'error' reporting a problem
    action: 'start', 'stop', or 'source' (which ufo is being read)
    text: extra detail such as a ufo name
    tick: a float between 0 and 1 indicating progress
    """
    callback = self.progressFunc
    if callback is None:
        # Nobody is listening; progress reporting is optional.
        return
    callback(state=state, action=action, text=text, tick=tick)
def _always_running_service(name):
    '''
    Check if the service should always be running based on the KeepAlive Key
    in the service plist.

    :param str name: Service label, file name, or full path

    :return: True if the KeepAlive key is set to True, False if set to False or
        not set in the plist at all.
    :rtype: bool

    .. versionadded:: 2019.2.0
    '''
    service_info = show(name)
    try:
        keep_alive = service_info['plist']['KeepAlive']
    except KeyError:
        # No KeepAlive key at all means "not always running".
        return False

    if isinstance(keep_alive, dict):
        # KeepAlive may be a dict of conditions; PathState maps file paths to
        # the existence state under which the service keeps running.
        # (six.iteritems dropped: plain .items() works on Python 3.)
        for _file, value in keep_alive.get('PathState', {}).items():
            if value is True and os.path.exists(_file):
                return True
            elif value is False and not os.path.exists(_file):
                return True

    # Only an explicit boolean True counts as "always running".
    return keep_alive is True
def total_area_per_neurite(neurites, neurite_type=NeuriteType.all):
    '''Surface area in a collection of neurites.

    The area is defined as the sum of the area of the sections.
    '''
    selected = iter_neurites(neurites, filt=is_type(neurite_type))
    return [neurite.area for neurite in selected]
def dragDrop(self, target, target2=None, modifiers=""):
    """Perform a drag-drop operation.

    Holds the mouse button down on the source, moves to the destination,
    and releases. With a single argument, drags from the last match to
    ``target``; with two, drags from ``target`` to ``target2``.

    ``modifiers`` may be a typeKeys() compatible string; those keys are
    held for the duration of the operation.
    """
    if target2 is None:
        source, destination = self._lastMatch, target
    else:
        source, destination = target, target2

    if modifiers != "":
        keyboard.keyDown(modifiers)
    self.drag(source)
    # Give the UI a moment to register the grab before moving.
    time.sleep(Settings.DelayBeforeDrag)
    self.dropAt(destination)
    if modifiers != "":
        keyboard.keyUp(modifiers)
def parse_json(raw_data):
    ''' this version for module return data only '''
    # Keep the original input for error reporting; the working copy may have
    # leading non-JSON noise (e.g. MOTD lines) stripped from it.
    orig_data = raw_data
    data = filter_leading_non_json_lines(raw_data)

    try:
        return json.loads(data)
    except ValueError:
        # Not valid JSON -- fall back to parsing "key=value" pairs.
        pass

    results = {}
    try:
        tokens = shlex.split(data)
    except ValueError:
        print("failed to parse json: " + data)
        raise

    for t in tokens:
        if t.find("=") == -1:
            raise errors.AnsibleError("failed to parse: %s" % orig_data)
        (key, value) = t.split("=", 1)
        # BUGFIX: the original tested `key == 'changed' or 'failed'`, which is
        # always truthy, so *every* key's value was coerced to a boolean when
        # it looked like one. Only 'changed'/'failed' should be coerced.
        if key in ('changed', 'failed'):
            if value.lower() in ('true', '1'):
                value = True
            elif value.lower() in ('false', '0'):
                value = False
        if key == 'rc':
            value = int(value)
        results[key] = value

    if not results:
        return {"failed": True, "parsed": False, "msg": orig_data}
    return results
def taskotron_changed_outcome(config, message):
    """Taskotron task outcome changed.

    Limits messages to task results whose outcome differs from the
    previous run (e.g. FAILED -> PASSED), which is useful when an object
    gets retested and the result flips.
    """
    if not taskotron_result_new(config, message):
        return False
    result = message['msg']['result']
    previous = result.get('prev_outcome')
    if previous is None:
        # First result for this item: nothing to compare against.
        return False
    return result.get('outcome') != previous
def is_not_blocked(self, item: str) -> bool:
    """Check whether an item is _not_ on the blacklist.

    :param str item: The item to check
    :return: True when the item is _not_ blacklisted
    :rtype: bool
    """
    assert item is not None
    encoded = self._encode_item(item)
    connection = self.__get_connection()
    key = self.__redis_conf['blacklist_template'].format(encoded)
    stored = connection.get(key)
    if stored is None:
        BlackRed.__release_connection(connection)
        return True
    if self.__redis_conf['blacklist_refresh_ttl']:
        # Sliding expiry: every hit pushes the block window forward.
        connection.expire(key, self.__redis_conf['blacklist_ttl'])
    BlackRed.__release_connection(connection)
    return False
def _finished_callback(self, batch_fut, todo):
self._running.remove(batch_fut)
err = batch_fut.get_exception()
if err is not None:
tb = batch_fut.get_traceback()
for (fut, _) in todo:
if not fut.done():
fut.set_exception(err, tb) | Passes exception along.
Args:
batch_fut: the batch future returned by running todo_tasklet.
todo: (fut, option) pair. fut is the future return by each add() call.
If the batch fut was successful, it has already called fut.set_result()
on other individual futs. This method only handles when the batch fut
encountered an exception. |
def load_config_vars(target_config, source_config):
    """Copy every public, non-None attribute from one config to another.

    @type target_config: TestRunConfigManager
    @param target_config: Config to dump variables into
    @type source_config: TestRunConfigManager
    @param source_config: The other config
    """
    public_attrs = (a for a in dir(source_config) if not a.startswith('_'))
    for attr in public_attrs:
        value = getattr(source_config, attr)
        # None means "unset" and must not clobber the target's value.
        if value is not None:
            setattr(target_config, attr, value)
def list_policies(self):
    """List all configured policies.

    Supported methods:
        GET: /sys/policy. Produces: 200 application/json

    :return: The JSON response of the request.
    :rtype: dict
    """
    return self._adapter.get(url='/v1/sys/policy').json()
def remove_note(self, note, octave=-1):
    """Remove a note from the container.

    The note can be a Note object or a string naming the note. When no
    specific octave is given, the note is removed in every octave.
    """
    kept = []
    for candidate in self.notes:
        if type(note) == str:
            if candidate.name != note:
                kept.append(candidate)
            elif octave != -1 and candidate.octave != octave:
                # Name matches but a different octave was requested: keep it.
                kept.append(candidate)
        elif candidate != note:
            kept.append(candidate)
    self.notes = kept
    return kept
async def get_googlecast_settings(self) -> List[Setting]:
    """Get Googlecast settings."""
    raw = await self.services["system"]["getWuTangInfo"]({})
    return [Setting.make(**entry) for entry in raw]
def handle_command(editor, input_string):
    """Handle commands entered on the Vi command line."""
    match = COMMAND_GRAMMAR.match(input_string)
    if match is None:
        return

    variables = match.variables()
    command = variables.get('command')
    go_to_line = variables.get('go_to_line')
    shell_command = variables.get('shell_command')

    if go_to_line is not None:
        _go_to_line(editor, go_to_line)
    elif shell_command is not None:
        editor.application.run_system_command(shell_command)
    elif has_command_handler(command):
        call_command_handler(command, editor, variables)
    else:
        # Unknown command: report it and skip the prompt-toolkit sync below.
        editor.show_message('Not an editor command: %s' % input_string)
        return

    editor.sync_with_prompt_toolkit()
def data_directory():
    """Return the absolute path to the directory containing the package data."""
    return os.path.join(os.path.dirname(os.path.abspath(__file__)), "data")
def offer_pdf(self, offer_id):
    """Open a pdf of an offer.

    :param offer_id: the offer id
    :return: dict
    """
    return self._create_get_request(
        resource=OFFERS,
        billomat_id=offer_id,
        command=PDF,
    )
def get_site_amplification(self, C, sites):
    """Return the linear site amplification term.

    Uses the "observed" coefficients where Vs30 was measured and the
    "inferred" coefficients elsewhere.
    """
    measured = sites.vs30measured
    inferred = np.logical_not(measured)
    log_vs30 = np.log(sites.vs30)
    ampl = np.zeros(sites.vs30.shape)
    ampl[measured] = C["d0_obs"] + C["d1_obs"] * log_vs30[measured]
    ampl[inferred] = C["d0_inf"] + C["d1_inf"] * log_vs30[inferred]
    return ampl
def handle_overrides(graph, overrides):
    """Handle any overrides for this model configuration.

    Parameters
    ----------
    graph : dict or object
        A dictionary (or an ObjectProxy) containing the object graph
        loaded from a YAML file.
    overrides : dict
        Overrides to apply; each key is a dot-delimited path to the
        parameter, e.g. "model.corruptor.corruption_level".

    Raises
    ------
    KeyError
        If any component of a dotted path does not exist in the graph.
    """
    for key, new_value in overrides.items():
        *parents, leaf = key.split('.')
        node = graph
        for lvl in parents:
            try:
                node = node[lvl]
            except KeyError:
                # BUGFIX: the original passed the format args as a second
                # positional argument to KeyError instead of %-interpolating
                # them, producing an unformatted tuple message.
                raise KeyError("'%s' override failed at '%s'" % (key, lvl))
        try:
            node[leaf] = new_value
        except KeyError:
            raise KeyError("'%s' override failed at '%s'" % (key, leaf))
def summary_permutation(context_counts,
                        context_to_mut,
                        seq_context,
                        gene_seq,
                        score_dir,
                        num_permutations=10000,
                        min_frac=0.0,
                        min_recur=2,
                        drop_silent=False):
    """Performs null-permutations and summarizes the results as features over
    the gene.

    Parameters
    ----------
    context_counts : pd.Series
        number of mutations for each context
    context_to_mut : dict
        mapping of nucleotide context to a list of observed somatic
        base changes
    seq_context : SequenceContext
        sequence context for the entire gene sequence
    gene_seq : GeneSequence
        sequence of the gene of interest
    score_dir : str
        directory of score files passed to calc_summary_info
    num_permutations : int, default: 10000
        number of permutations to create for the null
    min_frac : float, default: 0.0
        passed through to calc_summary_info
    min_recur : int, default: 2
        passed through to calc_summary_info
    drop_silent : bool, default: False
        whether to zero out silent mutations so simulations match data
        sources that do not report them

    Returns
    -------
    summary_info_list : list of lists
        per-permutation rows of [gene_name, permutation_number, gene_len]
        plus the values from calc_summary_info
    """
    mycontexts = context_counts.index.tolist()
    # Expand each context into its observed somatic base changes, keeping
    # order aligned with the positions sampled below.
    somatic_base = [base
                    for one_context in mycontexts
                    for base in context_to_mut[one_context]]

    # Sample random positions matching each context's mutation count.
    # NOTE(review): Series.iteritems() was removed in pandas 2.0 -- confirm
    # the pinned pandas version, otherwise this needs .items().
    tmp_contxt_pos = seq_context.random_pos(context_counts.iteritems(),
                                            num_permutations)
    # NOTE(review): passing a generator to np.hstack is deprecated/removed in
    # newer numpy -- may need a list comprehension here.
    tmp_mut_pos = np.hstack(pos_array for base, pos_array in tmp_contxt_pos)

    gene_name = gene_seq.bed.gene_name
    gene_len = gene_seq.bed.cds_len
    summary_info_list = []
    for i, row in enumerate(tmp_mut_pos):
        # Translate this permutation's sampled positions into AA changes.
        tmp_mut_info = mc.get_aa_mut_info(row,
                                          somatic_base,
                                          gene_seq)
        tmp_summary = cutils.calc_summary_info(tmp_mut_info['Reference AA'],
                                               tmp_mut_info['Somatic AA'],
                                               tmp_mut_info['Codon Pos'],
                                               gene_name,
                                               score_dir,
                                               min_frac=min_frac,
                                               min_recur=min_recur)
        # Index 1 is presumably the silent-mutation count -- TODO confirm
        # against calc_summary_info's return layout.
        if drop_silent:
            tmp_summary[1] = 0
        summary_info_list.append([gene_name, i+1, gene_len]+tmp_summary)
    return summary_info_list
def add_role(ctx, role):
    """Grant a role to an existing user."""
    if role is None:
        log('Specify the role with --role')
        return
    if ctx.obj['username'] is None:
        log('Specify the username with --username')
        return

    change_user = ctx.obj['db'].objectmodels['user'].find_one({
        'name': ctx.obj['username']
    })
    if role in change_user.roles:
        log('User already has that role!', lvl=warn)
        return

    change_user.roles.append(role)
    change_user.save()
    log('Done')
def group(self, indent: int = DEFAULT_INDENT, add_line: bool = True) -> _TextGroup:
    """Return a context manager which adds an indentation before each line.

    :param indent: Number of spaces to print.
    :param add_line: If True, a new line will be printed after the group.
    :return: A TextGroup context manager.
    """
    return _TextGroup(self, indent, add_line)
def estimateAbsoluteMagnitude(spectralType):
    """Uses the spectral type to lookup an approximate absolute magnitude for
    the star.

    Returns np.nan when the spectral type cannot be parsed or no table
    entry (or interpolation) is available.
    """
    # Local import -- presumably to avoid a circular dependency; confirm.
    from .astroclasses import SpectralType

    specType = SpectralType(spectralType)

    if specType.classLetter == '':
        # The spectral type could not be parsed at all.
        return np.nan
    elif specType.classNumber == '':
        # Default to a mid-class (5) star when the subclass digit is missing.
        specType.classNumber = 5

    if specType.lumType == '':
        # Assume main sequence (V) when no luminosity class is given.
        specType.lumType = 'V'

    LNum = LClassRef[specType.lumType]
    classNum = specType.classNumber
    classLet = specType.classLetter

    try:
        return absMagDict[classLet][classNum][LNum]
    except (KeyError, IndexError):
        try:
            # No exact table entry: interpolate across the tabulated
            # subclasses of this spectral letter at this luminosity class.
            classLookup = absMagDict[classLet]
            values = np.array(list(classLookup.values()))[
                :, LNum]
            return np.interp(classNum, list(classLookup.keys()), values)
        except (KeyError, ValueError):
            return np.nan
def revoke(self, target, **prefs):
    """Revoke a key, a subkey, or all current certification signatures of a
    User ID that were generated by this key so far.

    :param target: The key to revoke
    :type target: :py:obj:`PGPKey`, :py:obj:`PGPUID`
    :raises: :py:exc:`~pgpy.errors.PGPError` if the key is passphrase-protected and has not been unlocked
    :raises: :py:exc:`~pgpy.errors.PGPError` if the key is public
    :returns: :py:obj:`PGPSignature`

    In addition to the optional keyword arguments accepted by
    :py:meth:`PGPKey.sign`, the following can be used here:

    :keyword reason: Defaults to :py:obj:`constants.RevocationReason.NotSpecified`
    :type reason: One of :py:obj:`constants.RevocationReason`.
    :keyword comment: Defaults to an empty string.
    :type comment: ``str``
    """
    hash_algo = prefs.pop('hash', None)
    # Pick the signature type based on what is being revoked.
    if isinstance(target, PGPUID):
        sig_type = SignatureType.CertRevocation

    elif isinstance(target, PGPKey):
        if target.is_primary:
            sig_type = SignatureType.KeyRevocation
        else:
            sig_type = SignatureType.SubkeyRevocation

    else:
        raise TypeError

    sig = PGPSignature.new(sig_type, self.key_algorithm, hash_algo, self.fingerprint.keyid)

    # Record the reason/comment for the revocation in a hashed subpacket.
    reason = prefs.pop('reason', RevocationReason.NotSpecified)
    comment = prefs.pop('comment', "")
    sig._signature.subpackets.addnew('ReasonForRevocation', hashed=True, code=reason, string=comment)

    return self._sign(target, sig, **prefs)
def execute_and_commit(*args, **kwargs):
    """Execute a SQL statement and commit the transaction.

    @return: the cursor from the executed statement
    """
    connection, cursor = CoyoteDb.execute(*args, **kwargs)
    connection.commit()
    return cursor
def get_archiver(self, kind):
    """Return an instance of the archiver class specific to the given kind.

    :param kind: archive kind ('tar', 'tbz2', 'tgz' or 'zip')
    """
    archiver_cls = {
        'tar': TarArchiver,
        'tbz2': Tbz2Archiver,
        'tgz': TgzArchiver,
        'zip': ZipArchiver,
    }[kind]
    return archiver_cls()
def checkscript(self, content):
    """Check whether a script is valid.

    See MANAGESIEVE specifications, section 2.12.

    :param content: the script's content
    :rtype: boolean
    """
    if "VERSION" not in self.__capabilities:
        raise NotImplementedError(
            "server does not support CHECKSCRIPT command")
    # Send the script as a non-synchronizing literal: "{<len>+}" CRLF body.
    payload = tools.to_bytes(content)
    literal = tools.to_bytes("{%d+}" % len(payload)) + CRLF + payload
    code, data = self.__send_command("CHECKSCRIPT", [literal])
    return code == "OK"
def decompose(miz_file: Path, output_folder: Path):
    """Decompose this Miz into json

    Args:
        output_folder: folder to output the json structure as a Path
        miz_file: MIZ file path as a Path
    """
    mission_folder, assets_folder = NewMiz._get_subfolders(output_folder)
    # Start from a clean slate so stale files never linger between runs.
    NewMiz._wipe_folders(mission_folder, assets_folder)
    LOGGER.info('unzipping mission file')
    with Miz(miz_file) as miz:
        version = miz.mission.d['version']
        # NOTE(review): the f-prefix is redundant here (no braces); the %s is
        # filled in lazily by the logging call.
        LOGGER.debug(f'mission version: "%s"', version)
        LOGGER.info('copying assets to: "%s"', assets_folder)
        # Everything except the "mission" table itself counts as an asset.
        ignore = shutil.ignore_patterns('mission')
        shutil.copytree(str(miz.temp_dir), str(assets_folder), ignore=ignore)
        NewMiz._reorder_warehouses(assets_folder)
        LOGGER.info('decomposing mission table into: "%s" (this will take a while)', mission_folder)
        NewMiz._decompose_dict(miz.mission.d, 'base_info', mission_folder, version, miz)
def from_val(val_schema):
    """Serialize a val schema to teleport."""
    if isinstance(val_schema, BaseSchema):
        definition = getattr(val_schema, "definition", val_schema)
    else:
        definition = val_schema

    if isinstance(definition, dict):
        return _dict_to_teleport(definition)
    if isinstance(definition, list) and len(definition) == 1:
        # Single-element list means "array of that schema".
        return {"Array": from_val(definition[0])}
    if definition in VAL_PRIMITIVES:
        return VAL_PRIMITIVES[definition]
    raise SerializationError(
        "Serializing %r not (yet) supported." % definition)
def deserialize_encryption_context(serialized_encryption_context):
    """Deserialize the contents of a byte string into a dictionary.

    :param bytes serialized_encryption_context: Source byte string containing serialized dictionary
    :returns: Deserialized encryption context
    :rtype: dict
    :raises SerializationError: if serialized encryption context is too large
    :raises SerializationError: if duplicate key found in serialized encryption context
    :raises SerializationError: if malformed data found in serialized encryption context
    """
    if len(serialized_encryption_context) > aws_encryption_sdk.internal.defaults.MAX_BYTE_ARRAY_SIZE:
        raise SerializationError("Serialized context is too long.")

    if serialized_encryption_context == b"":
        _LOGGER.debug("No encryption context data found")
        return {}

    offset = 0
    encryption_context = {}

    # Format: a short pair count, then for each pair a length-prefixed key
    # followed by a length-prefixed value.
    pair_count, offset = read_short(source=serialized_encryption_context, offset=offset)
    _LOGGER.debug("Found %d keys", pair_count)
    for _ in range(pair_count):
        key_size, offset = read_short(source=serialized_encryption_context, offset=offset)
        key, offset = read_string(
            source=serialized_encryption_context, offset=offset, length=key_size
        )
        value_size, offset = read_short(source=serialized_encryption_context, offset=offset)
        value, offset = read_string(
            source=serialized_encryption_context, offset=offset, length=value_size
        )
        if key in encryption_context:
            raise SerializationError("Duplicate key in serialized context.")
        encryption_context[key] = value

    # Every byte must be accounted for; trailing data is a format error.
    if offset != len(serialized_encryption_context):
        raise SerializationError("Formatting error: Extra data in serialized context.")
    return encryption_context
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.