code stringlengths 51 2.38k | docstring stringlengths 4 15.2k |
|---|---|
def build_key_from_values(self, schema, hash_key, range_key=None):
    """Build a Key structure to be used for accessing items in Amazon DynamoDB.

    The supplied hash_key and optional range_key are converted with
    ``dynamize_value`` and validated against the schema. If there is a
    mismatch, a TypeError is raised. Otherwise, a Python dict version of
    an Amazon DynamoDB Key data structure is returned.

    :type hash_key: int, float, str, or unicode
    :param hash_key: The hash key of the item you are looking for. The
        type of the hash key should match the type defined in the schema.

    :type range_key: int, float, str or unicode
    :param range_key: The range key of the item you are looking for.
        Supply this only if the schema requires a range key; its type
        should match the type defined in the schema.
    """
    dynamodb_key = {}
    dynamodb_value = self.dynamize_value(hash_key)
    # dict.keys() is not subscriptable on Python 3, so the original
    # ``keys()[0]`` raised TypeError there; next(iter(...)) reads the
    # single DynamoDB type tag on both Python 2 and 3.
    if next(iter(dynamodb_value)) != schema.hash_key_type:
        msg = 'Hashkey must be of type: %s' % schema.hash_key_type
        raise TypeError(msg)
    dynamodb_key['HashKeyElement'] = dynamodb_value
    if range_key is not None:
        dynamodb_value = self.dynamize_value(range_key)
        if next(iter(dynamodb_value)) != schema.range_key_type:
            msg = 'RangeKey must be of type: %s' % schema.range_key_type
            raise TypeError(msg)
        dynamodb_key['RangeKeyElement'] = dynamodb_value
    return dynamodb_key
def copy(self):
    """Return a deep copy of the current scene.

    Returns
    ----------
    copied : trimesh.Scene
      Copy of the current scene
    """
    # duplicate every geometry individually, then rebuild the scene
    # around a copied transform graph
    duplicated_geometry = {name: geom.copy()
                           for name, geom in self.geometry.items()}
    return Scene(geometry=duplicated_geometry,
                 graph=self.graph.copy())
def concept(self, cid, **kwargs):
    """Get concept by concept ID.

    Raises KeyError for an unknown ``cid`` unless a ``default`` keyword
    argument was supplied, in which case that default is returned.
    """
    mapping = self.__concept_map
    if cid in mapping:
        return mapping[cid]
    if 'default' in kwargs:
        return kwargs['default']
    raise KeyError("Invalid cid")
def create_user(username, key, session):
    """Create a User and UserKey record in the session provided.

    Will roll back both records if any issue is encountered (removing
    the already-committed User when the UserKey fails), then re-raise
    the exception.

    :param username: The username for the User
    :param key: The public key to associate with this User
    :param session: The sqlalchemy session to use
    :rtype: User
    :return: the new User record
    """
    try:
        user = um.User(username=username)
        session.add(user)
        session.commit()
    except Exception:
        session.rollback()
        session.flush()
        raise
    try:
        user_key = um.UserKey(key=key, keytype='public', user_id=user.id)
        session.add(user_key)
        session.commit()
    except Exception:
        session.rollback()
        session.flush()
        # undo the User record committed above
        session.delete(user)
        session.commit()
        raise
    return user
def total_cycles(self) -> int:
    """The total number of cycles in the structure.

    Sums the integer portion (all non-digit characters removed) of
    every token in ``self.tokens``.
    """
    return sum(int(re.sub(r'\D', '', op)) for op in self.tokens)
def from_neighbor_pores(target, pore_prop='pore.seed', mode='min'):
    r"""Adopt a throat value based on the values in its neighboring pores.

    Parameters
    ----------
    target : OpenPNM Object
        The object which this model is associated with. This controls the
        length of the calculated array, and also provides access to other
        necessary properties.
    pore_prop : string
        The dictionary key to the array containing the pore property to be
        used in the calculation. Default is 'pore.seed'.
    mode : string
        Controls how the throat property is calculated. Options are 'min',
        'max' and 'mean'.

    Raises
    ------
    ValueError
        If ``mode`` is not one of the supported options (the original
        code fell through and raised NameError on an unbound local).
    """
    prj = target.project
    network = prj.network
    throats = network.map_throats(target.throats(), target)
    P12 = network.find_connected_pores(throats)
    lookup = prj.find_full_domain(target)
    pvalues = lookup[pore_prop][P12]
    if mode == 'min':
        value = np.amin(pvalues, axis=1)
    elif mode == 'max':
        value = np.amax(pvalues, axis=1)
    elif mode == 'mean':
        value = np.mean(pvalues, axis=1)
    else:
        raise ValueError("mode must be one of 'min', 'max' or 'mean'")
    return value
def pack(self, remaining_size):
    """Pack data of part into binary format.

    Builds the payload via ``pack_data``, pads it to an 8-byte
    boundary, and returns header bytes concatenated with the payload.
    """
    arguments_count, payload = self.pack_data(remaining_size - self.header_size)
    payload_length = len(payload)
    # pad payload with NUL bytes up to the next 8-byte boundary
    if payload_length % 8 != 0:
        payload += b"\x00" * (8 - payload_length % 8)
    # NOTE(review): the header records the UNPADDED payload length --
    # presumably required by the wire format; confirm against protocol docs.
    self.header = PartHeader(self.kind, self.attribute, arguments_count, self.bigargumentcount,
                             payload_length, remaining_size)
    hdr = self.header_struct.pack(*self.header)
    # optional tracing keeps truncated hex dumps for debugging
    if pyhdb.tracing:
        self.trace_header = humanhexlify(hdr, 30)
        self.trace_payload = humanhexlify(payload, 30)
    return hdr + payload | Pack data of part into binary format |
def _get_user_hash(self):
    """Calculate a digest based on the request's User-Agent and IP address.

    Returns None when no request context is active.
    """
    if not request:
        return None
    fingerprint = '{ip}-{ua}'.format(ip=request.remote_addr,
                                     ua=self._get_user_agent())
    digest = hashlib.md5()
    digest.update(fingerprint.encode('utf8'))
    return digest.hexdigest()
def add_exec_permission_to(target_file):
    """Add owner-execute permission to the file.

    :param target_file: the target file whose permission is to be changed
    """
    current_mode = os.stat(target_file).st_mode
    os.chmod(target_file, current_mode | stat.S_IXUSR)
def tupleize(element, ignore_types=(str, bytes)):
    """Cast a single element to a tuple.

    Non-string iterables are returned unchanged; everything else is
    wrapped in a one-element tuple.
    """
    iterable = hasattr(element, '__iter__')
    if iterable and not isinstance(element, ignore_types):
        return element
    return (element,)
def get_cell_boundary_variables(ds):
    """Return names of variables that represent cell boundaries.

    A variable qualifies when another variable's ``bounds`` attribute
    names it and it actually exists in the dataset.

    :param netCDF4.Dataset ds: netCDF dataset
    """
    candidates = ds.get_variables_by_attributes(bounds=lambda x: x is not None)
    return [var.bounds for var in candidates if var.bounds in ds.variables]
def upgrade_available(name, **kwargs):
    """Check whether or not an upgrade is available for a given package.

    Args:
        name (str): The name of a single package

    Kwargs:
        refresh (bool): Refresh package metadata. Default ``True``
        saltenv (str): The salt environment. Default ``base``

    Returns:
        bool: True if new version available, otherwise False

    CLI Example:

    .. code-block:: bash

        salt '*' pkg.upgrade_available <package name>
    """
    env = kwargs.get('saltenv', 'base')
    do_refresh = salt.utils.data.is_true(kwargs.get('refresh', True))
    # an empty string from latest_version means no newer version exists
    return latest_version(name, saltenv=env, refresh=do_refresh) != ''
def _parse(self):
    """Parse content of DCH (Debian changelog) file.

    Populates ``self.versions``, ``self.entries`` (version -> list of
    change strings) and ``self.latest_version``.
    """
    cur_ver = None
    cur_line = None  # NOTE(review): assigned but never read
    for line in self.content:
        # header line: "package (1.2.3-4) dist; urgency=..."
        m = re.match('[^ ]+ \(([0-9]+\.[0-9]+\.[0-9]+)-[0-9]+\) [^ ]+; urgency=[^ ]+', line)
        if m:
            cur_ver = m.group(1)
            self.versions.append(cur_ver)
            self.entries[cur_ver] = []
            cur_entry = self.entries[cur_ver]
            # track the highest version seen so far
            if self.latest_version is None or StrictVersion(cur_ver) > StrictVersion(self.latest_version):
                self.latest_version = m.group(1)
        elif cur_ver:
            # bullet line: " * some change"
            m = re.match(' \* (.*)', line)
            if m:
                cur_entry.append(m.group(1).strip())
            elif not re.match('$', line) and re.match(' *[^$]+', line):
                # non-empty continuation line extends the previous bullet
                cur_entry[-1] += " " + line.strip() | Parse content of DCH file |
def create_archaius(self):
    """Create S3 bucket for Archaius."""
    # banner is informational console output only
    utils.banner("Creating S3")
    s3.init_properties(env=self.env, app=self.app) | Create S3 bucket for Archaius. |
def cleanup(self):
    """Stop any running entities in the prefix and uninitialize it.

    Usually called right before removing the prefix.
    """
    with LogTask('Stop prefix'):
        self.stop()
    with LogTask("Tag prefix as uninitialized"):
        # removing the lago file marks the prefix as uninitialized
        os.unlink(self.paths.prefix_lagofile()) | Stops any running entities in the prefix and uninitializes it, usually
you want to do this if you are going to remove the prefix afterwards
Returns:
None |
def send_file_to_host(src_filename, dst_file, filesize):
    """Run on the pyboard: stream ``src_filename`` to the host.

    Matches up with recv_file_from_remote on the host side. Data goes
    over stdout (raw when HAS_BUFFER, hex-encoded otherwise); returns
    True on success, False on any error.
    """
    import sys
    import ubinascii
    try:
        with open(src_filename, 'rb') as src_file:
            bytes_remaining = filesize
            # hex encoding doubles the byte count, so halve the chunk size
            if HAS_BUFFER:
                buf_size = BUFFER_SIZE
            else:
                buf_size = BUFFER_SIZE // 2
            while bytes_remaining > 0:
                read_size = min(bytes_remaining, buf_size)
                buf = src_file.read(read_size)
                if HAS_BUFFER:
                    sys.stdout.buffer.write(buf)
                else:
                    sys.stdout.write(ubinascii.hexlify(buf))
                bytes_remaining -= read_size
            # wait for the host's ACK byte (0x06), echoing other input back
            while True:
                char = sys.stdin.read(1)
                if char:
                    if char == '\x06':
                        break
                    sys.stdout.write(char)
        return True
    except:
        # bare except is deliberate: on-board failures simply report False
        return False | Function which runs on the pyboard. Matches up with recv_file_from_remote. |
async def connect(self):
    """Connect to KLF 200 and authenticate.

    Raises PyVLXException when the login is rejected.
    """
    PYVLXLOG.warning("Connecting to KLF 200.")
    await self.connection.connect()
    login = Login(pyvlx=self, password=self.config.password)
    await login.do_api_call()
    if not login.success:
        raise PyVLXException("Login to KLF 200 failed, check credentials") | Connect to KLF 200. |
def start(self, fork=True):
    """Starts the registry aggregator.

    :param fork: whether to fork a process; if ``False``, blocks and stays in the existing process
    """
    if fork:
        distributed_logger.info('Starting metrics aggregator, forking')
        proc = Process(target=_registry_aggregator, args=(self.reporter, self.socket_addr, ))
        proc.start()
        distributed_logger.info('Started metrics aggregator as PID %s', proc.pid)
        self.process = proc
    else:
        distributed_logger.info('Starting metrics aggregator, not forking')
        _registry_aggregator(self.reporter, self.socket_addr)
def sdiffstore(self, dest, keys, *args):
    """Emulate sdiffstore: store the set difference under ``dest``.

    Returns the cardinality of the stored set.
    """
    diff = self.sdiff(keys, *args)
    self.redis[self._encode(dest)] = diff
    return len(diff)
def one_batch(self, ds_type:DatasetType=DatasetType.Train, detach:bool=True, denorm:bool=True, cpu:bool=True)->Collection[Tensor]:
    "Get one batch from the data loader of `ds_type`. Optionally `detach` and `denorm`."
    dl = self.dl(ds_type)
    # temporarily disable workers so a single next(iter(dl)) is cheap
    w = self.num_workers
    self.num_workers = 0
    try:     x,y = next(iter(dl))
    finally: self.num_workers = w
    if detach: x,y = to_detach(x,cpu=cpu),to_detach(y,cpu=cpu)
    # norm is a partial holding normalization stats when the data is normalized
    norm = getattr(self,'norm',False)
    if denorm and norm:
        x = self.denorm(x)
        # NOTE(review): y is denormed with do_x=True -- presumably so the
        # target is denormalized like the input; confirm against fastai.
        if norm.keywords.get('do_y',False): y = self.denorm(y, do_x=True)
    return x,y | Get one batch from the data loader of `ds_type`. Optionally `detach` and `denorm`. |
def strip_ansi_escape_codes(self, string_buffer):
    """Remove NUL characters, then delegate to the base ANSI stripper."""
    output = re.sub(r"\x00", "", string_buffer)
    return super(DellIsilonSSH, self).strip_ansi_escape_codes(output) | Remove Null code |
def _enum_generator(descriptor):
    """Helper to create protobuf enums."""
    enum_numbers = descriptor.enum_type.values_by_number.keys()
    return gen.IterValueGenerator(descriptor.name, enum_numbers)
def run_async(
        stream_spec, cmd='ffmpeg', pipe_stdin=False, pipe_stdout=False, pipe_stderr=False,
        quiet=False, overwrite_output=False):
    """Asynchronously invoke ffmpeg for the supplied node graph.

    Args:
        pipe_stdin: if True, connect pipe to subprocess stdin (to be
            used with ``pipe:`` ffmpeg inputs).
        pipe_stdout: if True, connect pipe to subprocess stdout (to be
            used with ``pipe:`` ffmpeg outputs).
        pipe_stderr: if True, connect pipe to subprocess stderr.
        quiet: shorthand for setting ``capture_stdout`` and
            ``capture_stderr``.
        **kwargs: keyword-arguments passed to ``get_args()`` (e.g.
            ``overwrite_output=True``).

    Returns:
        A `subprocess Popen`_ object representing the child process.

    Examples:
        Run and stream input::

            process = (
                ffmpeg
                .input('pipe:', format='rawvideo', pix_fmt='rgb24', s='{}x{}'.format(width, height))
                .output(out_filename, pix_fmt='yuv420p')
                .overwrite_output()
                .run_async(pipe_stdin=True)
            )
            process.communicate(input=input_data)

        Run and capture output::

            process = (
                ffmpeg
                .input(in_filename)
                .output('pipe:', format='rawvideo', pix_fmt='rgb24')
                .run_async(pipe_stdout=True, pipe_stderr=True)
            )
            out, err = process.communicate()

        Process video frame-by-frame using numpy::

            process1 = (
                ffmpeg
                .input(in_filename)
                .output('pipe:', format='rawvideo', pix_fmt='rgb24')
                .run_async(pipe_stdout=True)
            )
            process2 = (
                ffmpeg
                .input('pipe:', format='rawvideo', pix_fmt='rgb24', s='{}x{}'.format(width, height))
                .output(out_filename, pix_fmt='yuv420p')
                .overwrite_output()
                .run_async(pipe_stdin=True)
            )
            while True:
                in_bytes = process1.stdout.read(width * height * 3)
                if not in_bytes:
                    break
                in_frame = (
                    np
                    .frombuffer(in_bytes, np.uint8)
                    .reshape([height, width, 3])
                )
                out_frame = in_frame * 0.3
                process2.stdin.write(
                    out_frame
                    .astype(np.uint8)
                    .tobytes()
                )
            process2.stdin.close()
            process1.wait()
            process2.wait()

    .. _subprocess Popen: https://docs.python.org/3/library/subprocess.html#popen-objects
    """
    args = compile(stream_spec, cmd, overwrite_output=overwrite_output)
    stdin_stream = subprocess.PIPE if pipe_stdin else None
    stdout_stream = subprocess.PIPE if pipe_stdout or quiet else None
    stderr_stream = subprocess.PIPE if pipe_stderr or quiet else None
    return subprocess.Popen(
        args, stdin=stdin_stream, stdout=stdout_stream, stderr=stderr_stream)
def load_config(self):
    """Load configuration for the service from ``self.config_file``.

    Returns the parsed JSON configuration; raises EventifyConfigError
    when the configuration file does not exist.
    """
    logger.debug('loading config file: %s', self.config_file)
    # guard clause: a missing config file is a hard error
    if not os.path.exists(self.config_file):
        logger.error('configuration file is required for eventify')
        logger.error('unable to load configuration for service')
        raise EventifyConfigError(
            'Configuration is required! Missing: %s' % self.config_file
        )
    with open(self.config_file) as file_handle:
        return json.load(file_handle)
def run(self):
    """Delete user modules to force Python to deeply reload them.

    Modules already present before tracking started, system modules and
    C modules are left alone (see ``is_module_reloadable``).
    """
    self.modnames_to_reload = []
    for modname, module in list(sys.modules.items()):
        if modname in self.previous_modules:
            continue
        if self.is_module_reloadable(module, modname):
            self.modnames_to_reload.append(modname)
            del sys.modules[modname]
    if self.verbose and self.modnames_to_reload:
        reloaded = self.modnames_to_reload
        _print("\x1b[4;33m%s\x1b[24m%s\x1b[0m"
               % ("Reloaded modules", ": "+", ".join(reloaded)))
def iter_lines(self, warn_only=False):
    """Yield stdout text, line by line.

    A trailing partial line is buffered between chunks and yielded once
    complete (or at end of stream). Afterwards the state is set to
    FINISHED and, unless ``warn_only``, any recorded error is raised.
    """
    remain = ""
    for data in self.iter_content(LINE_CHUNK_SIZE, warn_only=True):
        # On Python 3, indexing bytes yields an int, so the original
        # ``data[-1] in (b"\n", b"\r")`` was always False; endswith with
        # a tuple works on both Python 2 and 3.
        line_break_found = data.endswith((b"\n", b"\r"))
        lines = data.decode(self.codec).splitlines()
        lines[0] = remain + lines[0]
        if line_break_found:
            # remain was consumed into lines[0]; clear it so a stale
            # fragment is not re-prepended or yielded at end of stream
            remain = ""
        else:
            remain = lines.pop()
        for line in lines:
            yield line
    if remain:
        yield remain
    self._state = FINISHED
    if not warn_only:
        self.raise_for_error()
def render(file):
    """Pretty print the XML file for rendering."""
    with file.open() as fp:
        encoding = detect_encoding(fp, default='utf-8')
        decoded = fp.read().decode(encoding)
        document = xml.dom.minidom.parseString(decoded)
        return document.toprettyxml(indent=' ', newl='')
def get_readable_tasks(self, course):
    """Returns the list of all available tasks in a course."""
    course_fs = self._filesystem.from_subfolder(course.get_id())
    entries = course_fs.list(folders=True, files=False, recursive=False)
    # entries appear to carry a trailing separator which is stripped
    # from the returned names -- confirm against the filesystem API
    return [entry[:-1] for entry in entries
            if self._task_file_exists(course_fs.from_subfolder(entry))]
def ClaimNotificationsForCollection(cls,
                                    token=None,
                                    start_time=None,
                                    lease_time=200,
                                    collection=None):
    """Claim unclaimed hunt result notifications for one collection.

    When ``collection`` is None, the first record seen determines the
    collection and records for other collections are skipped. Returns a
    pair (collection_urn, claimed_records).
    """
    class CollectionFilter(object):
        """Latches onto the first collection seen and filters the rest."""
        def __init__(self, collection):
            self.collection = collection
        def FilterRecord(self, notification):
            if self.collection is None:
                self.collection = notification.result_collection_urn
            # returning True means "skip this record"
            return self.collection != notification.result_collection_urn
    f = CollectionFilter(collection)
    results = []
    # NOTE(review): the queue lock is leased for a fixed 300s while the
    # records themselves are claimed for ``lease_time`` -- confirm intended.
    with aff4.FACTORY.OpenWithLock(
        RESULT_NOTIFICATION_QUEUE,
        aff4_type=HuntResultQueue,
        lease_time=300,
        blocking=True,
        blocking_sleep_interval=15,
        blocking_lock_timeout=600,
        token=token) as queue:
        for record in queue.ClaimRecords(
            record_filter=f.FilterRecord,
            start_time=start_time,
            timeout=lease_time,
            limit=100000):
            results.append(record)
    return (f.collection, results) | Return unclaimed hunt result notifications for collection.
Args:
token: The security token to perform database operations with.
start_time: If set, an RDFDateTime indicating at what point to start
claiming notifications. Only notifications with a timestamp after this
point will be claimed.
lease_time: How long to claim the notifications for.
collection: The urn of the collection to find notifications for. If unset,
the earliest (unclaimed) notification will determine the collection.
Returns:
A pair (collection, results) where collection is the collection
that notifications were retrieved for and results is a list of
Record objects which identify GrrMessage within the result
collection. |
def create_issue(title, body, repo, token):
    """Create a Github issue, given a title, body, repo, and token.

    Parameters
    ==========
    title: the issue title
    body: the issue body
    repo: the full name of the repo
    token: the user's personal Github token
    """
    owner, name = repo.split('/')
    url = 'https://api.github.com/repos/%s/%s/issues' % (owner, name)
    payload = {'title': title, 'body': body }
    headers = { "Authorization": "token %s" % token,
                "Accept": "application/vnd.github.symmetra-preview+json" }
    response = requests.post(url, data=json.dumps(payload), headers=headers)
    status = response.status_code
    if status in [201, 202]:
        issue_url = response.json()['html_url']
        bot.info(issue_url)
        return issue_url
    if status == 404:
        bot.error('Cannot create issue. Does your token have scope repo?')
        sys.exit(1)
    bot.error('Cannot create issue %s' %title)
    bot.error(response.content)
    sys.exit(1)
def _ensureHtmlAttribute(self):
    """_ensureHtmlAttribute - INTERNAL METHOD.

    Keep the html "style" attribute in sync: present when this style
    has a value, absent when it does not. Requires special linkage.
    """
    tag = self.tag
    if tag:
        styleDict = self._styleDict
        tagAttributes = tag._attributes
        # only the special attributes dict supports the _direct_* backdoors
        if not issubclass(tagAttributes.__class__, SpecialAttributesDict):
            return
        if not styleDict:
            tagAttributes._direct_del('style')
        else:
            tagAttributes._direct_set('style', self) | _ensureHtmlAttribute - INTERNAL METHOD.
Ensure the "style" attribute is present in the html attributes when
is has a value, and absent when it does not.
This requires special linkage. |
def filter(self, run_counts, criteria):
    """Return run counts only for examples that are still correctly classified.

    ``criteria['correctness']`` must be a boolean array; it is used as a
    mask over each per-key count array.
    """
    correctness = criteria['correctness']
    # np.bool was removed in NumPy 1.24; np.bool_ is the boolean scalar type
    assert correctness.dtype == np.bool_
    filtered_counts = deep_copy(run_counts)
    for key in filtered_counts:
        filtered_counts[key] = filtered_counts[key][correctness]
    return filtered_counts
def build_src(ctx, dest=None):
    """Build a source archive (sdist), optionally into ``dest``.

    A relative ``dest`` is resolved against the caller's cwd before
    switching into PROJECT_DIR.
    """
    if dest and not dest.startswith('/'):
        dest = os.path.join(os.getcwd(), dest)
    os.chdir(PROJECT_DIR)
    if dest:
        ctx.run('python setup.py sdist --dist-dir {0}'.format(dest))
    else:
        ctx.run('python setup.py sdist')
def request_sensor_sampling_clear(self, req):
    """Set all sampling strategies for this client to none.

    Returns
    -------
    success : {'ok', 'fail'}
        Whether sending the list of devices succeeded.

    Examples
    --------
    ?sensor-sampling-clear
    !sensor-sampling-clear ok
    """
    f = Future()
    @gen.coroutine
    def _clear_strategies():
        self.clear_strategies(req.client_connection)
        raise gen.Return(('ok',))
    # run on the ioloop thread and chain the coroutine's result onto f
    self.ioloop.add_callback(lambda: chain_future(_clear_strategies(), f))
    return f | Set all sampling strategies for this client to none.
Returns
-------
success : {'ok', 'fail'}
Whether sending the list of devices succeeded.
Examples
--------
?sensor-sampling-clear
!sensor-sampling-clear ok |
def encrypt(self, password):
    """Encrypt the password with the configured crypter.

    Falsy passwords (normalized to ``b''``) and a missing crypter pass
    the value through unencrypted.
    """
    if password and self._crypter:
        return self._crypter.encrypt(password)
    return password or b''
def run(self, *coros: CoroWrapper):
    """Run awaitables/callables in order and return their results.

    A single result is returned directly; several are returned as a
    list. With no arguments, waits on ``self.runFut`` instead. Raises
    RuntimeError when the loop is not running.
    """
    if not self.running:
        raise RuntimeError("not running!")
    async def wrapper():
        results = []
        for coro in coros:
            try:
                if inspect.isawaitable(coro):
                    results.append(await coro)
                elif inspect.isfunction(coro):
                    # plain function: call it, await the result if awaitable
                    res = coro()
                    if inspect.isawaitable(res):
                        results.append(await res)
                    else:
                        results.append(res)
                else:
                    raise RuntimeError(
                        "don't know how to run {}".format(coro))
            except Exception as ex:
                logger.error("Error while running coroutine {}: {}".format(coro.__name__, ex.__repr__()))
                raise ex
        # unwrap a single result for caller convenience
        if len(results) == 1:
            return results[0]
        return results
    if coros:
        what = wrapper()
    else:
        what = self.runFut
    return self.loop.run_until_complete(what) | Runs an arbitrary list of coroutines in order and then quits the loop,
if not running as a context manager. |
def create_token(self, *, holder_name, card_number, credit_card_cvv, expiration_date, token_type='credit_card',
                 identity_document=None, billing_address=None, additional_details=None):
    """Create a card token.

    Remember to use the public-key header instead of the private-key
    header, and do not include the app-id header.

    Args:
        holder_name: Name of the credit card holder.
        card_number: Credit card number.
        credit_card_cvv: The CVV number on the card (3 or 4 digits) to be encrypted.
        expiration_date: Credit card expiration date. Possible formats: mm-yyyy, mm-yy,
            mm.yyyy, mm.yy, mm/yy, mm/yyyy, mm yyyy, or mm yy.
        token_type: The type of token
        billing_address: Address.
        identity_document: National identity document of the card holder.
        additional_details: Optional additional data stored with your token in key/value pairs.
    """
    payload = {
        "token_type": token_type,
        "credit_card_cvv": credit_card_cvv,
        "card_number": card_number,
        "expiration_date": expiration_date,
        "holder_name": holder_name,
        "identity_document": identity_document,
        "billing_address": billing_address,
        "additional_details": additional_details,
    }
    public_headers = self.client._get_public_headers()
    return self.client._post(self.client.URL_BASE + '/tokens',
                             json=payload, headers=public_headers)
def run():
    """Use preconfigured settings to optimize files."""
    Settings.pool = multiprocessing.Pool(Settings.jobs)
    record_dirs, bytes_in, bytes_out, nag_about_gifs, errors = _walk_all_files()
    Settings.pool.close()
    Settings.pool.join()
    # stamp each recorded directory so already-optimized files are skipped later
    for record_dir in record_dirs:
        timestamp.record_timestamp(record_dir)
    stats.report_totals(bytes_in, bytes_out, nag_about_gifs, errors)
def keys(self, pattern, *, encoding=_NOTSET):
    """Return all keys matching ``pattern`` (Redis KEYS command)."""
    return self.execute(b'KEYS', pattern, encoding=encoding) | Returns all keys matching pattern. |
def path(self, target, args, kw):
    """Build a URL path fragment for a resource or route.

    ``target`` may be a route name, an 'a:b' name chain into nested
    mappers, a Route object, or a previously-added resource.
    """
    if type(target) in string_types:
        if ':' in target:
            # 'prefix:rest': resolve the prefix here, then recurse into
            # the nested mapper for the remainder of the chain
            prefix, rest = target.split(':', 1)
            route = self.named_routes[prefix]
            prefix_params = route._pop_params(args, kw)
            prefix_path = route.path([], prefix_params)
            next_mapper = route.resource
            return prefix_path + next_mapper.path(rest, args, kw)
        else:
            return self.named_routes[target].path(args, kw)
    elif isinstance(target, Route):
        # identity scan: target must be one of this mapper's own routes
        for route in self.routes:
            if route is target:
                return route.path(args, kw)
        raise InvalidArgumentError("Route '%s' not found in this %s object." % (target, self.__class__.__name__))
    else:
        # resource object: look up the route registered for it by id
        target_id = id(target)
        if target_id in self._lookup:
            return self._lookup[target_id].path(args, kw)
        raise InvalidArgumentError("No Route found for target '%s' in this %s object." % (target, self.__class__.__name__)) | Build a URL path fragment for a resource or route.
Possible values for `target`:
A string that does not start with a '.' and does not contain ':'.
: Looks up the route of the same name on this mapper and returns it's
path.
A string of the form 'a:b', 'a:b:c', etc.
: Follows the route to nested mappers by splitting off consecutive
segments. Returns the path of the route found by looking up the
final segment on the last mapper.
A `Route` object
: Returns the path for the route.
A resource that was added previously
: Looks up the first route that points to this resource and
returns its path. |
def clear_url(self):
    """Removes the url, restoring its default value.

    raise: NoAccess - ``Metadata.isRequired()`` is ``true`` or
        ``Metadata.isReadOnly()`` is ``true``
    """
    if (self.get_url_metadata().is_read_only() or
            self.get_url_metadata().is_required()):
        raise errors.NoAccess()
    self._my_map['url'] = self._url_default | Removes the url.
raise: NoAccess - ``Metadata.isRequired()`` is ``true`` or
``Metadata.isReadOnly()`` is ``true``
*compliance: mandatory -- This method must be implemented.* |
def get_point_from_bins_and_idx(self, chi1_bin, chi2_bin, idx):
    """Find masses and spins given bin numbers and an index.

    Returns (mass1, mass2, spin1z, spin2z) for the point at ``idx``
    within the (chi1_bin, chi2_bin) bin. Will fail if no point exists
    there.
    """
    bin_data = self.massbank[chi1_bin][chi2_bin]
    return (bin_data['mass1s'][idx],
            bin_data['mass2s'][idx],
            bin_data['spin1s'][idx],
            bin_data['spin2s'][idx])
def xline(self):
    """Interact with segy in crossline mode.

    Returns
    -------
    xline : Line or None

    Raises
    ------
    ValueError
        If the file is unstructured

    Notes
    -----
    .. versionadded:: 1.1
    """
    if self.unstructured:
        raise ValueError(self._unstructured_errmsg)
    # lazily build and cache the crossline accessor
    if self._xline is None:
        self._xline = Line(self,
                           self.xlines,
                           self._xline_length,
                           self._xline_stride,
                           self.offsets,
                           'crossline',
                           )
    return self._xline
def _normalize(self, metric_name, submit_method, prefix):
    """Normalize a mongodb metric name, adding prefix and rate suffix.

    Case-sensitive suffix replacements are applied before lowercasing.
    """
    prefix_part = "mongodb." if not prefix else "mongodb.{0}.".format(prefix)
    suffix_part = "ps" if submit_method == RATE else ""
    for pattern, repl in iteritems(self.CASE_SENSITIVE_METRIC_NAME_SUFFIXES):
        metric_name = re.compile(pattern).sub(repl, metric_name)
    return u"{metric_prefix}{normalized_metric_name}{metric_suffix}".format(
        normalized_metric_name=self.normalize(metric_name.lower()),
        metric_prefix=prefix_part,
        metric_suffix=suffix_part,
    )
def tupleize_version(version):
    """Split ``version`` into a lexicographically comparable tuple.

    "1.0.3" -> ((1, 0, 3),)
    "1.0.3-dev" -> ((1, 0, 3), ("dev",))
    "1.0.3-rc-5" -> ((1, 0, 3), ("rc",), (5,))
    """
    if version is None or version.startswith("<unknown"):
        return (("unknown",),)
    pieces = re.split("(?:\.|(-))", version)
    parsed = tuple(try_fix_num(piece) for piece in pieces if piece)
    # split into groups at each dash, discarding the dashes themselves
    grouped = groupby(parsed, lambda item: item == "-")
    return tuple(tuple(group) for is_dash, group in grouped if not is_dash)
def enforce_drop(self):
    """Enforce parameter bounds on the ensemble by dropping
    violating realizations.
    """
    ub = self.ubnd
    lb = self.lbnd
    drop = []
    for id in self.index:
        # a realization violates when any value exceeds ub or falls below lb
        if (ub - self.loc[id,:]).min() < 0.0 or\
           (lb - self.loc[id,:]).max() > 0.0:
            drop.append(id)
    # np.NaN was removed in NumPy 2.0; np.nan is the canonical spelling
    self.loc[drop,:] = np.nan
    self.dropna(inplace=True)
def parse(self, line):
    """Return a tree object from a sentence.

    Args:
        line: Sentence to be parsed into a tree
    Returns:
        Tree object representing the parsed sentence
    """
    parses = list(self.parser.raw_parse(line))
    # first parse, then its root subtree
    return parses[0][0]
def _relative_name(self, record_name):
    """Return the sub-domain portion of a domain name.

    None/empty input yields None; an empty relative name from the base
    implementation is also normalized to None.
    """
    if not record_name:
        return None
    subdomain = super(Provider, self)._relative_name(record_name)
    return subdomain if subdomain else None | Returns sub-domain of a domain name |
def set_gene_name(self, name):
    """Assign a gene name.

    :param name: name
    :type name: string
    """
    updated_options = self._options._replace(gene_name=name)
    self._options = updated_options
def binary(self, name):
    """Return the path to the command of the given name for this distribution.

    Raises ValueError for a non-string name, and Distribution.Error (via
    validate/_validated_executable) when no valid command exists.
    """
    if not isinstance(name, str):
        raise ValueError('name must be a binary name, given {} of type {}'.format(name, type(name)))
    self.validate()
    return self._validated_executable(name) | Returns the path to the command of the given name for this distribution.
For example: ::
>>> d = Distribution()
>>> jar = d.binary('jar')
>>> jar
'/usr/bin/jar'
>>>
If this distribution has no valid command of the given name raises Distribution.Error.
If this distribution is a JDK checks both `bin` and `jre/bin` for the binary. |
def rectify_ajax_form_data(self, data):
    """Convert Ajax-submitted field data to suit Django Form validation.

    Fields exposing ``convert_ajax_data`` get their entry converted;
    all other fields are left untouched.
    """
    for name, field in self.base_fields.items():
        try:
            converted = field.convert_ajax_data(data.get(name, {}))
        except AttributeError:
            # field does not implement the Ajax conversion hook
            continue
        data[name] = converted
    return data
def clean_message(message: Message, topmost: bool = False) -> Message:
    """Clean a message of all its binary parts.

    Guts all binary attachments (except at the topmost level) and
    returns the message itself for convenience.
    """
    if message.is_multipart():
        # external-body parts carry no payload to recurse into
        if message.get_content_type() != 'message/external-body':
            parts = message.get_payload()
            parts[:] = map(clean_message, parts)
    elif message_is_binary(message) and not topmost:
        message = gut_message(message)
    return message
def translate_stringlist(val):
    """Return ``val`` as a Python list of strings.

    On the CLI these are passed as multiple instances of a given
    option; Salt accepts a comma-delimited string. A string input is
    split into a list, and any non-string items are converted to text
    in place.
    """
    if not isinstance(val, list):
        try:
            val = split(val)
        except AttributeError:
            val = split(six.text_type(val))
    for idx, item in enumerate(val):
        if not isinstance(item, six.string_types):
            val[idx] = six.text_type(item)
    return val
def getString(t):
    """If t is of type string, return it, otherwise raise InvalidTypeError."""
    slen = c_int()
    s = c_char_p()
    if not PL_get_string_chars(t, byref(s), byref(slen)):
        raise InvalidTypeError("string")
    return s.value
def normalize_url(url):
    """Return a normalized url with a leading and without a trailing slash.

    (The original docstring stated this backwards.)

    >>> normalize_url(None)
    '/'
    >>> normalize_url('/')
    '/'
    >>> normalize_url('/foo/bar')
    '/foo/bar'
    >>> normalize_url('foo/bar')
    '/foo/bar'
    >>> normalize_url('/foo/bar/')
    '/foo/bar'
    """
    if not url:
        return '/'
    if not url.startswith('/'):
        url = '/' + url
    if len(url) > 1 and url.endswith('/'):
        url = url[:-1]
    return url
def add(self, namespace_uri):
    """Add this namespace URI to the mapping, without caring what
    alias it ends up with.

    Returns the existing alias when already mapped, otherwise tries
    'ext0', 'ext1', ... until a free alias is found.
    """
    existing = self.namespace_to_alias.get(namespace_uri)
    if existing is not None:
        return existing
    counter = 0
    while True:
        candidate = 'ext' + str(counter)
        try:
            self.addAlias(namespace_uri, candidate)
        except KeyError:
            # alias already taken; try the next suffix
            counter += 1
        else:
            return candidate
def _return_assoc_tuple(self, objects):
if objects:
result = [(u'OBJECTPATH', {}, obj) for obj in objects]
return self._make_tuple(result)
return None | Create the property tuple for _imethod return of references,
referencenames, associators, and associatornames methods.
This is different than the get/enum imethod return tuples. It creates an
OBJECTPATH for each object in the return list.
_imethod call returns None when there are zero objects rather
than a tuple with empty object path |
def folders(self):
    """Collect non-hidden subfolders of each root directory.

    Appends each found folder path to ``self.filepaths``.
    NOTE(review): despite the name and original doc ("return list of
    folders"), this returns ``self._get_filepaths()`` -- confirm intended.
    """
    for directory in self.directory:
        for path in os.listdir(directory):
            full_path = os.path.join(directory, path)
            if os.path.isdir(full_path):
                if not path.startswith('.'):
                    self.filepaths.append(full_path)
    return self._get_filepaths() | Return list of folders in root directory |
def update(self, read, write, manage):
    """Update the SyncListPermissionInstance.

    :param bool read: Read access.
    :param bool write: Write access.
    :param bool manage: Manage access.
    :returns: Updated SyncListPermissionInstance
    :rtype: twilio.rest.sync.v1.service.sync_list.sync_list_permission.SyncListPermissionInstance
    """
    payload = self._version.update(
        'POST',
        self._uri,
        data=values.of({'Read': read, 'Write': write, 'Manage': manage, }),
    )
    solution = self._solution
    return SyncListPermissionInstance(
        self._version,
        payload,
        service_sid=solution['service_sid'],
        list_sid=solution['list_sid'],
        identity=solution['identity'],
    )
def concatenate(ctx, *text):
    """Joins text strings into one text string."""
    return ''.join(conversions.to_string(arg, ctx) for arg in text)
def post_message(plugin, polled_time, identity, message):
    """Post a single message to ``identity`` via the plugin.

    :type plugin: errbot.BotPlugin
    :type polled_time: datetime.datetime
    :type identity: str
    :type message: str
    """
    recipient = plugin.build_identifier(identity)
    return plugin.send(recipient, message)
def export(self, private_key=True):
    """Export the key in the standard JSON format.

    Exports the key regardless of type; when private_key is False the
    public form is exported (an exception is raised downstream for
    symmetric keys, which have no public form).

    :param private_key(bool): Whether to export the private key.
        Defaults to True.
    """
    return self._export_all() if private_key is True else self.export_public()
def run(self, redirects = []):
    """Run the pipeline with the given redirects; return a RunningPipeline.

    NOTE(review): the mutable default ``redirects=[]`` is only read,
    never mutated, so it is safe -- but a None default would be clearer.
    """
    if not isinstance(redirects, redir.Redirects):
        redirects = redir.Redirects(self._env._redirects, *redirects)
    with copy.copy_session() as sess:
        # deep-copy self inside the session so the running pipeline owns
        # an independent copy of the pipeline description
        self = copy.deepcopy(self)
        processes = self._run(redirects, sess)
    pipeline = RunningPipeline(processes, self)
    self._env.last_pipeline = pipeline
    return pipeline | Runs the pipelines with the specified redirects and returns
a RunningPipeline instance. |
def add_edge(self, source, target, interaction='-', directed=True, dataframe=True):
    """Add a single edge from source to target."""
    edge = dict(source=source,
                target=target,
                interaction=interaction,
                directed=directed)
    return self.add_edges([edge], dataframe=dataframe)
def close_authenticator(self):
    """Close the CBS auth channel and session."""
    container = self._connection.container_id
    _logger.info("Shutting down CBS session on connection: %r.", container)
    try:
        _logger.debug("Unlocked CBS to close on connection: %r.", container)
        self._cbs_auth.destroy()
        _logger.info("Auth closed, destroying session on connection: %r.", container)
        self._session.destroy()
    finally:
        _logger.info("Finished shutting down CBS session on connection: %r.", container)
def parents(self):
    """Return a list of all of this category's ancestors, root first."""
    if self.parent is None:
        return []
    chain = []
    node = self.parent
    while node is not None:
        # Prepend so the final order is root -> ... -> direct parent.
        chain.insert(0, node)
        node = node.parent
    return chain
def _get_version_mode(self, mode=None):
    """Return the VersionMode for a mode name, creating it on demand.

    When *mode* is None, we are working with the 'base' mode.
    """
    cached = self._version_modes.get(mode)
    if cached:
        return cached
    created = VersionMode(name=mode)
    self._version_modes[mode] = created
    return created
def parse_auth_token_from_request(self, auth_header):
    """Parse and return the Hawk Authorization header if present and well-formed.

    Raises ``falcon.HTTPUnauthorized`` with a descriptive message when the
    header is missing, has no scheme/parameters, or uses the wrong scheme.

    :param auth_header: raw Authorization header value (may be None/empty)
    :return: the original, validated header string
    """
    if not auth_header:
        raise falcon.HTTPUnauthorized(
            description='Missing Authorization Header')
    try:
        # Split off the scheme prefix; the remainder holds the parameters.
        auth_header_prefix, _ = auth_header.split(' ', 1)
    except ValueError:
        raise falcon.HTTPUnauthorized(
            description='Invalid Authorization Header: Missing Scheme or Parameters')
    # Scheme comparison is case-insensitive.
    if auth_header_prefix.lower() != self.auth_header_prefix.lower():
        raise falcon.HTTPUnauthorized(
            description='Invalid Authorization Header: '
            'Must start with {0}'.format(self.auth_header_prefix))
    return auth_header
def dispatch(self,request,*args,**kwargs):
    """Handle the session data passed by the prior view.

    Redirects back to the booking page when the session holds an invalid
    lesson id or has expired; otherwise stashes the lesson and the
    payAtDoor flag on the view and continues normal dispatch.
    """
    lessonSession = request.session.get(PRIVATELESSON_VALIDATION_STR,{})
    try:
        self.lesson = PrivateLessonEvent.objects.get(id=lessonSession.get('lesson'))
    except (ValueError, ObjectDoesNotExist):
        messages.error(request,_('Invalid lesson identifier passed to sign-up form.'))
        return HttpResponseRedirect(reverse('bookPrivateLesson'))
    # The session carries an expiry timestamp; treat missing or stale
    # values as an expired registration.
    expiry = parse_datetime(lessonSession.get('expiry',''),)
    if not expiry or expiry < timezone.now():
        messages.info(request,_('Your registration session has expired. Please try again.'))
        return HttpResponseRedirect(reverse('bookPrivateLesson'))
    self.payAtDoor = lessonSession.get('payAtDoor',False)
    return super(PrivateLessonStudentInfoView,self).dispatch(request,*args,**kwargs)
def _make_query_from_terms(self, terms):
    """Return an FTS query for a partition, built from decomposed search terms.

    Args:
        terms (dict or str): raw search terms.

    Returns:
        str: the FTS query.
    """
    expanded_terms = self._expand_terms(terms)
    cterms = ''
    if expanded_terms['doc']:
        cterms = self.backend._or_join(expanded_terms['doc'])
    keywords = expanded_terms['keywords']
    # A from/to range is folded into the keyword list as one extra term.
    frm_to = self._from_to_as_term(expanded_terms['from'], expanded_terms['to'])
    if frm_to:
        keywords.append(frm_to)
    if keywords:
        # NOTE: ``keywords`` aliases expanded_terms['keywords'], so the
        # appended range term is included in the field term below.
        if cterms:
            cterms = self.backend._and_join(
                [cterms, self.backend._field_term('keywords', expanded_terms['keywords'])])
        else:
            cterms = self.backend._field_term('keywords', expanded_terms['keywords'])
    logger.debug('partition terms conversion: `{}` terms converted to `{}` query.'.format(terms, cterms))
    return cterms
def inserir(self, type, description):
    """Insert a new Script Type and return its identifier.

    :param type: script type name (3 to 40 characters)
    :param description: script type description (3 to 100 characters)
    :return: dict in the form ``{'script_type': {'id': < id_script_type >}}``
    :raise InvalidParameterError: type or description is null or invalid
    :raise NomeTipoRoteiroDuplicadoError: script type already registered
    :raise DataBaseError: networkapi failed to access the database
    :raise XMLError: networkapi failed to generate the XML response
    """
    payload = {'script_type': {'type': type, 'description': description}}
    code, xml = self.submit(payload, 'POST', 'scripttype/')
    return self.response(code, xml)
def ddtodms(self, dd):
    """Convert decimal degrees to a (degrees, minutes, seconds) tuple.

    The sign of a negative input is carried on the most significant
    nonzero component (degrees, else minutes, else seconds).
    """
    is_negative = dd < 0
    total_seconds = abs(dd) * 3600
    minutes, seconds = divmod(total_seconds, 60)
    degrees, minutes = divmod(minutes, 60)
    if is_negative:
        if degrees > 0:
            degrees = -degrees
        elif minutes > 0:
            minutes = -minutes
        else:
            seconds = -seconds
    return (degrees, minutes, seconds)
def _init_get_dict():
    """Build the name -> filter-method dispatch table for filter operations."""
    return {
        'main chain': PandasPdb._get_mainchain,
        'hydrogen': PandasPdb._get_hydrogen,
        'c-alpha': PandasPdb._get_calpha,
        'carbon': PandasPdb._get_carbon,
        'heavy': PandasPdb._get_heavy,
    }
def shutdown_waits_for(coro, loop=None):
    """Prevent *coro* from being cancelled during the shutdown sequence.

    The coroutine is wrapped in a proxy task that is registered in the
    global ``_DO_NOT_CANCEL_COROS`` collection; the shutdown sequence
    leaves such tasks alone. The caller awaits a Future rather than the
    task itself, which decouples the result from cancellation of the
    outer task. NOTE: during shutdown the caller will likely have been
    cancelled and may never receive the result, so do not rely on
    capturing results via this function.
    """
    loop = loop or get_event_loop()
    fut = loop.create_future()
    async def coro_proxy():
        try:
            result = await coro
        except (CancelledError, Exception) as e:
            set_fut_done = partial(fut.set_exception, e)
        else:
            set_fut_done = partial(fut.set_result, result)
        # The caller may have been cancelled while we ran; only complete
        # the future if it is still alive.
        if not fut.cancelled():
            set_fut_done()
    new_coro = coro_proxy()
    _DO_NOT_CANCEL_COROS.add(new_coro)
    loop.create_task(new_coro)
    # The caller awaits this thin wrapper, not the protected task.
    async def inner():
        return await fut
    return inner()
def new(self, attribute, operation=ChainOperator.AND):
    """Start a new query chained onto the current one.

    :param str attribute: attribute of the new query (falsy for none)
    :param ChainOperator operation: operator used to chain the new query;
        a plain string is coerced to a ChainOperator.
    :rtype: Query
    """
    self._chain = ChainOperator(operation) if isinstance(operation, str) else operation
    self._attribute = self._get_mapping(attribute) if attribute else None
    self._negation = False
    return self
def visit_Bytes(self, node: ast.Bytes) -> bytes:
    """Recompute and record the bytes literal held by *node*."""
    value = node.s
    self.recomputed_values[node] = value
    return value
def from_name(cls, relation_name, conversations=None):
    """Find a relation implementation in the current charm, based on the
    name of the relation.

    Checks the class-level cache first, then resolves the implementation
    from the relation's role and interface.

    :return: A Relation instance, or None.
    """
    if relation_name is None:
        return None
    relation_class = cls._cache.get(relation_name)
    if relation_class:
        return relation_class(relation_name, conversations)
    role, interface = hookenv.relation_to_role_and_interface(relation_name)
    if role and interface:
        relation_class = cls._find_impl(role, interface)
        if relation_class:
            # Cache the resolved class for subsequent lookups.
            cls._cache[relation_name] = relation_class
            return relation_class(relation_name, conversations)
    return None
def CaptureFrameLocals(self, frame):
    """Captures local variables and arguments of the specified frame.

    Args:
        frame: frame to capture locals and arguments from.

    Returns:
        (arguments, locals) tuple.
    """
    variables = {n: self.CaptureNamedVariable(n, v, 1,
                                              self.default_capture_limits)
                 for n, v in six.viewitems(frame.f_locals)}
    # Count formal arguments, plus the *args/**kwargs slots if present.
    nargs = frame.f_code.co_argcount
    if frame.f_code.co_flags & inspect.CO_VARARGS: nargs += 1
    if frame.f_code.co_flags & inspect.CO_VARKEYWORDS: nargs += 1
    frame_arguments = []
    # The first ``nargs`` varnames are the arguments; move them out of
    # ``variables`` so only plain locals remain.
    for argname in frame.f_code.co_varnames[:nargs]:
        if argname in variables: frame_arguments.append(variables.pop(argname))
    return (frame_arguments, list(six.viewvalues(variables)))
def replace_pyof_version(module_fullname, version):
    """Replace the OF-version segment of a module fullname.

    Takes a module name (e.g. 'pyof.v0x01.common.header') and returns it
    on a new version (e.g. 'pyof.v0x02.common.header').

    Args:
        module_fullname (str): The fullname of the module.
        version (str): The version to be 'inserted' on the fullname.

    Returns:
        str: the fullname with the replaced version, or None when the
        module is not version-specific or already at the requested
        version.
    """
    current = MetaStruct.get_pyof_version(module_fullname)
    if current and current != version:
        return module_fullname.replace(current, version)
    return None
def status(config='root', num_pre=None, num_post=None):
    """Return a comparison between two snapshots.

    config
        Configuration name.
    num_pre
        First snapshot ID to compare. Default is the last snapshot.
    num_post
        Last snapshot ID to compare. Default is 0 (current state).

    CLI example:

    .. code-block:: bash

        salt '*' snapper.status
        salt '*' snapper.status num_pre=19 num_post=20
    """
    try:
        pre, post = _get_num_interval(config, num_pre, num_post)
        snapper.CreateComparison(config, int(pre), int(post))
        files = snapper.GetFiles(config, int(pre), int(post))
        status_ret = {}
        SUBVOLUME = list_configs()[config]['SUBVOLUME']
        for file in files:
            # Strip the subvolume prefix when present, then re-join and
            # normalise so returned paths are consistent.
            _filepath = file[0][len(SUBVOLUME):] if file[0].startswith(SUBVOLUME) else file[0]
            status_ret[os.path.normpath(SUBVOLUME + _filepath)] = {'status': status_to_string(file[1])}
        return status_ret
    except dbus.DBusException as exc:
        # Surface D-Bus failures as a salt CommandExecutionError.
        raise CommandExecutionError(
            'Error encountered while listing changed files: {0}'
            .format(_dbus_exception_to_reason(exc, locals()))
        )
def _send_mail(subject_or_message: Optional[Union[str, Message]] = None,
               to: Optional[Union[str, List[str]]] = None,
               template: Optional[str] = None,
               **kwargs):
    """The default function used for sending emails.

    :param subject_or_message: A subject string, or for backwards
        compatibility with stock Flask-Mail, a
        :class:`~flask_mail.Message` instance
    :param to: An email address, or a list of email addresses
    :param template: Which template to render.
    :param kwargs: Extra kwargs to pass on to :class:`~flask_mail.Message`
    """
    # Legacy keyword spellings: ``subject`` and ``recipients``.
    subject_or_message = subject_or_message or kwargs.pop('subject')
    to = to or kwargs.pop('recipients', [])
    msg = make_message(subject_or_message, to, template, **kwargs)
    with mail.connect() as connection:
        connection.send(msg)
def get_commit_log(from_rev=None):
    """Yield (hexsha, message) for every commit, from last to first."""
    check_repo()
    # When a starting revision is given, iterate only commits after it.
    rev = '...{0}'.format(from_rev) if from_rev else None
    for commit in repo.iter_commits(rev):
        yield commit.hexsha, commit.message
def from_file(cls, filename, source):
    """Load a theme from the specified configuration file.

    Parameters:
        filename: The name of the file to load.
        source: A description of where the theme was loaded from.

    Raises:
        ConfigError: if the file cannot be parsed or is missing the
            required [theme] section.
    """
    _logger.info('Loading theme %s', filename)
    try:
        config = configparser.ConfigParser()
        # Preserve the case of option names; element names are
        # case-sensitive.
        config.optionxform = six.text_type
        with codecs.open(filename, encoding='utf-8') as fp:
            config.readfp(fp)
    except configparser.ParsingError as e:
        # BUGFIX: ParsingError has no ``message`` attribute on Python 3;
        # str(e) gives a portable error description.
        raise ConfigError(str(e))
    if not config.has_section('theme'):
        raise ConfigError(
            'Error loading {0}:\n'
            '    missing [theme] section'.format(filename))
    # Theme name is the file's basename without its extension.
    theme_name = os.path.basename(filename)
    theme_name, _ = os.path.splitext(theme_name)
    elements = {}
    for element, line in config.items('theme'):
        if element not in cls.DEFAULT_ELEMENTS:
            # Ignore unknown elements so themes stay forward-compatible.
            _logger.info('Skipping element %s', element)
            continue
        elements[element] = cls._parse_line(element, line, filename)
    return cls(name=theme_name, source=source, elements=elements)
def get_game_logs(self):
    """Return team game logs as a pandas DataFrame.

    The GAME_DATE column is parsed into datetime64 values.
    """
    rows = self.response.json()['resultSets'][0]['rowSet']
    columns = self.response.json()['resultSets'][0]['headers']
    frame = pd.DataFrame(rows, columns=columns)
    frame.GAME_DATE = pd.to_datetime(frame.GAME_DATE)
    return frame
def options(self, **options):
    """Add input options for the underlying data source.

    Recognised file-source option: ``timeZone`` -- the timezone used to
    parse timestamps in JSON/CSV data or partition values; defaults to
    the session-local timezone when unset.
    """
    for key, value in options.items():
        self._jreader = self._jreader.option(key, to_str(value))
    return self
def encode_http_params(**kw):
    """URL-encode keyword parameters into a query string.

    Values that are empty (per ``is_empty``) are dropped; remaining
    values are UTF-8 encoded and percent-quoted.
    """
    try:
        _fo = lambda k, v: '{name}={value}'.format(
            name=k, value=to_basestring(quote(v)))
    except:
        # NOTE(review): bare except with a %-format fallback -- presumably
        # a Python 2 compatibility shim; the try body cannot realistically
        # raise here. Consider removing this branch.
        _fo = lambda k, v: '%s=%s' % (k, to_basestring(quote(v)))
    _en = utf8
    return '&'.join([_fo(k, _en(v)) for k, v in kw.items() if not is_empty(v)])
def validate_source_dir(script, directory):
    """Validate that *directory* (when given) contains the user script.

    Args:
        script (str): Script filename.
        directory (str): Directory expected to contain the source file.

    Returns:
        True when the check passes (or no directory was given).

    Raises:
        ValueError: if ``directory`` does not contain ``script``.
    """
    if directory and not os.path.isfile(os.path.join(directory, script)):
        raise ValueError('No file named "{}" was found in directory "{}".'.format(script, directory))
    return True
def _uniqueid(n=30):
return ''.join(random.SystemRandom().choice(
string.ascii_uppercase + string.ascii_lowercase)
for _ in range(n)) | Return a unique string with length n.
:parameter int N: number of character in the uniqueid
:return: the uniqueid
:rtype: str |
def master_ref(self):
    """Filter the current DataFrame references to only those rows whose
    reference is master.

    >>> master_df = repos_df.master_ref

    :rtype: ReferencesDataFrame
    """
    # NOTE(review): implemented via getHEAD() -- confirm that HEAD and
    # master coincide in the upstream engine.
    head_refs = self._engine_dataframe.getReferences().getHEAD()
    return ReferencesDataFrame(head_refs, self._session, self._implicits)
def _normalize_helper(number, replacements, remove_non_matches):
    """Normalize a phone-number string via a character-replacement map.

    Each character (upper-cased) found in *replacements* is substituted
    by its mapped value; other characters are kept unchanged unless
    *remove_non_matches* is true, in which case they are stripped.

    Arguments:
        number -- a string representing a phone number
        replacements -- a mapping of characters to their replacements
        remove_non_matches -- whether unmatched characters are stripped

    Returns the normalized string version of the phone number.
    """
    kept = []
    for ch in number:
        replacement = replacements.get(ch.upper())
        if replacement is not None:
            kept.append(replacement)
        elif not remove_non_matches:
            kept.append(ch)
    return U_EMPTY_STRING.join(kept)
def global_unlock_percent(self):
    """Global achievement unlock percent.

    :rtype: float
    """
    percent = CRef.cfloat()
    # The interface call fills ``percent`` in place and reports success.
    if not self._iface.get_ach_progress(self.name, percent):
        return 0.0
    return float(percent)
def get_index2latex(model_description):
    """Get a dictionary that maps indices to LaTeX commands.

    Parameters
    ----------
    model_description : dict
        A model description that points to a feature folder where an
        ``index2formula_id.csv`` has to be.

    Returns
    -------
    dictionary :
        Maps int indices to LaTeX command strings.
    """
    translation_csv = os.path.join(get_project_root(),
                                   model_description["data-source"],
                                   "index2formula_id.csv")
    index2latex = {}
    with open(translation_csv) as csvfile:
        for row in csv.DictReader(csvfile, delimiter=',', quotechar='"'):
            index2latex[int(row['index'])] = row['latex']
    return index2latex
def series64bitto32bit(s):
    """Downcast a 64-bit numeric pandas Series to its 32-bit counterpart.

    float64 becomes float32 and int64 becomes int32; any other dtype is
    returned unchanged. Useful to save memory or disk space.

    Parameters
    ----------
    s : the series to convert

    Returns
    -------
    The converted series.
    """
    if s.dtype == np.float64:
        return s.astype('float32')
    if s.dtype == np.int64:
        return s.astype('int32')
    return s
def present(self, path, timeout=0):
    """Return True if there is an entity at *path*."""
    ret, data = self.sendmess(MSG_PRESENCE, str2bytez(path),
                              timeout=timeout)
    # The protocol returns a non-positive code and no payload; a negative
    # code signals absence, zero signals presence.
    assert ret <= 0 and not data, (ret, data)
    return ret >= 0
def set_priority(self, priority):
    """Set the screen's priority class.

    Only the recognised priority classes are accepted; any other value
    is silently ignored.
    """
    valid = ("hidden", "background", "info", "foreground", "alert", "input")
    if priority not in valid:
        return
    self.priority = priority
    self.server.request("screen_set %s priority %s" % (self.ref, self.priority))
def load_zae(file_obj, resolver=None, **kwargs):
    """Load a ZAE file, which is just a zipped DAE file.

    Parameters
    -------------
    file_obj : file object
        Contains ZAE data
    resolver : trimesh.visual.Resolver
        Resolver to load additional assets
    kwargs : dict
        Passed to load_collada

    Returns
    ------------
    loaded : dict
        Results of loading
    """
    archive = util.decompress(file_obj,
                              file_type='zip')
    # Pick the first .dae entry as the scene file.
    file_name = next(i for i in archive.keys()
                     if i.lower().endswith('.dae'))
    # NOTE(review): the ``resolver`` parameter is shadowed here -- assets
    # are always resolved from inside the archive; confirm this is
    # intended.
    resolver = visual.resolvers.ZipResolver(archive)
    loaded = load_collada(archive[file_name],
                          resolver=resolver,
                          **kwargs)
    return loaded
def op_get_mutate_fields(op_name):
    """Get the names of the fields that will change when *op_name* is
    applied to a record.

    :param op_name: name of the operation
    :return: a fresh list of field names (safe for callers to mutate)
    :raise Exception: if the operation is unknown
    """
    # ``global`` is unnecessary for read-only access, and membership is
    # tested on the dict itself rather than on ``.keys()``.
    if op_name not in MUTATE_FIELDS:
        raise Exception("No such operation '%s'" % op_name)
    # Return a copy so callers cannot mutate the shared table.
    return list(MUTATE_FIELDS[op_name])
def _create_payload(payload):
    """Return *payload* padded with zero bytes up to the next 512-byte
    block border."""
    remainder = len(payload) % BLOCKSIZE
    if remainder:
        payload += NUL * (BLOCKSIZE - remainder)
    return payload
def version_cmd(argv):
    """Print the current pew version, or 'unknown' if it cannot be found."""
    import pkg_resources
    try:
        version = pkg_resources.get_distribution('pew').version
    except pkg_resources.DistributionNotFound:
        version = 'unknown'
        print('Setuptools has some issues here, failed to get our own package.', file=sys.stderr)
    print(version)
def get_assessments_taken(self):
    """Gets all ``AssessmentTaken`` elements.

    In plenary mode, the returned list contains all known assessments
    taken or an error results. Otherwise, the returned list may contain
    only those assessments taken that are accessible through this
    session.

    return: (osid.assessment.AssessmentTakenList) - a list of
        ``AssessmentTaken`` elements
    raise: OperationFailed - unable to complete request
    raise: PermissionDenied - authorization failure occurred
    *compliance: mandatory -- This method must be implemented.*
    """
    collection = JSONClientValidated('assessment',
                                     collection='AssessmentTaken',
                                     runtime=self._runtime)
    # Newest first, constrained by the session's current view filter.
    result = collection.find(self._view_filter()).sort('_id', DESCENDING)
    return objects.AssessmentTakenList(result, runtime=self._runtime, proxy=self._proxy)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.