| code (string, lengths 75–104k) | docstring (string, lengths 1–46.9k) |
|---|---|
def _create_link(self, act_node, name, instance):
    """Register ``instance`` as a link called ``name`` under ``act_node``.

    Updates the node's link/child mappings, the root instance's
    reverse-link bookkeeping and the per-name link counter, then
    returns the linked instance.
    """
    act_node._links[name] = instance
    act_node._children[name] = instance

    # Reverse mapping: full name of the link target -> nodes linking to it.
    target_name = instance.v_full_name
    linking = self._root_instance._linked_by.setdefault(target_name, {})
    node_entry = linking.setdefault(act_node.v_full_name, (act_node, set()))
    node_entry[1].add(name)

    # Count how many links share this name across the tree.
    self._links_count[name] = self._links_count.get(name, 0) + 1

    self._logger.debug('Added link `%s` under `%s` pointing '
                       'to `%s`.' % (name, act_node.v_full_name,
                                     instance.v_full_name))
    return instance
|
Creates a link and checks if names are appropriate
|
def _encode_utf8(self, **kwargs):
    """
    UTF8 encodes all of the NVP values.
    """
    if is_py3:
        # Python 3 strings are unicode already; nothing to encode.
        return kwargs
    encoded = kwargs
    for key in encoded.keys():
        #noinspection PyUnresolvedReferences
        value = encoded[key]
        if isinstance(value, types.UnicodeType):
            encoded[key] = value.encode('utf-8')
    return encoded
|
UTF8 encodes all of the NVP values.
|
def predict_survival_function(self, X, times=None):
    """
    Predict survival probabilities for individuals given their covariates,
    assuming each individual has just entered the study (no conditioning on
    time already survived).

    Parameters
    ----------
    X: numpy array or DataFrame
        a (n,d) covariate numpy array or DataFrame. If a DataFrame, columns
        can be in any order. If a numpy array, columns must be in the
        same order as the training data.
    times: iterable, optional
        an iterable of increasing times to predict the cumulative hazard at.
        Defaults to all durations (observed and unobserved); linear
        interpolation is used for times not in the index.

    Returns
    -------
    survival_function : DataFrame
        the survival probabilities of individuals over the timeline
    """
    # S(t) = exp(-H(t)) where H is the cumulative hazard.
    cumulative_hazard = self.predict_cumulative_hazard(X, times=times)
    return np.exp(-cumulative_hazard)
|
Predict the survival function for individuals, given their covariates. This assumes that the individual
just entered the study (that is, we do not condition on how long they have already lived for.)
Parameters
----------
X: numpy array or DataFrame
a (n,d) covariate numpy array or DataFrame. If a DataFrame, columns
can be in any order. If a numpy array, columns must be in the
same order as the training data.
times: iterable, optional
an iterable of increasing times to predict the cumulative hazard at. Default
is the set of all durations (observed and unobserved). Uses a linear interpolation if
points in time are not in the index.
Returns
-------
survival_function : DataFrame
the survival probabilities of individuals over the timeline
|
def track_from_file(file_object, filetype, timeout=DEFAULT_ASYNC_TIMEOUT, force_upload=False):
    """
    Create a track object from a file-like object.

    NOTE: Does not create the detailed analysis for the Track. Call
    Track.get_analysis() for that.

    Args:
        file_object: a file-like Python object
        filetype: the file type. Supported types include mp3, ogg, wav, m4a, mp4, au
        timeout: seconds to wait for the asynchronous analysis
        force_upload: skip the MD5 shortcut path, force an upload+analysis

    Example:
        >>> f = open("Miaow-01-Tempered-song.mp3")
        >>> t = track.track_from_file(f, 'mp3')
        >>> t
        < Track >
        >>>
    """
    if not force_upload:
        # An MD5 lookup is far cheaper than uploading the audio, so try
        # that first; fall through to a fresh upload on an API error.
        try:
            digest = hashlib.md5(file_object.read()).hexdigest()
            return track_from_md5(digest)
        except util.EchoNestAPIError:
            pass
    # Rewind: the MD5 shortcut (when attempted) consumed the stream.
    file_object.seek(0)
    return _track_from_data(file_object.read(), filetype, timeout)
|
Create a track object from a file-like object.
NOTE: Does not create the detailed analysis for the Track. Call
Track.get_analysis() for that.
Args:
file_object: a file-like Python object
filetype: the file type. Supported types include mp3, ogg, wav, m4a, mp4, au
force_upload: skip the MD5 shortcut path, force an upload+analysis
Example:
>>> f = open("Miaow-01-Tempered-song.mp3")
>>> t = track.track_from_file(f, 'mp3')
>>> t
< Track >
>>>
|
def guess_labels(self, doc):
    """
    Return the set of label objects predicted for ``doc``.
    """
    # Nothing to classify on an empty document.
    if doc.nb_pages <= 0:
        return set()
    self.label_guesser.total_nb_documents = len(self._docs_by_id)
    # Map each predicted label name to its label object.
    return {self.labels[name] for name in self.label_guesser.guess(doc)}
|
return a prediction of label names
|
def do_workers(self, args):
    '''list all known workers'''
    workers = self.task_master.workers(alive=not args.all)
    # sorted() iterates the mapping's keys directly; this works on both
    # Python 2 and 3, unlike the old dict.iterkeys()/iteritems() calls,
    # which raise AttributeError on Python 3.
    for k in sorted(workers):
        self.stdout.write('{0} ({1})\n'.format(k, workers[k]))
        if args.details:
            heartbeat = self.task_master.get_heartbeat(k)
            for hk, hv in heartbeat.items():
                self.stdout.write('  {0}: {1}\n'.format(hk, hv))
|
list all known workers
|
def addOntology(self):
    """
    Adds a new Ontology to this repo.
    """
    self._openRepo()
    filePath = self._getFilePath(self._args.filePath,
                                 self._args.relativePath)
    # When no explicit name was given, derive it from the file path.
    name = self._args.name
    if name is None:
        name = getNameFromPath(filePath)
    ontology = ontologies.Ontology(name)
    ontology.populateFromFile(filePath)
    self._updateRepo(self._repo.insertOntology, ontology)
|
Adds a new Ontology to this repo.
|
def load_data_file(filename, encoding='utf-8'):
    """Return the contents of a packaged data file as a list of lines.

    Parameters:
        filename: The name of the file (no directories included).
        encoding: The file encoding. Defaults to utf-8.
    """
    # Resolve the file relative to the package's data directory.
    raw = pkgutil.get_data(PACKAGE_NAME, os.path.join(DATA_DIR, filename))
    return raw.decode(encoding).splitlines()
|
Load a data file and return it as a list of lines.
Parameters:
filename: The name of the file (no directories included).
encoding: The file encoding. Defaults to utf-8.
|
def _onMessageNotification(self, client, userdata, pahoMessage):
    """
    Internal callback for gateway notification messages, parses source device from topic string and
    passes the information on to the registered device command callback
    """
    try:
        note = Notification(pahoMessage, self._messageCodecs)
    except InvalidEventException as e:
        # Malformed notification: log and drop it.
        self.logger.critical(str(e))
        return
    self.logger.debug("Received Notification")
    if self.notificationCallback:
        self.notificationCallback(note)
|
Internal callback for gateway notification messages, parses source device from topic string and
passes the information on to the registered device command callback
|
def open_imports(self, imported_definitions):
    """Load every I{imported} WSDL referenced by this definition."""
    for wsdl_import in self.imports:
        wsdl_import.load(self, imported_definitions)
|
Import the I{imported} WSDLs.
|
def _get_all_attributes(network):
    """
    Get all the complex mode attributes in the network so that they
    can be used for mapping to resource scenarios later.

    Returns a new list containing the network's own attributes followed by
    those of its nodes, links and resource groups. The network's own
    attribute list is left untouched (the previous implementation extended
    ``network.attributes`` in place, mutating the caller's network object
    as a hidden side effect).
    """
    # Copy first so we never mutate the caller's attribute list.
    attrs = list(network.attributes)
    for node in network.nodes:
        attrs.extend(node.attributes)
    for link in network.links:
        attrs.extend(link.attributes)
    for group in network.resourcegroups:
        attrs.extend(group.attributes)
    return attrs
|
Get all the complex mode attributes in the network so that they
can be used for mapping to resource scenarios later.
|
def list(gandi, domain, zone_id, output, format, limit):
    """List DNS zone records for a domain.

    Depending on the flags, either prints a human-readable listing,
    writes a BIND-style zone file, or echoes text/json output. Returns
    the list of records (or None if no zone could be resolved).
    """
    options = {
        'items_per_page': limit,
    }
    output_keys = ['name', 'type', 'value', 'ttl']
    if not zone_id:
        result = gandi.domain.info(domain)
        zone_id = result['zone_id']
    if not zone_id:
        gandi.echo('No zone records found, domain %s doesn\'t seems to be '
                   'managed at Gandi.' % domain)
        return
    records = gandi.record.list(zone_id, options)
    if not output and not format:
        # Default: human-readable listing with separators between records.
        for num, rec in enumerate(records):
            if num:
                gandi.separator_line()
            output_generic(gandi, rec, output_keys, justify=12)
    elif output:
        # Write a BIND-style zone file. Open the file once in text mode:
        # the old code truncated the file, then re-opened it in 'ab'
        # (binary append) for every record, which raises TypeError on
        # Python 3 when writing str data.
        zone_filename = domain + "_" + str(zone_id)
        with open(zone_filename, 'w') as zone_file:
            for record in records:
                zone_file.write('%s %s IN %s %s\n' %
                                (record['name'], record['ttl'],
                                 record['type'], record['value']))
        gandi.echo('Your zone file have been writen in %s' % zone_filename)
    elif format:
        if format == 'text':
            for record in records:
                format_record = ('%s %s IN %s %s' %
                                 (record['name'], record['ttl'],
                                  record['type'], record['value']))
                gandi.echo(format_record)
        if format == 'json':
            format_record = json.dumps(records, sort_keys=True,
                                       indent=4, separators=(',', ': '))
            gandi.echo(format_record)
    return records
|
List DNS zone records for a domain.
|
def add_child_catalog(self, catalog_id, child_id):
    """Adds a child to a catalog.

    arg:    catalog_id (osid.id.Id): the ``Id`` of a catalog
    arg:    child_id (osid.id.Id): the ``Id`` of the new child
    raise:  AlreadyExists - ``catalog_id`` is already a parent of
            ``child_id``
    raise:  NotFound - ``catalog_id`` or ``child_id`` not found
    raise:  NullArgument - ``catalog_id`` or ``child_id`` is ``null``
    raise:  OperationFailed - unable to complete request
    raise:  PermissionDenied - authorization failure occurred
    *compliance: mandatory -- This method must be implemented.*
    """
    # Implemented from template for
    # osid.resource.BinHierarchyDesignSession.add_child_bin_template
    if self._catalog_session is None:
        # No catalog session available: fall back on the hierarchy session.
        return self._hierarchy_session.add_child(id_=catalog_id, child_id=child_id)
    return self._catalog_session.add_child_catalog(catalog_id=catalog_id, child_id=child_id)
|
Adds a child to a catalog.
arg: catalog_id (osid.id.Id): the ``Id`` of a catalog
arg: child_id (osid.id.Id): the ``Id`` of the new child
raise: AlreadyExists - ``catalog_id`` is already a parent of
``child_id``
raise: NotFound - ``catalog_id`` or ``child_id`` not found
raise: NullArgument - ``catalog_id`` or ``child_id`` is
``null``
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure occurred
*compliance: mandatory -- This method must be implemented.*
|
def _update_field(self, action, field, value, max_tries, tries=0):
    """
    Private update_field method. Wrapped by Document.update_field.
    Tracks a "tries" var to help limit recursion.
    """
    # Start from a fresh copy of the document before applying the change.
    self.fetch()
    action(self, field, value)
    try:
        self.save()
    except requests.HTTPError as ex:
        # 409 Conflict: someone else saved first; refetch and retry
        # until we run out of attempts.
        if ex.response.status_code != 409 or tries >= max_tries:
            raise
        self._update_field(
            action, field, value, max_tries, tries=tries + 1)
|
Private update_field method. Wrapped by Document.update_field.
Tracks a "tries" var to help limit recursion.
|
def convert_to_spaces(cls, ops, kwargs):
    """For all operands that are merely of type str or int, substitute
    LocalSpace objects with corresponding labels:
    For a string, just itself, for an int, a string version of that int.
    """
    from qnet.algebra.core.hilbert_space_algebra import (
        HilbertSpace, LocalSpace)
    converted = []
    for op in ops:
        # Existing Hilbert spaces pass through unchanged; anything else
        # (str/int label) is wrapped in a LocalSpace.
        if isinstance(op, HilbertSpace):
            converted.append(op)
        else:
            converted.append(LocalSpace(op))
    return converted, kwargs
|
For all operands that are merely of type str or int, substitute
LocalSpace objects with corresponding labels:
For a string, just itself, for an int, a string version of that int.
|
def get(self, filename):
    """
    Download a distribution archive from the configured Amazon S3 bucket.
    :param filename: The filename of the distribution archive (a string).
    :returns: The pathname of a distribution archive on the local file
              system or :data:`None`.
    :raises: :exc:`.CacheBackendError` when any underlying method fails.
    """
    timer = Timer()
    self.check_prerequisites()
    with PatchedBotoConfig():
        # Check if the distribution archive is available.
        raw_key = self.get_cache_key(filename)
        logger.info("Checking if distribution archive is available in S3 bucket: %s", raw_key)
        key = self.s3_bucket.get_key(raw_key)
        if key is None:
            # Cache miss: log it and fall through, implicitly returning None.
            logger.debug("Distribution archive is not available in S3 bucket.")
        else:
            # Download the distribution archive to the local binary index.
            # TODO Shouldn't this use LocalCacheBackend.put() instead of
            # implementing the same steps manually?!
            logger.info("Downloading distribution archive from S3 bucket ..")
            file_in_cache = os.path.join(self.config.binary_cache, filename)
            makedirs(os.path.dirname(file_in_cache))
            # AtomicReplace ensures a partially-downloaded file never
            # appears under the final name.
            with AtomicReplace(file_in_cache) as temporary_file:
                key.get_contents_to_filename(temporary_file)
            logger.debug("Finished downloading distribution archive from S3 bucket in %s.", timer)
            return file_in_cache
|
Download a distribution archive from the configured Amazon S3 bucket.
:param filename: The filename of the distribution archive (a string).
:returns: The pathname of a distribution archive on the local file
system or :data:`None`.
:raises: :exc:`.CacheBackendError` when any underlying method fails.
|
def generate_neuroml2_from_network(nl_model,
                                   nml_file_name=None,
                                   print_summary=True,
                                   seed=1234,
                                   format='xml',
                                   base_dir=None,
                                   copy_included_elements=False,
                                   target_dir=None,
                                   validate=False):
    """
    Generate and save NeuroML2 file (in either XML or HDF5 format) from the
    NeuroMLlite description

    :param nl_model: the NeuroMLlite network model to export
    :param nml_file_name: output file name; derived from the document id when None
    :param print_summary: when True, print a summary of the generated document
    :param seed: random seed passed through to generate_network()
    :param format: 'xml' or 'hdf5' -- selects the writer used for saving
    :param base_dir: directory used to locate referenced source files
    :param copy_included_elements: accepted but not used in this function body
    :param target_dir: directory for the written file; defaults to base_dir
    :param validate: when True and format is 'xml', validate the written file
    :return: tuple of (nml_file_name, nml_doc)
    """
    print_v("Generating NeuroML2 for %s%s..." % (nl_model.id, ' (base dir: %s; target dir: %s)'
                                                 % (base_dir, target_dir) if base_dir or target_dir else ''))
    import neuroml
    from neuroml.hdf5.NetworkBuilder import NetworkBuilder
    # Build the network structure into a NeuroML document via the handler.
    neuroml_handler = NetworkBuilder()
    generate_network(nl_model, neuroml_handler, seed=seed, base_dir=base_dir)
    nml_doc = neuroml_handler.get_nml_doc()
    # Add any input sources not already present in the document, either by
    # including their source file or by instantiating a NeuroML2 input type.
    for i in nl_model.input_sources:
        if nml_doc.get_by_id(i.id) == None:
            if i.neuroml2_source_file:
                incl = neuroml.IncludeType(_locate_file(i.neuroml2_source_file, base_dir))
                if not incl in nml_doc.includes:
                    nml_doc.includes.append(incl)
            if i.neuroml2_input:
                input_params = i.parameters if i.parameters else {}
                # TODO make more generic...
                if i.neuroml2_input.lower() == 'pulsegenerator':
                    input = neuroml.PulseGenerator(id=i.id)
                    nml_doc.pulse_generators.append(input)
                elif i.neuroml2_input.lower() == 'pulsegeneratordl':
                    input = neuroml.PulseGeneratorDL(id=i.id)
                    nml_doc.pulse_generator_dls.append(input)
                elif i.neuroml2_input.lower() == 'poissonfiringsynapse':
                    input = neuroml.PoissonFiringSynapse(id=i.id)
                    nml_doc.poisson_firing_synapses.append(input)
                # NOTE(review): exec builds attribute assignments from
                # parameter names/values -- assumes model parameters are
                # trusted; never feed untrusted input through here.
                for p in input_params:
                    exec('input.%s = "%s"' % (p, evaluate(input_params[p], nl_model.parameters)))
    for c in nl_model.cells:
        if c.neuroml2_source_file:
            incl = neuroml.IncludeType(_locate_file(c.neuroml2_source_file, base_dir))
            found_cell = False
            for cell in nml_doc.cells:
                if cell.id == c.id:
                    nml_doc.cells.remove(cell)  # Better to use imported cell file; will have channels, etc.
                    nml_doc.includes.append(incl)
                    found_cell = True
            if not found_cell:
                # NOTE(review): this population scan has no effect (body is
                # `pass`) -- presumably a placeholder; confirm intent.
                for p in nl_model.populations:
                    if p.component == c.id:
                        pass
                if not incl in nml_doc.includes:
                    nml_doc.includes.append(incl)
            ''' Needed???
            if c.lems_source_file:
                incl = neuroml.IncludeType(_locate_file(c.lems_source_file, base_dir))
                if not incl in nml_doc.includes:
                    nml_doc.includes.append(incl)'''
        if c.neuroml2_cell:
            cell_params = c.parameters if c.parameters else {}
            # TODO make more generic...
            if c.neuroml2_cell.lower() == 'spikegenerator':
                cell = neuroml.SpikeGenerator(id=c.id)
                nml_doc.spike_generators.append(cell)
            elif c.neuroml2_cell.lower() == 'spikegeneratorpoisson':
                cell = neuroml.SpikeGeneratorPoisson(id=c.id)
                nml_doc.spike_generator_poissons.append(cell)
            elif c.neuroml2_cell.lower() == 'spikegeneratorrefpoisson':
                cell = neuroml.SpikeGeneratorRefPoisson(id=c.id)
                nml_doc.spike_generator_ref_poissons.append(cell)
            else:
                raise Exception('The neuroml2_cell: %s is not yet supported...'%c.neuroml2_cell)
            # Same exec-based parameter assignment caveat as for inputs above.
            for p in cell_params:
                exec('cell.%s = "%s"' % (p, evaluate(cell_params[p], nl_model.parameters)))
    # Include source files for any synapses not already in the document.
    for s in nl_model.synapses:
        if nml_doc.get_by_id(s.id) == None:
            if s.neuroml2_source_file:
                incl = neuroml.IncludeType(_locate_file(s.neuroml2_source_file, base_dir))
                if not incl in nml_doc.includes:
                    nml_doc.includes.append(incl)
    # Look for and add the PyNN based elements to the NeuroMLDocument
    _extract_pynn_components_to_neuroml(nl_model, nml_doc)
    if print_summary:
        # Print info
        print_v(nml_doc.summary())
    # Save to file
    if target_dir == None:
        target_dir = base_dir
    if format == 'xml':
        if not nml_file_name:
            nml_file_name = _locate_file('%s.net.nml' % nml_doc.id, target_dir)
        from neuroml.writers import NeuroMLWriter
        NeuroMLWriter.write(nml_doc, nml_file_name)
    if format == 'hdf5':
        if not nml_file_name:
            nml_file_name = _locate_file('%s.net.nml.h5' % nml_doc.id, target_dir)
        from neuroml.writers import NeuroMLHdf5Writer
        NeuroMLHdf5Writer.write(nml_doc, nml_file_name)
    print_v("Written NeuroML to %s" % nml_file_name)
    if validate and format == 'xml':
        from pyneuroml import pynml
        success = pynml.validate_neuroml2(nml_file_name, verbose_validate=False)
        if success:
            print_v('Generated file is valid NeuroML2!')
        else:
            print_v('Generated file is NOT valid NeuroML2!')
    return nml_file_name, nml_doc
|
Generate and save NeuroML2 file (in either XML or HDF5 format) from the
NeuroMLlite description
|
def write_list(path_out, image_list):
    """Helper function to write an image list into a file.

    Each output line has the form
        integer_image_index \t float_label_index(s) \t path_to_image
    (the blanks around the tabs above are only for readability).

    Parameters
    ----------
    path_out: string
    image_list: list
    """
    with open(path_out, 'w') as fout:
        for item in image_list:
            # item[0] is the index, item[1] the path, item[2:] the labels.
            fields = ['%d' % item[0]]
            fields += ['%f' % label for label in item[2:]]
            fields.append('%s' % item[1])
            fout.write('\t'.join(fields) + '\n')
|
Helper function to write image list into the file.
The format is as below,
integer_image_index \t float_label_index \t path_to_image
Note that the blank between number and tab is only used for readability.
Parameters
----------
path_out: string
image_list: list
|
def tag(self, value):
    """The name of the program that generated the log message.
    The tag can only contain alphanumeric
    characters. If the tag is longer than {MAX_TAG_LEN} characters
    it will be truncated automatically.
    """
    # Default to the running script's name, then enforce the length cap.
    chosen = sys.argv[0] if value is None else value
    self._tag = chosen[:self.MAX_TAG_LEN]
|
The name of the program that generated the log message.
The tag can only contain alphanumeric
characters. If the tag is longer than {MAX_TAG_LEN} characters
it will be truncated automatically.
|
def get_record(self, msg_id):
    """Get a specific Task Record, by msg_id."""
    record = self._records.find_one({'msg_id': msg_id})
    if record:
        return record
    # find_one yields a falsy value when nothing matches.
    raise KeyError(msg_id)
|
Get a specific Task Record, by msg_id.
|
def _non_framed_body_length(header, plaintext_length):
    """Calculates the length of a non-framed message body, given a complete header.

    :param header: Complete message header object
    :type header: aws_encryption_sdk.structures.MessageHeader
    :param int plaintext_length: Length of plaintext in bytes
    :rtype: int
    """
    algorithm = header.algorithm
    # IV + 8-byte encrypted content length field + ciphertext + auth tag.
    return algorithm.iv_len + 8 + plaintext_length + algorithm.auth_len
|
Calculates the length of a non-framed message body, given a complete header.
:param header: Complete message header object
:type header: aws_encryption_sdk.structures.MessageHeader
:param int plaintext_length: Length of plaintext in bytes
:rtype: int
|
def hdrval(cls):
    """Construct dictionary mapping display column title to
    IterationStats entries.
    """
    return {'Itn': 'Iter',
            'X r': 'PrimalRsdl',
            'X s': 'DualRsdl',
            u('X ρ'): 'Rho',
            'D cnstr': 'Cnstr',
            'D dlt': 'DeltaD',
            u('D η'): 'Eta'}
|
Construct dictionary mapping display column title to
IterationStats entries.
|
def _merge_nested_if_from_else(self, ifStm: "IfContainer"):
    """
    Merge a nested IfContainer from the else branch into this IfContainer
    as additional elif branches, adopting its else branch as our own.
    """
    # The nested if's main branch becomes our next elif, followed by
    # all of its own elifs.
    merged_branches = [(ifStm.cond, ifStm.ifTrue)] + list(ifStm.elIfs)
    self.elIfs.extend(merged_branches)
    self.ifFalse = ifStm.ifFalse
|
Merge nested IfContainer from else branch to this IfContainer
as elif and else branches
|
def dskstl(keywrd, dpval):
    """
    Set the value of a specified DSK tolerance or margin parameter.
    https://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/dskstl_c.html

    :param keywrd: Code specifying parameter to set.
    :type keywrd: int
    :param dpval: Value of parameter.
    :type dpval: float
    :return:
    """
    # Marshal arguments into C types before crossing into CSPICE.
    c_keywrd = ctypes.c_int(keywrd)
    c_dpval = ctypes.c_double(dpval)
    libspice.dskstl_c(c_keywrd, c_dpval)
|
Set the value of a specified DSK tolerance or margin parameter.
https://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/dskstl_c.html
:param keywrd: Code specifying parameter to set.
:type keywrd: int
:param dpval: Value of parameter.
:type dpval: float
:return:
|
def remove(self, member):
    """Remove member, raising KeyError if it was not present."""
    removed = self.client.zrem(self.name, member)
    if not removed:
        raise KeyError(member)
|
Remove member.
|
def header_output(self):
    '''Render only the cookie key-value pairs as a header string.
    e.g.: HISTORY=21341; PHPSESSION=3289012u39jsdijf28; token=233129
    '''
    return '; '.join(key + '=' + self.get(key).value for key in self.keys())
|
只输出cookie的key-value字串.
比如: HISTORY=21341; PHPSESSION=3289012u39jsdijf28; token=233129
|
def as_alias_handler(alias_list):
    """Returns a list of all the names that will be called."""
    # Prefer the "as" name when present, otherwise the original name.
    return [alias.asname if alias.asname else alias.name
            for alias in alias_list]
|
Returns a list of all the names that will be called.
|
def loads(cls, data, store_password, try_decrypt_keys=True):
    """
    See :meth:`jks.jks.KeyStore.loads`.

    :param bytes data: Byte string representation of the keystore to be loaded.
    :param str store_password: Keystore password string
    :param bool try_decrypt_keys: Whether to automatically try to decrypt any encountered key entries using the same password
                                  as the keystore password.
    :returns: A loaded :class:`UberKeyStore` instance, if the keystore could be successfully parsed and the supplied store password is correct.
              If the ``try_decrypt_keys`` parameter was set to ``True``, any keys that could be successfully decrypted using the
              store password have already been decrypted; otherwise, no attempt to decrypt any key entries is made.
    :raises BadKeystoreFormatException: If the keystore is malformed in some way
    :raises UnsupportedKeystoreVersionException: If the keystore contains an unknown format version number
    :raises KeystoreSignatureException: If the keystore signature could not be verified using the supplied store password
    :raises DecryptionFailureException: If the keystore contents could not be decrypted using the supplied store password
    :raises DuplicateAliasException: If the keystore contains duplicate aliases
    """
    # Uber keystores contain the same entry data as BKS keystores, except they wrap it differently:
    #   BKS  = BKS_store || HMAC-SHA1(BKS_store)
    #   UBER = PBEWithSHAAndTwofish-CBC(BKS_store || SHA1(BKS_store))
    #
    # where BKS_store represents the entry format shared by both keystore types.
    #
    # The Twofish key size is 256 bits, the PBE key derivation scheme is that as outlined by PKCS#12 (RFC 7292),
    # and the padding scheme for the Twofish cipher is PKCS#7.
    try:
        pos = 0
        # Header: 4-byte version number; only v1 is understood.
        version = b4.unpack_from(data, pos)[0]; pos += 4
        if version != 1:
            raise UnsupportedKeystoreVersionException('Unsupported UBER keystore version; only v1 supported, found v'+repr(version))
        # PBE parameters: length-prefixed salt followed by iteration count.
        salt, pos = cls._read_data(data, pos)
        iteration_count = b4.unpack_from(data, pos)[0]; pos += 4
        # Everything remaining is the encrypted BKS store.
        encrypted_bks_store = data[pos:]
        try:
            decrypted = rfc7292.decrypt_PBEWithSHAAndTwofishCBC(encrypted_bks_store, store_password, salt, iteration_count)
        except BadDataLengthException as e:
            raise BadKeystoreFormatException("Bad UBER keystore format: %s" % str(e))
        except BadPaddingException as e:
            # Bad PKCS#7 padding after decryption almost always means the
            # supplied password was wrong.
            raise DecryptionFailureException("Failed to decrypt UBER keystore: bad password?")
        # Note: we can assume that the hash must be present at the last 20 bytes of the decrypted data (i.e. without first
        # parsing through to see where the entry data actually ends), because valid UBER keystores generators should not put
        # any trailing bytes after the hash prior to encrypting.
        hash_fn = hashlib.sha1
        hash_digest_size = hash_fn().digest_size
        bks_store = decrypted[:-hash_digest_size]
        bks_hash = decrypted[-hash_digest_size:]
        if len(bks_hash) != hash_digest_size:
            raise BadKeystoreFormatException("Insufficient signature bytes; found %d bytes, expected %d bytes" % (len(bks_hash), hash_digest_size))
        # Integrity check: SHA-1 over the plaintext store must equal the
        # trailing digest; a mismatch also points at a wrong password.
        if hash_fn(bks_store).digest() != bks_hash:
            raise KeystoreSignatureException("Hash mismatch; incorrect keystore password?")
        store_type = "uber"
        entries, size = cls._load_bks_entries(bks_store, store_type, store_password, try_decrypt_keys=try_decrypt_keys)
        return cls(store_type, entries, version=version)
    except struct.error as e:
        # Raised by the fixed-size unpack helpers on truncated input.
        raise BadKeystoreFormatException(e)
|
See :meth:`jks.jks.KeyStore.loads`.
:param bytes data: Byte string representation of the keystore to be loaded.
:param str password: Keystore password string
:param bool try_decrypt_keys: Whether to automatically try to decrypt any encountered key entries using the same password
as the keystore password.
:returns: A loaded :class:`UberKeyStore` instance, if the keystore could be successfully parsed and the supplied store password is correct.
If the ``try_decrypt_keys`` parameters was set to ``True``, any keys that could be successfully decrypted using the
store password have already been decrypted; otherwise, no attempt to decrypt any key entries is made.
:raises BadKeystoreFormatException: If the keystore is malformed in some way
:raises UnsupportedKeystoreVersionException: If the keystore contains an unknown format version number
:raises KeystoreSignatureException: If the keystore signature could not be verified using the supplied store password
:raises DecryptionFailureException: If the keystore contents could not be decrypted using the supplied store password
:raises DuplicateAliasException: If the keystore contains duplicate aliases
|
def _get_disk_size(self, device):
    '''
    Get a size of a disk.
    '''
    out = __salt__['cmd.run_all']("df {0}".format(device))
    if out['retcode']:
        msg = "Disk size info error: {0}".format(out['stderr'])
        log.error(msg)
        raise SIException(msg)
    # The last line of `df` output carries the values; drop the empty
    # fields produced by the column-aligned (multi-space) formatting.
    last_line = out['stdout'].split(os.linesep)[-1]
    fields = [field for field in last_line.split(" ") if field]
    devpath, blocks, used, available, used_p, mountpoint = fields
    return {
        'device': devpath, 'blocks': blocks, 'used': used,
        'available': available, 'used (%)': used_p, 'mounted': mountpoint,
    }
|
Get a size of a disk.
|
def folderitem(self, obj, item, index):
    """Service triggered each time an item is iterated in folderitems.
    The use of this service prevents the extra-loops in child objects.

    :obj: the instance of the class to be foldered
    :item: dict containing the properties of the object to be used by
        the template
    :index: current index of the item
    """
    # Full name: plain value plus a clickable link when present.
    fullname = obj.getFullname()
    item["Fullname"] = fullname or ""
    if fullname:
        item["replace"]["Fullname"] = get_link(
            obj.absolute_url(), value=fullname)

    default_department = obj.getDefaultDepartment()
    if default_department:
        item["replace"]["DefaultDepartment"] = get_link(
            default_department.absolute_url(),
            value=default_department.Title())

    departments = obj.getDepartments()
    if departments:
        links = [get_link(dept.absolute_url(), value=dept.Title(),
                          css_class="link")
                 for dept in departments]
        item["replace"]["Departments"] = ", ".join(links)

    email = obj.getEmailAddress()
    if email:
        item["EmailAddress"] = obj.getEmailAddress()
        item["replace"]["EmailAddress"] = get_email_link(
            email, value=email)

    item["BusinessPhone"] = obj.getBusinessPhone()
    item["Fax"] = obj.getBusinessFax()
    item["MobilePhone"] = obj.getMobilePhone()
    return item
|
Service triggered each time an item is iterated in folderitems.
The use of this service prevents the extra-loops in child objects.
:obj: the instance of the class to be foldered
:item: dict containing the properties of the object to be used by
the template
:index: current index of the item
|
def poll_parser(poll):
    """
    Parses a poll object
    """
    if __is_deleted(poll):
        return deleted_parser(poll)
    if poll['type'] not in poll_types:
        raise Exception('Not a poll type')
    # 'kids' and 'parts' may be absent (poll and pollopt differ here).
    kids = __check_key('kids', poll)
    parts = __check_key('parts', poll)
    return Poll(
        poll['id'],
        poll['by'],
        kids,
        parts,
        poll['score'],
        poll['text'],
        poll['time'],
        poll['title'],
        poll['type'],
    )
|
Parses a poll object
|
def get_css(self):
    """ Fetches and returns stylesheet file path or contents, for both
        print and screen contexts, depending if we want a standalone
        presentation or not.
    """
    css = {}

    # The print stylesheet is mandatory; fall back to the default theme.
    print_css = os.path.join(self.theme_dir, 'css', 'print.css')
    if not os.path.exists(print_css):
        print_css = os.path.join(THEMES_DIR, 'default', 'css', 'print.css')
        if not os.path.exists(print_css):
            raise IOError(u"Cannot find css/print.css in default theme")
    with codecs.open(print_css, encoding=self.encoding) as css_file:
        css['print'] = {
            'path_url': utils.get_path_url(print_css, self.relative),
            'contents': css_file.read(),
        }

    # The screen stylesheet is optional; warn when it is missing.
    screen_css = os.path.join(self.theme_dir, 'css', 'screen.css')
    if os.path.exists(screen_css):
        with codecs.open(screen_css, encoding=self.encoding) as css_file:
            css['screen'] = {
                'path_url': utils.get_path_url(screen_css, self.relative),
                'contents': css_file.read(),
            }
    else:
        self.log(u"No screen stylesheet provided in current theme",
                 'warning')
    return css
|
Fetches and returns stylesheet file path or contents, for both
print and screen contexts, depending if we want a standalone
presentation or not.
|
def load_toml_validator_config(filename):
    """Returns a ValidatorConfig created by loading a TOML file from the
    filesystem.

    :param filename: path to the TOML config file; a default-constructed
        ValidatorConfig is returned when the file does not exist.
    :raises LocalConfigurationError: if the file cannot be read or contains
        keys outside the known whitelist.
    """
    if not os.path.exists(filename):
        LOGGER.info(
            "Skipping validator config loading from non-existent config file:"
            " %s", filename)
        return ValidatorConfig()
    LOGGER.info("Loading validator information from config: %s", filename)
    try:
        with open(filename) as fd:
            raw_config = fd.read()
    except IOError as e:
        raise LocalConfigurationError(
            "Unable to load validator configuration file: {}".format(str(e)))
    toml_config = toml.loads(raw_config)
    # Reject any keys outside the known whitelist so that typos in the
    # config file fail loudly instead of being silently ignored.
    invalid_keys = set(toml_config.keys()).difference(
        ['bind', 'endpoint', 'peering', 'seeds', 'peers', 'network_public_key',
         'network_private_key', 'scheduler', 'permissions', 'roles',
         'opentsdb_url', 'opentsdb_db', 'opentsdb_username',
         'opentsdb_password', 'minimum_peer_connectivity',
         'maximum_peer_connectivity', 'state_pruning_block_depth',
         'fork_cache_keep_time',
         'component_thread_pool_workers', 'network_thread_pool_workers',
         'signature_thread_pool_workers'])
    if invalid_keys:
        raise LocalConfigurationError(
            "Invalid keys in validator config: "
            "{}".format(", ".join(sorted(list(invalid_keys)))))
    bind_network = None
    bind_component = None
    bind_consensus = None
    # Each bind entry is a string like "network:tcp://host:port"; the
    # substring test selects the entry and everything after the first
    # colon is taken as the address.
    for bind in toml_config.get("bind", []):
        if "network" in bind:
            bind_network = bind[bind.find(":") + 1:]
        if "component" in bind:
            bind_component = bind[bind.find(":") + 1:]
        if "consensus" in bind:
            bind_consensus = bind[bind.find(":") + 1:]
    # ZMQ key material is handled as bytes, hence the encode() calls.
    network_public_key = None
    network_private_key = None
    if toml_config.get("network_public_key") is not None:
        network_public_key = toml_config.get("network_public_key").encode()
    if toml_config.get("network_private_key") is not None:
        network_private_key = toml_config.get("network_private_key").encode()
    # Unset options are passed as None so ValidatorConfig applies defaults.
    config = ValidatorConfig(
        bind_network=bind_network,
        bind_component=bind_component,
        bind_consensus=bind_consensus,
        endpoint=toml_config.get("endpoint", None),
        peering=toml_config.get("peering", None),
        seeds=toml_config.get("seeds", None),
        peers=toml_config.get("peers", None),
        network_public_key=network_public_key,
        network_private_key=network_private_key,
        scheduler=toml_config.get("scheduler", None),
        permissions=parse_permissions(toml_config.get("permissions", None)),
        roles=toml_config.get("roles", None),
        opentsdb_url=toml_config.get("opentsdb_url", None),
        opentsdb_db=toml_config.get("opentsdb_db", None),
        opentsdb_username=toml_config.get("opentsdb_username", None),
        opentsdb_password=toml_config.get("opentsdb_password", None),
        minimum_peer_connectivity=toml_config.get(
            "minimum_peer_connectivity", None),
        maximum_peer_connectivity=toml_config.get(
            "maximum_peer_connectivity", None),
        state_pruning_block_depth=toml_config.get(
            "state_pruning_block_depth", None),
        fork_cache_keep_time=toml_config.get(
            "fork_cache_keep_time", None),
        component_thread_pool_workers=toml_config.get(
            "component_thread_pool_workers", None),
        network_thread_pool_workers=toml_config.get(
            "network_thread_pool_workers", None),
        signature_thread_pool_workers=toml_config.get(
            "signature_thread_pool_workers", None)
    )
    return config
|
Returns a ValidatorConfig created by loading a TOML file from the
filesystem.
|
def open_file(self, fname, external=False):
    """Dispatch *fname* to the widget that can handle it.

    Text files go to the editor, importable data files to the variable
    explorer, and any other file is handed to the OS default application
    unless *external* is True (in which case unsupported files are
    silently ignored).
    """
    path = to_text_string(fname)
    extension = osp.splitext(path)[1]
    if encoding.is_text_file(path):
        self.editor.load(path)
        return
    can_import = self.variableexplorer is not None and extension in IMPORT_EXT
    if can_import:
        self.variableexplorer.import_data(path)
    elif not external:
        # Hand the file to the operating system's default handler.
        programs.start_file(file_uri(path))
|
Open filename with the appropriate application
Redirect to the right widget (txt -> editor, spydata -> workspace, ...)
or open file outside Spyder (if extension is not supported)
|
def round(self, value_array):
    """Clamp the single value in *value_array* to this domain.

    Returns the value unchanged when it lies inside ``self.domain``;
    otherwise returns the nearer bound. Expects a 1d, single-element
    sequence and returns a single-element list.
    """
    lower = self.domain[0]
    upper = self.domain[1]
    value = value_array[0]
    if value < lower:
        return [lower]
    if value > upper:
        return [upper]
    return [value]
|
If value falls within bounds, just return it
otherwise return min or max, whichever is closer to the value
Assumes an 1d array with a single element as an input.
|
def get_binfo(self):
    """
    Fetch a node's build information.

    node - the node whose sources will be collected
    cache - alternate node to use for the signature cache
    returns - the build signature

    This no longer handles the recursive descent of the
    node's children's signatures. We expect that they're
    already built and updated by someone else, if that's
    what's wanted.
    """
    # Return the cached build info when it has already been computed.
    try:
        return self.binfo
    except AttributeError:
        pass
    binfo = self.new_binfo()
    # Cache immediately so any recursive lookup sees the (partial) result.
    self.binfo = binfo
    executor = self.get_executor()
    ignore_set = self.ignore_set
    if self.has_builder():
        # Record the builder action string and its content signature.
        binfo.bact = str(executor)
        binfo.bactsig = SCons.Util.MD5signature(executor.get_contents())
    if self._specific_sources:
        sources = [ s for s in self.sources if not s in ignore_set]
    else:
        sources = executor.get_unignored_sources(self, self.ignore)
    seen = set()
    # Deduplicate while preserving order: set.add returns None, so the
    # "not seen.add(s)" clause is always truthy and only mutates `seen`.
    binfo.bsources = [s for s in sources if s not in seen and not seen.add(s)]
    binfo.bsourcesigs = [s.get_ninfo() for s in binfo.bsources]
    binfo.bdepends = self.depends
    binfo.bdependsigs = [d.get_ninfo() for d in self.depends if d not in ignore_set]
    binfo.bimplicit = self.implicit or []
    binfo.bimplicitsigs = [i.get_ninfo() for i in binfo.bimplicit if i not in ignore_set]
    return binfo
|
Fetch a node's build information.
node - the node whose sources will be collected
cache - alternate node to use for the signature cache
returns - the build signature
This no longer handles the recursive descent of the
node's children's signatures. We expect that they're
already built and updated by someone else, if that's
what's wanted.
|
def add_equad(psr, equad, flagid=None, flags=None, seed=None):
    """Add quadrature (EQUAD) noise of rms `equad` [s] to the pulsar TOAs.

    When `flags` and `flagid` are supplied together with a vector
    `equad`, each flag value selects the observations that receive the
    matching rms; otherwise a scalar `equad` applies to every
    observation. Optionally take a pseudorandom-number-generator seed.
    """
    if seed is not None:
        N.random.seed(seed)
    nobs = psr.nobs
    # Per-observation rms vector, filled in below.
    equadvec = N.zeros(nobs)
    if flags is None:
        if not N.isscalar(equad):
            raise ValueError('ERROR: If flags is None, equad must be a scalar')
        equadvec = equad * N.ones(nobs)
    elif flagid is not None and not N.isscalar(equad):
        if len(equad) == len(flags):
            for idx, flag in enumerate(flags):
                mask = flag == N.array(psr.flagvals(flagid))
                equadvec[mask] = equad[idx]
    psr.stoas[:] += (equadvec / day) * N.random.randn(nobs)
|
Add quadrature noise of rms `equad` [s].
Optionally take a pseudorandom-number-generator seed.
|
def set_is_immediate(self, value):
    """
    Setter for 'is_immediate' field.

    :param value - a new value of 'is_immediate' field. Must be a boolean
        type, or None to clear the field.
    :raises TypeError: if value is neither None nor a bool.
    """
    # None is explicitly allowed so the field can be cleared.
    if value is not None and not isinstance(value, bool):
        # Fixed typo in the message ("IsImediate" -> "IsImmediate").
        raise TypeError("IsImmediate must be set to a bool")
    self.__is_immediate = value
|
Setter for 'is_immediate' field.
:param value - a new value of 'is_immediate' field. Must be a boolean type.
|
def _from_dict(cls, _dict):
"""Initialize a QueryResult object from a json dictionary."""
args = {}
xtra = _dict.copy()
if 'id' in _dict:
args['id'] = _dict.get('id')
del xtra['id']
if 'metadata' in _dict:
args['metadata'] = _dict.get('metadata')
del xtra['metadata']
if 'collection_id' in _dict:
args['collection_id'] = _dict.get('collection_id')
del xtra['collection_id']
if 'result_metadata' in _dict:
args['result_metadata'] = QueryResultMetadata._from_dict(
_dict.get('result_metadata'))
del xtra['result_metadata']
if 'title' in _dict:
args['title'] = _dict.get('title')
del xtra['title']
args.update(xtra)
return cls(**args)
|
Initialize a QueryResult object from a json dictionary.
|
def postinit(self, args, body):
    """Complete initialisation started in ``__init__``.

    :param args: The arguments that the function takes.
    :type args: Arguments

    :param body: The contents of the function body.
    :type body: list(NodeNG)
    """
    self.args, self.body = args, body
|
Do some setup after initialisation.
:param args: The arguments that the function takes.
:type args: Arguments
:param body: The contents of the function body.
:type body: list(NodeNG)
|
def widgets(self):
    """Build one Highcharts widget per most-visited-pages chart."""
    return [
        Widget(html_id='most_visited_chart_%d' % index,
               content=json.dumps(chart),
               template='meerkat/widgets/highcharts.html',
               js_code=['plotOptions.tooltip.pointFormatter'])
        for index, chart in enumerate(most_visited_pages_charts())
    ]
|
Get the items.
|
def _maketicks_selected(self, plt, branches):
    """
    utility private method to add ticks to a band structure with selected branches

    Sets the x-ticks/labels on *plt* for the chosen *branches* only and
    returns (plt, shift) where ``shift`` gives, per selected branch, the
    distance offset between the original and the re-packed x axis.
    """
    ticks = self.get_ticks()
    distance = []
    label = []
    rm_elems = []
    # Drop ticks whose label repeats the previous one.
    for i in range(1, len(ticks['distance'])):
        if ticks['label'][i] == ticks['label'][i - 1]:
            rm_elems.append(i)
    for i in range(len(ticks['distance'])):
        if i not in rm_elems:
            distance.append(ticks['distance'][i])
            label.append(ticks['label'][i])
    # Length of each branch segment between surviving ticks.
    l_branches = [distance[i] - distance[i - 1] for i in
                  range(1, len(distance))]
    n_distance = []
    n_label = []
    # Collect per-branch lengths and endpoint labels, stripping the
    # "$\mid$" junction markers off shared endpoints.
    for branch in branches:
        n_distance.append(l_branches[branch])
        if ("$\\mid$" not in label[branch]) and (
                "$\\mid$" not in label[branch + 1]):
            n_label.append([label[branch], label[branch + 1]])
        elif ("$\\mid$" in label[branch]) and (
                "$\\mid$" not in label[branch + 1]):
            n_label.append(
                [label[branch].split("$")[-1], label[branch + 1]])
        elif ("$\\mid$" not in label[branch]) and (
                "$\\mid$" in label[branch + 1]):
            n_label.append([label[branch], label[branch + 1].split("$")[0]])
        else:
            n_label.append([label[branch].split("$")[-1],
                            label[branch + 1].split("$")[0]])
    # Re-pack the selected branches end to end, merging endpoints that
    # coincide and joining disjoint ones with a "$\mid$" marker.
    f_distance = []
    rf_distance = []
    f_label = []
    f_label.append(n_label[0][0])
    f_label.append(n_label[0][1])
    f_distance.append(0.0)
    f_distance.append(n_distance[0])
    rf_distance.append(0.0)
    rf_distance.append(n_distance[0])
    length = n_distance[0]
    for i in range(1, len(n_distance)):
        if n_label[i][0] == n_label[i - 1][1]:
            f_distance.append(length)
            f_distance.append(length + n_distance[i])
            f_label.append(n_label[i][0])
            f_label.append(n_label[i][1])
        else:
            f_distance.append(length + n_distance[i])
            f_label[-1] = n_label[i - 1][1] + "$\\mid$" + n_label[i][0]
            f_label.append(n_label[i][1])
        rf_distance.append(length + n_distance[i])
        length += n_distance[i]
    n_ticks = {'distance': f_distance, 'label': f_label}
    # Keep only the first of each run of identical labels for the axis.
    uniq_d = []
    uniq_l = []
    temp_ticks = list(zip(n_ticks['distance'], n_ticks['label']))
    for i in range(len(temp_ticks)):
        if i == 0:
            uniq_d.append(temp_ticks[i][0])
            uniq_l.append(temp_ticks[i][1])
            # NOTE(review): the l=/d= arguments look swapped here
            # ([0] is the distance, [1] the label); log text only.
            logger.debug("Adding label {l} at {d}".format(
                l=temp_ticks[i][0], d=temp_ticks[i][1]))
        else:
            if temp_ticks[i][1] == temp_ticks[i - 1][1]:
                logger.debug("Skipping label {i}".format(
                    i=temp_ticks[i][1]))
            else:
                logger.debug("Adding label {l} at {d}".format(
                    l=temp_ticks[i][0], d=temp_ticks[i][1]))
                uniq_d.append(temp_ticks[i][0])
                uniq_l.append(temp_ticks[i][1])
    logger.debug("Unique labels are %s" % list(zip(uniq_d, uniq_l)))
    plt.gca().set_xticks(uniq_d)
    plt.gca().set_xticklabels(uniq_l)
    # Draw a vertical line at every labeled tick position.
    for i in range(len(n_ticks['label'])):
        if n_ticks['label'][i] is not None:
            # don't print the same label twice
            if i != 0:
                if n_ticks['label'][i] == n_ticks['label'][i - 1]:
                    logger.debug("already print label... "
                                 "skipping label {i}".format(
                                     i=n_ticks['label'][i]))
                else:
                    logger.debug("Adding a line at {d}"
                                 " for label {l}".format(
                                     d=n_ticks['distance'][i], l=n_ticks['label'][i]))
                    plt.axvline(n_ticks['distance'][i], color='k')
            else:
                logger.debug("Adding a line at {d} for label {l}".format(
                    d=n_ticks['distance'][i], l=n_ticks['label'][i]))
                plt.axvline(n_ticks['distance'][i], color='k')
    # Offset between the original distances and the re-packed axis.
    shift = []
    br = -1
    for branch in branches:
        br += 1
        shift.append(distance[branch] - rf_distance[br])
    return plt, shift
|
utility private method to add ticks to a band structure with selected branches
|
def find_method_params(self):
    """Return the method params

    :returns: tuple (args, kwargs) that will be passed as *args, **kwargs
    """
    info = self.request.controller_info
    return info["method_args"], info["method_kwargs"]
|
Return the method params
:returns: tuple (args, kwargs) that will be passed as *args, **kwargs
|
def get_active_trips_df(trip_times: DataFrame) -> pd.Series:
    """
    Count the number of trips in ``trip_times`` that are active
    at any given time.

    Parameters
    ----------
    trip_times : DataFrame
        Contains columns

        - start_time: start time of the trip in seconds past midnight
        - end_time: end time of the trip in seconds past midnight

    Returns
    -------
    Series
        index is times from midnight when trips start and end,
        values are number of active trips for that time

    Notes
    -----
    The return annotation previously said ``DataFrame``; the function
    actually returns a Series (as the docstring states), so the
    annotation has been corrected.
    """
    # +1 at each departure and -1 at each arrival; summing duplicates
    # per timestamp and taking the running total yields the number of
    # simultaneously active trips at every event time.
    events = pd.concat(
        [
            pd.Series(1, trip_times.start_time),  # departed add 1
            pd.Series(-1, trip_times.end_time),  # arrived subtract 1
        ]
    )
    return events.groupby(level=0, sort=True).sum().cumsum().ffill()
|
Count the number of trips in ``trip_times`` that are active
at any given time.
Parameters
----------
trip_times : DataFrame
Contains columns
- start_time: start time of the trip in seconds past midnight
- end_time: end time of the trip in seconds past midnight
Returns
-------
Series
index is times from midnight when trips start and end,
values are number of active trips for that time
|
def connect_job(job_id,
                deployment_name,
                token_manager=None,
                app_url=defaults.APP_URL,
                persist=False,
                websocket=None,
                data_url=None):
    """
    connect to a running Juttle program by job_id

    Generator: unless *persist* is set, yields each channel payload
    received over the websocket until the job reports 'job_end', then
    closes the websocket. Heartbeat pings are answered and the access
    token is refreshed transparently when it expires.
    """
    # NOTE(review): '== None' comparisons below should idiomatically be
    # 'is None'; behavior is the same for the expected inputs.
    if data_url == None:
        data_url = get_data_url_for_job(job_id,
                                        deployment_name,
                                        token_manager=token_manager,
                                        app_url=app_url)
    if websocket == None:
        websocket = __wss_connect(data_url,
                                  token_manager,
                                  job_id=job_id)
    # Canned heartbeat response, serialized once up front.
    pong = json.dumps({
        'pong': True
    })
    if not persist:
        job_finished = False
        while not job_finished:
            try:
                data = websocket.recv()
                if data:
                    payload = json.loads(data)
                    if is_debug_enabled():
                        printable_payload = dict(payload)
                        if 'points' in payload:
                            # don't want to print out all the outputs when in
                            # debug mode
                            del printable_payload['points']
                            printable_payload['points'] = 'NOT SHOWN'
                        debug('received %s' % json.dumps(printable_payload))
                    if 'ping' in payload.keys():
                        # ping/pong (ie heartbeat) mechanism
                        websocket.send(pong)
                        if is_debug_enabled():
                            debug('sent %s' % json.dumps(pong))
                    if 'job_end' in payload.keys() and payload['job_end'] == True:
                        job_finished = True
                    if token_manager.is_access_token_expired():
                        debug('refreshing access token')
                        token_obj = {
                            "accessToken": token_manager.get_access_token()
                        }
                        # refresh authentication token
                        websocket.send(json.dumps(token_obj))
                    if 'error' in payload:
                        if payload['error'] == 'NONEXISTENT-JOB':
                            raise JutException('Job "%s" no longer running' % job_id)
                    # return all channel messages
                    yield payload
                else:
                    # Empty frame: treat as a dropped connection and
                    # fall into the reconnect logic below.
                    debug('payload was "%s", forcing websocket reconnect' % data)
                    raise IOError()
            except IOError:
                if is_debug_enabled():
                    traceback.print_exc()
                #
                # We'll retry for just under 30s since internally we stop
                # running non persistent programs after 30s of not heartbeating
                # with the client
                #
                retry = 1
                while retry <= 5:
                    try:
                        debug('network error reconnecting to job %s, '
                              'try %s of 5' % (job_id, retry))
                        websocket = __wss_connect(data_url, token_manager, job_id=job_id)
                        break
                    except socket.error:
                        if is_debug_enabled():
                            traceback.print_exc()
                        retry += 1
                        time.sleep(5)
                # NOTE(review): this runs even after a successful break
                # above, reconnecting one extra time — confirm intended.
                debug('network error reconnecting to job %s, '
                      'try %s of 5' % (job_id, retry))
                websocket = __wss_connect(data_url, token_manager, job_id=job_id)
    websocket.close()
|
connect to a running Juttle program by job_id
|
def get(self, task_id):
    '''taobao.topats.result.get — fetch the result of an asynchronous task.

    Usage guide: http://open.taobao.com/doc/detail.htm?id=30

    - The task_id passed in must belong to the current appKey.
    - Only finished tasks carry a result payload; unfinished tasks come
      back with just the task id and its execution status.
    - Each sub-task result has the same structure as the corresponding
      single-call API (e.g. sub-tasks of taobao.topats.trades.fullinfo.get
      are Trade structures).
    '''
    request = TOPRequest('taobao.topats.result.get')
    request['task_id'] = task_id
    response = self.execute(request)
    self.create(response['task'])
    return self
|
taobao.topats.result.get 获取异步任务结果
使用指南:http://open.taobao.com/doc/detail.htm?id=30
- 1.此接口用于获取异步任务处理的结果,传入的task_id必需属于当前的appKey才可以
- 2.此接口只返回执行完成的任务结果,未执行完的返回结果里面不包含任务结果,只有任务id,执行状态
- 3.执行完成的每个task的子任务结果内容与单个任务的结果结构一致。如:taobao.topats.trades.fullinfo.get返回的子任务结果就会是Trade的结构体。
|
def get(self):
    """
    *download the image*

    Returns the ``covered`` flag unchanged when the position is not
    covered (``False``, ``999`` or ``"999"``); otherwise downloads the
    image and returns the flag.
    """
    self.log.info('starting the ``get`` method')
    # Removed unused locals (``ra``/``dec`` were read but never used).
    # Bail out early when the position is not covered by SDSS.
    if self.covered == False or self.covered == 999 or self.covered == "999":
        return self.covered
    self._download_sdss_image()
    self.log.info('completed the ``get`` method')
    return self.covered
|
*download the image*
|
def _two_to_one(datadir):
    """After this command, your environment will be converted to format version {}
    and will not work with Datacats versions beyond and including 1.0.0.
    This format version doesn't support multiple sites, and after this only your
    "primary" site will be usable, though other sites will be maintained if you
    wish to do a migration back to a version which supports multisite.
    Would you like to continue the migration? (y/n) [n]:"""
    # NOTE: this module is Python 2 (print statements below). The
    # docstring above doubles as the user-facing confirmation prompt,
    # so do not reword it casually.
    _, env_name = _split_path(datadir)
    print 'Making sure that containers are stopped...'
    # New-style names
    remove_container('datacats_web_{}_primary'.format(env_name))
    remove_container('datacats_postgres_{}_primary'.format(env_name))
    remove_container('datacats_solr_{}_primary'.format(env_name))
    print 'Doing conversion...'
    # Drop the old format-version marker; the target layout has none.
    if exists(path_join(datadir, '.version')):
        os.remove(path_join(datadir, '.version'))
    to_move = (['files', 'passwords.ini', 'run', 'solr'] +
               (['postgres'] if not is_boot2docker() else []))
    # Move the primary site's data from data/sites/primary back to data/.
    web_command(
        command=['/scripts/migrate.sh',
                 '/project/data/sites/primary',
                 '/project/data'] + to_move,
        ro={scripts.get_script_path('migrate.sh'): '/scripts/migrate.sh'},
        rw={datadir: '/project/data'}
    )
    pgdata_name = 'datacats_pgdata_{}_primary'.format(env_name)
    if is_boot2docker() and inspect_container(pgdata_name):
        rename_container(pgdata_name, 'datacats_pgdata_{}'.format(env_name))
    print 'Doing cleanup...'
    with open(path_join(datadir, 'project-dir')) as pd:
        datacats_env_location = path_join(pd.read(), '.datacats-environment')
    cp = SafeConfigParser()
    cp.read(datacats_env_location)
    # We need to move the port OUT of site_primary section and INTO datacats
    cp.set('datacats', 'port', cp.get('site_primary', 'port'))
    cp.remove_section('site_primary')
    with open(datacats_env_location, 'w') as config:
        cp.write(config)
    cp = SafeConfigParser()
    cp.read(path_join(datadir, 'passwords.ini'))
    # This isn't needed in this version
    cp.remove_option('passwords', 'beaker_session_secret')
    with open(path_join(datadir, 'passwords.ini'), 'w') as config:
        cp.write(config)
|
After this command, your environment will be converted to format version {}
and will not work with Datacats versions beyond and including 1.0.0.
This format version doesn't support multiple sites, and after this only your
"primary" site will be usable, though other sites will be maintained if you
wish to do a migration back to a version which supports multisite.
Would you like to continue the migration? (y/n) [n]:
|
def run(self):
    """
    Run the interactive window until the user quits
    """
    # pyglet.app.run() has known resize/close issues
    # (https://bitbucket.org/pyglet/pyglet/issues/199/attempting-to-resize-or-close-pyglet)
    # and forces an inversion of control, so drive the event loop with a
    # plain while loop instead.
    last_tick = time.time()
    while True:
        self._win.switch_to()
        self._win.dispatch_events()
        current = time.time()
        self._update(current - last_tick)
        last_tick = current
        self._draw()
        self._win.flip()
|
Run the interactive window until the user quits
|
def parse(cls, line, ns={}):
    """
    Parse an options specification, returning a dictionary with
    path keys and {'plot':<options>, 'style':<options>} values.
    """
    # NOTE(review): mutable default ``ns={}`` — safe only as long as
    # callers never mutate it; left unchanged here.
    parses = [p for p in cls.opts_spec.scanString(line)]
    if len(parses) != 1:
        raise SyntaxError("Invalid specification syntax.")
    else:
        # scanString yields (tokens, start, end); require the single
        # match to consume the whole line (modulo whitespace).
        e = parses[0][2]
        processed = line[:e]
        if (processed.strip() != line.strip()):
            raise SyntaxError("Failed to parse remainder of string: %r" % line[e:])
    grouped_paths = cls._group_paths_without_options(cls.opts_spec.parseString(line))
    parse = {}
    for pathspecs, group in grouped_paths:
        options = {}
        normalization = cls.process_normalization(group)
        if normalization is not None:
            options['norm'] = normalization
        if 'plot_options' in group:
            # Plot options use [...] bracket syntax.
            plotopts = group['plot_options'][0]
            opts = cls.todict(plotopts, 'brackets', ns=ns)
            options['plot'] = {cls.aliases.get(k,k):v for k,v in opts.items()}
        if 'style_options' in group:
            # Style options use (...) paren syntax.
            styleopts = group['style_options'][0]
            opts = cls.todict(styleopts, 'parens', ns=ns)
            options['style'] = {cls.aliases.get(k,k):v for k,v in opts.items()}
        for pathspec in pathspecs:
            parse[pathspec] = merge_option_dicts(parse.get(pathspec, {}), options)
    # Wrap each option group in an Options object, applying any path
    # deprecations along the way.
    return {
        cls.apply_deprecations(path): {
            option_type: Options(**option_pairs)
            for option_type, option_pairs in options.items()
        }
        for path, options in parse.items()
    }
|
Parse an options specification, returning a dictionary with
path keys and {'plot':<options>, 'style':<options>} values.
|
def unschedule(self, campaign_id):
    """
    Unschedule a scheduled campaign that hasn’t started sending.

    :param campaign_id: The unique id for the campaign.
    :type campaign_id: :py:class:`str`
    """
    self.campaign_id = campaign_id
    target = self._build_path(campaign_id, 'actions/unschedule')
    return self._mc_client._post(url=target)
|
Unschedule a scheduled campaign that hasn’t started sending.
:param campaign_id: The unique id for the campaign.
:type campaign_id: :py:class:`str`
|
def has_storage(func):
    """ Ensure that self/cls contains a Storage backend. """
    @wraps(func)
    def checked(*args, **kwargs):
        owner = args[0]
        # Missing or falsy `_storage` both count as "no backend".
        if not getattr(owner, '_storage', None):
            raise exceptions.ImproperConfigurationError(
                'No storage backend attached to schema <{0}>.'
                .format(owner._name.upper())
            )
        return func(*args, **kwargs)
    return checked
|
Ensure that self/cls contains a Storage backend.
|
def pixels_connectivity_compute(raster, i, j, idx):
    """Recursively collect pixels 8-connected to (i, j) with the same value.

    Every in-bounds neighbor in the 8-neighborhood of pixel (i, j) that
    holds the same raster value is appended to ``idx`` (if not already
    present) and expanded recursively, so ``idx`` ends up holding one
    connected component.

    Args:
        raster: A rasterfile stored pixels initial values.
        i: The pixel's x coord.
        j: The pixel's y coord.
        idx: List of [x, y] pairs already assigned to this component;
            extended in place.
    """
    nrows, ncols = raster.shape
    target = raster[i][j]
    offsets = [-1, 0, 1]
    for di in offsets:
        for dj in offsets:
            if di == 0 and dj == 0:
                continue  # skip the center pixel itself
            ni, nj = i + di, j + dj
            if not (0 <= ni < nrows and 0 <= nj < ncols):
                continue  # neighbor is outside the raster
            if raster[ni][nj] == target and [ni, nj] not in idx:
                idx.append([ni, nj])
                pixels_connectivity_compute(raster, ni, nj, idx)
|
Determine whether same-valued pixels are connected
Determine whether the [i, j] pixel of raster shares its value with any pixel
in its 8-neighborhood. Every same-valued neighbor is appended to the list idx
and expanded recursively; if the [i, j] pixel and its neighborhood do not
share a value, nothing is done.
Args:
raster: A rasterfile stored pixels initial values.
i: The pixel's x coord.
j: The pixel's y coord.
idx: A List stored pixels which have the same ID(means same that
these pixels have connectivity)
|
def _update_record_with_name(self, old_record, rtype, new_name, content):
"""Updates existing record and changes it's sub-domain name"""
new_type = rtype if rtype else old_record['type']
new_ttl = self._get_lexicon_option('ttl')
if new_ttl is None and 'ttl' in old_record:
new_ttl = old_record['ttl']
new_priority = self._get_lexicon_option('priority')
if new_priority is None and 'priority' in old_record:
new_priority = old_record['priority']
new_content = content
if new_content is None and 'content' in old_record:
new_content = old_record['content']
record = self._create_request_record(None,
new_type,
new_name,
new_content,
new_ttl,
new_priority)
# This will be a different domain name, so no name collision should
# happen. First create a new entry and when it succeeds, delete the old
# one.
self._request_add_dns_record(record)
self._request_delete_dns_record_by_id(old_record['id'])
|
Updates an existing record and changes its sub-domain name
|
def _determine_types(start_node, first_name, add_leaf, add_link):
    """Determines types for generic additions"""
    # Root nodes are classified by the new child's name; everything
    # else by the branch the start node lives in.
    where = first_name if start_node.v_is_root else start_node._branch
    type_tuple = SUBTREE_MAPPING.get(where, (GROUP, LEAF))
    if add_link:
        return type_tuple[0], LINK
    if add_leaf:
        return type_tuple
    return type_tuple[0], type_tuple[0]
|
Determines types for generic additions
|
def p_qualifierType_1(p):
    """qualifierType_1 : ':' dataType array
                       | ':' dataType array defaultValue
       """
    # NOTE: the docstring above is the PLY/yacc grammar rule for this
    # production — editing it changes the parser, not just the docs.
    # defaultValue is optional: only the 4-symbol alternative has it.
    dv = None
    if len(p) == 5:
        dv = p[4]
    # Production value: (dataType, is_array=True, array spec, default or None).
    p[0] = (p[2], True, p[3], dv)
|
qualifierType_1 : ':' dataType array
| ':' dataType array defaultValue
|
def result_key_for(self, op_name):
    """
    Checks for the presence of a ``result_key``, which defines what data
    should make up an instance.

    Returns ``None`` if there is no ``result_key``.

    :param op_name: The operation name to look for the ``result_key`` in.
    :type op_name: string

    :returns: The expected key to look for data within
    :rtype: string or None
    """
    operations = self.resource_data.get('operations', {})
    return operations.get(op_name, {}).get('result_key', None)
|
Checks for the presence of a ``result_key``, which defines what data
should make up an instance.
Returns ``None`` if there is no ``result_key``.
:param op_name: The operation name to look for the ``result_key`` in.
:type op_name: string
:returns: The expected key to look for data within
:rtype: string or None
|
def extra_prepare(self, configuration, args_dict):
    """
    Called before the configuration.converters are activated

    Here we make sure that we have harpoon options from ``args_dict`` in
    the configuration.

    We then load all the harpoon modules as specified by the
    ``harpoon.addons`` setting.

    Finally we inject into the configuration:

    $@
        The ``harpoon.extra`` setting

    bash
        The ``bash`` setting

    command
        The ``command`` setting

    harpoon
        The harpoon settings

    collector
        This instance
    """
    harpoon = self.find_harpoon_options(configuration, args_dict)
    self.register = self.setup_addon_register(harpoon)
    # Make sure images is started
    if "images" not in self.configuration:
        self.configuration["images"] = {}
    # Add our special stuff to the configuration
    self.configuration.update(
        { "$@": harpoon.get("extra", "")
        , "bash": args_dict["bash"] or sb.NotSpecified
        , "harpoon": harpoon
        # NOTE(review): bare ``NotSpecified`` here vs ``sb.NotSpecified``
        # elsewhere — confirm both names resolve to the same sentinel.
        , "assume_role": args_dict["assume_role"] or NotSpecified
        , "command": args_dict['command'] or sb.NotSpecified
        , "collector": self
        }
        , source = "<args_dict>"
    )
|
Called before the configuration.converters are activated
Here we make sure that we have harpoon options from ``args_dict`` in
the configuration.
We then load all the harpoon modules as specified by the
``harpoon.addons`` setting.
Finally we inject into the configuration:
$@
The ``harpoon.extra`` setting
bash
The ``bash`` setting
command
The ``command`` setting
harpoon
The harpoon settings
collector
This instance
|
def connectExec(connection, protocol, commandLine):
    """Connect a Protocol to a ssh exec session.

    Opens a session channel on *connection*, attaches *protocol* to it,
    and requests execution of *commandLine* on the remote side once the
    session is established.

    :returns: the Deferred from ``connectSession``, chained with the
        exec request.
    """
    deferred = connectSession(connection, protocol)
    @deferred.addCallback
    def requestExec(session):
        # Renamed from ``requestSubsystem``: this callback issues an
        # exec request, not a subsystem request.
        return session.requestExec(commandLine)
    return deferred
|
Connect a Protocol to a ssh exec session
|
def dcc(self):
    """return the :class:`~irc3.dcc.DCCManager` (created lazily)"""
    manager = self._dcc
    if manager is None:
        # First access: build the manager and cache it for reuse.
        manager = DCCManager(self)
        self._dcc = manager
    return manager
|
return the :class:`~irc3.dcc.DCCManager`
|
def corners(self):
    """
    Iterate the vector of all corners of the hyperrectangles

    >>> Tile(3, dim=2).corners
    array([[0, 0],
           [0, 3],
           [3, 0],
           [3, 3]])
    """
    # Each corner picks either the lower (0) or upper (1) bound per axis.
    points = [
        self.l + np.array(choice) * self.r
        for choice in itertools.product((0, 1), repeat=self.dim)
    ]
    return np.array(points)
|
Iterate the vector of all corners of the hyperrectangles
>>> Tile(3, dim=2).corners
array([[0, 0],
[0, 3],
[3, 0],
[3, 3]])
|
def sargasso_chart(self):
    """ Make the sargasso plot """
    # Config for the bar graph.
    config = {
        'id': 'sargasso_assignment_plot',
        'title': 'Sargasso: Assigned Reads',
        'ylab': '# Reads',
        'cpswitch_counts_label': 'Number of Reads'
    }
    # Only the read-count columns are plotted for now.
    read_keys = [key for key in self.sargasso_keys if 'Reads' in key]
    return bargraph.plot(self.sargasso_data, read_keys, config)
|
Make the sargasso plot
|
def airplane(self, model_mask: str = '###') -> str:
    """Generate a dummy airplane model.

    :param model_mask: Mask of truck model. Here '@' is a
        placeholder of characters and '#' is a placeholder of digits.
    :return: Airplane model.

    :Example:
        Boeing 727.
    """
    # Keep RNG call order (custom_code first, then choice) so seeded
    # generators produce the same sequence as before.
    model_code = self.random.custom_code(mask=model_mask)
    manufacturer = self.random.choice(AIRPLANES)
    return '{} {}'.format(manufacturer, model_code)
|
Generate a dummy airplane model.
:param model_mask: Mask of truck model. Here '@' is a
placeholder of characters and '#' is a placeholder of digits.
:return: Airplane model.
:Example:
Boeing 727.
|
def save_beat(
    self,
    output_file_name,
    frequencys,
    play_time,
    sample_rate=44100,
    volume=0.01
):
    '''
    Write a (binaural) beat with the given parameters to a WAV file.

    Args:
        output_file_name:   path of the WAV file to write.
        frequencys:         tuple of (left frequency (Hz), right frequency (Hz)).
        play_time:          playback time in seconds.
        sample_rate:        sample rate.
        volume:             volume.

    Returns:
        void
    '''
    left_frequency, right_frequency = frequencys
    # Generate one chunk per stereo channel at its own frequency.
    left_chunk = self.__create_chunk(left_frequency, play_time, sample_rate)
    right_chunk = self.__create_chunk(right_frequency, play_time, sample_rate)
    frame_list = self.read_stream(left_chunk, right_chunk, volume)
    wf = wave.open(output_file_name, 'wb')
    # (nchannels=2, sampwidth=2 bytes, framerate, nframes=0, comptype, compname)
    wf.setparams((2, 2, sample_rate, 0, 'NONE', 'not compressed'))
    wf.writeframes(b''.join(frame_list))
    wf.close()
|
引数で指定した条件でビートを鳴らす
Args:
frequencys: (左の周波数(Hz), 右の周波数(Hz))のtuple
play_time: 再生時間(秒)
sample_rate: サンプルレート
volume: 音量
Returns:
void
|
def register_postloop_hook(self, func: Callable[[], None]) -> None:
    """Register a function to be called at the end of the command loop.

    :param func: callable taking no arguments and returning None;
        validated by ``_validate_prepostloop_callable`` before being
        appended to the post-loop hook list.
    """
    # Fixed annotation: a no-argument callable is Callable[[], None],
    # not Callable[[None], None] (which would take one None argument).
    self._validate_prepostloop_callable(func)
    self._postloop_hooks.append(func)
|
Register a function to be called at the end of the command loop.
|
def build_msg_fmtstr2(lbl, length, invert_rate, backspace):
    r"""
    Args:
        lbl (str):
        invert_rate (bool):
        backspace (bool):

    Returns:
        str: msg_fmtstr_time

    CommandLine:
        python -m utool.util_progress --exec-ProgressIter.build_msg_fmtstr2

    Setup:
        >>> from utool.util_progress import * # NOQA
        >>> lbl = 'foo'
        >>> invert_rate = True
        >>> backspace = False
        >>> length = None

    Example:
        >>> # DISABLE_DOCTEST
        >>> msg_fmtstr_time = ProgressIter.build_msg_fmtstr2(lbl, length, invert_rate, backspace)
        >>> result = ('%s' % (ut.repr2(msg_fmtstr_time),))
        >>> print(result)
    """
    with_wall = True
    tzname = time.tzname[0]
    # Abbreviate the verbose Windows timezone name.
    if util_cplat.WIN32:
        tzname = tzname.replace('Eastern Standard Time', 'EST')
    # ansii/vt100 code for clearline
    # CLEARLINE_L2 = '\33[2K'
    # BEFORE_PROG = '\r\033[?25l'
    CLEARLINE_EL0 = '\33[0K' # clear line to right
    # CLEARLINE_EL1 = '\33[1K' # clear line to left
    CLEARLINE_EL2 = '\33[2K' # clear line
    # DECTCEM_HIDE = '\033[?25l' # hide cursor
    CLEAR_BEFORE = '\r' + CLEARLINE_EL2 # + DECTCEM_HIDE
    # FIXME: hideing cursor persists if the program crashes
    CLEAR_AFTER = CLEARLINE_EL0
    msg_head = ProgressIter.build_msg_fmtstr_head_cols(length, lbl)
    if backspace:
        msg_head = [CLEAR_BEFORE] + msg_head
    msg_tail = [
        (
            'rate={rate:4.2f} sec/iter, '
            if invert_rate else
            'rate={rate:4.2f} Hz,'
        ),
        (
            ''
            if length == 0 else
            ' etr={etr},'
        ),
        # NOTE(review): 'ellapsed' is misspelled but is the emitted
        # format-string key; renaming it would change the output format.
        ' ellapsed={ellapsed},',
        (
            ' wall={wall} ' + tzname
            if with_wall
            else ''
        ),
        # backslash-r is a carrage return and undoes all previous output on
        # a written line
        (' {extra}'),
        CLEAR_AFTER if backspace else '\n',
    ]
    msg_fmtstr_time = ''.join((msg_head + msg_tail))
    return msg_fmtstr_time
|
r"""
Args:
lbl (str):
invert_rate (bool):
backspace (bool):
Returns:
str: msg_fmtstr_time
CommandLine:
python -m utool.util_progress --exec-ProgressIter.build_msg_fmtstr2
Setup:
>>> from utool.util_progress import * # NOQA
>>> lbl = 'foo'
>>> invert_rate = True
>>> backspace = False
>>> length = None
Example:
>>> # DISABLE_DOCTEST
>>> msg_fmtstr_time = ProgressIter.build_msg_fmtstr2(lbl, length, invert_rate, backspace)
>>> result = ('%s' % (ut.repr2(msg_fmtstr_time),))
>>> print(result)
|
def convert_attribute_name_to_tag(value):
    """
    A utility function that converts an attribute name string into the
    corresponding attribute tag.

    For example: 'State' -> enums.Tags.STATE

    Args:
        value (string): The string name of the attribute.

    Returns:
        enum: The Tags enumeration value that corresponds to the attribute
            name string.

    Raises:
        ValueError: if the attribute name string is not a string or if it is
            an unrecognized attribute name
    """
    if not isinstance(value, six.string_types):
        raise ValueError("The attribute name must be a string.")
    # Return the first matching tag from the lookup table.
    matches = (entry[1] for entry in attribute_name_tag_table if entry[0] == value)
    for tag in matches:
        return tag
    raise ValueError("Unrecognized attribute name: '{}'".format(value))
|
A utility function that converts an attribute name string into the
corresponding attribute tag.
For example: 'State' -> enums.Tags.STATE
Args:
value (string): The string name of the attribute.
Returns:
enum: The Tags enumeration value that corresponds to the attribute
name string.
Raises:
ValueError: if the attribute name string is not a string or if it is
an unrecognized attribute name
|
def _config_convert_to_address_helper(self) -> None:
"""
converts the config from ports to zmq ip addresses
Operates on `self.config` using `self._socket_factory.to_address`
"""
to_address = self._socket_factory.to_address
for k, v in self.config.items():
if k == 'chatter_subscription_port':
continue
if k.endswith('port'):
self.config[k] = to_address(v)
|
converts the config from ports to zmq ip addresses
Operates on `self.config` using `self._socket_factory.to_address`
|
def _WebSafeComponent(c, alt=False):
'''Convert a color component to its web safe equivalent.
Parameters:
:c:
The component value [0...1]
:alt:
If True, return the alternative value instead of the nearest one.
Returns:
The web safe equivalent of the component value.
'''
# This sucks, but floating point between 0 and 1 is quite fuzzy...
# So we just change the scale a while to make the equality tests
# work, otherwise it gets wrong at some decimal far to the right.
sc = c * 100.0
# If the color is already safe, return it straight away
d = sc % 20
if d==0: return c
# Get the lower and upper safe values
l = sc - d
u = l + 20
# Return the 'closest' value according to the alt flag
if alt:
if (sc-l) >= (u-sc): return l/100.0
else: return u/100.0
else:
if (sc-l) >= (u-sc): return u/100.0
else: return l/100.0
|
Convert a color component to its web safe equivalent.
Parameters:
:c:
The component value [0...1]
:alt:
If True, return the alternative value instead of the nearest one.
Returns:
The web safe equivalent of the component value.
|
def iter_follower_file(fname):
    """ Iterator from a file of follower information and return a tuple of screen_name, follower ids.
    File format is:
    <iso timestamp> <screen_name> <follower_id1> <follower_ids2> ...
    """
    with open(fname, 'rt') as fh:
        for raw_line in fh:
            tokens = raw_line.split()
            # Lines with fewer than four tokens are skipped (preserves the
            # original behavior of requiring more than one follower id).
            if len(tokens) <= 3:
                continue
            yield tokens[1].lower(), {int(tok) for tok in tokens[2:]}
|
Iterator from a file of follower information and return a tuple of screen_name, follower ids.
File format is:
<iso timestamp> <screen_name> <follower_id1> <follower_ids2> ...
|
def generate_additional_properties(self):
    """
    Means object with keys with values defined by definition.
    .. code-block:: python
    {
    'properties': {
    'key': {'type': 'number'},
    }
    'additionalProperties': {'type': 'string'},
    }
    Valid object is containing key called 'key' and it's value any number and
    any other key with any string.

    Emits validation code (via the ``self.l`` code-generation DSL) that, for
    a dict value, checks every key not listed in ``properties`` against the
    ``additionalProperties`` sub-schema; if ``additionalProperties`` is falsy,
    any extra key raises ``JsonSchemaException``.
    """
    self.create_variable_is_dict()
    with self.l('if {variable}_is_dict:'):
        self.create_variable_keys()
        add_prop_definition = self._definition["additionalProperties"]
        if add_prop_definition:
            # Keys declared in 'properties' are validated elsewhere; only
            # the remaining keys are checked against additionalProperties.
            properties_keys = list(self._definition.get("properties", {}).keys())
            with self.l('for {variable}_key in {variable}_keys:'):
                with self.l('if {variable}_key not in {}:', properties_keys):
                    self.l('{variable}_value = {variable}.get({variable}_key)')
                    self.generate_func_code_block(
                        add_prop_definition,
                        '{}_value'.format(self._variable),
                        '{}.{{{}_key}}'.format(self._variable_name, self._variable),
                    )
        else:
            # additionalProperties is false/empty: any leftover key is an error.
            with self.l('if {variable}_keys:'):
                self.l('raise JsonSchemaException("{name} must contain only specified properties")')
|
Means object with keys with values defined by definition.
.. code-block:: python
{
'properties': {
'key': {'type': 'number'},
}
'additionalProperties': {'type': 'string'},
}
Valid object is containing key called 'key' and it's value any number and
any other key with any string.
|
def retarget_with_change_points(song, cp_times, duration):
    """Create a composition of a song of a given duration that reaches
    music change points at specified times. This is still under
    construction. It might not work as well with more than
    2 ``cp_times`` at the moment.
    Here's an example of retargeting music to be 40 seconds long and
    hit a change point at the 10 and 30 second marks::
    song = Song("instrumental_music.wav")
    composition, change_points =\
    retarget.retarget_with_change_points(song, [10, 30], 40)
    composition.export(filename="retargeted_instrumental_music.")
    :param song: Song to retarget
    :type song: :py:class:`radiotool.composer.Song`
    :param cp_times: Times to reach change points (in seconds)
    :type cp_times: list of floats
    :param duration: Target length of retargeted music (in seconds)
    :type duration: float
    :returns: Composition of retargeted song and list of locations of
    change points in the retargeted composition
    :rtype: (:py:class:`radiotool.composer.Composition`, list)
    """
    analysis = song.analysis
    beat_length = analysis[BEAT_DUR_KEY]
    beats = np.array(analysis["beats"])
    # find change points
    # NOTE(review): nchangepoints is hard-coded to 4 regardless of how many
    # cp_times the caller supplies -- confirm this is intended.
    cps = np.array(novelty(song, nchangepoints=4))
    cp_times = np.array(cp_times)
    # mark change points in original music
    def music_labels(t):
        # find beat closest to t
        closest_beat_idx = np.argmin(np.abs(beats - t))
        closest_beat = beats[closest_beat_idx]
        closest_cp = cps[np.argmin(np.abs(cps - closest_beat))]
        # A beat is a "cp" beat iff it is the nearest beat to some detected
        # change point.
        if np.argmin(np.abs(beats - closest_cp)) == closest_beat_idx:
            return "cp"
        else:
            return "noncp"
    # mark where we want change points in the output music
    # (a few beats of slack to improve the quality of the end result)
    def out_labels(t):
        if np.min(np.abs(cp_times - t)) < 1.5 * beat_length:
            return "cp"
        return "noncp"
    # Label every beat-sized step of the source and target timelines.
    m_labels = [music_labels(i) for i in
                np.arange(0, song.duration_in_seconds, beat_length)]
    o_labels = [out_labels(i) for i in np.arange(0, duration, beat_length)]
    constraints = [
        rt_constraints.TimbrePitchConstraint(
            context=0, timbre_weight=1.0, chroma_weight=1.0),
        rt_constraints.EnergyConstraint(penalty=.5),
        rt_constraints.MinimumLoopConstraint(8),
        rt_constraints.NoveltyConstraint(m_labels, o_labels, 1.0)
    ]
    comp, info = retarget(
        [song], duration, constraints=[constraints],
        fade_in_len=None, fade_out_len=None)
    # Convert "cp"-labeled result beats back into seconds.
    final_cp_locations = [beat_length * i
                          for i, label in enumerate(info['result_labels'])
                          if label == 'cp']
    return comp, final_cp_locations
|
Create a composition of a song of a given duration that reaches
music change points at specified times. This is still under
construction. It might not work as well with more than
2 ``cp_times`` at the moment.
Here's an example of retargeting music to be 40 seconds long and
hit a change point at the 10 and 30 second marks::
song = Song("instrumental_music.wav")
composition, change_points =\
retarget.retarget_with_change_points(song, [10, 30], 40)
composition.export(filename="retargeted_instrumental_music.")
:param song: Song to retarget
:type song: :py:class:`radiotool.composer.Song`
:param cp_times: Times to reach change points (in seconds)
:type cp_times: list of floats
:param duration: Target length of retargeted music (in seconds)
:type duration: float
:returns: Composition of retargeted song and list of locations of
change points in the retargeted composition
:rtype: (:py:class:`radiotool.composer.Composition`, list)
|
def _mk_range_bucket(name, n1, n2, r1, r2):
"""
Create a named range specification for encoding.
:param name: The name of the range as it should appear in the result
:param n1: The name of the lower bound of the range specifier
:param n2: The name of the upper bound of the range specified
:param r1: The value of the lower bound (user value)
:param r2: The value of the upper bound (user value)
:return: A dictionary containing the range bounds. The upper and lower
bounds are keyed under ``n1`` and ``n2``.
More than just a simple wrapper, this will not include any range bound
which has a user value of `None`. Likewise it will raise an exception if
both range values are ``None``.
"""
d = {}
if r1 is not None:
d[n1] = r1
if r2 is not None:
d[n2] = r2
if not d:
raise TypeError('Must specify at least one range boundary!')
d['name'] = name
return d
|
Create a named range specification for encoding.
:param name: The name of the range as it should appear in the result
:param n1: The name of the lower bound of the range specifier
:param n2: The name of the upper bound of the range specified
:param r1: The value of the lower bound (user value)
:param r2: The value of the upper bound (user value)
:return: A dictionary containing the range bounds. The upper and lower
bounds are keyed under ``n1`` and ``n2``.
More than just a simple wrapper, this will not include any range bound
which has a user value of `None`. Likewise it will raise an exception if
both range values are ``None``.
|
def get_enclosed_object(self):
    """Return the enclosed object, resolving and caching it on first access."""
    if self._enclosed_object is None:
        enclosed_object_id = self.get_enclosed_object_id()
        # The id namespace is expected to look like '<package>.<ObjectName>';
        # split it to find the provider manager and the lookup session to use.
        package_name = enclosed_object_id.get_identifier_namespace().split('.')[0]
        obj_name = enclosed_object_id.get_identifier_namespace().split('.')[1]
        mgr = self.my_osid_object._get_provider_manager(package_name.upper())
        try:
            lookup_session = getattr(mgr, 'get_' + obj_name.lower() + '_lookup_session')(self.my_osid_object._proxy)
        except TypeError:
            # Manager's session getter does not accept a proxy argument.
            lookup_session = getattr(mgr, 'get_' + obj_name.lower() + '_lookup_session')()
        # Search across all catalogs of this package, not just one.
        getattr(lookup_session, 'use_federated_' + CATALOG_LOOKUP[package_name] + '_view')()
        self._enclosed_object = getattr(
            lookup_session, 'get_' + obj_name.lower())(enclosed_object_id)
    return self._enclosed_object
|
Return the enclosed object
|
def get_cp2k_structure(atoms):
    """Convert the atoms structure to a CP2K input file skeleton string.

    Builds an ENERGY_FORCE input with the cell (in angstrom), scaled
    coordinates, and a forces-print section, then serializes it via
    ``dict2cp2k``.
    """
    from cp2k_tools.generator import dict2cp2k
    # CP2K's default unit is angstrom, convert it, but still declare it explictly:
    # NOTE(review): multiplying get_cell() by Bohr assumes the cell is stored
    # in Bohr units -- confirm against the atoms object's unit convention.
    cp2k_cell = {sym: ('[angstrom]',) + tuple(coords) for sym, coords in zip(('a', 'b', 'c'), atoms.get_cell()*Bohr)}
    cp2k_cell['periodic'] = 'XYZ' # anything else does not make much sense
    cp2k_coord = {
        'scaled': True,
        '*': [[sym] + list(coord) for sym, coord in zip(atoms.get_chemical_symbols(), atoms.get_scaled_positions())],
    }
    return dict2cp2k(
        {
            'global': {
                'run_type': 'ENERGY_FORCE',
            },
            'force_eval': {
                'subsys': {
                    'cell': cp2k_cell,
                    'coord': cp2k_coord,
                },
                'print': {
                    'forces': {
                        'filename': 'forces',
                    },
                },
            },
        }
    )
|
Convert the atoms structure to a CP2K input file skeleton string
|
def create_order(self, oid, price, context=None, expires=None):
    """
    CREATES a single order for object ``oid``, with price set to ``price``
    and validity until ``expires``.
    :type oid: ``bigint``
    :param oid:
    Object ID.
    :type price: ``bigint``
    :param price:
    Vingd amount (in cents) the user/buyer shall be charged upon
    successful purchase.
    :type context: ``string``
    :param context:
    Purchase (order-related) context. Retrieved upon purchase
    verification.
    :type expires: ``datetime``/``dict``
    :param expires:
    Order expiry timestamp, absolute (``datetime``) or relative
    (``dict``). Valid keys for relative expiry timestamp dictionary are
    same as keyword arguments for `datetime.timedelta` (``days``,
    ``seconds``, ``minutes``, ``hours``, ``weeks``). Default:
    `Vingd.EXP_ORDER`.
    :rtype: ``dict``
    :returns:
    Order dictionary::
    order = {
    'id': <order_id>,
    'expires': <order_expiry>,
    'context': <purchase_context>,
    'object': {
    'id': <oid>,
    'price': <amount_in_cents>
    },
    'urls': {
    'redirect': <url_for_failsafe_redirect_purchase_mode>,
    'popup': <url_for_popup_purchase_mode>
    }
    }
    :raises GeneralException:
    :resource: ``objects/<oid>/orders/``
    :access: authorized users
    """
    # Normalize relative/absolute expiry into an absolute datetime.
    expires = absdatetime(expires, default=self.EXP_ORDER)
    # safeformat validates oid as an integer before it hits the URL.
    orders = self.request(
        'post',
        safeformat('objects/{:int}/orders/', oid),
        json.dumps({
            'price': price,
            'order_expires': expires.isoformat(),
            'context': context
        }))
    orderid = self._extract_id_from_batch_response(orders)
    # Assemble the caller-facing summary, including both purchase-mode URLs.
    return {
        'id': orderid,
        'expires': expires,
        'context': context,
        'object': {
            'id': oid,
            'price': price
        },
        'urls': {
            'redirect': urljoin(self.usr_frontend, '/orders/%d/add/' % orderid),
            'popup': urljoin(self.usr_frontend, '/popup/orders/%d/add/' % orderid)
        }
    }
|
CREATES a single order for object ``oid``, with price set to ``price``
and validity until ``expires``.
:type oid: ``bigint``
:param oid:
Object ID.
:type price: ``bigint``
:param price:
Vingd amount (in cents) the user/buyer shall be charged upon
successful purchase.
:type context: ``string``
:param context:
Purchase (order-related) context. Retrieved upon purchase
verification.
:type expires: ``datetime``/``dict``
:param expires:
Order expiry timestamp, absolute (``datetime``) or relative
(``dict``). Valid keys for relative expiry timestamp dictionary are
same as keyword arguments for `datetime.timedelta` (``days``,
``seconds``, ``minutes``, ``hours``, ``weeks``). Default:
`Vingd.EXP_ORDER`.
:rtype: ``dict``
:returns:
Order dictionary::
order = {
'id': <order_id>,
'expires': <order_expiry>,
'context': <purchase_context>,
'object': {
'id': <oid>,
'price': <amount_in_cents>
},
'urls': {
'redirect': <url_for_failsafe_redirect_purchase_mode>,
'popup': <url_for_popup_purchase_mode>
}
}
:raises GeneralException:
:resource: ``objects/<oid>/orders/``
:access: authorized users
|
def new(project_name):
    """Create a new project named *project_name*.

    Walks the user through the ``new_project_ui`` dialog; on success renders
    the project skeleton and initializes a git repository in it, otherwise
    prints the message string returned by the UI.
    """
    try:
        locale.setlocale(locale.LC_ALL, '')
    except locale.Error:
        # Was a bare `except:`, which also swallowed KeyboardInterrupt and
        # SystemExit; setlocale only raises locale.Error.
        print("Warning: Unable to set locale. Expect encoding problems.")
    config = utils.get_config()
    config['new_project']['project_name'] = project_name
    values = new_project_ui(config)
    # The UI returns a dict of options on success, or an error string.
    if not isinstance(values, str):
        print('New project options:')
        pprint.pprint(values)
        project_dir = render.render_project(**values)
        git.init_repo(project_dir, **values)
    else:
        print(values)
|
Creates a new project
|
def put_on_top(self, request, queryset):
    """
    Put the selected entries on top at the current date.
    """
    # Re-dating to "now" bumps the entries to the top of date-ordered listings.
    queryset.update(publication_date=timezone.now())
    # Silent mode: no per-directory user messages, just the summary below.
    self.ping_directories(request, queryset, messages=False)
    self.message_user(request, _(
        'The selected entries are now set at the current date.'))
|
Put the selected entries on top at the current date.
|
def comment_request(self, request_id, body, commit=None,
                    filename=None, row=None):
    """
    Create a comment on the request.
    :param request_id: the id of the request
    :param body: the comment body
    :param commit: which commit to comment on
    :param filename: which file to comment on
    :param row: which line of code to comment on
    :return: the decoded API response of the comment call
    """
    request_url = ("{}pull-request/{}/comment"
                   .format(self.create_basic_url(), request_id))
    payload = {'comment': body}
    # Optional fields are only sent when explicitly provided.
    if commit is not None:
        payload['commit'] = commit
    if filename is not None:
        payload['filename'] = filename
    if row is not None:
        payload['row'] = row
    return_value = self._call_api(request_url,
                                  method='POST', data=payload)
    LOG.debug(return_value)
    # The docstring advertises a return value, but the response used to be
    # logged and discarded; hand it back to the caller.
    return return_value
|
Create a comment on the request.
:param request_id: the id of the request
:param body: the comment body
:param commit: which commit to comment on
:param filename: which file to comment on
:param row: which line of code to comment on
:return:
|
def calculate_month(birth_date):
    """
    Calculates and returns a month number basing on PESEL standard.
    """
    # PESEL encodes the century as an offset added to the month:
    # 1800s -> +80, 1900s -> +0, 2000s -> +20, 2100s -> +40, 2200s -> +60.
    century_offset = ((birth_date.year // 100 - 14) % 5) * 20
    return birth_date.month + century_offset
|
Calculates and returns a month number basing on PESEL standard.
|
def Connect(self, Username, WaitConnected=False):
    """Connects application to user.
    :Parameters:
    Username : str
    Name of the user to connect to.
    WaitConnected : bool
    If True, causes the method to wait until the connection is established.
    :return: If ``WaitConnected`` is True, returns the stream which can be used to send the
    data. Otherwise returns None.
    :rtype: `ApplicationStream` or None
    """
    if WaitConnected:
        # Event is set by the ApplicationStreams handler once a stream to
        # Username appears; the list holds the stream for retrieval below.
        self._Connect_Event = threading.Event()
        self._Connect_Stream = [None]
        self._Connect_Username = Username
        # Run the handler once with the current streams in case the
        # connection already exists before we register for events.
        self._Connect_ApplicationStreams(self, self.Streams)
        self._Owner.RegisterEventHandler('ApplicationStreams', self._Connect_ApplicationStreams)
        self._Alter('CONNECT', Username)
        self._Connect_Event.wait()
        self._Owner.UnregisterEventHandler('ApplicationStreams', self._Connect_ApplicationStreams)
        try:
            return self._Connect_Stream[0]
        finally:
            # Always drop the temporary handshake attributes.
            del self._Connect_Stream, self._Connect_Event, self._Connect_Username
    else:
        self._Alter('CONNECT', Username)
|
Connects application to user.
:Parameters:
Username : str
Name of the user to connect to.
WaitConnected : bool
If True, causes the method to wait until the connection is established.
:return: If ``WaitConnected`` is True, returns the stream which can be used to send the
data. Otherwise returns None.
:rtype: `ApplicationStream` or None
|
def _is_noop_timeperiod(self, process_name, timeperiod):
    """ method verifies if the given timeperiod for given process is valid or falls in-between grouping checkpoints
    :param process_name: name of the process
    :param timeperiod: timeperiod to verify
    :return: False, if given process has no time_grouping set or it is equal to 1.
    False, if time_grouping is custom but the given timeperiod matches the grouped timeperiod.
    True, if the timeperiod falls in-between grouping cracks
    """
    time_grouping = context.process_context[process_name].time_grouping
    if time_grouping == 1:
        # No grouping configured -> every timeperiod is an actual checkpoint.
        return False
    process_hierarchy = self.timetable.get_tree(process_name).process_hierarchy
    timeperiod_dict = process_hierarchy[process_name].timeperiod_dict
    # A timeperiod that translates onto itself sits on a grouping checkpoint;
    # anything else falls in-between and is a no-op.
    return timeperiod_dict._translate_timeperiod(timeperiod) != timeperiod
|
method verifies if the given timeperiod for given process is valid or falls in-between grouping checkpoints
:param process_name: name of the process
:param timeperiod: timeperiod to verify
:return: False, if given process has no time_grouping set or it is equal to 1.
False, if time_grouping is custom but the given timeperiod matches the grouped timeperiod.
True, if the timeperiod falls in-between grouping cracks
|
def replace(self, new_node):
    """Replace a node after first checking integrity of node stack.

    Pops the current traversal frame, locates the slot in the parent frame
    that holds the node being visited, asserts that the stack, the cursor and
    the parent's child entry all agree on which node that is, and then writes
    ``new_node`` into that slot (list index or attribute).
    """
    cur_node = self.cur_node
    nodestack = self.nodestack
    cur = nodestack.pop()
    prev = nodestack[-1]
    # NOTE(review): frame layout assumed to be
    # [parent, ..., children_seq, next_index] -- confirm against the code
    # that pushes frames onto self.nodestack.
    index = prev[-1] - 1
    oldnode, name = prev[-2][index]
    # Integrity check: the popped frame, the cursor and the parent's child
    # entry must all reference the same node.
    assert cur[0] is cur_node is oldnode, (cur[0], cur_node, prev[-2],
                                           index)
    parent = prev[0]
    if isinstance(parent, list):
        parent[index] = new_node
    else:
        setattr(parent, name, new_node)
|
Replace a node after first checking integrity of node stack.
|
def handle_cmd(self, cmd):
    """Handles a single server command.

    The line is split on whitespace; a token starting with ``#`` begins a
    bash-like comment that discards the rest of the line. Tokens are joined
    with underscores, longest prefix first, to find the most specific handler
    registered in ``self._cmd_methods``; any trailing tokens become the
    handler's arguments and are checked against ``self._cmd_argc``. When no
    handler matches, candidate continuations are printed instead.
    """
    cmd = cmd.strip()
    segments = []
    for s in cmd.split():
        # remove bash-like comments
        if s.startswith('#'):
            break
        # TODO implement escape sequences (also for \#)
        segments.append(s)
    args = []
    if not len(segments):
        return
    # process more specific commands first
    while segments:
        cur_cmd = "_".join(segments)
        if cur_cmd in self._cmd_methods:
            argc = self._cmd_argc[cur_cmd]
            # None means "any number of arguments" is accepted.
            if argc is not None and len(args) != argc:
                msg('command {0} expects {1} argument(s), got {2}',
                    " ".join(segments), argc, len(args))
                return
            self._cmd_methods[cur_cmd](args)
            return
        # No handler for this prefix: move the last token into args and retry.
        args.insert(0, segments.pop())
    # invalid command
    prefix = '_'.join(args) + '_'
    matches = filter(
        lambda cmd: cmd.startswith(prefix), self._cmd_methods.keys())
    # Collect the next command component of every registered command that
    # extends what the user typed.
    candidates = set([])
    for m in matches:
        if len(m) <= len(prefix):
            continue
        m = m[len(prefix):]
        if '_' in m:
            m = m[:m.index('_')]
        candidates.add(m)
    if len(candidates):
        msg('command "{0}" needs more arguments:', ' '.join(args))
        for c in candidates:
            msg('  {0}', c)
    else:
        msg('command "{0}" invalid; type ' +
            'help or use <TAB> for a list of commands',
            ' '.join(args))
|
Handles a single server command.
|
def mapPartitions(self, f, preservesPartitioning=False):
    """
    Return a new RDD by applying a function to each partition of this RDD.
    >>> rdd = sc.parallelize([1, 2, 3, 4], 2)
    >>> def f(iterator): yield sum(iterator)
    >>> rdd.mapPartitions(f).collect()
    [3, 7]
    """
    # Adapt f to the (index, iterator) signature by dropping the index.
    return self.mapPartitionsWithIndex(
        lambda _, iterator: f(iterator), preservesPartitioning)
|
Return a new RDD by applying a function to each partition of this RDD.
>>> rdd = sc.parallelize([1, 2, 3, 4], 2)
>>> def f(iterator): yield sum(iterator)
>>> rdd.mapPartitions(f).collect()
[3, 7]
|
def get_rendition_url(self, width=0, height=0):
    '''Get the rendition URL for a specified size.

    If the rendition does not exist yet it will be created.
    '''
    # (0, 0) means "original size": serve the master file directly.
    if width == 0 and height == 0:
        return self.get_master_url()
    target_width, target_height = self.get_rendition_size(width, height)
    key = '%sx%s' % (target_width, target_height)
    if not self.renditions:
        self.renditions = {}
    rendition_name = self.renditions.get(key, False)
    if not rendition_name:
        # NOTE(review): make_rendition presumably records the new name in
        # self.renditions as well -- confirm, otherwise this cache never fills.
        rendition_name = self.make_rendition(target_width, target_height)
    return default_storage.url(rendition_name)
|
get the rendition URL for a specified size
if the renditions does not exists it will be created
|
def _read_as_table(self):
"""
Read the data contained in all entries as a list of
lists containing all of the data
:return: list of dicts containing all tabular data
"""
rows = list()
for row in self._rows:
rows.append([row[i].get() for i in range(self.num_of_columns)])
return rows
|
Read the data contained in all entries as a list of
lists containing all of the data
:return: list of dicts containing all tabular data
|
def get_cts_metadata(self, key: str, lang: str = None) -> Literal:
    """ Get easily a metadata from the CTS namespace

    :param key: CTS property to retrieve
    :param lang: Language in which it should be (None selects the default)
    :return: Literal value of the CTS graph property
    """
    # Delegate to the metadata collection, resolving the key against the
    # CTS RDF namespace.
    return self.metadata.get_single(RDF_NAMESPACES.CTS.term(key), lang)
|
Get easily a metadata from the CTS namespace
:param key: CTS property to retrieve
:param lang: Language in which it should be
:return: Literal value of the CTS graph property
|
def reorient_image(image, axis1, axis2=None, doreflection=False, doscale=0, txfn=None):
    """
    Align image along a specified axis
    ANTsR function: `reorientImage`
    Arguments
    ---------
    image : ANTsImage
    image to reorient
    axis1 : list/tuple of integers
    vector of size dim, might need to play w/axis sign
    axis2 : list/tuple of integers
    vector of size dim for 3D
    doreflection : boolean
    whether to reflect
    doscale : scalar value
    1 allows automated estimate of scaling
    txfn : string
    file name for transformation
    Returns
    -------
    dict with keys `reoimage` (the reoriented ANTsImage) and `txfn`
    (the transform file name)
    Example
    -------
    >>> import ants
    >>> image = ants.image_read(ants.get_ants_data('r16'))
    >>> ants.reorient_image(image, (1,0))
    """
    # The C routine works on float images; remember the original pixeltype
    # so the result can be converted back at the end.
    inpixeltype = image.pixeltype
    if image.pixeltype != 'float':
        image = image.clone('float')
    axis_was_none = False
    if axis2 is None:
        axis_was_none = True
        axis2 = [0]*image.dimension
    axis1 = np.array(axis1)
    axis2 = np.array(axis2)
    # Normalize and flip the axis direction.
    # NOTE(review): astype('int') truncates the normalized (unit-length)
    # vector toward zero -- confirm this matches the intended axis encoding.
    axis1 = axis1 / np.sqrt(np.sum(axis1*axis1)) * (-1)
    axis1 = axis1.astype('int')
    if not axis_was_none:
        axis2 = axis2 / np.sqrt(np.sum(axis2*axis2)) * (-1)
        axis2 = axis2.astype('int')
    else:
        axis2 = np.array([0]*image.dimension).astype('int')
    if txfn is None:
        txfn = mktemp(suffix='.mat')
    # Coerce doreflection/doscale into per-dimension lists.
    if isinstance(doreflection, tuple):
        doreflection = list(doreflection)
    if not isinstance(doreflection, list):
        doreflection = [doreflection]
    if isinstance(doscale, tuple):
        doscale = list(doscale)
    if not isinstance(doscale, list):
        doscale = [doscale]
    if len(doreflection) == 1:
        doreflection = [doreflection[0]]*image.dimension
    if len(doscale) == 1:
        doscale = [doscale[0]]*image.dimension
    # Compute the reorientation transform via the dimension-specific C library,
    # then resample the image through it.
    libfn = utils.get_lib_fn('reorientImage%s' % image._libsuffix)
    libfn(image.pointer, txfn, axis1.tolist(), axis2.tolist(), doreflection, doscale)
    image2 = apply_transforms(image, image, transformlist=[txfn])
    if image.pixeltype != inpixeltype:
        image2 = image2.clone(inpixeltype)
    return {'reoimage':image2,
            'txfn':txfn}
|
Align image along a specified axis
ANTsR function: `reorientImage`
Arguments
---------
image : ANTsImage
image to reorient
axis1 : list/tuple of integers
vector of size dim, might need to play w/axis sign
axis2 : list/tuple of integers
vector of size dim for 3D
doreflection : boolean
whether to reflect
doscale : scalar value
1 allows automated estimate of scaling
txfn : string
file name for transformation
Returns
-------
ANTsImage
Example
-------
>>> import ants
>>> image = ants.image_read(ants.get_ants_data('r16'))
>>> ants.reorient_image(image, (1,0))
|
def probes_used_extract_scores(full_scores, same_probes):
    """Extracts a matrix of scores for a model, given a probes_used row vector of boolean.

    :param full_scores: 2D array of scores, one column per probe
    :param same_probes: 1D boolean array selecting which probe columns to keep
    :return: float64 array containing only the selected columns
    :raises ValueError: if the column count of ``full_scores`` does not match
        the length of ``same_probes``
    """
    import numpy as np
    if full_scores.shape[1] != same_probes.shape[0]:
        # The original `raise "Size mismatch"` raised a string, which is a
        # TypeError in Python 3; raise a proper exception instead.
        raise ValueError("Size mismatch")
    mask = np.asarray(same_probes, dtype=bool)
    # Boolean-mask the columns in one vectorized step (replaces the Python
    # double loop) while keeping the historical float64 dtype.
    return np.asarray(full_scores, dtype='float64')[:, mask]
|
Extracts a matrix of scores for a model, given a probes_used row vector of boolean
|
def _call(self, x, out):
    """Return ``self(x)``, writing the scalar multiple of ``x`` into ``out``."""
    if self.domain.is_real:
        # Real domain, multiply separately: the real and imaginary parts of
        # the scalar each scale x into the respective component of out.
        out.real = self.scalar.real * x
        out.imag = self.scalar.imag * x
    else:
        # Complex domain: out <- scalar * x via a linear combination.
        out.lincomb(self.scalar, x)
|
Return ``self(x)``.
|
def acknowledge(self, request, *args, **kwargs):
    """
    To acknowledge alert - run **POST** against */api/alerts/<alert_uuid>/acknowledge/*. No payload is required.
    All users that can see alerts can also acknowledge it. If alert is already acknowledged endpoint
    will return error with code 409(conflict).
    """
    alert = self.get_object()
    if not alert.acknowledged:
        alert.acknowledge()
        return response.Response(status=status.HTTP_200_OK)
    else:
        # Acknowledging twice is a conflict, not an idempotent success.
        return response.Response({'detail': _('Alert is already acknowledged.')}, status=status.HTTP_409_CONFLICT)
|
To acknowledge alert - run **POST** against */api/alerts/<alert_uuid>/acknowledge/*. No payload is required.
All users that can see alerts can also acknowledge it. If alert is already acknowledged endpoint
will return error with code 409(conflict).
|
def play_song(self, song):
    """Play the specified song.

    If the target song differs from the current one, the playlist's current
    song is updated; the playlist then emits the ``song_changed`` signal and
    the player, on receiving it, calls ``play`` — only then does the new song
    actually start. If the song equals the currently playing one, the request
    is ignored.

    .. note::
        Callers should not switch songs by assigning
        ``playlist.current_song = song`` directly.
    """
    if song is not None and song == self.current_song:
        logger.warning('The song is already under playing.')
    else:
        self._playlist.current_song = song
|
播放指定歌曲
如果目标歌曲与当前歌曲不相同,则修改播放列表当前歌曲,
播放列表会发出 song_changed 信号,player 监听到信号后调用 play 方法,
到那时才会真正的播放新的歌曲。如果和当前播放歌曲相同,则忽略。
.. note::
调用方不应该直接调用 playlist.current_song = song 来切换歌曲
|
def set_branding(self, asset_ids):
    """Sets the branding.
    arg: asset_ids (osid.id.Id[]): the new assets
    raise: InvalidArgument - ``asset_ids`` is invalid
    raise: NoAccess - ``Metadata.isReadOnly()`` is ``true``
    raise: NullArgument - ``asset_ids`` is ``null``
    *compliance: mandatory -- This method must be implemented.*
    """
    # Validate the argument before touching the underlying map.
    if asset_ids is None:
        raise NullArgument('asset_ids cannot be None')
    if self.get_branding_metadata().is_read_only():
        raise NoAccess()
    if not isinstance(asset_ids, list):
        raise InvalidArgument('asset_ids must be a list')
    if not self.my_osid_object_form._is_valid_input(asset_ids,
                                                    self.get_branding_metadata(),
                                                    array=True):
        raise InvalidArgument()
    # Store string forms of the ids in the backing map.
    self.my_osid_object_form._my_map['brandingIds'] = [
        str(asset_id) for asset_id in asset_ids]
|
Sets the branding.
arg: asset_ids (osid.id.Id[]): the new assets
raise: InvalidArgument - ``asset_ids`` is invalid
raise: NoAccess - ``Metadata.isReadOnly()`` is ``true``
raise: NullArgument - ``asset_ids`` is ``null``
*compliance: mandatory -- This method must be implemented.*
|
def set_connection(host=None, database=None, user=None, password=None):
    """Set connection parameters. Call set_connection with no arguments to clear."""
    # All four keys are rewritten on every call, so omitted arguments
    # reset their entry to None.
    c.CONNECTION.update({
        'HOST': host,
        'DATABASE': database,
        'USER': user,
        'PASSWORD': password,
    })
|
Set connection parameters. Call set_connection with no arguments to clear.
|
def _von_mises_cdf_series(x, concentration, num_terms, dtype):
    """Computes the von Mises CDF and its derivative via series expansion.

    Runs a backward recurrence from ``num_terms`` down to 1 inside a
    ``tf.while_loop``, accumulating the series value ``vn`` and its
    derivative with respect to ``concentration``, then assembles the CDF as
    ``0.5 + x/(2*pi) + vn/pi`` clipped to [0, 1].

    Returns:
      A tuple ``(cdf_clipped, dcdf_dconcentration)``.
    """
    # Keep the number of terms as a float. It should be a small integer, so
    # exactly representable as a float.
    num_terms = tf.cast(num_terms, dtype=dtype)
    def loop_body(n, rn, drn_dconcentration, vn, dvn_dconcentration):
        """One iteration of the series loop."""
        # Update the recurrence ratio rn and its concentration-derivative.
        denominator = 2. * n / concentration + rn
        ddenominator_dk = -2. * n / concentration ** 2 + drn_dconcentration
        rn = 1. / denominator
        drn_dconcentration = -ddenominator_dk / denominator ** 2
        # Fold the sin(n*x)/n term into the running series value.
        multiplier = tf.sin(n * x) / n + vn
        vn = rn * multiplier
        dvn_dconcentration = (drn_dconcentration * multiplier +
                              rn * dvn_dconcentration)
        n -= 1.
        return n, rn, drn_dconcentration, vn, dvn_dconcentration
    (_, _, _, vn, dvn_dconcentration) = tf.while_loop(
        cond=lambda n, *_: n > 0.,
        body=loop_body,
        loop_vars=(
            num_terms, # n
            tf.zeros_like(x, name="rn"),
            tf.zeros_like(x, name="drn_dconcentration"),
            tf.zeros_like(x, name="vn"),
            tf.zeros_like(x, name="dvn_dconcentration"),
        ),
    )
    cdf = .5 + x / (2. * np.pi) + vn / np.pi
    dcdf_dconcentration = dvn_dconcentration / np.pi
    # Clip the result to [0, 1].
    cdf_clipped = tf.clip_by_value(cdf, 0., 1.)
    # The clipped values do not depend on concentration anymore, so set their
    # derivative to zero.
    dcdf_dconcentration *= tf.cast((cdf >= 0.) & (cdf <= 1.), dtype)
    return cdf_clipped, dcdf_dconcentration
|
Computes the von Mises CDF and its derivative via series expansion.
|
def get_assessment_offered_mdata():
    """Return default mdata map for AssessmentOffered.

    The original function spelled out ~190 lines of near-identical dicts;
    the boilerplate is factored into small local builders, one per syntax
    family (ID, DATETIME, BOOLEAN, DURATION). The returned structure is
    equal to the original map.
    """
    def display_text(text):
        # A DisplayText-style map using the module's default type ids.
        return {
            'text': text,
            'languageTypeId': str(DEFAULT_LANGUAGE_TYPE),
            'scriptTypeId': str(DEFAULT_SCRIPT_TYPE),
            'formatTypeId': str(DEFAULT_FORMAT_TYPE),
        }

    def base_mdata(label, instructions):
        # Flags shared by every AssessmentOffered element.
        return {
            'element_label': display_text(label),
            'instructions': display_text(instructions),
            'required': False,
            'read_only': False,
            'linked': False,
            'array': False,
        }

    def id_mdata(label):
        mdata = base_mdata(label, 'accepts an osid.id.Id object')
        mdata.update({
            'default_id_values': [''],
            'syntax': 'ID',
            'id_set': [],
        })
        return mdata

    def datetime_mdata(label):
        mdata = base_mdata(label, 'enter a valid datetime object.')
        mdata.update({
            'default_date_time_values': [None],
            'syntax': 'DATETIME',
            'date_time_set': [],
        })
        return mdata

    def boolean_mdata(label):
        mdata = base_mdata(label, 'enter either true or false.')
        mdata.update({
            'default_boolean_values': [None],
            'syntax': 'BOOLEAN',
        })
        return mdata

    def duration_mdata(label):
        mdata = base_mdata(label, 'enter a valid duration object.')
        mdata.update({
            'default_duration_values': [None],
            'syntax': 'DURATION',
            # The original map used 'date_time_set' (not 'duration_set') for
            # DURATION syntax; preserved for backward compatibility.
            'date_time_set': [],
        })
        return mdata

    return {
        'level': id_mdata('level'),
        'start_time': datetime_mdata('start time'),
        'grade_system': id_mdata('grade system'),
        'items_shuffled': boolean_mdata('items shuffled'),
        'score_system': id_mdata('score system'),
        'deadline': datetime_mdata('deadline'),
        'duration': duration_mdata('duration'),
        'assessment': id_mdata('assessment'),
        'items_sequential': boolean_mdata('items sequential'),
    }
|
Return default mdata map for AssessmentOffered
|
def add_arguments(cls, parser, sys_arg_list=None):
    """
    Arguments for the configfile mode.
    """
    parser.add_argument(
        '-f', '--file',
        dest='file',
        required=True,
        help="config file for routing groups (only in configfile mode)")
    # Names of the arguments this mode contributes.
    return ["file"]
|
Arguments for the configfile mode.
|
def extract_words(string):
    '''Extract all alphabetic syllabified forms from 'string'.'''
    # Word = alphabetic run, optional syllable-dot section, alphabetic run.
    pattern = r'[{0}]+[{0}\.]*[{0}]+'.format(A)
    return re.findall(pattern, string, flags=FLAGS)
|
Extract all alphabetic syllabified forms from 'string'.
|
def _underscore_to_camelcase(value):
"""
Convert Python snake case back to mixed case.
"""
def camelcase():
yield str.lower
while True:
yield str.capitalize
c = camelcase()
return "".join(next(c)(x) if x else '_' for x in value.split("_"))
|
Convert Python snake case back to mixed case.
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.