code
stringlengths 75
104k
| docstring
stringlengths 1
46.9k
|
|---|---|
def compute_exit_code(config, exception=None):
    """Compute the exit code for a mutmut mutation-testing run.

    The individual exit codes are:
    * 0 if all mutants were killed (OK_KILLED)
    * 1 if a fatal error occurred
    * 2 if one or more mutants survived (BAD_SURVIVED)
    * 4 if one or more mutants timed out (BAD_TIMEOUT)
    * 8 if one or more mutants caused tests to take twice as long (OK_SUSPICIOUS)

    Codes 1 to 8 are bit-ORed together, so the returned value encodes every
    mutant status that occurred during the run.

    :param config: run configuration holding the mutant counters
    :type config: Config
    :param exception: fatal error raised during the run, if any
    :type exception: Exception
    :return: integer exit code for the mutation tests
    :rtype: int
    """
    # (bit, condition) pairs; each true condition sets its bit in the result.
    flags = (
        (1, exception is not None),
        (2, config.surviving_mutants > 0),
        (4, config.surviving_mutants_timeout > 0),
        (8, config.suspicious_mutants > 0),
    )
    code = 0
    for bit, active in flags:
        if active:
            code |= bit
    return code
|
Compute an exit code for mutmut mutation testing
The following exit codes are available for mutmut:
* 0 if all mutants were killed (OK_KILLED)
* 1 if a fatal error occurred
* 2 if one or more mutants survived (BAD_SURVIVED)
* 4 if one or more mutants timed out (BAD_TIMEOUT)
* 8 if one or more mutants caused tests to take twice as long (OK_SUSPICIOUS)
Exit codes 1 to 8 will be bit-ORed so that it is possible to know what
different mutant statuses occurred during mutation testing.
:param exception:
:type exception: Exception
:param config:
:type config: Config
:return: integer noting the exit code of the mutation tests.
:rtype: int
|
def _get_nd_basic_indexing(self, key):
    """Index the array with basic (NumPy-style) indexing.

    Called when ``key`` is an integer, a slice, or a tuple of
    slices/integers.

    :param key: int, slice, or tuple of ints/slices.
    :return: the sliced array. Axes indexed by a plain integer are
        squeezed out of the result, except that an all-integer tuple key
        still yields a 1-element array (unlike NumPy, which returns a
        scalar).
    :raises IndexError: if an integer index is out of bounds for axis 0.
    :raises ValueError: for an unsupported key type or a slice step of 0.
    """
    shape = self.shape
    if isinstance(key, integer_types):
        # Single integer: bounds-check against axis 0 only.
        if key > shape[0] - 1:
            raise IndexError(
                'index {} is out of bounds for axis 0 with size {}'.format(
                    key, shape[0]))
        return self._at(key)
    elif isinstance(key, py_slice):
        if key.step is not None and key.step != 1:
            if key.step == 0:
                raise ValueError("slice step cannot be zero")
            return op.slice(self, begin=(key.start,), end=(key.stop,), step=(key.step,))
        elif key.start is not None or key.stop is not None:
            # Step of 1 (or None): the cheaper _slice path suffices.
            return self._slice(key.start, key.stop)
        else:
            # Full slice [:] — nothing to do.
            return self
    if not isinstance(key, tuple):
        # BUG FIX: corrected "ineger" -> "integer" in the error message.
        raise ValueError('index=%s must be a slice, or an integer, or a tuple'
                         ' of slices and integers to use basic indexing, received type=%s'
                         % (str(key), str(type(key))))
    assert len(key) != 0, 'basic index cannot be an empty tuple'
    begin = []
    end = []
    step = []
    kept_axes = []  # axes where slice_i is a slice (not squeezed from the output)
    i = -1
    for i, slice_i in enumerate(key):
        if isinstance(slice_i, integer_types):
            # Integer index: a 1-element slice; -1 needs the axis length as stop.
            begin.append(slice_i)
            end.append(slice_i+1 if slice_i != -1 else self.shape[i])
            step.append(1)
        elif isinstance(slice_i, py_slice):
            if slice_i.step == 0:
                raise ValueError('basic index=%s cannot have slice=%s with step = 0'
                                 % (str(key), str(slice_i)))
            begin.append(slice_i.start)
            end.append(slice_i.stop)
            step.append(slice_i.step)
            kept_axes.append(i)
        else:
            raise ValueError('basic_indexing does not support slicing with '
                             'index=%s of type=%s.' % (str(slice_i), str(type(slice_i))))
    # Trailing axes not mentioned in the key are kept whole.
    kept_axes.extend(range(i+1, len(shape)))
    sliced_nd = op.slice(self, begin, end, step)
    if len(kept_axes) == len(shape):
        # No integer indices: nothing to squeeze.
        return sliced_nd
    # squeeze sliced_shape to remove the axes indexed by integers
    oshape = []
    sliced_shape = sliced_nd.shape
    for axis in kept_axes:
        oshape.append(sliced_shape[axis])
    # if key is a tuple of integers, still need to keep 1 dim
    # while in Numpy, the output will become an value instead of an ndarray
    if len(oshape) == 0:
        oshape.append(1)
    oshape = tuple(oshape)
    # Squeezing must preserve the element count.
    assert np.prod(oshape) == np.prod(sliced_shape), 'oshape=%s has different size'\
                                                     ' than sliced_shape=%s'\
                                                     % (oshape, sliced_shape)
    return sliced_nd.reshape(oshape)
|
This function is called when key is a slice, or an integer,
or a tuple of slices or integers
|
def push_session(document, session_id=None, url='default', io_loop=None):
    ''' Push the given document to a Bokeh server, creating (or overwriting)
    the server-side document, and return the connected session.

    ``session.document`` on the returned session is the supplied document.
    While the connection stays open, server-side changes are applied to this
    document and client-side changes are synced back to the server.

    In production, ``session_id`` should be unique per browser tab so users
    do not stomp on each other; predictable or shared session IDs are neither
    scalable nor secure. For a notebook on a single machine, something
    human-readable such as ``"default"`` is convenient. If a ``session_id``
    is autogenerated, it is available via the ``id`` property of the
    returned ``ClientSession``.

    Args:
        document : (bokeh.document.Document)
            The document to be pushed and set as session.document
        session_id : (string, optional)
            The name of the session, None to autogenerate a random one (default: None)
        url : (str, optional): The URL to a Bokeh application on a Bokeh server
            can also be `"default"` which will connect to the default app URL
        io_loop : (tornado.ioloop.IOLoop, optional)
            The IOLoop to use for the websocket

    Returns:
        ClientSession
            A new ClientSession connected to the server
    '''
    coordinates = _SessionCoordinates(session_id=session_id, url=url)
    ws_url = websocket_url_for_server_url(coordinates.url)
    session = ClientSession(
        session_id=coordinates.session_id,
        websocket_url=ws_url,
        io_loop=io_loop,
    )
    session.push(document)
    return session
|
Create a session by pushing the given document to the server,
overwriting any existing server-side document.
``session.document`` in the returned session will be your supplied
document. While the connection to the server is open, changes made on the
server side will be applied to this document, and changes made on the
client side will be synced to the server.
In a production scenario, the ``session_id`` should be unique for each
browser tab, which keeps users from stomping on each other. It's neither
scalable nor secure to use predictable session IDs or to share session
IDs across users.
For a notebook running on a single machine, ``session_id`` could be
something human-readable such as ``"default"`` for convenience.
If you allow ``push_session()`` to generate a unique ``session_id``, you
can obtain the generated ID with the ``id`` property on the returned
``ClientSession``.
Args:
document : (bokeh.document.Document)
The document to be pushed and set as session.document
session_id : (string, optional)
The name of the session, None to autogenerate a random one (default: None)
url : (str, optional): The URL to a Bokeh application on a Bokeh server
can also be `"default"` which will connect to the default app URL
io_loop : (tornado.ioloop.IOLoop, optional)
The IOLoop to use for the websocket
Returns:
ClientSession
A new ClientSession connected to the server
|
def repr_type(obj):
    """ Return a string representation of a value and its type for readable
    error messages.
    """
    obj_type = type(obj)
    # Old-style classes (Python 2 only) all share InstanceType; report the
    # actual class instead.
    if (not py3compat.PY3) and obj_type is InstanceType:
        obj_type = obj.__class__
    return '%r %r' % (obj, obj_type)
|
Return a string representation of a value and its type for readable
error messages.
|
def detect_types(
    field_names,
    field_values,
    field_types=DEFAULT_TYPES,
    skip_indexes=None,
    type_detector=TypeDetector,
    fallback_type=TextField,
    *args,
    **kwargs
):
    """Detect column types (or "where the magic happens")"""
    # TODO: look strategy of csv.Sniffer.has_header
    # TODO: may receive 'type hints'
    field_detector = type_detector(
        field_names,
        skip_indexes=skip_indexes,
        field_types=field_types,
        fallback_type=fallback_type,
    )
    field_detector.feed(field_values)
    return field_detector.fields
|
Detect column types (or "where the magic happens")
|
def main(args):
    '''
    surface_to_ribbon.main(args) can be given a list of arguments, such as sys.argv[1:]; these
    arguments may include any options and must include exactly one subject id and one output
    filename. Additionally one or two surface input filenames must be given. The surface files are
    projected into the ribbon and written to the output filename. For more information see the
    string stored in surface_to_image.info.

    :param args: list of command-line-style arguments (options, subject id,
        one or two surface files, and an output filename)
    :return: 0 on success; 1 when help text was printed
    :raises ValueError: when the positional arguments are inconsistent or no
        subject can be located
    '''
    # Parse the arguments
    (args, opts) = _surface_to_ribbon_parser(args)
    # First, help?
    if opts['help']:
        print(info, file=sys.stdout)
        return 1
    # and if we are verbose, lets setup a note function
    verbose = opts['verbose']
    def note(s):
        if verbose: print(s, file=sys.stdout)
        return verbose
    # Add the subjects directory, if there is one
    if 'subjects_dir' in opts and opts['subjects_dir'] is not None:
        add_subject_path(opts['subjects_dir'])
    # figure out our arguments:
    # lhfl/rhfl may already be set by the --lh/--rh options; positional
    # arguments fill in whichever of subject / surfaces / output is missing.
    (lhfl, rhfl) = (opts['lh_file'], opts['rh_file'])
    if len(args) == 0:
        raise ValueError('Not enough arguments provided!')
    elif len(args) == 1:
        # must be that the subject is in the env?
        sub = find_subject_path(os.getenv('SUBJECT'))
        outfl = args[0]
    elif len(args) == 2:
        # Either (subject, output) with both surfaces from options, or
        # (surface, output) with the subject taken from $SUBJECT.
        sbpth = find_subject_path(args[0])
        if sbpth is not None:
            sub = sbpth
        else:
            sub = find_subject_path(os.getenv('SUBJECT'))
            if lhfl is not None: rhfl = args[0]
            elif rhfl is not None: lhfl = args[0]
            else: raise ValueError('Given arg is not a subject: %s' % args[0])
        outfl = args[1]
    elif len(args) == 3:
        # One of the first two args is the subject; the other is the missing
        # hemisphere surface — unless both surfaces were given positionally.
        sbpth0 = find_subject_path(args[0])
        sbpth1 = find_subject_path(args[1])
        if sbpth0 is not None:
            sub = sbpth0
            if lhfl is not None: rhfl = args[1]
            elif rhfl is not None: lhfl = args[1]
            else: raise ValueError('Too many arguments given: %s' % args[1])
        elif sbpth1 is not None:
            sub = sbpth1
            if lhfl is not None: rhfl = args[0]
            elif rhfl is not None: lhfl = args[0]
            else: raise ValueError('Too many arguments given: %s' % args[0])
        else:
            sub = find_subject_path(os.getenv('SUBJECT'))
            if lhfl is not None or rhfl is not None:
                raise ValueError('Too many arguments and no subject given')
            (lhfl, rhfl) = args
        outfl = args[2]
    elif len(args) == 4:
        # Fully positional: subject + lh surface + rh surface + output,
        # with the subject identified by probing each argument.
        if lhfl is not None or rhfl is not None:
            raise ValueError('Too many arguments and no subject given')
        subidx = next((i for (i,a) in enumerate(args) if find_subject_path(a) is not None), None)
        if subidx is None: raise ValueError('No subject given')
        sub = find_subject_path(args[subidx])
        del args[subidx]
        (lhfl, rhfl, outfl) = args
    else:
        raise ValueError('Too many arguments provided!')
    if sub is None: raise ValueError('No subject specified or found in $SUBJECT')
    if lhfl is None and rhfl is None: raise ValueError('No surfaces provided')
    # check the method
    method = opts['method'].lower()
    if method not in ['linear', 'lines', 'nearest', 'auto']:
        raise ValueError('Unsupported method: %s' % method)
    # and the datatype
    if opts['dtype'] is None: dtyp = None
    elif opts['dtype'].lower() == 'float': dtyp = np.float32
    elif opts['dtype'].lower() == 'int': dtyp = np.int32
    else: raise ValueError('Type argument must be float or int')
    # 'auto' picks interpolation to match the data type (nearest for ints).
    if method == 'auto':
        if dtyp is np.float32: method = 'linear'
        elif dtyp is np.int32: method = 'nearest'
        else: method = 'linear'
    # Now, load the data:
    note('Reading surfaces...')
    (lhdat, rhdat) = (None, None)
    if lhfl is not None:
        note(' - Reading LH file: %s' % lhfl)
        lhdat = read_surf_file(lhfl)
    if rhfl is not None:
        note(' - Reading RH file: %s' % rhfl)
        rhdat = read_surf_file(rhfl)
    # hemi is None when both hemispheres are present.
    (dat, hemi) = (rhdat, 'rh') if lhdat is None else \
                  (lhdat, 'lh') if rhdat is None else \
                  ((lhdat, rhdat), None)
    sub = subject(sub)
    # okay, make the volume...
    note('Generating volume...')
    vol = sub.cortex_to_image(dat, hemi=hemi, method=method, fill=opts['fill'], dtype=dtyp)
    # and write out the file
    note('Exporting volume file: %s' % outfl)
    save(outfl, vol, affine=sub.voxel_to_native_matrix)
    note('surface_to_image complete!')
    return 0
|
surface_to_ribbon.main(args) can be given a list of arguments, such as sys.argv[1:]; these
arguments may include any options and must include exactly one subject id and one output
filename. Additionally one or two surface input filenames must be given. The surface files are
projected into the ribbon and written to the output filename. For more information see the
string stored in surface_to_image.info.
|
async def create_source_event_stream(
    schema: GraphQLSchema,
    document: DocumentNode,
    root_value: Any = None,
    context_value: Any = None,
    variable_values: Dict[str, Any] = None,
    operation_name: str = None,
    field_resolver: GraphQLFieldResolver = None,
) -> Union[AsyncIterable[Any], ExecutionResult]:
    """Create source event stream

    Implements the "CreateSourceEventStream" algorithm described in the GraphQL
    specification, resolving the subscription source event stream.

    Returns a coroutine that yields an AsyncIterable.

    If the client provided invalid arguments, the source stream could not be created,
    or the resolver did not return an AsyncIterable, this function will throw an error,
    which should be caught and handled by the caller.

    A Source Event Stream represents a sequence of events, each of which triggers a
    GraphQL execution for that event.

    This may be useful when hosting the stateful subscription service in a different
    process or machine than the stateless GraphQL execution engine, or otherwise
    separating these two steps. For more on this, see the "Supporting Subscriptions
    at Scale" information in the GraphQL spec.
    """
    # If arguments are missing or incorrectly typed, this is an internal developer
    # mistake which should throw an early error.
    assert_valid_execution_arguments(schema, document, variable_values)
    # If a valid context cannot be created due to incorrect arguments, this will throw
    # an error.
    context = ExecutionContext.build(
        schema,
        document,
        root_value,
        context_value,
        variable_values,
        operation_name,
        field_resolver,
    )
    # Return early errors if execution context failed.
    if isinstance(context, list):
        return ExecutionResult(data=None, errors=context)
    type_ = get_operation_root_type(schema, context.operation)
    fields = context.collect_fields(type_, context.operation.selection_set, {}, set())
    # A subscription operation has exactly one root field; use the first
    # response name and its first field node.
    response_names = list(fields)
    response_name = response_names[0]
    field_nodes = fields[response_name]
    field_node = field_nodes[0]
    field_name = field_node.name.value
    field_def = get_field_def(schema, type_, field_name)
    if not field_def:
        raise GraphQLError(
            f"The subscription field '{field_name}' is not defined.", field_nodes
        )
    # Call the `subscribe()` resolver or the default resolver to produce an
    # AsyncIterable yielding raw payloads.
    resolve_fn = field_def.subscribe or context.field_resolver
    resolve_fn = cast(GraphQLFieldResolver, resolve_fn)  # help mypy
    path = add_path(None, response_name)
    info = context.build_resolve_info(field_def, field_nodes, type_, path)
    # `resolve_field_value_or_error` implements the "ResolveFieldEventStream" algorithm
    # from GraphQL specification. It differs from `resolve_field_value` due to
    # providing a different `resolve_fn`.
    result = context.resolve_field_value_or_error(
        field_def, field_nodes, resolve_fn, root_value, info
    )
    # The resolver may be sync or async; await only when needed.
    event_stream = await cast(Awaitable, result) if isawaitable(result) else result
    # If `event_stream` is an Error, rethrow a located error.
    if isinstance(event_stream, Exception):
        raise located_error(event_stream, field_nodes, response_path_as_list(path))
    # Assert field returned an event stream, otherwise yield an error.
    if isinstance(event_stream, AsyncIterable):
        return cast(AsyncIterable, event_stream)
    raise TypeError(
        f"Subscription field must return AsyncIterable. Received: {event_stream!r}"
    )
|
Create source event stream
Implements the "CreateSourceEventStream" algorithm described in the GraphQL
specification, resolving the subscription source event stream.
Returns a coroutine that yields an AsyncIterable.
If the client provided invalid arguments, the source stream could not be created,
or the resolver did not return an AsyncIterable, this function will throw an error,
which should be caught and handled by the caller.
A Source Event Stream represents a sequence of events, each of which triggers a
GraphQL execution for that event.
This may be useful when hosting the stateful subscription service in a different
process or machine than the stateless GraphQL execution engine, or otherwise
separating these two steps. For more on this, see the "Supporting Subscriptions
at Scale" information in the GraphQL spec.
|
def upload_slice_file(self, real_file_path, slice_size, file_name, offset=0, dir_name=None):
    """Upload a local file to COS in slices (multipart upload).

    This slice-upload code was kindly contributed by GitHub user
    a270443177 (https://github.com/a270443177).

    :param real_file_path: path of the local file to upload
    :param slice_size: size in bytes of each uploaded slice
    :param file_name: destination file name in the bucket
    :param offset: byte offset at which uploading starts
    :param dir_name: destination directory in the bucket, if any
    :return: response of the finishing upload request
    """
    # Normalize the destination directory: strip a leading slash, and use
    # an empty string when no directory was given.
    if dir_name is not None and dir_name.startswith('/'):
        dir_name = dir_name[1:]
    if dir_name is None:
        dir_name = ""
    self.url = 'http://{0}.file.myqcloud.com/files/v2/{1}/{2}'.format(
        self.config.region, self.config.app_id, self.config.bucket)
    self.url += '/' + dir_name + '/' + file_name
    file_size = os.path.getsize(real_file_path)
    # Negotiate an upload session, then send the file slice by slice.
    session = self._upload_slice_control(file_size=file_size, slice_size=slice_size)
    with open(real_file_path, 'rb') as local_file:
        while offset < file_size:
            chunk = local_file.read(slice_size)
            self._upload_slice_data(filecontent=chunk, session=session, offset=offset)
            offset += slice_size
        return self._upload_slice_finish(session=session, file_size=file_size)
|
此分片上传代码由GitHub用户a270443177(https://github.com/a270443177)友情提供
:param real_file_path:
:param slice_size:
:param file_name:
:param offset:
:param dir_name:
:return:
|
def do_loop(self, params):
    """
    \x1b[1mNAME\x1b[0m
        loop - Runs commands in a loop

    \x1b[1mSYNOPSIS\x1b[0m
        loop <repeat> <pause> <cmd1> <cmd2> ... <cmdN>

    \x1b[1mDESCRIPTION\x1b[0m
        Runs <cmds> <repeat> times (0 means forever), with a pause of <pause> secs inbetween
        each <cmd> (0 means no pause).

    \x1b[1mEXAMPLES\x1b[0m
        > loop 3 0 "get /foo"
        ...

        > loop 3 0 "get /foo" "get /bar"
        ...
    """
    repeat, pause, cmds = params.repeat, params.pause, params.cmds
    # Validate inputs up front; bail out with a message on bad values.
    if repeat < 0:
        self.show_output("<repeat> must be >= 0.")
        return
    if pause < 0:
        self.show_output("<pause> must be >= 0.")
        return
    completed = 0
    with self.transitions_disabled():
        while True:
            for command in cmds:
                # A failing command is reported but does not stop the loop.
                try:
                    self.onecmd(command)
                except Exception as exc:
                    self.show_output("Command failed: %s.", exc)
                if pause > 0.0:
                    time.sleep(pause)
            completed += 1
            # repeat == 0 means loop forever.
            if repeat > 0 and completed >= repeat:
                break
|
\x1b[1mNAME\x1b[0m
loop - Runs commands in a loop
\x1b[1mSYNOPSIS\x1b[0m
loop <repeat> <pause> <cmd1> <cmd2> ... <cmdN>
\x1b[1mDESCRIPTION\x1b[0m
Runs <cmds> <repeat> times (0 means forever), with a pause of <pause> secs inbetween
each <cmd> (0 means no pause).
\x1b[1mEXAMPLES\x1b[0m
> loop 3 0 "get /foo"
...
> loop 3 0 "get /foo" "get /bar"
...
|
def add_artwork_item(self, instance, item):
    """
    Add an artwork item e.g. Shapes, Notes and Pixmaps

    Normalizes the item's properties from the old topology representation
    (``self.old_top``) into ``self.artwork[item_type][item_id]``.

    :param instance: Hypervisor instance
    :param item: Item to add, formatted as '<TYPE> <ID>' (e.g. 'NOTE 1')
    """
    if 'interface' in self.old_top[instance][item]:
        # NOTE(review): items carrying an 'interface' property are presumably
        # not artwork (devices/links rather than shapes/notes) and are
        # skipped — confirm against the topology format.
        pass
    else:
        (item_type, item_id) = item.split(' ')
        self.artwork[item_type][item_id] = {}
        for s_item in sorted(self.old_top[instance][item]):
            if self.old_top[instance][item][s_item] is not None:
                s_detail = self.old_top[instance][item][s_item]
                s_type = type(s_detail)
                if item_type == 'NOTE' and s_type == str:
                    # Fix any escaped newline characters
                    s_detail = s_detail.replace('\\n', '\n')
                # Strip surrounding double quotes from quoted string values.
                if s_type == str and len(s_detail) > 1 \
                        and s_detail[0] == '"' and s_detail[-1] == '"':
                    s_detail = s_detail[1:-1]
                # Rename legacy property keys to the new schema.
                if item_type == 'SHAPE' and s_item == 'fill_color':
                    s_item = 'color'
                elif s_item == 'rotate':
                    s_item = 'rotation'
                    s_detail = float(s_detail)
                self.artwork[item_type][item_id][s_item] = s_detail
        # Shapes without an explicit color get an opaque white default.
        if item_type == 'SHAPE' and \
                'color' not in self.artwork[item_type][item_id]:
            self.artwork[item_type][item_id]['color'] = '#ffffff'
            self.artwork[item_type][item_id]['transparency'] = 0
|
Add an artwork item e.g. Shapes, Notes and Pixmaps
:param instance: Hypervisor instance
:param item: Item to add
|
def load_user(user_email):
    """Return the currently active user as a LoginUser, or None if the
    email does not match a stored user."""
    user_obj = store.user(user_email)
    if user_obj:
        return LoginUser(user_obj)
    return None
|
Returns the currently active user as an object.
|
def setEnabled(self, state):
    """
    Updates the drop shadow effect for this widget on enable/disable
    state change.

    Overrides QToolButton.setEnabled to refresh the UI (via updateUi)
    after the enabled state is applied.

    :param state | <bool>
    """
    super(XToolButton, self).setEnabled(state)
    self.updateUi()
|
Updates the drop shadow effect for this widget on enable/disable
state change.
:param state | <bool>
|
def get_all_json_from_indexq(self):
    '''
    Gathers the data from every todo file in indexq and returns it as one
    flat list.
    '''
    return [
        item
        for todo_file in self.get_all_as_list()
        for item in self._open_file(todo_file)
    ]
|
Gets all data from the todo files in indexq and returns one huge list of all data.
|
def create_new_output_file(sampler, filename, force=False, injection_file=None,
                           **kwargs):
    r"""Creates a new output file.

    If the output file already exists, an ``OSError`` will be raised. This can
    be overridden by setting ``force`` to ``True``.

    Parameters
    ----------
    sampler : sampler instance
        Sampler
    filename : str
        Name of the file to create.
    force : bool, optional
        Create the file even if it already exists. Default is False.
    injection_file : str, optional
        If an injection was added to the data, write its information.
    \**kwargs :
        All other keyword arguments are passed through to the file's
        ``write_metadata`` function.

    Notes
    -----
    NOTE(review): ``kwargs`` is accepted but not currently forwarded to any
    write call in this body — confirm whether ``write_sampler_metadata``
    should receive it, or whether the docstring is stale.
    """
    if os.path.exists(filename):
        if force:
            os.remove(filename)
        else:
            raise OSError("output-file already exists; use force if you "
                          "wish to overwrite it.")
    logging.info("Creating file {}".format(filename))
    with sampler.io(filename, "w") as fp:
        # create the samples group and sampler info group
        fp.create_group(fp.samples_group)
        fp.create_group(fp.sampler_group)
        # save the sampler's metadata
        fp.write_sampler_metadata(sampler)
        # save injection parameters
        if injection_file is not None:
            logging.info("Writing injection file to output")
            # just use the first one
            fp.write_injections(injection_file)
|
Creates a new output file.
If the output file already exists, an ``OSError`` will be raised. This can
be overridden by setting ``force`` to ``True``.
Parameters
----------
sampler : sampler instance
Sampler
filename : str
Name of the file to create.
force : bool, optional
Create the file even if it already exists. Default is False.
injection_file : str, optional
If an injection was added to the data, write its information.
\**kwargs :
All other keyword arguments are passed through to the file's
``write_metadata`` function.
|
def _get_server_certificate(addr, ssl_version=PROTOCOL_SSLv23, ca_certs=None):
    """Retrieve a server certificate

    Connect to the server at the specified address and return its
    certificate as a PEM-encoded string.

    If 'ca_certs' is specified, validate the server cert against it.
    If 'ssl_version' is specified, use it in the connection attempt.
    """
    # Only DTLS versions are handled here; everything else goes to the
    # original (stream-based) implementation.
    if ssl_version not in (PROTOCOL_DTLS, PROTOCOL_DTLSv1, PROTOCOL_DTLSv1_2):
        return _orig_get_server_certificate(addr, ssl_version, ca_certs)
    cert_reqs = ssl.CERT_NONE if ca_certs is None else ssl.CERT_REQUIRED
    family = getaddrinfo(addr[0], addr[1])[0][0]
    conn = ssl.wrap_socket(socket(family, SOCK_DGRAM),
                           ssl_version=ssl_version,
                           cert_reqs=cert_reqs, ca_certs=ca_certs)
    conn.connect(addr)
    der_cert = conn.getpeercert(True)
    conn.close()
    return ssl.DER_cert_to_PEM_cert(der_cert)
|
Retrieve a server certificate
Retrieve the certificate from the server at the specified address,
and return it as a PEM-encoded string.
If 'ca_certs' is specified, validate the server cert against it.
If 'ssl_version' is specified, use it in the connection attempt.
|
def eval_detection_voc(pred_boxlists, gt_boxlists, iou_thresh=0.5, use_07_metric=False):
    """Evaluate detections on the VOC dataset.

    Args:
        pred_boxlists(list[BoxList]): pred boxlist, has labels and scores fields.
        gt_boxlists(list[BoxList]): ground truth boxlist, has labels field.
        iou_thresh: iou thresh
        use_07_metric: boolean
    Returns:
        dict with per-class AP ("ap") and mean AP over classes ("map")
    """
    assert len(gt_boxlists) == len(pred_boxlists), \
        "Length of gt and pred lists need to be same."
    precision, recall = calc_detection_voc_prec_rec(
        pred_boxlists=pred_boxlists,
        gt_boxlists=gt_boxlists,
        iou_thresh=iou_thresh,
    )
    average_precision = calc_detection_voc_ap(
        precision, recall, use_07_metric=use_07_metric)
    # nanmean skips classes with no ground-truth (AP is NaN there).
    return {"ap": average_precision, "map": np.nanmean(average_precision)}
|
Evaluate on voc dataset.
Args:
pred_boxlists(list[BoxList]): pred boxlist, has labels and scores fields.
gt_boxlists(list[BoxList]): ground truth boxlist, has labels field.
iou_thresh: iou thresh
use_07_metric: boolean
Returns:
dict represents the results
|
def metarate(self, func, name='values'):
    """
    Attach this object's ``values`` to ``func`` under the attribute ``name``
    and return ``func`` (usable as a decorator).
    """
    setattr(func, name, self.values)
    return func
|
Set the values object to the function object's namespace
|
def get_snapshots(topology):
    """
    Return the paths of any snapshot topologies

    :param str topology: topology file
    :return: list of dicts containing snapshot topologies
    :rtype: list
    """
    snapshots = []
    snap_dir = os.path.join(topology_dirname(topology), 'snapshots')
    if not os.path.exists(snap_dir):
        return snapshots
    # Each subdirectory of 'snapshots' may hold its own topology.net file.
    for entry in os.listdir(snap_dir):
        candidate = os.path.join(snap_dir, entry, 'topology.net')
        if os.path.exists(candidate):
            snapshots.append({'file': candidate, 'snapshot': True})
    return snapshots
|
Return the paths of any snapshot topologies
:param str topology: topology file
:return: list of dicts containing snapshot topologies
:rtype: list
|
def find_version_by_string_lib(line):  # type: (str)->Optional[str]
    """Extract a version value from a ``version="..."`` line.

    No regex parsing. Or at least, mostly, not regex.

    :param line: raw source line to inspect
    :return: the quoted version string, or None when the line is empty,
        does not start with ``version=`` (after simplification), or the
        value is not cleanly double-quoted.
    """
    if not line:
        return None
    simplified_line = simplify_line(line)
    version = None
    if simplified_line.startswith("version="):
        if '"' not in simplified_line:
            pass
            # logger.debug("Weird version string, no double quote : " + unicode((full_path, line, simplified_line)))
        else:
            # BUG FIX: previously this took split("=")[0] — the text BEFORE
            # the '=' — and assigned the version inside the len(parts) != 3
            # mismatch branch, so a real version could never be returned.
            # Take the value side of the first '=' and require exactly one
            # double-quoted token: ['', version, trailing].
            post_equals = simplified_line.split("=", 1)[1]
            if '"' in post_equals:
                parts = post_equals.split('"')
                if len(parts) == 3:
                    version = parts[1]
                # else: weird string, more than one quoted section; leave None
    return version
|
No regex parsing. Or at least, mostly, not regex.
|
def on_delete(self, forced):
    """Session expiration callback

    `forced`
        True when the session item was explicitly deleted; False when the
        item merely expired.
    """
    # Keep the session alive (re-promote it) when it expired naturally but a
    # live connection is still attached; otherwise shut it down.
    keep_alive = (not forced) and self.handler is not None and not self.is_closed
    if keep_alive:
        self.promote()
    else:
        self.close()
|
Session expiration callback
`forced`
If session item explicitly deleted, forced will be set to True. If
item expired, will be set to False.
|
def ReadPreprocessingInformation(self, knowledge_base):
    """Reads preprocessing information.

    The preprocessing information contains the system configuration which
    contains information about various system specific configuration data,
    for example the user accounts.

    Args:
      knowledge_base (KnowledgeBase): is used to store the preprocessing
          information.
    """
    system_configurations = self._GetAttributeContainers(
        self._CONTAINER_TYPE_SYSTEM_CONFIGURATION)
    # TODO: replace stream_number by session_identifier.
    for stream_number, system_configuration in enumerate(system_configurations):
        knowledge_base.ReadSystemConfigurationArtifact(
            system_configuration, session_identifier=stream_number)
|
Reads preprocessing information.
The preprocessing information contains the system configuration which
contains information about various system specific configuration data,
for example the user accounts.
Args:
knowledge_base (KnowledgeBase): is used to store the preprocessing
information.
|
def _item_to_metric(iterator, log_metric_pb):
    """Convert a metric protobuf to the native object.

    :type iterator: :class:`~google.api_core.page_iterator.Iterator`
    :param iterator: The iterator that is currently in use.

    :type log_metric_pb:
        :class:`.logging_metrics_pb2.LogMetric`
    :param log_metric_pb: Metric protobuf returned from the API.

    :rtype: :class:`~google.cloud.logging.metric.Metric`
    :returns: The next metric in the page.
    """
    # NOTE: LogMetric message type does not have an ``Any`` field
    # so `MessageToDict`` can safely be used.
    return Metric.from_api_repr(MessageToDict(log_metric_pb), iterator.client)
|
Convert a metric protobuf to the native object.
:type iterator: :class:`~google.api_core.page_iterator.Iterator`
:param iterator: The iterator that is currently in use.
:type log_metric_pb:
:class:`.logging_metrics_pb2.LogMetric`
:param log_metric_pb: Metric protobuf returned from the API.
:rtype: :class:`~google.cloud.logging.metric.Metric`
:returns: The next metric in the page.
|
def _checkDimensionsListLike(arrays):
"""Check that each array in a list of arrays has the same size.
"""
dim1 = len(arrays)
dim2, dim3 = arrays[0].shape
for aa in range(1, dim1):
dim2_aa, dim3_aa = arrays[aa].shape
if (dim2_aa != dim2) or (dim3_aa != dim3):
raise _error.InvalidError(_MDPERR["obj_square"])
return dim1, dim2, dim3
|
Check that each array in a list of arrays has the same size.
|
def get_import_stacklevel(import_hook):
    """Returns the stacklevel value for warnings.warn() for when the warning
    gets emitted by an imported module, but the warning should point at the
    code doing the import.

    Pass import_hook=True if the warning gets generated by an import hook
    (warn() gets called in load_module(), see PEP302)
    """
    # Only 3.3 and 3.4 need special stack depths; 2.7/3.2 and 3.5+ share
    # the same values (fixed again in 3.5+, see
    # https://bugs.python.org/issue24305).
    levels_by_version = {
        (3, 3): (8, 10),
        (3, 4): (10, 8),
    }
    py_version = sys.version_info[:2]
    hook_level, direct_level = levels_by_version.get(py_version, (4, 2))
    return hook_level if import_hook else direct_level
|
Returns the stacklevel value for warnings.warn() for when the warning
gets emitted by an imported module, but the warning should point at the
code doing the import.
Pass import_hook=True if the warning gets generated by an import hook
(warn() gets called in load_module(), see PEP302)
|
def iniedited(self, *args, **kwargs):
    """Set the current index of inimodel to modified

    Marks the currently selected file in the files list view as edited in
    the ini model. Signal-handler signature: positional and keyword
    arguments are accepted but ignored.

    :returns: None
    :rtype: None
    :raises: None
    """
    self.inimodel.set_index_edited(self.files_lv.currentIndex(), True)
|
Set the current index of inimodel to modified
:returns: None
:rtype: None
:raises: None
|
def call_handlers(self, msg):
    """ Reimplemented to emit signals instead of making callbacks.
    """
    # Every message goes out on the generic signal.
    self.message_received.emit(msg)
    # Then on a specialized per-type signal, when one exists.
    msg_type = msg['header']['msg_type']
    specialized = getattr(self, msg_type + '_received', None)
    if specialized:
        specialized.emit(msg)
    elif msg_type in ('stdout', 'stderr'):
        self.stream_received.emit(msg)
|
Reimplemented to emit signals instead of making callbacks.
|
def check_ab(ab, verb):
    r"""Check source-receiver configuration.

    This check-function is called from one of the modelling routines in
    :mod:`model`. Consult these modelling routines for a detailed description
    of the input parameters.

    Parameters
    ----------
    ab : int
        Source-receiver configuration.

    verb : {0, 1, 2, 3, 4}
        Level of verbosity.

    Returns
    -------
    ab_calc : int
        Adjusted source-receiver configuration using reciprocity.

    msrc, mrec : bool
        If True, src/rec is magnetic; if False, src/rec is electric.
    """
    # Try to cast ab into an integer
    try:
        ab = int(ab)
    except VariableCatch:
        print('* ERROR :: <ab> must be an integer')
        raise
    # All valid configurations: both digits in 1..6
    pab = [10*alpha + beta for alpha in range(1, 7) for beta in range(1, 7)]
    if ab not in pab:
        print('* ERROR :: <ab> must be one of: ' + str(pab) + ';' +
              ' <ab> provided: ' + str(ab))
        raise ValueError('ab')
    # Print input <ab>
    if verb > 2:
        print(" Input ab : ", ab)
    # Tens digit encodes the receiver, ones digit the source;
    # digits above 3 mean magnetic, otherwise electric.
    rec_digit, src_digit = divmod(ab, 10)
    msrc = src_digit > 3
    mrec = rec_digit > 3
    # If rec is magnetic, switch <ab> using reciprocity.
    if not mrec:
        ab_calc = ab
    elif msrc:
        # G^mm_ab(s, r, e, z) = -G^ee_ab(s, r, -z, -e)
        ab_calc = ab - 33  # -30 : mrec->erec; -3: msrc->esrc
    else:
        # G^me_ab(s, r, e, z) = -G^em_ba(r, s, e, z)
        ab_calc = src_digit*10 + rec_digit  # Swap alpha/beta
    # Print actual calculated <ab>
    if verb > 2:
        if ab in [36, 63]:
            print("\n> <ab> IS "+str(ab)+" WHICH IS ZERO; returning")
        else:
            print(" Calculated ab : ", ab_calc)
    return ab_calc, msrc, mrec
|
r"""Check source-receiver configuration.
This check-function is called from one of the modelling routines in
:mod:`model`. Consult these modelling routines for a detailed description
of the input parameters.
Parameters
----------
ab : int
Source-receiver configuration.
verb : {0, 1, 2, 3, 4}
Level of verbosity.
Returns
-------
ab_calc : int
Adjusted source-receiver configuration using reciprocity.
msrc, mrec : bool
If True, src/rec is magnetic; if False, src/rec is electric.
|
def system_monitor_mail_relay_domain_name(self, **kwargs):
    """Build the system-monitor-mail relay domain-name config element.

    Pops ``host_ip`` and ``domain_name`` from *kwargs*, assembles the
    corresponding XML configuration tree, and passes it to ``callback``
    (``kwargs['callback']`` when given, otherwise ``self._callback``).
    """
    root = ET.Element("config")
    monitor_mail = ET.SubElement(
        root, "system-monitor-mail",
        xmlns="urn:brocade.com:mgmt:brocade-system-monitor")
    relay_el = ET.SubElement(monitor_mail, "relay")
    ET.SubElement(relay_el, "host-ip").text = kwargs.pop('host_ip')
    ET.SubElement(relay_el, "domain-name").text = kwargs.pop('domain_name')
    handler = kwargs.pop('callback', self._callback)
    return handler(root)
|
Auto Generated Code
|
def initialize_tasks(self):
    """Prime the input queue up to its capacity.

    One poison pill per worker process is appended so workers shut down
    cleanly.  Only the first ``Q_MAX_SIZE`` tasks are enqueued here:
    overfilling would deadlock when ``queue.put`` blocks on a full
    queue, so the remaining tasks in ``self.tasks`` are enqueued later
    as results are returned.
    """
    shutdown_pills = [POISON_PILL] * self.num_processes
    self.tasks = chain(self.iterable, shutdown_pills)
    for task in islice(self.tasks, Q_MAX_SIZE):
        log.debug('Putting %s on queue', task)
        self.task_queue.put(task)
|
Load the input queue to capacity.
Overfilling causes a deadlock when `queue.put` blocks when
full, so further tasks are enqueued as results are returned.
|
def _AssertDataIsList(key, lst):
"""Assert that lst contains list data and is not structured."""
# list and tuple are supported. Not supported are direct strings
# and dictionary; these indicate too much or two little structure.
if not isinstance(lst, list) and not isinstance(lst, tuple):
raise NotAListError('%s must be a list' % key)
# each list entry must be a string
for element in lst:
if not isinstance(element, str):
raise ElementNotAStringError('Unsupported list element %s found in %s',
(element, lst))
|
Assert that lst contains list data and is not structured.
|
def get_config(repo):
    """
    Return the repo's config merged over the default config.

    Reads ``config.json`` from the repo's ``gh-pages`` branch when
    present; returns the defaults alone when the file is missing or
    cannot be parsed.

    :param repo: repository object providing ``get_file_contents``
    :return: dict of configuration values
    """
    files = get_files(repo)
    # Copy the defaults: the original updated DEFAULT_CONFIG in place,
    # leaking one repo's settings into every subsequent call.
    config = dict(DEFAULT_CONFIG)
    if "config.json" in files:
        # get the config file, parse JSON and merge it with the default config
        config_file = repo.get_file_contents('/config.json', ref="gh-pages")
        try:
            repo_config = json.loads(config_file.decoded_content.decode("utf-8"))
            config.update(repo_config)
        except ValueError:
            click.secho("WARNING: Unable to parse config file. Using defaults.", fg="yellow")
    return config
|
Get the config for the repo, merged with the default config. Returns the default config if
no config file is found.
|
def _generate_footer(notebook_object, notebook_type):
    """
    Internal helper that appends the standard footer cells to a notebook.

    ----------
    Parameters
    ----------
    notebook_object : notebook object
        Object of "notebook" class where the footer will be created.
    notebook_type : str
        Notebook type: - "Main_Files_Signal_Samples"
                       - "Main_Files_By_Category"
                       - "Main_Files_By_Difficulty"
                       - "Main_Files_By_Tag"
                       - "Load"
                       - "Record"
                       - "Visualise"
                       - "Pre-Process"
                       - "Detect"
                       - "Extract"
                       - "Train_and_Classify"
                       - "Understand"
                       - "Evaluate"
    """
    footer_markdown = FOOTER
    # Main files live at the repository root, so the relative prefix used
    # by ordinary notebooks must be stripped from their links.
    if "Main_Files" in notebook_type:
        footer_markdown = footer_markdown.replace("../MainFiles/", "")
    cells = notebook_object["cells"]
    # Footer div.
    cells.append(nb.v4.new_markdown_cell(
        footer_markdown, metadata={"tags": ["footer"]}))
    # Cells applying the biosignalsnotebooks CSS style.
    cells.append(nb.v4.new_markdown_cell(
        AUX_CODE_MESSAGE, metadata={"tags": ["hide_mark"]}))
    cells.append(nb.v4.new_code_cell(
        CSS_STYLE_CODE, metadata={"tags": ["hide_both"]}))
|
Internal function that is used for generation of the notebooks footer.
----------
Parameters
----------
notebook_object : notebook object
Object of "notebook" class where the header will be created.
notebook_type : str
Notebook type: - "Main_Files_Signal_Samples"
- "Main_Files_By_Category"
- "Main_Files_By_Difficulty"
- "Main_Files_By_Tag"
- "Load"
- "Record"
- "Visualise"
- "Pre-Process"
- "Detect"
- "Extract"
- "Train_and_Classify"
- "Understand"
- "Evaluate"
|
def delete_external_link(self, id, **kwargs):  # noqa: E501
    """Delete a specific external link.  # noqa: E501

    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True

    >>> thread = api.delete_external_link(id, async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param str id: (required)
    :return: ResponseContainerExternalLink
             If the method is called asynchronously,
             returns the request thread.
    """
    kwargs['_return_http_data_only'] = True
    # Both the async path (a thread) and the sync path (resolved data)
    # are whatever the underlying call returns, so a single call suffices.
    return self.delete_external_link_with_http_info(id, **kwargs)  # noqa: E501
|
Delete a specific external link # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_external_link(id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str id: (required)
:return: ResponseContainerExternalLink
If the method is called asynchronously,
returns the request thread.
|
def version(app, appbuilder):
    """
    Print the Flask-AppBuilder package version to the console.
    """
    _appbuilder = import_application(app, appbuilder)
    message = "F.A.B Version: {0}.".format(_appbuilder.version)
    click.echo(click.style(message, bg="blue", fg="white"))
|
Flask-AppBuilder package version
|
def listar_por_equip(self, equip_id):
    """List every environment for a specific piece of equipment
    (lista todos os ambientes por equipamento especifico).

    :param equip_id: equipment identifier; must not be None.
    :return: dict with the following structure:

        ::

            {'ambiente': {'id': < id_ambiente >,
                'link': < link >,
                'id_divisao': < id_divisao >,
                'nome_divisao': < nome_divisao >,
                'id_ambiente_logico': < id_ambiente_logico >,
                'nome_ambiente_logico': < nome_ambiente_logico >,
                'id_grupo_l3': < id_grupo_l3 >,
                'nome_grupo_l3': < nome_grupo_l3 >,
                'id_filter': < id_filter >,
                'filter_name': < filter_name >,
                'ambiente_rede': < ambiente_rede >}}

    :raise InvalidParameterError: the equipment id was not supplied.
    :raise DataBaseError: networkapi failed to access the database.
    :raise XMLError: networkapi failed to generate the response XML.
    """
    if equip_id is None:
        raise InvalidParameterError(
            u'O id do equipamento não foi informado.')
    url = 'ambiente/equip/%s/' % equip_id
    code, xml = self.submit(None, 'GET', url)
    return self.response(code, xml)
|
Lista todos os ambientes por equipamento especifico.
:return: Dicionário com a seguinte estrutura:
::
{'ambiente': {'id': < id_ambiente >,
'link': < link >,
'id_divisao': < id_divisao >,
'nome_divisao': < nome_divisao >,
'id_ambiente_logico': < id_ambiente_logico >,
'nome_ambiente_logico': < nome_ambiente_logico >,
'id_grupo_l3': < id_grupo_l3 >,
'nome_grupo_l3': < nome_grupo_l3 >,
'id_filter': < id_filter >,
'filter_name': < filter_name >,
'ambiente_rede': < ambiente_rede >}}
:raise DataBaseError: Falha na networkapi ao acessar o banco de dados.
:raise XMLError: Falha na networkapi ao gerar o XML de resposta.
|
def stop(self, timeout=1.0):
    """Stop a running server (from another thread).

    Parameters
    ----------
    timeout : float or None, optional
        Seconds to wait for the server to have *started*; falsy values
        skip the wait entirely.

    Returns
    -------
    stopped : thread-safe Future
        Resolves when the server is stopped.
    """
    # Give the server a chance to finish starting before tearing it down.
    if timeout:
        self._running.wait(timeout)
    return self._ioloop_manager.stop(callback=self._uninstall)
|
Stop a running server (from another thread).
Parameters
----------
timeout : float or None, optional
Seconds to wait for server to have *started*.
Returns
-------
stopped : thread-safe Future
Resolves when the server is stopped
|
def prepare_for_json_encoding(obj):
    """
    Convert an arbitrary object into just JSON data types (list, dict,
    unicode str, int, bool, null).

    Exact-type checks (not isinstance) are used deliberately so that
    subclasses fall through to the generic string conversion.
    """
    kind = type(obj)
    if kind in (list, tuple):
        return [prepare_for_json_encoding(member) for member in obj]
    if kind == dict:
        # Alphabetizing keys lets us compare attributes for equality
        # across runs.
        return OrderedDict(
            (prepare_for_json_encoding(key), prepare_for_json_encoding(obj[key]))
            for key in sorted(obj.keys())
        )
    if kind == six.binary_type:
        return smart_unicode_decode(obj)
    if kind == bool or obj is None or kind == six.text_type or isinstance(obj, numbers.Number):
        return obj
    if kind == PSLiteral:
        # Special case because pdfminer.six currently adds extra quotes
        # to PSLiteral.__repr__.
        return u"/%s" % obj.name
    return six.text_type(obj)
|
Convert an arbitrary object into just JSON data types (list, dict, unicode str, int, bool, null).
|
def statement_after(self, i):
    """Return the statement after the *i*-th one, or `None`."""
    k = i + 1
    body_len = len(self.body)
    total_len = body_len + len(self.else_body)
    if k > 0:
        # Positive indices: body first, then (skipping the boundary
        # index itself) the else-body.
        if k < body_len:
            return self.body.statement(k)
        if body_len < k < total_len:
            return self.else_body.statement(k)
    elif k < 0:
        # Negative indices mirror the same split, counted from the end.
        if -total_len < k < body_len - total_len:
            return self.body.statement(k)
        if k > body_len - total_len:
            return self.else_body.statement(k)
    return None
|
Return the statement after the *i*-th one, or `None`.
|
def platform_to_tags(platform, interpreter):
  """Splits a "platform" like linux_x86_64-36-cp-cp36m into its components.

  If a simple platform without hyphens is specified, we will fall back to
  using the current interpreter's tags.
  """
  if platform.count('-') < 3:
    # Not enough components: borrow version/impl/abi from the running
    # interpreter.
    identity = interpreter.identity
    tags = [platform, identity.impl_ver, identity.abbr_impl, identity.abi_tag]
  else:
    tags = platform.rsplit('-', 3)
  # Normalize the platform component to wheel-tag form.
  tags[0] = tags[0].replace('.', '_').replace('-', '_')
  return tags
|
Splits a "platform" like linux_x86_64-36-cp-cp36m into its components.
If a simple platform without hyphens is specified, we will fall back to using
the current interpreter's tags.
|
def add_async_sender(
        self, partition=None, operation=None, send_timeout=60,
        keep_alive=30, auto_reconnect=True, loop=None):
    """
    Add an async sender to the client to send
    ~azure.eventhub.common.EventData object to an EventHub.

    :param partition: Optionally specify a particular partition to send to.
     If omitted, the events will be distributed to available partitions via
     round-robin.
    :type partition: str
    :operation: An optional operation to be appended to the hostname in the
     target URL.  The value must start with `/` character.
    :type operation: str
    :param send_timeout: The timeout in seconds for an individual event to be
     sent from the time that it is queued.  Default value is 60 seconds.
     If set to 0, there will be no timeout.
    :type send_timeout: int
    :param keep_alive: The time interval in seconds between pinging the
     connection to keep it alive during periods of inactivity.  The default
     value is 30 seconds.  If set to `None`, the connection will not be pinged.
    :type keep_alive: int
    :param auto_reconnect: Whether to automatically reconnect the sender if a
     retryable error occurs.  Default value is `True`.
    :type auto_reconnect: bool
    :rtype: ~azure.eventhub.async_ops.sender_async.SenderAsync
    """
    target = "amqps://{}{}".format(self.address.hostname, self.address.path)
    if operation:
        target += operation
    sender = AsyncSender(
        self, target, partition=partition, send_timeout=send_timeout,
        keep_alive=keep_alive, auto_reconnect=auto_reconnect, loop=loop)
    # Track the handler so the client can manage its lifecycle.
    self.clients.append(sender)
    return sender
|
Add an async sender to the client to send ~azure.eventhub.common.EventData object
to an EventHub.
:param partition: Optionally specify a particular partition to send to.
If omitted, the events will be distributed to available partitions via
round-robin.
:type partition: str
:operation: An optional operation to be appended to the hostname in the target URL.
The value must start with `/` character.
:type operation: str
:param send_timeout: The timeout in seconds for an individual event to be sent from the time that it is
queued. Default value is 60 seconds. If set to 0, there will be no timeout.
:type send_timeout: int
:param keep_alive: The time interval in seconds between pinging the connection to keep it alive during
periods of inactivity. The default value is 30 seconds. If set to `None`, the connection will not
be pinged.
:type keep_alive: int
:param auto_reconnect: Whether to automatically reconnect the sender if a retryable error occurs.
Default value is `True`.
:type auto_reconnect: bool
:rtype: ~azure.eventhub.async_ops.sender_async.SenderAsync
|
def QA_data_day_resample(day_data, type_='w'):
    """日线降采样 (downsample daily bars to a lower frequency).

    Aggregates OHLC with first/max/min/last and sums volume/amount over
    each resampling period.

    Arguments:
        day_data {pd.DataFrame} -- daily bars indexed (or indexable) by
            'date', with columns code/open/high/low/close, vol (or
            volume) and amount.

    Keyword Arguments:
        type_ {str} -- pandas offset alias for the target frequency
            (default: {'w'} for weekly).

    Returns:
        pd.DataFrame -- resampled bars indexed by ['date', 'code'].
    """
    try:
        day_data = day_data.reset_index().set_index('date', drop=False)
    except Exception:
        # 'date' is already a plain column, so there is no index to reset.
        # (Was a bare `except:`, which also swallowed KeyboardInterrupt.)
        day_data = day_data.set_index('date', drop=False)
    # Build the aggregation map once; only the volume column name differs.
    volume_column = 'vol' if 'vol' in day_data.columns else 'volume'
    conversion = {
        'code': 'first',
        'open': 'first',
        'high': 'max',
        'low': 'min',
        'close': 'last',
        volume_column: 'sum',
        'amount': 'sum'
    }
    return day_data.resample(
        type_,
        closed='right'
    ).apply(conversion).dropna().reset_index().set_index(['date',
                                                          'code'])
|
日线降采样
Arguments:
day_data {[type]} -- [description]
Keyword Arguments:
type_ {str} -- [description] (default: {'w'})
Returns:
[type] -- [description]
|
def switch_state(request):
    """
    Toggle the default version state stored in the session, then
    redirect back to a local path.

    The redirect target comes from the ``redirect_to`` query parameter
    and is restricted to local paths.
    """
    request.session[SESSION_KEY] = not request.session.get(SESSION_KEY)
    # Get redirect location; don't go to non-local paths.  Require a
    # single leading '/': besides absolute 'http(s)://' URLs this also
    # rejects protocol-relative '//host' URLs, which the old
    # startswith('http') check let through (open redirect).
    url = request.GET.get('redirect_to', '/')
    if not url.startswith('/') or url.startswith('//'):
        url = '/'
    return redirect(url)
|
Switch the default version state in
the session.
|
def list_repos(remote=False):
    """
    List repos.

    Parameters
    ----------
    remote: bool
        Flag; only ``False`` (local repos) is supported for now.
    """
    if remote:
        raise Exception("Not supported yet")
    mgr = plugins_get_mgr()
    repomgr = mgr.get(what='repomanager', name='git')
    repos = repomgr.get_repo_list()
    repos.sort()
    return repos
|
List repos
Parameters
----------
remote: Flag
|
def get(context, request, resource=None, uid=None):
    """GET

    Resolve a record either directly by UID or by resource name.
    """
    # A bare UID with no resource addresses a record directly.
    if uid and not resource:
        return api.get_record(uid)
    # A UID passed in the resource slot also addresses a record.
    if api.is_uid(resource):
        return api.get_record(resource)
    # Otherwise treat the resource as a portal type and batch the results.
    portal_type = api.resource_to_portal_type(resource)
    if portal_type is None:
        raise APIError(404, "Not Found")
    return api.get_batched(portal_type=portal_type, uid=uid,
                           endpoint="senaite.jsonapi.v1.get")
|
GET
|
def iter_bases(bases):
    """
    Performs C3 (MRO) linearization of a set of base classes.  Yields
    each base class in turn.

    :raises TypeError: if no consistent MRO exists for *bases*.
    """
    sequences = [list(inspect.getmro(base)) for base in bases]
    sequences.append(list(bases))
    while True:
        # Drop exhausted sequences; we are done when none remain.
        sequences = [seq for seq in sequences if seq]
        if not sequences:
            return
        # A head is "good" when it appears in no sequence's tail.
        for candidate_seq in sequences:
            head = candidate_seq[0]
            if not any(head in seq[1:] for seq in sequences):
                break
        else:
            raise TypeError('Cannot create a consistent method '
                            'resolution order (MRO) for bases %s' %
                            ', '.join([base.__name__ for base in bases]))
        # Yield this base class
        yield head
        # Consume the chosen head from every sequence it leads.
        for seq in sequences:
            if seq[0] == head:
                del seq[0]
|
Performs MRO linearization of a set of base classes. Yields
each base class in turn.
|
def _loop_use_cache(self, helper_function, num, fragment):
        """
        Synthesize one text fragment, reusing the audio cache when possible.

        Cached fragments are read back from disk; uncached ones are
        synthesized via ``helper_function`` into a temporary file and then
        added to the cache (only if their duration is positive).

        :param helper_function: callable performing the actual synthesis;
            expected to accept text/voice_code/output_file_path/return_audio_data
            and return ``(succeeded, data)``
        :param num: ordinal of the fragment, used only for logging
        :param fragment: fragment object exposing ``language`` and
            ``filtered_text``
        :return: ``(succeeded, data)`` tuple; ``(False, None)`` on error
        """
        self.log([u"Examining fragment %d (cache)...", num])
        # Cache key: identical filtered text in the same language reuses
        # the same synthesized audio.
        fragment_info = (fragment.language, fragment.filtered_text)
        if self.cache.is_cached(fragment_info):
            self.log(u"Fragment cached: retrieving audio data from cache")
            # read data from file, whose path is in the cache
            file_handler, file_path = self.cache.get(fragment_info)
            self.log([u"Reading cached fragment at '%s'...", file_path])
            succeeded, data = self._read_audio_data(file_path)
            if not succeeded:
                self.log_crit(u"An unexpected error occurred while reading cached audio file")
                return (False, None)
            self.log([u"Reading cached fragment at '%s'... done", file_path])
        else:
            self.log(u"Fragment not cached: synthesizing and caching")
            # creating destination file
            file_info = gf.tmp_file(suffix=u".cache.wav", root=self.rconf[RuntimeConfiguration.TMP_PATH])
            file_handler, file_path = file_info
            self.log([u"Synthesizing fragment to '%s'...", file_path])
            # synthesize and get the duration of the output file
            voice_code = self._language_to_voice_code(fragment.language)
            self.log(u"Calling helper function")
            succeeded, data = helper_function(
                text=fragment.filtered_text,
                voice_code=voice_code,
                output_file_path=file_path,
                return_audio_data=True
            )
            # check output
            if not succeeded:
                self.log_crit(u"An unexpected error occurred in helper_function")
                return (False, None)
            self.log([u"Synthesizing fragment to '%s'... done", file_path])
            duration, sr_nu, enc_nu, samples = data
            # Zero-duration fragments are not cached: the empty file would
            # be returned for every later occurrence of the same text.
            if duration > 0:
                self.log(u"Fragment has > 0 duration, adding it to cache")
                self.cache.add(fragment_info, file_info)
                self.log(u"Added fragment to cache")
            else:
                self.log(u"Fragment has zero duration, not adding it to cache")
        # Close the handler in both branches; the file itself stays on disk
        # for the cache to reuse.
        self.log([u"Closing file handler for cached output file path '%s'", file_path])
        gf.close_file_handler(file_handler)
        self.log([u"Examining fragment %d (cache)... done", num])
        return (True, data)
|
Synthesize all fragments using the cache
|
def shortentext(text, minlength, placeholder='...'):
    """
    Shorten some text by replacing the last part with a placeholder
    (such as '...'), collapsing whitespace in the process.

    :type text: string
    :param text: The text to shorten
    :type minlength: integer
    :param minlength: The minimum length before a shortening will occur
    :type placeholder: string
    :param placeholder: The text to append after removing protruding text.
    """
    # textwrap.shorten collapses runs of whitespace and drops trailing
    # words until the result (plus placeholder) fits the width.
    return textwrap.shorten(text, minlength, placeholder=str(placeholder))
|
Shorten some text by replacing the last part with a placeholder (such as '...')
:type text: string
:param text: The text to shorten
:type minlength: integer
:param minlength: The minimum length before a shortening will occur
:type placeholder: string
:param placeholder: The text to append after removing protruding text.
|
def form_query(self, columns, options=None):
    """
    Build the SELECT statement for this function call.

    :param str columns: literal sql string for list of columns
    :param dict options: dict supporting a single key "direct" as in the
        constructor; defaults to an empty dict
    :return: sql string
    :raises ProgrammingError: if a column list is given for a direct call
    """
    # None default instead of a shared mutable `options={}` default.
    options = options or {}
    from_cl = 'FROM'
    direct = options.get('direct', self.direct)
    if direct:
        if columns != '*':
            raise ProgrammingError(
                "Column lists cannot be specified for a direct function call.")
        # A direct call selects the function itself: no columns, no FROM.
        columns = ''
        from_cl = ''
    if len(self.args) >= 1:
        placeholders = ",".join(['%s'] * len(self.args))
        func = "%s(%s)" % (self.query_base, placeholders)
    else:
        func = "%s()" % self.query_base
    return "SELECT %s %s %s" % (columns, from_cl, func)
|
:param str columns: literal sql string for list of columns
:param dict options: dict supporting a single key "direct" as in the constructor
:return: sql string
|
def __get_host(node, vm_):
    '''
    Return public IP, private IP, or hostname for the libcloud 'node'
    object, honouring the configured SSH interface.
    '''
    use_private = (__get_ssh_interface(vm_) == 'private_ips'
                   or vm_['external_ip'] is None)
    if use_private:
        ip_address = node.private_ips[0]
        log.info('Salt node data. Private_ip: %s', ip_address)
    else:
        ip_address = node.public_ips[0]
        log.info('Salt node data. Public_ip: %s', ip_address)
    # Fall back to the node name when no IP was found.
    return ip_address if ip_address else node.name
|
Return public IP, private IP, or hostname for the libcloud 'node' object
|
def shell():
    '''
    Return the default shell to use on this system.
    '''
    # Provides:
    #   shell
    if salt.utils.platform.is_windows():
        env_var, fallback = 'COMSPEC', r'C:\Windows\system32\cmd.exe'
    else:
        env_var, fallback = 'SHELL', '/bin/sh'
    return {'shell': os.environ.get(env_var, fallback)}
|
Return the default shell to use on this system
|
def calculate_dependencies():
    """Calculate test dependencies.

    First do a topological sorting based on the dependencies, then sort
    each resulting dependency group by (priority, name).
    """
    merged = merge_dicts(dependencies, soft_dependencies)
    return [
        test
        for group in toposort(merged)
        for test in sorted(group, key=lambda name: (priorities[name], name))
    ]
|
Calculate test dependencies
First do a topological sorting based on the dependencies.
Then sort the different dependency groups based on priorities.
|
def Enumerate():
  """See base class.

  Enumerate HID devices via the macOS IOKit HID manager and return a list
  of public descriptor dicts (vendor/product ids, product string, usage,
  usage page, report id, path).

  Raises:
    errors.OsHidError: if the HID manager or its device set cannot be
      obtained.
  """
  # Init a HID manager
  hid_mgr = iokit.IOHIDManagerCreate(None, None)
  if not hid_mgr:
    raise errors.OsHidError('Unable to obtain HID manager reference')
  # A None matching dictionary matches every HID device.
  iokit.IOHIDManagerSetDeviceMatching(hid_mgr, None)
  # Get devices from HID manager
  device_set_ref = iokit.IOHIDManagerCopyDevices(hid_mgr)
  if not device_set_ref:
    raise errors.OsHidError('Failed to obtain devices from HID manager')
  num = iokit.CFSetGetCount(device_set_ref)
  # Fixed-size ctypes array sized to the CFSet count, filled in place.
  devices = (IO_HID_DEVICE_REF * num)()
  iokit.CFSetGetValues(device_set_ref, devices)
  # Retrieve and build descriptor dictionaries for each device
  descriptors = []
  for dev in devices:
    d = base.DeviceDescriptor()
    d.vendor_id = GetDeviceIntProperty(dev, HID_DEVICE_PROPERTY_VENDOR_ID)
    d.product_id = GetDeviceIntProperty(dev, HID_DEVICE_PROPERTY_PRODUCT_ID)
    d.product_string = GetDeviceStringProperty(dev,
                                               HID_DEVICE_PROPERTY_PRODUCT)
    d.usage = GetDeviceIntProperty(dev, HID_DEVICE_PROPERTY_PRIMARY_USAGE)
    d.usage_page = GetDeviceIntProperty(
        dev, HID_DEVICE_PROPERTY_PRIMARY_USAGE_PAGE)
    d.report_id = GetDeviceIntProperty(dev, HID_DEVICE_PROPERTY_REPORT_ID)
    d.path = GetDevicePath(dev)
    descriptors.append(d.ToPublicDict())
  # Clean up CF objects -- both were obtained from Copy/Create calls, so
  # this code owns the references.
  cf.CFRelease(device_set_ref)
  cf.CFRelease(hid_mgr)
  return descriptors
|
See base class.
|
def delete_template(self, temp_id=None, params={}, callback=None, **kwargs):
    """
    Delete a search template.
    `<http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-template.html>`_

    :arg temp_id: Template ID
    """
    url = self.mk_url('_search', 'template', temp_id)
    request = self.mk_req(url, method='DELETE', **kwargs)
    self.client.fetch(request, callback=callback)
|
Delete a search template.
`<http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-template.html>`_
:arg temp_id: Template ID
|
def text2lm(text, output_file, vocab_file=None, text2idngram_kwargs={}, idngram2lm_kwargs={}):
    """
    Convenience function to directly convert text (and vocabulary) into
    a language model.

    When no vocabulary file is supplied, a temporary one is derived from
    the text and removed afterwards; the intermediate idngram file is
    always temporary.
    """
    temp_vocab = vocab_file is None
    if temp_vocab:
        # Create a temporary vocab file from the text.
        with tempfile.NamedTemporaryFile(suffix='.vocab', delete=False) as f:
            used_vocab_file = f.name
        text2vocab(text, used_vocab_file)
    else:
        used_vocab_file = vocab_file
    # Create a temporary idngram file.
    with tempfile.NamedTemporaryFile(suffix='.idngram', delete=False) as f:
        idngram_file = f.name
    try:
        output1 = text2idngram(text, vocab_file=used_vocab_file,
                               output_file=idngram_file, **text2idngram_kwargs)
        output2 = idngram2lm(idngram_file, vocab_file=used_vocab_file,
                             output_file=output_file, **idngram2lm_kwargs)
    finally:
        # Remove temporary files whether or not conversion succeeded;
        # a ConversionError simply propagates to the caller.
        if temp_vocab:
            os.remove(used_vocab_file)
        os.remove(idngram_file)
    return (output1, output2)
|
    Convenience function to directly convert text (and vocabulary) into a language model.
|
def oneshot(self, query, **params):
    """Run a oneshot search and return a streaming handle to the results.

    The ``InputStream`` object streams XML fragments from the server.  To
    parse this stream into usable Python objects, pass the handle to
    :class:`splunklib.results.ResultsReader`::

        import splunklib.client as client
        import splunklib.results as results
        service = client.connect(...)
        rr = results.ResultsReader(service.jobs.oneshot("search * | head 5"))
        for result in rr:
            if isinstance(result, results.Message):
                # Diagnostic messages may be returned in the results
                print '%s: %s' % (result.type, result.message)
            elif isinstance(result, dict):
                # Normal events are returned as dicts
                print result
        assert rr.is_preview == False

    The ``oneshot`` method makes a single roundtrip to the server (as
    opposed to two for :meth:`create` followed by :meth:`results`), plus
    at most two more if the ``autologin`` field of :func:`connect` is set
    to ``True``.

    :raises ValueError: Raised for invalid queries.
    :param query: The search query.
    :type query: ``string``
    :param params: Additional arguments (optional): "output_mode" (XML,
        JSON, or CSV), "earliest_time", "latest_time" (UTC time with
        fractional seconds, relative time specifier, or formatted time
        string), and "rf" (one or more fields to add to the search).
    :type params: ``dict``
    :return: The ``InputStream`` IO handle to raw XML returned from the server.
    """
    if "exec_mode" in params:
        raise TypeError("Cannot specify an exec_mode to oneshot.")
    # Default to unsegmented results unless the caller overrides it.
    params.setdefault('segmentation', 'none')
    return self.post(search=query,
                     exec_mode="oneshot",
                     **params).body
|
Run a oneshot search and returns a streaming handle to the results.
The ``InputStream`` object streams XML fragments from the server. To
parse this stream into usable Python objects,
pass the handle to :class:`splunklib.results.ResultsReader`::
import splunklib.client as client
import splunklib.results as results
service = client.connect(...)
rr = results.ResultsReader(service.jobs.oneshot("search * | head 5"))
for result in rr:
if isinstance(result, results.Message):
# Diagnostic messages may be returned in the results
print '%s: %s' % (result.type, result.message)
elif isinstance(result, dict):
# Normal events are returned as dicts
print result
assert rr.is_preview == False
The ``oneshot`` method makes a single roundtrip to the server (as opposed
to two for :meth:`create` followed by :meth:`results`), plus at most two more
if the ``autologin`` field of :func:`connect` is set to ``True``.
:raises ValueError: Raised for invalid queries.
:param query: The search query.
:type query: ``string``
:param params: Additional arguments (optional):
- "output_mode": Specifies the output format of the results (XML,
JSON, or CSV).
- "earliest_time": Specifies the earliest time in the time range to
search. The time string can be a UTC time (with fractional seconds),
a relative time specifier (to now), or a formatted time string.
- "latest_time": Specifies the latest time in the time range to
search. The time string can be a UTC time (with fractional seconds),
a relative time specifier (to now), or a formatted time string.
- "rf": Specifies one or more fields to add to the search.
:type params: ``dict``
:return: The ``InputStream`` IO handle to raw XML returned from the server.
|
def on(self, left_speed, right_speed):
    """
    Start rotating the motors according to ``left_speed`` and
    ``right_speed`` forever.  Speeds can be percentages or any
    SpeedValue implementation.
    """
    left_native, right_native = self._unpack_speeds_to_native_units(
        left_speed, right_speed)
    # Set all parameters (speed_sp expects whole native units).
    self.left_motor.speed_sp = int(round(left_native))
    self.right_motor.speed_sp = int(round(right_native))
    # NOTE: reading speed_sp back involves disk I/O, so no debug
    # logging of the applied values here.
    # Start the motors.
    self.left_motor.run_forever()
    self.right_motor.run_forever()
|
Start rotating the motors according to ``left_speed`` and ``right_speed`` forever.
Speeds can be percentages or any SpeedValue implementation.
|
def get_nni_installation_path():
    ''' Find the nni lib from the candidate locations in order.

    Return the nni root directory if it exists; otherwise print an
    error and exit with status 1.
    '''
    def _nni_python_dir(sitepackages_path):
        '''Return the python dir for this sitepackages path if it holds
        an nni installation, else None.'''
        python_dir = get_python_dir(sitepackages_path)
        if os.path.isfile(os.path.join(python_dir, 'nni', 'main.js')):
            return python_dir
        return None

    def _first_installation_path(*sitepackages):
        '''Try each installation path in turn until nni is found.
        Return None if nothing is found.'''
        for sitepackage in sitepackages:
            python_dir = _nni_python_dir(sitepackage)
            if python_dir:
                return python_dir
        return None

    if os.getenv('VIRTUAL_ENV'):
        # With the 'virtualenv' package, `site` has no getsitepackages
        # attribute, so use VIRTUAL_ENV instead.  (Conda venvs do not
        # set VIRTUAL_ENV.)
        python_dir = os.getenv('VIRTUAL_ENV')
    else:
        python_sitepackage = site.getsitepackages()[0]
        # With a system-wide python, prefer the user sitepackages
        # directory when nni exists there.
        if python_sitepackage.startswith(('/usr', '/Library')):
            python_dir = _first_installation_path(
                site.getusersitepackages(), site.getsitepackages()[0])
        else:
            python_dir = _first_installation_path(
                site.getsitepackages()[0], site.getusersitepackages())
    if python_dir:
        if os.path.isfile(os.path.join(python_dir, 'nni', 'main.js')):
            return os.path.join(python_dir, 'nni')
    print_error('Fail to find nni under python library')
    exit(1)
|
Find nni lib from the following locations in order
Return nni root directory if it exists
|
def unlock(self):
    """Unlock a mutex.  If the queue is not empty, call the next
    function with its argument instead of releasing the lock."""
    if not self.queue:
        self.locked = False
        return
    # Hand the lock straight to the next waiter.
    function, argument = self.queue.popleft()
    function(argument)
|
Unlock a mutex. If the queue is not empty, call the next
function with its argument.
|
def apply_args(job, inputs, optional_inputs=None):
    """
    Error-check the supplied arguments before the job gets updated.

    :param job: Must be a valid job
    :param inputs: Must be a tuple type
    :param optional_inputs: optional for OptionalInputs
    :return: job
    """
    for args, field in ((inputs, INPUT_FIELD),
                        (optional_inputs, OPTIONAL_FIELD)):
        _apply_args_loop(job, args, field)
    return job
|
This function is error checking before the job gets
updated.
:param job: Must be a valid job
:param inputs: Must be a tuple type
:param optional_inputs: optional for OptionalInputs
:return: job
|
def get_resource_bin_session(self, proxy):
    """Gets the session for retrieving resource to bin mappings.

    arg:    proxy (osid.proxy.Proxy): a proxy
    return: (osid.resource.ResourceBinSession) - a
            ``ResourceBinSession``
    raise:  NullArgument - ``proxy`` is ``null``
    raise:  OperationFailed - unable to complete request
    raise:  Unimplemented - ``supports_resource_bin()`` is ``false``
    *compliance: optional -- This method must be implemented if
    ``supports_resource_bin()`` is ``true``.*
    """
    if self.supports_resource_bin():
        # pylint: disable=no-member
        return sessions.ResourceBinSession(proxy=proxy, runtime=self._runtime)
    raise errors.Unimplemented()
|
Gets the session for retrieving resource to bin mappings.
arg: proxy (osid.proxy.Proxy): a proxy
return: (osid.resource.ResourceBinSession) - a
``ResourceBinSession``
raise: NullArgument - ``proxy`` is ``null``
raise: OperationFailed - unable to complete request
raise: Unimplemented - ``supports_resource_bin()`` is ``false``
*compliance: optional -- This method must be implemented if
``supports_resource_bin()`` is ``true``.*
|
def _bin_op(instance, opnode, op, other, context, reverse=False):
    """Get an inference callable for a normal binary operation.

    If *reverse* is True, then the reflected method will be used instead.
    """
    lookup = (protocols.REFLECTED_BIN_OP_METHOD if reverse
              else protocols.BIN_OP_METHOD)
    return functools.partial(
        _invoke_binop_inference,
        instance=instance,
        op=op,
        opnode=opnode,
        other=other,
        context=context,
        method_name=lookup[op],
    )
|
Get an inference callable for a normal binary operation.
If *reverse* is True, then the reflected method will be used instead.
|
def bounded_by_sigmas(self, sigmas=3, square=False):
    """Return a SubspaceBounded whose limits come from ``Subspace.limits_sigma()``.

    :rtype: SubspaceBounded
    """
    sigma_limits = self.limits_sigma(sigmas=sigmas, square=square)
    return SubspaceBounded(self, sigma_limits)
|
Returns a bounded subspace (SubspaceBounded) with limits given by Subspace.limits_sigma()
:rtype: SubspaceBounded
|
def get_conn(self, urlparsed=None):
    """Return an HTTP(S) connection for the given urlparse result.

    :param urlparsed: The result from urlparse.urlparse or None to use the
                      default Swift cluster's value
    """
    parsed = urlparsed or self.dsc_parsed2
    # Pick the connection class from the URL scheme; anything that is not
    # plain http is treated as https.
    conn_class = HTTPConnection if parsed.scheme == 'http' else HTTPSConnection
    return conn_class(parsed.netloc)
|
Returns an HTTPConnection based on the urlparse result given or the
default Swift cluster (internal url) urlparse result.
:param urlparsed: The result from urlparse.urlparse or None to use the
default Swift cluster's value
|
def estimate(s1, s2):
    """Estimate the spatial relationship of two symbols by examining the
    position of their bounding boxes.

    Parameters
    ----------
    s1 : HandwrittenData
    s2 : HandwrittenData

    Returns
    -------
    dict of probabilities
        {'bottom': 0.1,
         'subscript': 0.2,
         'right': 0.3,
         'superscript': 0.3,
         'top': 0.1}
    """
    box1 = s1.get_bounding_box()
    box2 = s2.get_bounding_box()
    # All overlap areas are normalized by the area of s2's bounding box.
    reference_area = float((box2['maxx'] - box2['minx'] + 1) *
                           (box2['maxy'] - box2['miny'] + 1))

    scores = {'bottom': 0.0, 'subscript': 0.0, 'right': 0.0,
              'superscript': 0.0, 'top': 0.0}

    # bottom: s2 extends below s1 and overlaps it horizontally
    if box2['maxy'] > box1['maxy'] and box2['minx'] < box1['maxx']:
        x_lo = max(box2['minx'], box1['minx'])
        x_hi = min(box2['maxx'], box1['maxx'])
        y_lo = max(box2['miny'], box1['maxy'])
        scores['bottom'] = float((x_hi - x_lo) * (box2['maxy'] - y_lo))

    # subscript: s2 extends below and to the right of s1
    if box2['maxy'] > box1['maxy'] and box2['maxx'] > box1['maxx']:
        y_lo = max(box2['miny'], box1['maxy'])
        x_lo = max(box2['minx'], box1['maxx'])
        scores['subscript'] = (box2['maxx'] - x_lo) * (box2['maxy'] - y_lo)

    # right: vertical overlap with s1 and s2 sticks out to the right
    if box2['miny'] < box1['maxy'] and box2['maxy'] > box1['miny'] \
            and box2['maxx'] > box1['maxx']:
        y_lo = max(box1['miny'], box2['miny'])
        y_hi = min(box1['maxy'], box2['maxy'])
        x_lo = max(box1['maxx'], box2['minx'])
        scores['right'] = (box2['maxx'] - x_lo) * (y_hi - y_lo)

    # superscript: s2 extends above and to the right of s1
    if box2['miny'] < box1['miny'] and box2['maxx'] > box1['maxx']:
        y_hi = min(box1['miny'], box2['maxy'])
        x_lo = max(box1['maxx'], box2['minx'])
        scores['superscript'] = (box2['maxx'] - x_lo) * (y_hi - box2['miny'])

    # top: s2 extends above s1 and overlaps it horizontally
    if box2['miny'] < box1['miny'] and box2['minx'] < box1['maxx']:
        y_hi = min(box1['miny'], box2['maxy'])
        x_lo = max(box2['minx'], box1['minx'])
        x_hi = min(box1['maxx'], box2['maxx'])
        scores['top'] = (x_hi - x_lo) * (y_hi - box2['miny'])

    return {key: area / reference_area for key, area in scores.items()}
|
Estimate the spatial relationship by
examining the position of the bounding boxes.
Parameters
----------
s1 : HandwrittenData
s2 : HandwrittenData
Returns
-------
dict of probabilities
{'bottom': 0.1,
'subscript': 0.2,
'right': 0.3,
'superscript': 0.3,
'top': 0.1}
|
def imbalance_metrics(data):
    """Compute the class count and an imbalance metric for a dataset's labels.

    The imbalance metric is 0 when the dataset is perfectly balanced
    (every class has the same number of examples) and grows towards 1 as
    the distribution approaches the worst case (all but one example in a
    single class).

    :param data: sequence of class labels (anything ``collections.Counter``
        accepts, e.g. a list or pandas Series of labels)
    :returns: tuple ``(num_classes, imbalance)`` where ``imbalance`` is in
        ``[0, 1]``; returns the bare value ``0`` for empty input (kept for
        backward compatibility with existing callers).
        (The previous docstring incorrectly advertised a plain int return.)
    """
    if not data:
        return 0
    # Count each class once instead of recomputing Counter(data) per use.
    class_counts = Counter(data)
    num_classes = float(len(class_counts))
    total = len(data)
    # Sum of squared deviations of each class probability from the uniform
    # probability 1/num_classes.  Counts are always >= 1, so no p > 0 guard
    # is needed.
    imb = 0.0
    for count in class_counts.values():
        p = float(count) / total
        imb += (p - 1 / num_classes) ** 2
    # Worst case: all but one example in the first class, the remaining one
    # in a second class -- used to normalize imb into [0, 1].
    worst_case = ((num_classes - 1) * (1 / num_classes) ** 2 +
                  (1 - 1 / num_classes) ** 2)
    return (num_classes, imb / worst_case)
|
Computes imbalance metric for a given dataset.
Imbalance metric is equal to 0 when a dataset is perfectly balanced (i.e. the number of examples in each class is equal).
:param data : pandas.DataFrame
A dataset in a panda's data frame
:returns int
A value of imbalance metric, where zero means that the dataset is perfectly balanced and the higher the value, the more imbalanced the dataset.
|
def get_indicator(self, resource):
    """Return the modification time and size of a `Resource`."""
    path = resource.real_path
    mtime = os.path.getmtime(path)
    size = os.path.getsize(path)
    if os.name == 'posix' or not os.path.isdir(path):
        return (mtime, size)
    # On DOS/Windows a folder's mtime does not change when files are
    # added, so include the entry count as an extra change indicator.
    return (mtime, len(os.listdir(path)), size)
|
Return the modification time and size of a `Resource`.
|
def j0(x, context=None):
    """
    Return the value of the first kind Bessel function of order 0 at x.
    """
    argument = BigFloat._implicit_convert(x)
    return _apply_function_in_current_context(
        BigFloat, mpfr.mpfr_j0, (argument,), context)
|
Return the value of the first kind Bessel function of order 0 at x.
|
async def data(
    self, message: Union[str, bytes], timeout: DefaultNumType = _default
) -> SMTPResponse:
    """
    Send an SMTP DATA command, followed by the message given.
    This method transfers the actual email content to the server.

    :param message: full message body; ``str`` input is encoded as ASCII
    :param timeout: per-command timeout in seconds; defaults to the
        session-wide ``self.timeout``
    :raises SMTPDataError: on unexpected server response code
    :raises SMTPServerDisconnected: connection lost
    """
    # EHLO/HELO must have been exchanged before DATA is legal.
    await self._ehlo_or_helo_if_needed()
    # As data accesses protocol directly, some handling is required
    self._raise_error_if_disconnected()
    if timeout is _default:
        timeout = self.timeout  # type: ignore
    if isinstance(message, str):
        message = message.encode("ascii")
    async with self._command_lock:
        # The server must answer 354 (start input) before we stream the body.
        start_response = await self.execute_command(b"DATA", timeout=timeout)
        if start_response.code != SMTPStatus.start_input:
            raise SMTPDataError(start_response.code, start_response.message)
        try:
            await self.protocol.write_message_data(  # type: ignore
                message, timeout=timeout
            )
            response = await self.protocol.read_response(  # type: ignore
                timeout=timeout
            )
        except SMTPServerDisconnected as exc:
            # Close our side before propagating so the client object is
            # left in a consistent, closed state.
            self.close()
            raise exc
        if response.code != SMTPStatus.completed:
            raise SMTPDataError(response.code, response.message)
        return response
|
Send an SMTP DATA command, followed by the message given.
This method transfers the actual email content to the server.
:raises SMTPDataError: on unexpected server response code
:raises SMTPServerDisconnected: connection lost
|
def cmd_slow_requests(self):
    """List the response times of all requests that took a certain amount
    of time to be processed.

    .. warning::
        By now hardcoded to 1 second (1000 milliseconds), improve the
        command line interface to allow to send parameters to each command
        or globally.
    """
    threshold = 1000  # milliseconds
    slow = []
    for entry in self._valid_lines:
        if entry.time_wait_response > threshold:
            slow.append(entry.time_wait_response)
    return slow
|
List all requests that took a certain amount of time to be
processed.
.. warning::
By now hardcoded to 1 second (1000 milliseconds), improve the
command line interface to allow to send parameters to each command
or globally.
|
def objective_fun(theta, hamiltonian=None,
                  quantum_resource=QVMConnection(sync_endpoint='http://localhost:5000')):
    """
    Evaluate the Hamiltonian by operator averaging

    :param theta: ansatz parameters forwarded to ``ucc_circuit``
    :param hamiltonian: ``PauliSum``, dense numpy matrix, or ``None``
        (treated as the identity)
    :return: expected value of the Hamiltonian
    """
    if hamiltonian is None:
        # The identity operator always averages to exactly 1.
        return 1.0
    if isinstance(hamiltonian, PauliSum):
        # Returns (expectation, variance, shots); keep the real expectation.
        estimate = estimate_locally_commuting_operator(
            ucc_circuit(theta), hamiltonian, 1.0E-6,
            quantum_resource=quantum_resource)
        return estimate[0][0].real
    if isinstance(hamiltonian, np.ndarray) and isinstance(quantum_resource, QVMConnection):
        # Direct expectation value <psi|H|psi> from the simulated wavefunction.
        amplitudes = quantum_resource.wavefunction(ucc_circuit(theta))
        amplitudes = amplitudes.amplitudes.reshape((-1, 1))
        expectation = np.conj(amplitudes).T.dot(hamiltonian).dot(amplitudes)[0, 0].real
        print(expectation)
        return expectation
    raise TypeError("type of hamiltonian or qvm is unrecognized")
|
Evaluate the Hamiltonian by operator averaging
:param theta:
:param hamiltonian:
:return:
|
def _pos(self, idx):
    """Convert an index into a pair (alpha, beta) that can be used to access
    the corresponding _lists[alpha][beta] position.
    Most queries require the index be built. Details of the index are
    described in self._build_index.
    Indexing requires traversing the tree to a leaf node. Each node has
    two children which are easily computable. Given an index, pos, the
    left-child is at pos * 2 + 1 and the right-child is at pos * 2 + 2.
    When the index is less than the left-child, traversal moves to the
    left sub-tree. Otherwise, the index is decremented by the left-child
    and traversal moves to the right sub-tree.
    At a child node, the indexing pair is computed from the relative
    position of the child node as compared with the offset and the remaining
    index.
    For example, using the index from self._build_index:
    _index = 14 5 9 3 2 4 5
    _offset = 3
    Tree:
    14
    5 9
    3 2 4 5
    Indexing position 8 involves iterating like so:
    1. Starting at the root, position 0, 8 is compared with the left-child
    node (5) which it is greater than. When greater the index is
    decremented and the position is updated to the right child node.
    2. At node 9 with index 3, we again compare the index to the left-child
    node with value 4. Because the index is the less than the left-child
    node, we simply traverse to the left.
    3. At node 4 with index 3, we recognize that we are at a leaf node and
    stop iterating.
    4. To compute the sublist index, we subtract the offset from the index
    of the leaf node: 5 - 3 = 2. To compute the index in the sublist, we
    simply use the index remaining from iteration. In this case, 3.
    The final index pair from our example is (2, 3) which corresponds to
    index 8 in the sorted list.
    """
    if idx < 0:
        # Fast path: a small negative index resolves inside the last
        # sublist without touching the tree.
        last_len = len(self._lists[-1])
        if (-idx) <= last_len:
            return len(self._lists) - 1, last_len + idx
        # Normalize the negative index and bounds-check it.
        idx += self._len
        if idx < 0:
            raise IndexError('list index out of range')
    elif idx >= self._len:
        raise IndexError('list index out of range')
    if idx < len(self._lists[0]):
        # Fast path: index falls inside the first sublist.
        return 0, idx
    _index = self._index
    if not _index:
        # NOTE(review): the tree is (re)built here, but the local `_index`
        # captured above is not refreshed afterwards -- this is only correct
        # if _build_index mutates self._index in place; confirm.
        self._build_index()
    pos = 0
    child = 1
    len_index = len(_index)
    while child < len_index:
        index_child = _index[child]
        if idx < index_child:
            # Descend into the left subtree.
            pos = child
        else:
            # Skip the left subtree's element count, descend right.
            idx -= index_child
            pos = child + 1
        child = (pos << 1) + 1
    # Leaf reached: translate tree position to sublist index via _offset;
    # the remaining idx is the position within that sublist.
    return (pos - self._offset, idx)
|
Convert an index into a pair (alpha, beta) that can be used to access
the corresponding _lists[alpha][beta] position.
Most queries require the index be built. Details of the index are
described in self._build_index.
Indexing requires traversing the tree to a leaf node. Each node has
two children which are easily computable. Given an index, pos, the
left-child is at pos * 2 + 1 and the right-child is at pos * 2 + 2.
When the index is less than the left-child, traversal moves to the
left sub-tree. Otherwise, the index is decremented by the left-child
and traversal moves to the right sub-tree.
At a child node, the indexing pair is computed from the relative
position of the child node as compared with the offset and the remaining
index.
For example, using the index from self._build_index:
_index = 14 5 9 3 2 4 5
_offset = 3
Tree:
14
5 9
3 2 4 5
Indexing position 8 involves iterating like so:
1. Starting at the root, position 0, 8 is compared with the left-child
node (5) which it is greater than. When greater the index is
decremented and the position is updated to the right child node.
2. At node 9 with index 3, we again compare the index to the left-child
node with value 4. Because the index is the less than the left-child
node, we simply traverse to the left.
3. At node 4 with index 3, we recognize that we are at a leaf node and
stop iterating.
4. To compute the sublist index, we subtract the offset from the index
of the leaf node: 5 - 3 = 2. To compute the index in the sublist, we
simply use the index remaining from iteration. In this case, 3.
The final index pair from our example is (2, 3) which corresponds to
index 8 in the sorted list.
|
def convert_to_node(instance, xml_node: XmlNode, node_globals: InheritedDict = None)\
        -> InstanceNode:
    '''Wrap *instance* into an InstanceNode carrying the xml node and globals.'''
    wrapped = InstanceNode(instance, xml_node, node_globals)
    return wrapped
|
Wraps passed instance with InstanceNode
|
def get_items_for_config_file_output(self, source_to_settings,
                                     parsed_namespace):
    """Convert parsed settings back into a dictionary that can be passed
    to ConfigFormatParser.serialize(..).

    Args:
        source_to_settings: the dictionary described in parse_known_args()
        parsed_namespace: namespace object created within parse_known_args()

    Returns:
        an OrderedDict where keys are strings and values are either strings
        or lists
    """
    output_items = OrderedDict()

    def record_namespace_value(action):
        # Store this action's namespace value under its first config-file
        # key; skip actions without config keys or without a value.
        keys = self.get_possible_config_keys(action)
        if not keys:
            return
        value = getattr(parsed_namespace, action.dest, None)
        if value is not None:
            output_items[keys[0]] = value

    for source, settings in source_to_settings.items():
        if source == _COMMAND_LINE_SOURCE_KEY:
            _, existing_command_line_args = settings['']
            for action in self._actions:
                config_file_keys = self.get_possible_config_keys(action)
                if config_file_keys and not action.is_positional_arg and \
                        already_on_command_line(existing_command_line_args,
                                                action.option_strings):
                    value = getattr(parsed_namespace, action.dest, None)
                    if value is not None:
                        if isinstance(value, bool):
                            # Config files expect lowercase true/false.
                            value = str(value).lower()
                        output_items[config_file_keys[0]] = value
        elif source == _ENV_VAR_SOURCE_KEY:
            for action, _unused in settings.values():
                record_namespace_value(action)
        elif source.startswith(_CONFIG_FILE_SOURCE_KEY):
            # Config-file settings pass through under their original keys.
            for key, (action, value) in settings.items():
                output_items[key] = value
        elif source == _DEFAULTS_SOURCE_KEY:
            for action, _unused in settings.values():
                record_namespace_value(action)
    return output_items
|
Converts the given settings back to a dictionary that can be passed
to ConfigFormatParser.serialize(..).
Args:
source_to_settings: the dictionary described in parse_known_args()
parsed_namespace: namespace object created within parse_known_args()
Returns:
an OrderedDict where keys are strings and values are either strings
or lists
|
def decode_struct_fields(self, ins, fields, obj):
    """Populate ``ins`` from a JSON-compatible dict.

    Args:
        ins: An instance of the class representing the data type being
            decoded; its fields are set in place.
        fields: A tuple of (field_name: str, field_validator: Validator)
        obj (dict): JSON-compatible dict that is being decoded.

    Returns:
        None: `ins` has its fields set based on the contents of `obj`.
    """
    for field_name, validator in fields:
        if field_name in obj:
            try:
                decoded = self.json_compat_obj_decode_helper(
                    validator, obj[field_name])
                setattr(ins, field_name, decoded)
            except bv.ValidationError as e:
                # Prefix the failing field so nested errors are traceable.
                e.add_parent(field_name)
                raise
        elif validator.has_default():
            # Absent fields fall back to their declared default.
            setattr(ins, field_name, validator.get_default())
|
Args:
ins: An instance of the class representing the data type being decoded.
The object will have its fields set.
fields: A tuple of (field_name: str, field_validator: Validator)
obj (dict): JSON-compatible dict that is being decoded.
strict (bool): See :func:`json_compat_obj_decode`.
Returns:
None: `ins` has its fields set based on the contents of `obj`.
|
def get_data_file_attachment(self, identifier, resource_id):
    """Get path to attached data file with given resource identifier. If no
    model run or no attachment with the given id exists the result will be
    (None, None).

    NOTE(review): the original contract stated that a ValueError is raised
    when the attached resource is an image archive rather than a data file,
    but no such type check is performed below -- confirm whether the check
    was lost or the contract is stale.

    Parameters
    ----------
    identifier : string
        Unique model run identifier
    resource_id : string
        Unique attachment identifier

    Returns
    -------
    string, string
        Path to attached data file on disk and attachment's MIME type
    """
    # Get model run to ensure that it exists. If not return None
    model_run = self.get_object(identifier)
    if model_run is None:
        return None, None
    # Ensure that attachment with given resource identifier exists.
    if not resource_id in model_run.attachments:
        return None, None
    # The attachment entry supplies the MIME type; the file itself lives
    # under the run's attachment directory, named by resource_id.
    attachment = model_run.attachments[resource_id]
    filename = os.path.join(model_run.attachment_directory, resource_id)
    return filename, attachment.mime_type
|
Get path to attached data file with given resource identifier. If no
data file with given id exists the result will be None.
Raise ValueError if an image archive with the given resource identifier
is attached to the model run instead of a data file.
Parameters
----------
identifier : string
Unique model run identifier
resource_id : string
Unique attachment identifier
Returns
-------
string, string
Path to attached data file on disk and attachments MIME type
|
def update(ctx, migrate=False):
    '''Perform a development update'''
    title = 'Update all dependencies'
    if migrate:
        title += ' and migrate data'
    header(title)
    info('Updating Python dependencies')
    lrun('pip install -r requirements/develop.pip')
    lrun('pip install -e .')
    info('Updating JavaScript dependencies')
    lrun('npm install')
    if not migrate:
        return
    info('Migrating database')
    lrun('udata db migrate')
|
Perform a development update
|
def regularpage(foldername=None, pagename=None):
    """
    Route not found by the other routes above. May point to a static template.
    """
    if foldername is None:
        if pagename is None:
            raise ExperimentError('page_not_found')
        return render_template(pagename)
    return render_template(foldername + "/" + pagename)
|
Route not found by the other routes above. May point to a static template.
|
def db_putString(self, db_name, key, value):
    """https://github.com/ethereum/wiki/wiki/JSON-RPC#db_putstring
    DEPRECATED
    """
    warnings.warn('deprecated', DeprecationWarning)
    result = yield from self.rpc_call('db_putString', [db_name, key, value])
    return result
|
https://github.com/ethereum/wiki/wiki/JSON-RPC#db_putstring
DEPRECATED
|
def find_editor() -> str:
    """Find a reasonable editor to use by default for the system that the
    cmd2 application is running on."""
    chosen = os.environ.get('EDITOR')
    if chosen:
        return chosen
    if sys.platform.startswith('win'):
        return 'notepad'
    # Favor command-line editors first so we don't leave the terminal to edit
    fallbacks = ['vim', 'vi', 'emacs', 'nano', 'pico', 'gedit', 'kate',
                 'subl', 'geany', 'atom']
    for chosen in fallbacks:
        if which(chosen):
            break
    return chosen
|
Find a reasonable editor to use by default for the system that the cmd2 application is running on.
|
def load_genomic_CDR3_anchor_pos_and_functionality(anchor_pos_file_name):
    """Read anchor position and functionality from file.

    The file is comma separated with a single header line; each data row
    is: gene/allele name, anchor position (int), functionality (possibly
    wrapped in parentheses).

    Parameters
    ----------
    anchor_pos_file_name : str
        File name for the functionality and position of a conserved residue
        that defines the CDR3 region for each V or J germline sequence.

    Returns
    -------
    anchor_pos_and_functionality : dict
        Residue anchor position and functionality for each gene/allele.
    """
    anchor_pos_and_functionality = {}
    # Use a context manager so the file handle is always closed (the
    # previous implementation leaked the open handle).
    with open(anchor_pos_file_name, 'r') as anchor_pos_file:
        first_line = True
        for line in anchor_pos_file:
            if first_line:
                # Skip the header row.
                first_line = False
                continue
            split_line = [field.strip() for field in line.split(',')]
            anchor_pos_and_functionality[split_line[0]] = [
                int(split_line[1]),
                split_line[2].strip('()'),
            ]
    return anchor_pos_and_functionality
|
Read anchor position and functionality from file.
Parameters
----------
anchor_pos_file_name : str
File name for the functionality and position of a conserved residue
that defines the CDR3 region for each V or J germline sequence.
Returns
-------
anchor_pos_and_functionality : dict
Residue anchor position and functionality for each gene/allele.
|
def _process_file(self):
    '''Process rebase file into dict mapping enzyme name to
    (recognition site, (top cut offset, bottom cut offset)).

    Enzymes with unknown sequences or four or more cut sites are skipped.
    Populates self._enzyme_dict and removes self._tmpdir when done.
    '''
    print 'Processing file'
    with open(self._rebase_file, 'r') as f:
        raw = f.readlines()
    # Rebase flat-file format: '<1>' lines carry enzyme names, '<5>'
    # lines carry the corresponding recognition sequences, in order.
    names = [line.strip()[3:] for line in raw if line.startswith('<1>')]
    seqs = [line.strip()[3:] for line in raw if line.startswith('<5>')]
    if len(names) != len(seqs):
        raise Exception('Found different number of enzyme names and '
                        'sequences.')
    self._enzyme_dict = {}
    for name, seq in zip(names, seqs):
        if '?' in seq:
            # Is unknown sequence, don't keep it
            pass
        elif seq.startswith('(') and seq.endswith(')'):
            # Has four+ cut sites, don't keep it
            pass
        elif '^' in seq:
            # Has reasonable internal cut sites, keep it
            # '^' marks the top-strand cut; the bottom cut is its mirror.
            top_cut = seq.index('^')
            bottom_cut = len(seq) - top_cut - 1
            site = seq.replace('^', '')
            self._enzyme_dict[name] = (site, (top_cut, bottom_cut))
        elif seq.endswith(')'):
            # Has reasonable external cut sites, keep it
            # (4-cutter also starts with '(')
            # separate site and cut locations, e.g. 'GACGC(5/10)'
            site, cuts = seq.split('(')
            cuts = cuts.replace(')', '')
            # Offsets in the file are relative to the site end; store them
            # relative to the site start.
            top_cut, bottom_cut = [int(x) + len(site) for x in
                                   cuts.split('/')]
            self._enzyme_dict[name] = (site, (top_cut, bottom_cut))
    # Clean up the temporary download/extraction directory.
    shutil.rmtree(self._tmpdir)
|
Process rebase file into dict with name and cut site information.
|
def add_additional_options(cls, parser):
    """Attach the optional target-engine options to *parser*.

    Override in subclass if required.
    """
    engine_group = OptionGroup(
        parser, "Target Engine Options",
        "These options are not required, but may be provided if a "
        "specific BPMN application engine is targeted.")
    engine_group.add_option(
        "-e", "--target-engine", dest="target_engine",
        help="target the specified BPMN application engine")
    engine_group.add_option(
        "-t", "--target-version", dest="target_engine_version",
        help="target the specified version of the BPMN application engine")
    parser.add_option_group(engine_group)
|
Override in subclass if required.
|
def read_stat():
    """
    Return per-CPU time counters parsed from /proc/stat.

    :returns: one dict per CPU core of the form
        {"times": {"user": ..., "nice": ..., "sys": ..., "idle": ..., "irq": ...}}
    :rtype: list
    """
    per_cpu = []
    with open("/proc/stat", "rb") as stat_file:
        for line in stat_file:
            fields = line.split()
            # The cpu lines come first; stop at the first non-cpu line.
            if not fields[0].startswith(b"cpu"):
                break
            # The bare "cpu" line aggregates the per-core lines; skip it.
            if fields[0] == b"cpu":
                continue
            per_cpu.append(
                {
                    "times": {
                        "user": int(fields[1]),
                        "nice": int(fields[2]),
                        "sys": int(fields[3]),
                        "idle": int(fields[4]),
                        "irq": int(fields[6]),
                    }
                }
            )
    return per_cpu
|
Returns the system stat information.
:returns: The system stat information.
:rtype: list
|
def project_closed(self, project):
    """
    Called when a project is closed.

    :param project: Project instance
    """
    yield from super().project_closed(project)
    orphaned_disks = yield from self._find_inaccessible_hdd_files()
    for disk_path in orphaned_disks:
        disk_name = os.path.basename(disk_path)
        log.info("Closing VirtualBox VM disk file {}".format(disk_name))
        try:
            yield from self.execute("closemedium", ["disk", disk_path])
        except VirtualBoxError as e:
            # Best effort: log and keep closing the remaining disk files.
            log.warning("Could not close VirtualBox VM disk file {}: {}".format(disk_name, e))
|
Called when a project is closed.
:param project: Project instance
|
def sha_github_file(cls, config, repo_file, repository_api, repository_branch):
    """ Return the GitHub SHA for a file in the repository """
    cfg = config.get_conf()
    token = cfg['sortinghat']['identities_api_token']
    tree_url = repository_api + "/git/trees/" + repository_branch
    logger.debug("Gettting sha data from tree: %s", tree_url)
    response = requests.get(tree_url,
                            headers={"Authorization": "token " + token})
    response.raise_for_status()
    # Scan the tree for the entry matching the requested path.
    for entry in response.json()['tree']:
        if entry['path'] != repo_file:
            continue
        logger.debug("SHA found: %s, ", entry["sha"])
        return entry["sha"]
    return None
|
Return the GitHub SHA for a file in the repository
|
def parse_param_signature(sig):
    """ Parse a parameter signature of the form: type name (= default)? """
    match = PARAM_SIG_RE.match(sig.strip())
    if match is None:
        raise RuntimeError('Parameter signature invalid, got ' + sig)
    groups = match.groups()
    # The first group holds modifiers; the last four are type/name/=/default.
    param_type, param_name, _, param_default = groups[-4:]
    return ParamTuple(name=param_name, typ=param_type,
                      default=param_default, modifiers=groups[0].split())
|
Parse a parameter signature of the form: type name (= default)?
|
def nz(value, none_value, strict=True):
    '''Return ``none_value`` when ``value`` is missing, otherwise ``value``.

    Named after the old VBA ``Nz`` function.  With ``strict=True`` (the
    default) only ``None`` counts as missing; with ``strict=False`` any
    value failing ``is_not_null`` (e.g. an empty string) counts as missing.

    example:
        x = None
        nz(x,"hello")
        --> "hello"
        nz(x,"")
        --> ""
        y = ""
        nz(y,"hello")
        --> ""
        nz(y,"hello", False)
        --> "hello"
    '''
    # The previous implementation carried an always-False debug flag and
    # unreachable print statements; they have been removed.
    if strict:
        return none_value if value is None else value
    return value if is_not_null(value) else none_value
|
This function is named after an old VBA function. It returns a default
value if the passed in value is None. If strict is False it will
treat an empty string as None as well.
example:
x = None
nz(x,"hello")
--> "hello"
nz(x,"")
--> ""
y = ""
nz(y,"hello")
--> ""
nz(y,"hello", False)
--> "hello"
|
def get_imap_capabilities(server):
    """
    Returns a list of an IMAP server's capabilities

    Args:
        server (imapclient.IMAPClient): An instance of imapclient.IMAPClient

    Returns (list): A list of capabilities as strings
    """
    # imapclient returns capabilities as bytes; decode them directly
    # instead of round-tripping through str() and stripping the "b'...'"
    # wrapper text, which was fragile (it deleted every single quote).
    capabilities = [
        cap.decode("utf-8", errors="replace") if isinstance(cap, bytes)
        else str(cap)
        for cap in server.capabilities()
    ]
    logger.debug("IMAP server supports: {0}".format(capabilities))
    return capabilities
|
Returns a list of an IMAP server's capabilities
Args:
server (imapclient.IMAPClient): An instance of imapclient.IMAPClient
Returns (list): A list of capabilities
|
def quantile_for_single_value(self, **kwargs):
    """Returns quantile of each column or row.

    Returns:
        A new QueryCompiler object containing the quantile of each column or row.
    """
    if self._is_transposed:
        # Flip the axis and delegate to the non-transposed representation.
        kwargs["axis"] = kwargs.get("axis", 0) ^ 1
        return self.transpose().quantile_for_single_value(**kwargs)
    axis = kwargs.get("axis", 0)
    q = kwargs.get("q", 0.5)
    assert type(q) is float

    def compute_quantile(df, **inner_kwargs):
        # Partitions that pandas cannot compute a quantile for come back
        # as empty results instead of raising.
        try:
            return pandas.DataFrame.quantile(df, **inner_kwargs)
        except ValueError:
            return pandas.Series()

    reduce_func = self._build_mapreduce_func(compute_quantile, **kwargs)
    result = self._full_axis_reduce(axis, reduce_func)
    # Label the reduced axis with the requested quantile.
    if axis == 0:
        result.index = [q]
    else:
        result.columns = [q]
    return result
|
Returns quantile of each column or row.
Returns:
A new QueryCompiler object containing the quantile of each column or row.
|
def install_package(tar_url, folder, md5_url='{tar_url}.md5',
                    on_download=lambda: None, on_complete=lambda: None):
    """
    Install or update a tar package that has an md5

    Args:
        tar_url (str): URL of package to download
        folder (str): Location to extract tar. Will be created if doesn't exist
        md5_url (str): URL of md5 to use to check for updates
        on_download (Callable): Function that gets called when downloading a new update
        on_complete (Callable): Function that gets called when a new download is complete

    Returns:
        bool: Whether the package was updated

    Raises:
        ValueError: if the md5 URL cannot be fetched/decoded, or if the
            downloaded tar still does not match the remote md5
    """
    data_file = join(folder, basename(tar_url))
    md5_url = md5_url.format(tar_url=tar_url)
    try:
        remote_md5 = download(md5_url).decode('utf-8').split(' ')[0]
    except (UnicodeDecodeError, URLError):
        raise ValueError('Invalid MD5 url: ' + md5_url)
    if remote_md5 != calc_md5(data_file):
        # Stale or missing local copy: notify, remove previously extracted
        # files, then download and extract the fresh archive.
        on_download()
        if isfile(data_file):
            try:
                with tarfile.open(data_file) as tar:
                    # Reverse archive order so members are removed after
                    # the entries listed before them.
                    for i in reversed(list(tar)):
                        try:
                            os.remove(join(folder, i.path))
                        except OSError:
                            pass
            except (OSError, EOFError):
                # Corrupt/unreadable old archive: skip cleanup, re-download.
                pass
        download_extract_tar(tar_url, folder, data_file)
        on_complete()
        # Verify the freshly downloaded archive against the remote md5.
        if remote_md5 != calc_md5(data_file):
            raise ValueError('MD5 url does not match tar: ' + md5_url)
        return True
    return False
|
Install or update a tar package that has an md5
Args:
tar_url (str): URL of package to download
folder (str): Location to extract tar. Will be created if doesn't exist
md5_url (str): URL of md5 to use to check for updates
on_download (Callable): Function that gets called when downloading a new update
on_complete (Callable): Function that gets called when a new download is complete
Returns:
bool: Whether the package was updated
|
def get(self, reference, country, target=None):
    """
    Get the inflation/deflation value change for the target date based
    on the reference date.  ``target`` defaults to today (evaluated per
    call) and the instance's reference and country are used when they
    are not provided as parameters.
    """
    if target is None:
        # Evaluate "today" at call time; the previous signature default
        # (datetime.date.today()) was frozen once at import time.
        target = datetime.date.today()
    # Fall back to the instance's reference and country.  The country
    # fallback was previously missing even though documented.
    reference = self.reference if reference is None else reference
    country = self.country if country is None else country
    # Get the reference and target indices (values) from the source
    reference_value = self.data.get(reference, country).value
    target_value = self.data.get(target, country).value
    # Compute the inflation value and return it
    return self._compute_inflation(target_value, reference_value)
|
Get the inflation/deflation value change for the target date based
on the reference date. Target defaults to today and the instance's
reference and country will be used if they are not provided as
parameters
|
def ext(self):
    """Return the file extension for this video, e.g. 'mp4'.

    The extension comes from the actual filename when known.  Otherwise
    it is the lowercase canonical extension for the video's MIME type,
    with 'vid' used for 'video/unknown'.
    """
    if self._filename:
        _, suffix = os.path.splitext(self._filename)
        return suffix.lstrip('.')
    mime_to_ext = {
        CT.ASF: 'asf',
        CT.AVI: 'avi',
        CT.MOV: 'mov',
        CT.MP4: 'mp4',
        CT.MPG: 'mpg',
        CT.MS_VIDEO: 'avi',
        CT.SWF: 'swf',
        CT.WMV: 'wmv',
        CT.X_MS_VIDEO: 'avi',
    }
    return mime_to_ext.get(self._mime_type, 'vid')
|
Return the file extension for this video, e.g. 'mp4'.
The extension is that from the actual filename if known. Otherwise
it is the lowercase canonical extension for the video's MIME type.
'vid' is used if the MIME type is 'video/unknown'.
|
def is_flapping(self, alert, window=1800, count=2):
    """
    Return true if alert severity has changed more than X times in Y seconds
    """
    since = datetime.utcnow() - timedelta(seconds=window)
    # Count severity-change history entries for this alert inside the window.
    pipeline = [
        {'$match': {
            'environment': alert.environment,
            'resource': alert.resource,
            'event': alert.event,
            'customer': alert.customer
        }},
        {'$unwind': '$history'},
        {'$match': {
            'history.updateTime': {'$gt': since},
            'history.type': 'severity'
        }},
        {'$group': {'_id': '$history.type', 'count': {'$sum': 1}}}
    ]
    return any(
        group['count'] > count
        for group in self.get_db().alerts.aggregate(pipeline)
    )
|
Return true if alert severity has changed more than X times in Y seconds
|
def statistical_inefficiency(X, truncate_acf=True):
    r""" Estimates the statistical inefficiency from univariate time series X

    The statistical inefficiency [1]_ is a measure of the correlatedness of samples in a signal.
    Given a signal :math:`{x_t}` with :math:`N` samples and statistical inefficiency :math:`I \in (0,1]`, there are
    only :math:`I \cdot N` effective or uncorrelated samples in the signal. This means that :math:`I \cdot N` should
    be used in order to compute statistical uncertainties. See [2]_ for a review.

    The statistical inefficiency is computed as :math:`I = (2 \tau)^{-1}` using the damped autocorrelation time

    .. math:: \tau = \frac{1}{2}+\sum_{k=1}^{N} A(k) \left(1-\frac{k}{N}\right)

    where

    .. math:: A(k) = \frac{\langle x_t x_{t+k} \rangle_t - \langle x \rangle_t^2}{\mathrm{var}(x)}

    is the autocorrelation function of the signal :math:`{x_t}`, which is computed either for a single or multiple
    trajectories.

    Parameters
    ----------
    X : float array or list of float arrays
        Univariate time series (single or multiple trajectories)
    truncate_acf : bool, optional, default=True
        When the normalized autocorrelation function passes through 0, it is truncated in order to avoid integrating
        random noise

    Returns
    -------
    I : float
        Statistical inefficiency of the signal; lies in (0, 1] for
        positively correlated data.

    Raises
    ------
    ValueError
        If the signal is constant (zero variance), for which the
        statistical inefficiency is undefined.

    References
    ----------
    .. [1] Anderson, T. W.: The Statistical Analysis of Time Series (Wiley, New York, 1971)
    .. [2] Janke, W: Statistical Analysis of Simulations: Data Correlations and Error Estimation
        Quantum Simulations of Complex Many-Body Systems: From Theory to Algorithms, Lecture Notes,
        J. Grotendorst, D. Marx, A. Muramatsu (Eds.), John von Neumann Institute for Computing, Juelich
        NIC Series 10, pp. 423-445, 2002.
    """
    # check input
    assert np.ndim(X[0]) == 1, 'Data must be 1-dimensional'
    # length of the longest trajectory (replaces the _maxlength helper)
    N = max(len(x) for x in X)
    # mean-free data
    xflat = np.concatenate(X)
    Xmean = np.mean(xflat)
    X0 = [x - Xmean for x in X]
    # Second moment of the *mean-free* data, i.e. the variance.
    # BUGFIX: previously this was np.mean(xflat ** 2), the uncentered second
    # moment, while the ACF numerators below use the centered signal X0 —
    # that biased the normalization for any signal with non-zero mean.
    x2m = np.mean(np.concatenate(X0) ** 2)
    if x2m == 0:
        raise ValueError('Signal is constant (zero variance); '
                         'statistical inefficiency is undefined.')
    # integrate damped autocorrelation
    corrsum = 0.0
    for lag in range(N):
        acf = 0.0
        n = 0.0
        for x in X0:
            Nx = len(x)  # length of this trajectory
            if Nx > lag:  # only use trajectories that are long enough
                acf += np.sum(x[0:Nx - lag] * x[lag:Nx])
                n += float(Nx - lag)
        acf /= n
        if acf <= 0 and truncate_acf:  # zero autocorrelation. Exit
            break
        elif lag > 0:  # start integrating at lag 1 (effect of lag 0 is contained in the 0.5 below)
            corrsum += acf * (1.0 - (float(lag) / float(N)))
    # compute damped correlation time
    corrtime = 0.5 + corrsum / x2m
    # return statistical inefficiency
    return 1.0 / (2 * corrtime)
|
Estimates the statistical inefficiency from univariate time series X
The statistical inefficiency [1]_ is a measure of the correlatedness of samples in a signal.
Given a signal :math:`{x_t}` with :math:`N` samples and statistical inefficiency :math:`I \in (0,1]`, there are
only :math:`I \cdot N` effective or uncorrelated samples in the signal. This means that :math:`I \cdot N` should
be used in order to compute statistical uncertainties. See [2]_ for a review.
The statistical inefficiency is computed as :math:`I = (2 \tau)^{-1}` using the damped autocorrelation time

.. math:: \tau = \frac{1}{2}+\sum_{k=1}^{N} A(k) \left(1-\frac{k}{N}\right)

where

.. math:: A(k) = \frac{\langle x_t x_{t+k} \rangle_t - \langle x \rangle_t^2}{\mathrm{var}(x)}
is the autocorrelation function of the signal :math:`{x_t}`, which is computed either for a single or multiple
trajectories.
Parameters
----------
X : float array or list of float arrays
Univariate time series (single or multiple trajectories)
truncate_acf : bool, optional, default=True
When the normalized autocorrelation function passes through 0, it is truncated in order to avoid integrating
random noise
References
----------
.. [1] Anderson, T. W.: The Statistical Analysis of Time Series (Wiley, New York, 1971)
.. [2] Janke, W: Statistical Analysis of Simulations: Data Correlations and Error Estimation
Quantum Simulations of Complex Many-Body Systems: From Theory to Algorithms, Lecture Notes,
J. Grotendorst, D. Marx, A. Muramatsu (Eds.), John von Neumann Institute for Computing, Juelich
NIC Series 10, pp. 423-445, 2002.
|
def create_embedded_unclaimed_draft(self, test_mode=False, client_id=None, is_for_embedded_signing=False, requester_email_address=None, files=None, file_urls=None, draft_type=None, subject=None, message=None, signers=None, cc_email_addresses=None, signing_redirect_url=None, requesting_redirect_url=None, form_fields_per_document=None, metadata=None, use_preexisting_fields=False, allow_decline=False):
    ''' Creates a new Draft to be used for embedded requesting

    Args:
        test_mode (bool, optional): Whether this is a test; the signature request created from this draft will not be legally binding if set to True. Defaults to False.
        client_id (str): Client id of the app used to create the embedded draft.
        is_for_embedded_signing (bool, optional): Whether this is also for embedded signing. Defaults to False.
        requester_email_address (str): Email address of the requester.
        files (list of str): The uploaded file(s) to send for signature. Use either `files` or `file_urls`.
        file_urls (list of str): URLs of the file for HelloSign to download to send for signature. Use either `files` or `file_urls`.
        draft_type (str): The type of unclaimed draft to create. Use "send_document" for a claimable file and "request_signature" for a claimable signature request. For "request_signature", signer name and email_address are required.
        subject (str, optional): The subject in the email that will be sent to the signers.
        message (str, optional): The custom message in the email that will be sent to the signers.
        signers (list of dict): A list of signers, each with the attributes:
            name (str): The name of the signer
            email_address (str): Email address of the signer
            order (str, optional): The order the signer is required to sign in
        cc_email_addresses (list of str, optional): A list of email addresses that should be CC'd.
        signing_redirect_url (str, optional): The URL you want the signer redirected to after they successfully sign.
        requesting_redirect_url (str, optional): The URL you want the signer to be redirected to after the request has been sent.
        form_fields_per_document (str, optional): The fields that should appear on the document, expressed as serialized JSON (a list of lists of form fields). See the HelloSign API reference (https://www.hellosign.com/api/reference#SignatureRequest).
        metadata (dict, optional): Metadata to associate with the draft.
        use_preexisting_fields (bool): Whether to use preexisting PDF fields.
        allow_decline (bool, optional): Allows signers to decline to sign a document if set to 1. Defaults to 0.

    Returns:
        An UnclaimedDraft object
    '''
    # client_id, requester_email_address and draft_type are mandatory;
    # additionally at least one of files / file_urls must be supplied.
    required = {
        'client_id': client_id,
        'requester_email_address': requester_email_address,
        'draft_type': draft_type
    }
    either_of = [{
        "files": files,
        "file_urls": file_urls
    }]
    self._check_required_fields(required, either_of)
    # Delegate the actual API call, forwarding every option explicitly.
    return self._create_unclaimed_draft(
        test_mode=test_mode,
        client_id=client_id,
        requester_email_address=requester_email_address,
        is_for_embedded_signing=is_for_embedded_signing,
        files=files,
        file_urls=file_urls,
        draft_type=draft_type,
        subject=subject,
        message=message,
        signing_redirect_url=signing_redirect_url,
        requesting_redirect_url=requesting_redirect_url,
        signers=signers,
        cc_email_addresses=cc_email_addresses,
        form_fields_per_document=form_fields_per_document,
        metadata=metadata,
        use_preexisting_fields=use_preexisting_fields,
        allow_decline=allow_decline)
|
Creates a new Draft to be used for embedded requesting
Args:
test_mode (bool, optional): Whether this is a test, the signature request created from this draft will not be legally binding if set to True. Defaults to False.
client_id (str): Client id of the app used to create the embedded draft.
is_for_embedded_signing (bool, optional): Whether this is also for embedded signing. Defaults to False.
requester_email_address (str): Email address of the requester.
files (list of str): The uploaded file(s) to send for signature.
file_urls (list of str): URLs of the file for HelloSign to download to send for signature. Use either `files` or `file_urls`
draft_type (str): The type of unclaimed draft to create. Use "send_document" to create a claimable file, and "request_signature" for a claimable signature request. If the type is "request_signature" then signers name and email_address are not optional.
subject (str, optional): The subject in the email that will be sent to the signers
message (str, optional): The custom message in the email that will be sent to the signers
signers (list of dict): A list of signers, which each has the following attributes:
name (str): The name of the signer
email_address (str): Email address of the signer
order (str, optional): The order the signer is required to sign in
cc_email_addresses (list of str, optional): A list of email addresses that should be CC'd
signing_redirect_url (str, optional): The URL you want the signer redirected to after they successfully sign.
requesting_redirect_url (str, optional): The URL you want the signer to be redirected to after the request has been sent.
form_fields_per_document (str, optional): The fields that should appear on the document, expressed as a serialized JSON data structure which is a list of lists of the form fields. Please refer to the API reference of HelloSign for more details (https://www.hellosign.com/api/reference#SignatureRequest)
metadata (dict, optional): Metadata to associate with the draft
use_preexisting_fields (bool): Whether to use preexisting PDF fields
allow_decline (bool, optional): Allows signers to decline to sign a document if set to 1. Defaults to 0.
Returns:
An UnclaimedDraft object
|
def save_namespace(self, filename):
    """Serialize the picklable part of the current namespace to *filename*."""
    from spyder_kernels.utils.nsview import get_remote_data
    from spyder_kernels.utils.iofuncs import iofunctions
    namespace = self._get_current_namespace()
    view_settings = self.namespace_view_settings
    # Keep only picklable values, skipping the standard exclusions,
    # and copy so saving cannot mutate the live namespace dict.
    picklable_data = get_remote_data(
        namespace, view_settings, mode='picklable',
        more_excluded_names=EXCLUDED_NAMES).copy()
    return iofunctions.save(picklable_data, filename)
|
Save namespace into filename
|
def raise_for_missing_namespace(self, line: str, position: int, namespace: str, name: str) -> None:
    """Raise an exception if the namespace is not defined."""
    if self.has_namespace(namespace):
        return
    raise UndefinedNamespaceWarning(self.get_line_number(), line, position, namespace, name)
|
Raise an exception if the namespace is not defined.
|
def write(self, pack_uri, blob):
    """
    Write *blob* to this zip package with the membername corresponding to
    *pack_uri*.
    """
    membername = pack_uri.membername
    self._zipf.writestr(membername, blob)
|
Write *blob* to this zip package with the membername corresponding to
*pack_uri*.
|
def from_function(cls, function):
    """Create a FunctionDescriptor from a function instance.

    This function is used to create the function descriptor from
    a python function. If a function is a class function, it should
    not be used by this function.

    Args:
        cls: Current class which is required argument for classmethod.
        function: the python function used to create the function
            descriptor.

    Returns:
        The FunctionDescriptor instance created according to the function.
    """
    hasher = hashlib.sha1()
    try:
        # When running a script or in IPython, hash the source code so
        # that redefining the function produces a new descriptor.
        source = inspect.getsource(function)
        if sys.version_info[0] >= 3:
            source = source.encode()
        hasher.update(source)
        source_hash = hasher.digest()
    except (IOError, OSError, TypeError):
        # Source code may not be available:
        # e.g. Cython or Python interpreter.
        source_hash = b""
    # Plain functions carry no class name.
    return cls(function.__module__, function.__name__, "", source_hash)
|
Create a FunctionDescriptor from a function instance.
This function is used to create the function descriptor from
a python function. If a function is a class function, it should
not be used by this function.
Args:
cls: Current class which is required argument for classmethod.
function: the python function used to create the function
descriptor.
Returns:
The FunctionDescriptor instance created according to the function.
|
def param_array(self):
    """
    Array representing the parameters of this class.
    There is only one copy of all parameters in memory, two during optimization.

    !WARNING!: setting the parameter array MUST always be done in memory:
    m.param_array[:] = m_copy.param_array
    """
    # Allocate the backing array lazily, and reallocate whenever the
    # parameter count has changed since the last access.
    cached = self.__dict__.get('_param_array_', None)
    if cached is None or cached.size != self.size:
        self._param_array_ = np.empty(self.size, dtype=np.float64)
    return self._param_array_
|
Array representing the parameters of this class.
There is only one copy of all parameters in memory, two during optimization.
!WARNING!: setting the parameter array MUST always be done in memory:
m.param_array[:] = m_copy.param_array
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.