| code (string, lengths 75–104k) | docstring (string, lengths 1–46.9k) |
|---|---|
def attach_mock(self, mock, attribute):
    """Attach *mock* to this mock under *attribute*.

    The attached mock's name and parent bookkeeping are reset so that
    calls to it are recorded in this mock's `method_calls` and
    `mock_calls` attributes.
    """
    # Sever the attached mock's existing identity/parent links before
    # adopting it, so call recording is rerouted through this mock.
    for field, reset_value in (('_mock_parent', None),
                               ('_mock_new_parent', None),
                               ('_mock_name', ''),
                               ('_mock_new_name', None)):
        setattr(mock, field, reset_value)
    setattr(self, attribute, mock)
|
Attach a mock as an attribute of this one, replacing its name and
parent. Calls to the attached mock will be recorded in the
`method_calls` and `mock_calls` attributes of this one.
|
def _jars_to_directories(self, target):
    """Extract each jar imported by *target* and collect the extraction dirs.

    :returns: a set of filepaths to directories containing the contents
        of the jars.
    """
    jar_import_products = self.context.products.get_data(JarImportProducts)
    # Each import yields a (coordinate, jar) pair; _extract_jar returns
    # the directory the jar was unpacked into.
    return {self._extract_jar(coordinate, jar)
            for coordinate, jar in jar_import_products.imports(target)}
|
Extracts and maps jars to directories containing their contents.
:returns: a set of filepaths to directories containing the contents of jar.
|
def update_factor(self, name, body):
    """Update a Guardian factor (useful to enable / disable a factor).

    Args:
        name (str): Either ``push-notification`` or ``sms``.
        body (dict): Attributes to modify.

    See: https://auth0.com/docs/api/management/v2#!/Guardian/put_factors_by_name
    """
    return self.client.put(self._url('factors/{}'.format(name)), data=body)
|
Update Guardian factor
Useful to enable / disable factor
Args:
name (str): Either push-notification or sms
body (dict): Attributes to modify.
See: https://auth0.com/docs/api/management/v2#!/Guardian/put_factors_by_name
|
def connect(self):
    """
    Connect to a running SSH agent, if one can be reached.

    Called automatically by the run() method of the AgentProxyThread.
    On success sets ``self._conn``; when no agent is reachable the
    method simply returns without setting it.
    """
    if ('SSH_AUTH_SOCK' in os.environ) and (sys.platform != 'win32'):
        conn = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
        try:
            retry_on_signal(lambda: conn.connect(os.environ['SSH_AUTH_SOCK']))
        except OSError:
            # probably a dangling env var: the ssh agent is gone.
            # Catch only OS/socket-level failures (socket.error is an
            # alias of OSError on Python 3) instead of a bare `except:`,
            # which also swallowed KeyboardInterrupt/SystemExit and real
            # programming errors.
            return
    elif sys.platform == 'win32':
        import win_pageant
        if win_pageant.can_talk_to_agent():
            conn = win_pageant.PageantConnection()
        else:
            return
    else:
        # no agent support
        return
    self._conn = conn
|
Method automatically called by the run() method of the AgentProxyThread
|
def verify_high(self, high):
    '''
    Verify that the high data is viable and follows the data structure.

    Walks every ID declaration in ``high`` and checks that:
      - IDs are strings and bodies are dictionaries,
      - each state entry is a list containing exactly one function name
        (with no embedded whitespace) plus argument dicts,
      - requisite statements (require/watch/prereq/onchanges) are lists
        of single-key dicts with hashable values and dot-free requisite
        types,
      - no pair of states requires each other (recursive requisites).

    Returns a list of human-readable error strings; an empty list means
    the data passed validation.
    '''
    errors = []
    if not isinstance(high, dict):
        errors.append('High data is not a dictionary and is invalid')
    # reqs maps each ID to {'state': <state name>, <required id>: <req type>};
    # it is consulted below to detect mutually-recursive requisites.
    reqs = OrderedDict()
    for name, body in six.iteritems(high):
        # Keys starting with '__' are internal bookkeeping (e.g. __sls__).
        if name.startswith('__'):
            continue
        if not isinstance(name, six.string_types):
            errors.append(
                'ID \'{0}\' in SLS \'{1}\' is not formed as a string, but '
                'is a {2}'.format(
                    name,
                    body['__sls__'],
                    type(name).__name__
                )
            )
        if not isinstance(body, dict):
            err = ('The type {0} in {1} is not formatted as a dictionary'
                   .format(name, body))
            errors.append(err)
            continue
        for state in body:
            if state.startswith('__'):
                continue
            if not isinstance(body[state], list):
                errors.append(
                    'State \'{0}\' in SLS \'{1}\' is not formed as a list'
                    .format(name, body['__sls__'])
                )
            else:
                # fun counts function declarations for this state; exactly
                # one is expected (checked after the arg loop below).
                fun = 0
                if '.' in state:
                    fun += 1
                for arg in body[state]:
                    if isinstance(arg, six.string_types):
                        fun += 1
                        if ' ' in arg.strip():
                            errors.append(('The function "{0}" in state '
                                           '"{1}" in SLS "{2}" has '
                                           'whitespace, a function with whitespace is '
                                           'not supported, perhaps this is an argument '
                                           'that is missing a ":"').format(
                                               arg,
                                               name,
                                               body['__sls__']))
                    elif isinstance(arg, dict):
                        # The arg is a dict, if the arg is require or
                        # watch, it must be a list.
                        #
                        # Add the requires to the reqs dict and check them
                        # all for recursive requisites.
                        argfirst = next(iter(arg))
                        if argfirst in ('require', 'watch', 'prereq', 'onchanges'):
                            if not isinstance(arg[argfirst], list):
                                errors.append(('The {0}'
                                               ' statement in state \'{1}\' in SLS \'{2}\' '
                                               'needs to be formed as a list').format(
                                                   argfirst,
                                                   name,
                                                   body['__sls__']
                                               ))
                            # It is a list, verify that the members of the
                            # list are all single key dicts.
                            else:
                                reqs[name] = {'state': state}
                                for req in arg[argfirst]:
                                    # A bare string requisite is shorthand
                                    # for {'id': <string>}.
                                    if isinstance(req, six.string_types):
                                        req = {'id': req}
                                    if not isinstance(req, dict):
                                        err = ('Requisite declaration {0}'
                                               ' in SLS {1} is not formed as a'
                                               ' single key dictionary').format(
                                                   req,
                                                   body['__sls__'])
                                        errors.append(err)
                                        continue
                                    req_key = next(iter(req))
                                    req_val = req[req_key]
                                    if '.' in req_key:
                                        errors.append((
                                            'Invalid requisite type \'{0}\' '
                                            'in state \'{1}\', in SLS '
                                            '\'{2}\'. Requisite types must '
                                            'not contain dots, did you '
                                            'mean \'{3}\'?'.format(
                                                req_key,
                                                name,
                                                body['__sls__'],
                                                req_key[:req_key.find('.')]
                                            )
                                        ))
                                    if not ishashable(req_val):
                                        errors.append((
                                            'Illegal requisite "{0}", '
                                            'is SLS {1}\n'
                                        ).format(
                                            six.text_type(req_val),
                                            body['__sls__']))
                                        continue
                                    # Check for global recursive requisites
                                    reqs[name][req_val] = req_key
                                    # I am going beyond 80 chars on
                                    # purpose, this is just too much
                                    # of a pain to deal with otherwise
                                    if req_val in reqs:
                                        if name in reqs[req_val]:
                                            if reqs[req_val][name] == state:
                                                if reqs[req_val]['state'] == reqs[name][req_val]:
                                                    err = ('A recursive '
                                                           'requisite was found, SLS '
                                                           '"{0}" ID "{1}" ID "{2}"'
                                                           ).format(
                                                               body['__sls__'],
                                                               name,
                                                               req_val
                                                           )
                                                    errors.append(err)
                        # Make sure that there is only one key in the
                        # dict
                        if len(list(arg)) != 1:
                            errors.append(('Multiple dictionaries '
                                           'defined in argument of state \'{0}\' in SLS'
                                           ' \'{1}\'').format(
                                               name,
                                               body['__sls__']))
                if not fun:
                    # 'require'/'watch' pseudo-states legitimately carry
                    # no function declaration.
                    if state == 'require' or state == 'watch':
                        continue
                    errors.append(('No function declared in state \'{0}\' in'
                                   ' SLS \'{1}\'').format(state, body['__sls__']))
                elif fun > 1:
                    errors.append(
                        'Too many functions declared in state \'{0}\' in '
                        'SLS \'{1}\''.format(state, body['__sls__'])
                    )
    return errors
|
Verify that the high data is viable and follows the data structure
|
def register_activity_type(domain=None, name=None, version=None, description=None, defaultTaskStartToCloseTimeout=None, defaultTaskHeartbeatTimeout=None, defaultTaskList=None, defaultTaskPriority=None, defaultTaskScheduleToStartTimeout=None, defaultTaskScheduleToCloseTimeout=None):
    """Register a new activity type along with its configuration settings in the specified domain.

    Access control: IAM policies can restrict this action; if the caller
    lacks permission or a parameter falls outside its constraints, the
    action fails with cause OPERATION_NOT_PERMITTED. See "Using IAM to
    Manage Access to Amazon SWF Workflows" and the AWS API Documentation.

    All duration parameters are strings holding an integer number of
    seconds (>= 0) or the value 'NONE' for unlimited duration, and each
    default can be overridden per task via the ScheduleActivityTask
    decision.

    :type domain: string
    :param domain: [REQUIRED] The name of the domain in which this
        activity is to be registered.
    :type name: string
    :param name: [REQUIRED] The name of the activity type within the
        domain. Must not start or end with whitespace, and must not
        contain ``:``, ``/``, ``|``, control characters (u0000-u001f,
        u007f-u009f) or the literal string "arn".
    :type version: string
    :param version: [REQUIRED] The version of the activity type. The
        (name, version) combination must be unique within the domain;
        same character restrictions as ``name``.
    :type description: string
    :param description: A textual description of the activity type.
    :type defaultTaskStartToCloseTimeout: string
    :param defaultTaskStartToCloseTimeout: Default maximum duration a
        worker can take to process tasks of this activity type.
    :type defaultTaskHeartbeatTimeout: string
    :param defaultTaskHeartbeatTimeout: Default maximum time before which
        a worker must report progress via RecordActivityTaskHeartbeat;
        exceeding it times the task out, and a late heartbeat/result
        receives an UnknownResource fault.
    :type defaultTaskList: dict
    :param defaultTaskList: Default task list (``{'name': <string>}``,
        ``name`` required) used when none is provided at scheduling time.
    :type defaultTaskPriority: string
    :param defaultTaskPriority: Default task priority; integers from
        Java's Integer.MIN_VALUE (-2147483648) to Integer.MAX_VALUE
        (2147483647), '0' if unset; higher numbers mean higher priority.
    :type defaultTaskScheduleToStartTimeout: string
    :param defaultTaskScheduleToStartTimeout: Default maximum duration a
        task may wait before being assigned to a worker.
    :type defaultTaskScheduleToCloseTimeout: string
    :param defaultTaskScheduleToCloseTimeout: Default maximum total
        duration for a task of this activity type.
    :returns: None. This is a documentation stub generated from the AWS
        API reference; it performs no service call.
    """
    pass
|
Registers a new activity type along with its configuration settings in the specified domain.
Access Control
You can use IAM policies to control this action's access to Amazon SWF resources as follows:
If the caller does not have sufficient permissions to invoke the action, or the parameter values fall outside the specified constraints, the action fails. The associated event attribute's cause parameter will be set to OPERATION_NOT_PERMITTED. For details and example IAM policies, see Using IAM to Manage Access to Amazon SWF Workflows .
See also: AWS API Documentation
:example: response = client.register_activity_type(
domain='string',
name='string',
version='string',
description='string',
defaultTaskStartToCloseTimeout='string',
defaultTaskHeartbeatTimeout='string',
defaultTaskList={
'name': 'string'
},
defaultTaskPriority='string',
defaultTaskScheduleToStartTimeout='string',
defaultTaskScheduleToCloseTimeout='string'
)
:type domain: string
:param domain: [REQUIRED]
The name of the domain in which this activity is to be registered.
:type name: string
:param name: [REQUIRED]
The name of the activity type within the domain.
The specified string must not start or end with whitespace. It must not contain a : (colon), / (slash), | (vertical bar), or any control characters (u0000-u001f | u007f - u009f). Also, it must not contain the literal string quotarnquot.
:type version: string
:param version: [REQUIRED]
The version of the activity type.
Note
The activity type consists of the name and version, the combination of which must be unique within the domain.
The specified string must not start or end with whitespace. It must not contain a : (colon), / (slash), | (vertical bar), or any control characters (u0000-u001f | u007f - u009f). Also, it must not contain the literal string quotarnquot.
:type description: string
:param description: A textual description of the activity type.
:type defaultTaskStartToCloseTimeout: string
:param defaultTaskStartToCloseTimeout: If set, specifies the default maximum duration that a worker can take to process tasks of this activity type. This default can be overridden when scheduling an activity task using the ScheduleActivityTask decision.
The duration is specified in seconds; an integer greater than or equal to 0. The value 'NONE' can be used to specify unlimited duration.
:type defaultTaskHeartbeatTimeout: string
:param defaultTaskHeartbeatTimeout: If set, specifies the default maximum time before which a worker processing a task of this type must report progress by calling RecordActivityTaskHeartbeat . If the timeout is exceeded, the activity task is automatically timed out. This default can be overridden when scheduling an activity task using the ScheduleActivityTask decision. If the activity worker subsequently attempts to record a heartbeat or returns a result, the activity worker receives an UnknownResource fault. In this case, Amazon SWF no longer considers the activity task to be valid; the activity worker should clean up the activity task.
The duration is specified in seconds; an integer greater than or equal to 0. The value 'NONE' can be used to specify unlimited duration.
:type defaultTaskList: dict
:param defaultTaskList: If set, specifies the default task list to use for scheduling tasks of this activity type. This default task list is used if a task list is not provided when a task is scheduled through the ScheduleActivityTask decision.
name (string) -- [REQUIRED]The name of the task list.
:type defaultTaskPriority: string
:param defaultTaskPriority: The default task priority to assign to the activity type. If not assigned, then '0' will be used. Valid values are integers that range from Java's Integer.MIN_VALUE (-2147483648) to Integer.MAX_VALUE (2147483647). Higher numbers indicate higher priority.
For more information about setting task priority, see Setting Task Priority in the Amazon Simple Workflow Developer Guide .
:type defaultTaskScheduleToStartTimeout: string
:param defaultTaskScheduleToStartTimeout: If set, specifies the default maximum duration that a task of this activity type can wait before being assigned to a worker. This default can be overridden when scheduling an activity task using the ScheduleActivityTask decision.
The duration is specified in seconds; an integer greater than or equal to 0. The value 'NONE' can be used to specify unlimited duration.
:type defaultTaskScheduleToCloseTimeout: string
:param defaultTaskScheduleToCloseTimeout: If set, specifies the default maximum duration for a task of this activity type. This default can be overridden when scheduling an activity task using the ScheduleActivityTask decision.
The duration is specified in seconds; an integer greater than or equal to 0. The value 'NONE' can be used to specify unlimited duration.
:returns:
domain (string) -- [REQUIRED]
The name of the domain in which this activity is to be registered.
name (string) -- [REQUIRED]
The name of the activity type within the domain.
The specified string must not start or end with whitespace. It must not contain a : (colon), / (slash), | (vertical bar), or any control characters (u0000-u001f | u007f - u009f). Also, it must not contain the literal string quotarnquot.
version (string) -- [REQUIRED]
The version of the activity type.
Note
The activity type consists of the name and version, the combination of which must be unique within the domain.
The specified string must not start or end with whitespace. It must not contain a : (colon), / (slash), | (vertical bar), or any control characters (u0000-u001f | u007f - u009f). Also, it must not contain the literal string quotarnquot.
description (string) -- A textual description of the activity type.
defaultTaskStartToCloseTimeout (string) -- If set, specifies the default maximum duration that a worker can take to process tasks of this activity type. This default can be overridden when scheduling an activity task using the ScheduleActivityTask decision.
The duration is specified in seconds; an integer greater than or equal to 0. The value "NONE" can be used to specify unlimited duration.
defaultTaskHeartbeatTimeout (string) -- If set, specifies the default maximum time before which a worker processing a task of this type must report progress by calling RecordActivityTaskHeartbeat . If the timeout is exceeded, the activity task is automatically timed out. This default can be overridden when scheduling an activity task using the ScheduleActivityTask decision. If the activity worker subsequently attempts to record a heartbeat or returns a result, the activity worker receives an UnknownResource fault. In this case, Amazon SWF no longer considers the activity task to be valid; the activity worker should clean up the activity task.
The duration is specified in seconds; an integer greater than or equal to 0. The value "NONE" can be used to specify unlimited duration.
defaultTaskList (dict) -- If set, specifies the default task list to use for scheduling tasks of this activity type. This default task list is used if a task list is not provided when a task is scheduled through the ScheduleActivityTask decision.
name (string) -- [REQUIRED]The name of the task list.
defaultTaskPriority (string) -- The default task priority to assign to the activity type. If not assigned, then "0" will be used. Valid values are integers that range from Java's Integer.MIN_VALUE (-2147483648) to Integer.MAX_VALUE (2147483647). Higher numbers indicate higher priority.
For more information about setting task priority, see Setting Task Priority in the Amazon Simple Workflow Developer Guide .
defaultTaskScheduleToStartTimeout (string) -- If set, specifies the default maximum duration that a task of this activity type can wait before being assigned to a worker. This default can be overridden when scheduling an activity task using the ScheduleActivityTask decision.
The duration is specified in seconds; an integer greater than or equal to 0. The value "NONE" can be used to specify unlimited duration.
defaultTaskScheduleToCloseTimeout (string) -- If set, specifies the default maximum duration for a task of this activity type. This default can be overridden when scheduling an activity task using the ScheduleActivityTask decision.
The duration is specified in seconds; an integer greater than or equal to 0. The value "NONE" can be used to specify unlimited duration.
|
def pdf_from_post(self):
    """Return a pdf stream with the stickers.

    Reads the ``html`` and ``style`` fields from the request form, wraps
    them in a minimal HTML document and renders it to a temporary PDF
    file via ``createPdf``.

    :returns: whatever ``createPdf`` returns for the generated document.
    """
    html = self.request.form.get("html")
    style = self.request.form.get("style")
    reporthtml = "<html><head>{0}</head><body>{1}</body></html>"
    reporthtml = reporthtml.format(style, html)
    reporthtml = safe_unicode(reporthtml).encode("utf-8")
    # tempfile.mktemp() is deprecated and vulnerable to a symlink race
    # (the returned name can be claimed by an attacker before use);
    # create the file atomically instead and hand its name to createPdf.
    with tempfile.NamedTemporaryFile(suffix=".pdf", delete=False) as tmp:
        pdf_fn = tmp.name
    pdf_file = createPdf(htmlreport=reporthtml, outfile=pdf_fn)
    return pdf_file
|
Returns a pdf stream with the stickers
|
def _launch_editor(starting_text=''):
"Launch editor, let user write text, then return that text."
# TODO: What is a reasonable default for windows? Does this approach even
# make sense on windows?
editor = os.environ.get('EDITOR', 'vim')
with tempfile.TemporaryDirectory() as dirname:
filename = pathlib.Path(dirname) / 'metadata.yml'
with filename.open(mode='wt') as handle:
handle.write(starting_text)
subprocess.call([editor, filename])
with filename.open(mode='rt') as handle:
text = handle.read()
return text
|
Launch editor, let user write text, then return that text.
|
def to_python(self, value):
    """Strip any dodgy HTML tags from the input."""
    if value in self.empty_values:
        # CharField.empty_value was introduced in Django 1.11; earlier
        # versions always returned a unicode string for empty input, so
        # fall back to u'' when the attribute is absent.
        return getattr(self, 'empty_value', u'')
    return bleach.clean(value, **self.bleach_options)
|
Strips any dodgy HTML tags from the input
|
def compute_return(self, start_date, end_date, rate="MID"):
    """
    Compute the return of the currency between two dates.

    Args:
        start_date: first date of the period (must be in the data index).
        end_date: last date of the period; must be strictly after
            ``start_date``.
        rate: which quote to use -- "MID", "ASK" or "BID".

    Returns:
        The simple return ``(end_price / start_price) - 1.0``.

    Raises:
        ValueError: if ``rate`` is unknown or ``end_date`` is not after
            ``start_date``.
    """
    if rate not in ("MID", "ASK", "BID"):
        raise ValueError("Unknown rate type (%s)- must be 'MID', 'ASK' or 'BID'" % str(rate))
    if end_date <= start_date:
        raise ValueError("End date must be on or after start date")
    df = self.generate_dataframe(start_date=start_date, end_date=end_date)
    # DataFrame.ix was deprecated in pandas 0.20 and removed in 1.0;
    # .loc is the supported label-based indexer.
    start_price = df.loc[start_date, rate]
    end_price = df.loc[end_date, rate]
    return (end_price / start_price) - 1.0
|
Compute the return of the currency between two dates
|
def merge_pot1_files(self, delete_source=True):
    """Merge the first-order DFPT POT files produced by this work.

    Called when all the q-points have been computed. Runs `mrgdvdb`
    sequentially on the local machine to produce the final DVDB file in
    the outdir of the `Work`.

    Args:
        delete_source: True if POT1 files should be removed after
            (successful) merge.

    Returns:
        Path to the output DVDB file, or None if no DFPT POT file is
        found.
    """
    max_pertcase = 3 * len(self[0].input.structure)
    pot1_files = []
    for task in self:
        if not isinstance(task, DfptTask):
            continue
        for path in task.outdir.list_filepaths(wildcard="*_POT*"):
            # Keep only atomic perturbations, i.e. files whose pertcase
            # index does not exceed 3 * natom.
            marker = path.rindex("_POT")
            pertcase = int(path[marker + 4:].replace(".nc", ""))
            if pertcase <= max_pertcase:
                pot1_files.append(path)
    # prtpot = 0 disables the output of the DFPT POT files, so an empty
    # list is not fatal here.
    if not pot1_files:
        return None
    self.history.info("Will call mrgdvdb to merge %s files:" % len(pot1_files))
    # Final DVDB file will be produced in the outdir of the work.
    out_dvdb = self.outdir.path_in("out_DVDB")
    if len(pot1_files) == 1:
        # Nothing to merge: just copy the single file to the work outdir.
        shutil.copy(pot1_files[0], out_dvdb)
    else:
        # FIXME: The merge may require a non-negligible amount of memory
        # if lots of qpts. Besides there are machines such as lemaitre3
        # that are problematic when running MPI applications on the
        # front-end.
        mrgdvdb = wrappers.Mrgdvdb(manager=self[0].manager, verbose=0)
        mrgdvdb.merge(self.outdir.path, pot1_files, out_dvdb, delete_source=delete_source)
    return out_dvdb
|
This method is called when all the q-points have been computed.
It runs `mrgdvdb` in sequential on the local machine to produce
the final DVDB file in the outdir of the `Work`.
Args:
delete_source: True if POT1 files should be removed after (successful) merge.
Returns:
path to the output DVDB file. None if not DFPT POT file is found.
|
def writeB1logfile(filename, data):
    """Write a header structure into a B1 logfile.

    Inputs:
        filename: name of the file.
        data: header dictionary

    Notes:
        exceptions pass through to the caller.
    """
    allkeys = list(data.keys())
    # Use a context manager so the file handle is closed even when a
    # formatter raises (the docstring promises exceptions propagate; the
    # previous explicit open()/close() leaked the handle in that case).
    with open(filename, 'wt', encoding='utf-8') as f:
        for ld in _logfile_data:  # process each line
            linebegin = ld[0]
            fieldnames = ld[1]
            # set the default formatter if it is not given
            if len(ld) < 3 or ld[2] is None:
                formatter = str
            else:
                formatter = ld[2]
            # this will contain the formatted values.
            formatted = ''
            if isinstance(fieldnames, str):
                # scalar field name, just one field. Formatter should be
                # a callable.
                if fieldnames not in allkeys:
                    # this field has already been processed
                    continue
                try:
                    formatted = formatter(data[fieldnames])
                except KeyError:
                    # field not found in param structure
                    continue
            elif isinstance(fieldnames, tuple):
                # more than one field name in a tuple. In this case,
                # formatter can be a tuple of callables...
                if all(fn not in allkeys for fn in fieldnames):
                    # if all the fields have been processed:
                    continue
                if isinstance(formatter, tuple) and len(formatter) == len(fieldnames):
                    formatted = ' '.join([ft(data[fn])
                                          for ft, fn in zip(formatter, fieldnames)])
                # ...or a single callable...
                elif not isinstance(formatter, tuple):
                    formatted = formatter([data[fn] for fn in fieldnames])
                # ...otherwise raise an exception.
                else:
                    # NOTE: the message used to contain an accidental run
                    # of spaces from a backslash line continuation inside
                    # the string literal; normalized here.
                    raise SyntaxError('Programming error: formatter should be a scalar or '
                                      'a tuple of the same length as the field names in '
                                      'logfile_data.')
            else:  # fieldnames is neither a string, nor a tuple.
                raise SyntaxError(
                    'Invalid syntax (programming error) in logfile_data in writeparamfile().')
            f.write(linebegin + ':\t' + formatted + '\n')
            if isinstance(fieldnames, tuple):
                for fn in fieldnames:  # remove the params treated.
                    if fn in allkeys:
                        allkeys.remove(fn)
            else:
                if fieldnames in allkeys:
                    allkeys.remove(fieldnames)
        # write untreated params
        for k in allkeys:
            f.write(k + ':\t' + str(data[k]) + '\n')
|
Write a header structure into a B1 logfile.
Inputs:
filename: name of the file.
data: header dictionary
Notes:
exceptions pass through to the caller.
|
def transform_api_header_authorization(param, value):
    """Transform a username:password value into a base64 string."""
    username, sep, password = value.partition(":")
    if not sep:
        # No colon present: same failure mode as the str.split ValueError.
        raise click.BadParameter(
            "Authorization header needs to be Authorization=username:password",
            param=param,
        )
    credentials = "%s:%s" % (username.strip(), password)
    token = base64.b64encode(credentials.encode())
    return "Basic %s" % token.decode("utf-8")
|
Transform a username:password value into a base64 string.
|
def add_mip_obj(model):
    """Add a mixed-integer version of a minimal medium to the model.

    Changes the optimization objective to finding the medium with the
    least components::

        minimize size(R) where R part of import_reactions

    Arguments
    ---------
    model : cobra.model
        The model to modify.
    """
    if len(model.variables) > 1e4:
        LOGGER.warning("the MIP version of minimal media is extremely slow for"
                       " models that large :(")
    exchange_rxns = find_boundary_types(model, "exchange")
    big_m = max(abs(bound) for rxn in exchange_rxns for bound in rxn.bounds)
    prob = model.problem
    coefs = {}
    to_add = []
    for rxn in exchange_rxns:
        # A single-reactant exchange exports the metabolite, so the
        # reverse variable carries the import flux; otherwise the
        # forward variable does.
        is_export = len(rxn.reactants) == 1
        flux_var = rxn.reverse_variable if is_export else rxn.forward_variable
        indicator = prob.Variable("ind_" + rxn.id, lb=0, ub=1, type="binary")
        # Big-M style constraint: flux_var can be nonzero only when the
        # binary indicator is switched on.
        indicator_const = prob.Constraint(
            flux_var - indicator * big_m, ub=0,
            name="ind_constraint_" + rxn.id)
        to_add.extend([indicator, indicator_const])
        coefs[indicator] = 1
    model.add_cons_vars(to_add)
    model.solver.update()
    model.objective.set_linear_coefficients(coefs)
    model.objective.direction = "min"
|
Add a mixed-integer version of a minimal medium to the model.
Changes the optimization objective to finding the medium with the least
components::
minimize size(R) where R part of import_reactions
Arguments
---------
model : cobra.model
The model to modify.
|
def file_type(self, file):
    """Use python-magic to determine file type.

    Returns 'png' or 'jpg' on success, nothing on failure.
    """
    try:
        description = magic.from_file(file)
        if isinstance(description, bytes):
            # In python2 and travis python3 (?!) decode to get unicode string
            description = description.decode('utf-8')
    except (TypeError, IOError):
        return None
    for pattern, extension in (('PNG image data', 'png'),
                               ('JPEG image data', 'jpg')):
        if re.search(pattern, description):
            return extension
    # failed
    return None
|
Use python-magic to determine file type.
Returns 'png' or 'jpg' on success, nothing on failure.
|
def get_limits(self, coord='data'):
    """Get the bounding box of the viewer extents.

    Returns
    -------
    limits : tuple
        Bounding box in coordinates of type `coord` in the form of
        ``(ll_pt, ur_pt)``.
    """
    limits = self.t_['limits']
    if limits is None:
        # No user-defined limits: fall back to the dimensions of the
        # currently loaded image, if there is one.
        image = self.get_image()
        if image is not None:
            wd, ht = image.get_size()
            lower = (self.data_off, self.data_off)
            upper = (float(wd - 1 + self.data_off),
                     float(ht - 1 + self.data_off))
            limits = (lower, upper)
        else:
            # No image either: derive limits from any plotted points.
            pts = self.get_canvas().get_points()
            if len(pts) > 0:
                limits = trcalc.get_bounds(pts)
            else:
                # Nothing to go on; use a degenerate default box.
                limits = ((0.0, 0.0), (0.0, 0.0))

    # Map from data coordinates into the requested coordinate system.
    crdmap = self.get_coordmap(coord)
    return crdmap.data_to(limits)
|
Get the bounding box of the viewer extents.
Returns
-------
limits : tuple
Bounding box in coordinates of type `coord` in the form of
``(ll_pt, ur_pt)``.
|
def zoom_in(self):
    """Increase zoom factor and redraw TimeLine"""
    position = self._zoom_factors.index(self._zoom_factor)
    if position + 1 == len(self._zoom_factors):
        # Nothing to do: already at the maximum zoom factor
        return
    self._zoom_factor = self._zoom_factors[position + 1]
    if self._zoom_factors.index(self.zoom_factor) + 1 == len(self._zoom_factors):
        # Just reached the last factor: prevent further zooming in
        self._button_zoom_in.config(state=tk.DISABLED)
    # After zooming in, zooming back out is always possible
    self._button_zoom_out.config(state=tk.NORMAL)
    self.draw_timeline()
|
Increase zoom factor and redraw TimeLine
|
def load_image(self, idx):
    """
    Load input image and preprocess for Caffe:
    - cast to float
    - switch channels RGB -> BGR
    - subtract mean
    - transpose to channel x height x width order
    """
    path = '{}/Images/spatial_envelope_256x256_static_8outdoorcategories/{}.jpg'.format(self.siftflow_dir, idx)
    img = np.array(Image.open(path), dtype=np.float32)
    img = img[:, :, ::-1]            # RGB -> BGR
    img -= self.mean                 # zero-center with the dataset mean
    return img.transpose((2, 0, 1))  # HWC -> CHW
|
Load input image and preprocess for Caffe:
- cast to float
- switch channels RGB -> BGR
- subtract mean
- transpose to channel x height x width order
|
def get_activity_lookup_session(self, proxy, *args, **kwargs):
    """Gets the ``OsidSession`` associated with the activity lookup service.

    :param proxy: a proxy
    :type proxy: ``osid.proxy.Proxy``
    :return: an ``ActivityLookupSession``
    :rtype: ``osid.learning.ActivityLookupSession``
    :raise: ``NullArgument`` -- ``proxy`` is ``null``
    :raise: ``OperationFailed`` -- unable to complete request
    :raise: ``Unimplemented`` -- ``supports_activity_lookup()`` is ``false``

    *compliance: optional -- This method must be implemented if ``supports_activity_lookup()`` is ``true``.*

    """
    # Guard: the service must advertise support for activity lookup.
    if not self.supports_activity_lookup():
        raise Unimplemented()
    try:
        from . import sessions
    except ImportError:
        # Session implementations are unavailable in this deployment.
        raise OperationFailed()
    proxy = self._convert_proxy(proxy)
    try:
        return sessions.ActivityLookupSession(proxy=proxy, runtime=self._runtime)
    except AttributeError:
        # The sessions module does not provide this session type.
        raise OperationFailed()
|
Gets the ``OsidSession`` associated with the activity lookup service.
:param proxy: a proxy
:type proxy: ``osid.proxy.Proxy``
:return: an ``ActivityLookupSession``
:rtype: ``osid.learning.ActivityLookupSession``
:raise: ``NullArgument`` -- ``proxy`` is ``null``
:raise: ``OperationFailed`` -- unable to complete request
:raise: ``Unimplemented`` -- ``supports_activity_lookup()`` is ``false``
*compliance: optional -- This method must be implemented if ``supports_activity_lookup()`` is ``true``.*
|
def create_call(self, raw_request, **kwargs):
    """create a call object that has endpoints understandable request and response
    instances"""
    # Build the three collaborators in the same order as before: request,
    # response, then router.
    request = self.create_request(raw_request, **kwargs)
    response = self.create_response(**kwargs)
    router = self.create_router(**kwargs)
    return self.call_class(request, response, router)
|
create a call object that has endpoints understandable request and response
instances
|
def _define(self):
    """
    gate cu3(theta,phi,lambda) c, t
    { u1((lambda-phi)/2) t; cx c,t;
      u3(-theta/2,0,-(phi+lambda)/2) t; cx c,t;
      u3(theta/2,phi,0) t;
    }
    """
    theta = self.params[0]
    phi = self.params[1]
    lam = self.params[2]
    q = QuantumRegister(2, "q")
    # Decomposition of the controlled-U3 into single-qubit rotations
    # interleaved with two CNOTs, per the OpenQASM definition above.
    self.definition = [
        (U1Gate((lam - phi) / 2), [q[1]], []),
        (CnotGate(), [q[0], q[1]], []),
        (U3Gate(-theta / 2, 0, -(phi + lam) / 2), [q[1]], []),
        (CnotGate(), [q[0], q[1]], []),
        (U3Gate(theta / 2, phi, 0), [q[1]], []),
    ]
|
gate cu3(theta,phi,lambda) c, t
{ u1((lambda-phi)/2) t; cx c,t;
u3(-theta/2,0,-(phi+lambda)/2) t; cx c,t;
u3(theta/2,phi,0) t;
}
|
def _set_axis_ticks(self, axis, ticks, log=False, rotation=0):
    """
    Allows setting the ticks for a particular axis either with
    a tuple of ticks, a tick locator object, an integer number
    of ticks, a list of tuples containing positions and labels
    or a list of positions. Also supports enabling log ticking
    if an integer number of ticks is supplied and setting a
    rotation for the ticks.
    """
    # NB: branch order matters -- e.g. the emptiness check must precede
    # the int check, and the list-of-lists form must precede the generic
    # list/tuple form.
    if isinstance(ticks, (list, tuple)) and all(isinstance(item, list) for item in ticks):
        # Pair of lists: first is positions, second is labels.
        axis.set_ticks(ticks[0])
        axis.set_ticklabels(ticks[1])
    elif isinstance(ticks, ticker.Locator):
        axis.set_major_locator(ticks)
    elif not ticks and ticks is not None:
        # Empty list/tuple (or zero): disable ticking on this axis.
        axis.set_ticks([])
    elif isinstance(ticks, int):
        # Integer: request that many ticks, optionally on a log scale.
        if log:
            locator = ticker.LogLocator(numticks=ticks, subs=range(1, 10))
        else:
            locator = ticker.MaxNLocator(ticks)
        axis.set_major_locator(locator)
    elif isinstance(ticks, (list, tuple)):
        tick_labels = None
        if all(isinstance(t, tuple) for t in ticks):
            # List of (position, label) pairs.
            ticks, tick_labels = zip(*ticks)
        axis.set_ticks(ticks)
        if tick_labels:
            axis.set_ticklabels(tick_labels)
    for tick in axis.get_ticklabels():
        tick.set_rotation(rotation)
|
Allows setting the ticks for a particular axis either with
a tuple of ticks, a tick locator object, an integer number
of ticks, a list of tuples containing positions and labels
or a list of positions. Also supports enabling log ticking
if an integer number of ticks is supplied and setting a
rotation for the ticks.
|
def _add_var(var, value):
    '''
    Add a new var to the make.conf. If using layman, the source line
    for the layman make.conf needs to be at the very end of the
    config. This ensures that the new var will be above the source
    line.
    '''
    makeconf = _get_makeconf()
    layman = 'source /var/lib/layman/make.conf'
    fullvar = '{0}="{1}"'.format(var, value)
    if not __salt__['file.contains'](makeconf, layman):
        # No layman source line: simply append at the end of the file.
        __salt__['file.append'](makeconf, fullvar)
        return
    # Insert the new variable just before the layman source line.
    # TODO perhaps make this a function in the file module?
    sed_script = r'/{0}/ i\{1}'.format(layman.replace('/', '\\/'), fullvar)
    __salt__['cmd.run'](['sed', '-i', sed_script, makeconf])
|
Add a new var to the make.conf. If using layman, the source line
for the layman make.conf needs to be at the very end of the
config. This ensures that the new var will be above the source
line.
|
def full_text(self, level: int = 1) -> str:
    """
    Returns text of the current section as well as all its subsections.

    :param level: indentation level
    :return: text of the current section as well as all its subsections
    """
    # Render the title according to the configured extraction format.
    if self.wiki.extract_format == ExtractFormat.WIKI:
        heading = self.title
    elif self.wiki.extract_format == ExtractFormat.HTML:
        heading = "<h{}>{}</h{}>".format(level, self.title, level)
    else:
        raise NotImplementedError("Unknown ExtractFormat type")

    pieces = [heading, "\n", self._text]
    if len(self._text) > 0:
        # Blank line separating this section's text from its subsections.
        pieces.append("\n\n")
    # Recurse into subsections, one heading level deeper.
    pieces.extend(sec.full_text(level + 1) for sec in self.sections)
    return "".join(pieces)
|
Returns text of the current section as well as all its subsections.
:param level: indentation level
:return: text of the current section as well as all its subsections
|
def QA_fetch_index_list_adv(collections=DATABASE.index_list):
    '''
    Fetch the index (stock) list.

    :param collections: mongodb collection to read from
    :return: DataFrame with the index list, or None if the collection is empty
    '''
    index_list_items = QA_fetch_index_list(collections)
    if len(index_list_items) != 0:
        return index_list_items
    # Empty result: warn and signal the caller with None.
    print("QA Error QA_fetch_index_list_adv call item for item in collections.find() return 0 item, maybe the DATABASE.index_list is empty!")
    return None
|
'获取股票列表'
:param collections: mongodb 数据库
:return: DataFrame
|
def is_active(self, timeout=2):
    """
    Check whether the backend answers a HEAD request on '/'.

    :param timeout: int
    :return: boolean
    """
    try:
        result = Result(*self.perform_request('HEAD', '/', params={'request_timeout': timeout}))
    except (ConnectionError, TransportError):
        # Unreachable or transport-level failure: treat as inactive.
        return False
    return result.response.status_code == 200
|
:param timeout: int
:return: boolean
|
def step(self, observations):
    """ Sample action from an action space for given state """
    # Forward pass produces the log-histogram over the action space.
    log_histogram = self(observations)
    return {
        'actions': self.q_head.sample(log_histogram),
        'log_histogram': log_histogram,
    }
|
Sample action from an action space for given state
|
def print_square(row_queue, t):
    """
    Prints a row queue as its conceptual square array.

    Each entry of `row_queue` is an (x, y, row) triple where `row` is a
    sparse list of (column, value) pairs; absent cells print as '...'.
    """
    filled_rows = {y: row for _, y, row in row_queue}
    placeholder_row = ', '.join('...' for _ in range(t))
    for y in range(t):
        print('|', end=' ')
        if y in filled_rows:
            cells = dict(filled_rows[y])
            rendered = ', '.join('%3d' % cells[x] if x in cells else '...'
                                 for x in range(t))
            print(rendered, end=' ')
        else:
            print(placeholder_row, end=' ')
        print("|")
|
Prints a row queue as its conceptual square array.
|
def match_regexp(self, value, q, strict=False):
    """Validate that `value` matches the regular expression `q`.

    Returns silently on a match; otherwise reports the failure through
    ``self.shout``.
    """
    # Normalize the candidate to a string (project helper; appears to
    # return None for non-stringifiable values -- TODO confirm).
    value = stringify(value)
    mr = re.compile(q)
    if value is not None:
        if mr.match(value):
            # Match at the start of the string: validation passes.
            return
    # Reached when value is None or the regexp did not match.
    # NOTE(review): `shout` seems to take the strictness flag as its second
    # argument ahead of the format args -- confirm its signature, since the
    # format string itself only has two %r placeholders.
    self.shout('%r not matching the regexp %r', strict, value, q)
|
if value matches a regexp q
|
def populate(self, priority, address, rtr, data):
    """Populate this message's fields from a raw payload.

    :param priority: message priority; must be the low priority
    :param address: module address, stored via ``set_attributes``
    :param rtr: remote-transmission-request flag; must be unset
    :param data: raw payload bytes; indices 0 and 3-6 are consumed below
    :return: None
    """
    assert isinstance(data, bytes)
    self.needs_low_priority(priority)
    self.needs_no_rtr(rtr)
    # NOTE(review): the length check is disabled, yet data[6] is read
    # below -- the payload must be at least 7 bytes long; confirm whether
    # this check should be re-enabled (and with length 7, not 6).
    #self.needs_data(data, 6)
    self.set_attributes(priority, address, rtr)
    self.module_type = data[0]
    # bytes 1-2 are intentionally skipped here -- TODO confirm their meaning
    self.sub_address_1 = data[3]
    self.sub_address_2 = data[4]
    self.sub_address_3 = data[5]
    self.sub_address_4 = data[6]
|
:return: None
|
def rec_edit(self, zone, record_type, record_id, name, content, ttl=1, service_mode=None, priority=None,
             service=None, service_name=None, protocol=None, weight=None, port=None, target=None):
    """
    Edit a DNS record for the given zone.

    :param zone: domain name
    :type zone: str
    :param record_type: Type of DNS record. Valid values are [A/CNAME/MX/TXT/SPF/AAAA/NS/SRV/LOC]
    :type record_type: str
    :param record_id: DNS Record ID. Available by using the rec_load_all call.
    :type record_id: int
    :param name: Name of the DNS record
    :type name: str
    :param content: The content of the DNS record, will depend on the the type of record being added
    :type content: str
    :param ttl: TTL of record in seconds. 1 = Automatic, otherwise, value must in between 120 and 4,294,967,295
        seconds.
    :type ttl: int
    :param service_mode: [applies to A/AAAA/CNAME] Status of CloudFlare Proxy, 1 = orange cloud, 0 = grey cloud.
    :type service_mode: int
    :param priority: [applies to MX/SRV] MX record priority.
    :type priority: int
    :param service: Service for SRV record
    :type service: str
    :param service_name: Service Name for SRV record
    :type service_name: str
    :param protocol: Protocol for SRV record. Values are [_tcp/_udp/_tls].
    :type protocol: str
    :param weight: Weight for SRV record.
    :type weight: int
    :param port: Port for SRV record
    :type port: int
    :param target: Target for SRV record
    :type target: str
    :return:
    :rtype: dict
    """
    params = {
        'a': 'rec_edit',
        'z': zone,
        'type': record_type,
        'id': record_id,
        'name': name,
        'content': content,
        'ttl': ttl
    }
    # Optional attributes are only included when explicitly provided;
    # note the API wire names differ for priority ('prio') and
    # service_name ('srvname').
    optional_params = (
        ('service_mode', service_mode),
        ('prio', priority),
        ('service', service),
        ('srvname', service_name),
        ('protocol', protocol),
        ('weight', weight),
        ('port', port),
        ('target', target),
    )
    for key, val in optional_params:
        if val is not None:
            params[key] = val
    return self._request(params)
|
Edit a DNS record for the given zone.
:param zone: domain name
:type zone: str
:param record_type: Type of DNS record. Valid values are [A/CNAME/MX/TXT/SPF/AAAA/NS/SRV/LOC]
:type record_type: str
:param record_id: DNS Record ID. Available by using the rec_load_all call.
:type record_id: int
:param name: Name of the DNS record
:type name: str
:param content: The content of the DNS record, will depend on the the type of record being added
:type content: str
:param ttl: TTL of record in seconds. 1 = Automatic, otherwise, value must in between 120 and 4,294,967,295
seconds.
:type ttl: int
:param service_mode: [applies to A/AAAA/CNAME] Status of CloudFlare Proxy, 1 = orange cloud, 0 = grey cloud.
:type service_mode: int
:param priority: [applies to MX/SRV] MX record priority.
:type priority: int
:param service: Service for SRV record
:type service: str
:param service_name: Service Name for SRV record
:type service_name: str
:param protocol: Protocol for SRV record. Values are [_tcp/_udp/_tls].
:type protocol: str
:param weight: Weight for SRV record.
:type weight: int
:param port: Port for SRV record
:type port: int
:param target: Target for SRV record
:type target: str
:return:
:rtype: dict
|
def V_horiz_spherical(D, L, a, h, headonly=False):
    r'''Calculates volume of a tank with spherical heads, according to [1]_.

    .. math::
        V_f = A_fL + \frac{\pi a}{6}(3R^2 + a^2),\;\; h = R, |a|\le R

    .. math::
        V_f = A_fL + \frac{\pi a}{3}(3R^2 + a^2),\;\; h = D, |a|\le R

    .. math::
        V_f = A_fL + \pi a h^2\left(1 - \frac{h}{3R}\right),\;\; h = 0,
        \text{ or } |a| = 0, R, -R

    .. math::
        V_f = A_fL + \frac{a}{|a|}\left\{\frac{2r^3}{3}\left[\cos^{-1}
        \frac{R^2 - rw}{R(w-r)} + \cos^{-1}\frac{R^2 + rw}{R(w+r)}
        - \frac{z}{r}\left(2 + \left(\frac{R}{r}\right)^2\right)
        \cos^{-1}\frac{w}{R}\right] - 2\left(wr^2 - \frac{w^3}{3}\right)
        \tan^{-1}\frac{y}{z} + \frac{4wyz}{3}\right\}
        ,\;\; h \ne R, D; a \ne 0, R, -R, |a| \ge 0.01D

    .. math::
        V_f = A_fL + \frac{a}{|a|}\left[2\int_w^R(r^2 - x^2)\tan^{-1}
        \sqrt{\frac{R^2-x^2}{r^2-R^2}}dx - A_f z\right]
        ,\;\; h \ne R, D; a \ne 0, R, -R, |a| < 0.01D

    .. math::
        Af = R^2\cos^{-1}\frac{R-h}{R} - (R-h)\sqrt{2Rh - h^2}

    .. math::
        r = \frac{a^2 + R^2}{2|a|}

    .. math::
        w = R - h

    .. math::
        y = \sqrt{2Rh-h^2}

    .. math::
        z = \sqrt{r^2 - R^2}

    Parameters
    ----------
    D : float
        Diameter of the main cylindrical section, [m]
    L : float
        Length of the main cylindrical section, [m]
    a : float
        Distance the spherical head extends on one side, [m]
    h : float
        Height, as measured up to where the fluid ends, [m]
    headonly : bool, optional
        Function returns only the volume of a single head side if True

    Returns
    -------
    V : float
        Volume [m^3]

    Examples
    --------
    Matching example from [1]_, with inputs in inches and volume in gallons.

    >>> V_horiz_spherical(D=108., L=156., a=42., h=36)/231.
    2303.9615116986183

    References
    ----------
    .. [1] Jones, D. "Calculating Tank Volume." Text. Accessed December 22, 2015.
       http://www.webcalc.com.br/blog/Tank_Volume.PDF'''
    R = D/2.
    # Partially-filled cross-sectional area of the cylindrical section
    Af = R**2*acos((R-h)/R) - (R-h)*(2*R*h - h**2)**0.5
    if a == 0:
        # Flat heads contribute no volume.  This case must be handled
        # before computing r, which divides by abs(a); the previous code
        # raised ZeroDivisionError here even though its a == 0 branch
        # would have evaluated to zero anyway.
        Vf = 0.0
    else:
        # Geometry of the spherical cap: sphere radius r, chord offsets
        r = (a**2 + R**2)/2./abs(a)
        w = R - h
        y = (2*R*h - h**2)**0.5
        z = (r**2 - R**2)**0.5
        if h == R and abs(a) <= R:
            # Half full
            Vf = pi*a/6*(3*R**2 + a**2)
        elif h == D and abs(a) <= R:
            # Completely full
            Vf = pi*a/3*(3*R**2 + a**2)
        elif h == 0 or a == R or a == -R:
            # Empty, or hemispherical heads
            Vf = pi*a*h**2*(1 - h/3./R)
        elif abs(a) >= 0.01*D:
            # General closed-form expression
            Vf = a/abs(a)*(
                2*r**3/3.*(acos((R**2 - r*w)/(R*(w-r))) + acos((R**2+r*w)/(R*(w+r)))
                - z/r*(2+(R/r)**2)*acos(w/R))
                - 2*(w*r**2 - w**3/3)*atan(y/z) + 4*w*y*z/3)
        else:
            # Nearly flat heads: the closed form is ill-conditioned, so
            # integrate numerically instead.
            def V_horiz_spherical_toint(x):
                return (r**2 - x**2)*atan(((R**2 - x**2)/(r**2 - R**2))**0.5)
            from scipy.integrate import quad
            integrated = quad(V_horiz_spherical_toint, w, R)[0]
            Vf = a/abs(a)*(2*integrated - Af*z)
    if headonly:
        # Only one of the two heads
        Vf = Vf/2.
    else:
        # Add the cylindrical section's contribution
        Vf += Af*L
    return Vf
|
r'''Calculates volume of a tank with spherical heads, according to [1]_.
.. math::
V_f = A_fL + \frac{\pi a}{6}(3R^2 + a^2),\;\; h = R, |a|\le R
.. math::
V_f = A_fL + \frac{\pi a}{3}(3R^2 + a^2),\;\; h = D, |a|\le R
.. math::
V_f = A_fL + \pi a h^2\left(1 - \frac{h}{3R}\right),\;\; h = 0,
\text{ or } |a| = 0, R, -R
.. math::
V_f = A_fL + \frac{a}{|a|}\left\{\frac{2r^3}{3}\left[\cos^{-1}
\frac{R^2 - rw}{R(w-r)} + \cos^{-1}\frac{R^2 + rw}{R(w+r)}
- \frac{z}{r}\left(2 + \left(\frac{R}{r}\right)^2\right)
\cos^{-1}\frac{w}{R}\right] - 2\left(wr^2 - \frac{w^3}{3}\right)
\tan^{-1}\frac{y}{z} + \frac{4wyz}{3}\right\}
,\;\; h \ne R, D; a \ne 0, R, -R, |a| \ge 0.01D
.. math::
V_f = A_fL + \frac{a}{|a|}\left[2\int_w^R(r^2 - x^2)\tan^{-1}
\sqrt{\frac{R^2-x^2}{r^2-R^2}}dx - A_f z\right]
,\;\; h \ne R, D; a \ne 0, R, -R, |a| < 0.01D
.. math::
Af = R^2\cos^{-1}\frac{R-h}{R} - (R-h)\sqrt{2Rh - h^2}
.. math::
r = \frac{a^2 + R^2}{2|a|}
.. math::
w = R - h
.. math::
y = \sqrt{2Rh-h^2}
.. math::
z = \sqrt{r^2 - R^2}
Parameters
----------
D : float
Diameter of the main cylindrical section, [m]
L : float
Length of the main cylindrical section, [m]
a : float
Distance the spherical head extends on one side, [m]
h : float
Height, as measured up to where the fluid ends, [m]
headonly : bool, optional
Function returns only the volume of a single head side if True
Returns
-------
V : float
Volume [m^3]
Examples
--------
Matching example from [1]_, with inputs in inches and volume in gallons.
>>> V_horiz_spherical(D=108., L=156., a=42., h=36)/231.
2303.9615116986183
References
----------
.. [1] Jones, D. "Calculating Tank Volume." Text. Accessed December 22, 2015.
http://www.webcalc.com.br/blog/Tank_Volume.PDF
|
def simple_ins_from_obs(obsnames, insfilename='model.output.ins'):
    """
    writes an instruction file that assumes wanting to read the values names in obsnames in order
    one per line from a model output file

    Args:
        obsnames: list of obsnames to read in
        insfilename: filename for INS file (default: model.output.ins)

    Returns:
        writes a file <insfilename> with each observation read off a line
    """
    with open(insfilename, 'w') as ofp:
        # PEST instruction-file header; '~' is the marker delimiter
        ofp.write('pif ~\n')
        # One free-format observation read per output line.  (Previously a
        # list comprehension was used purely for its side effects.)
        for cob in obsnames:
            ofp.write('!{0}!\n'.format(cob))
|
writes an instruction file that assumes wanting to read the values names in obsnames in order
one per line from a model output file
Args:
obsnames: list of obsnames to read in
insfilename: filename for INS file (default: model.output.ins)
Returns:
writes a file <insfilename> with each observation read off a line
|
def build_rank_score_dict(rank_scores):
    """
    Take a list with annotated rank scores for each family and returns a
    dictionary with family_id as key and a list of genetic models as value.

    Args:
        rank_scores : A list on the form ['1:12','2:20']

    Returns:
        scores : A dictionary with family id:s as key and scores as value
        {
            '1':'12',
            '2':'20'
        }
    """
    logger = getLogger(__name__)
    logger.debug("Checking rank scores: {0}".format(rank_scores))
    scores = {}
    for family in rank_scores:
        parts = family.split(':')
        try:
            family_id = parts[0]
            logger.debug("Extracting rank score for family:{0}".format(family_id))
            score = parts[1]
            logger.debug("Score:{0}".format(score))
        except Exception:
            # Entry without a ':'-separated score -> malformed input
            raise SyntaxError("Malformed rank score input")
        scores[family_id] = score
    return scores
|
Take a list with annotated rank scores for each family and returns a
dictionary with family_id as key and a list of genetic models as value.
Args:
rank_scores : A list on the form ['1:12','2:20']
Returns:
scores : A dictionary with family id:s as key and scores as value
{
'1':'12',
'2':'20'
}
|
def _remove(self, removeList, selfValue):
    '''Remove elements from a list by matching the elements in the other list.

    This method only looks inside current instance's value, not recursive.
    There is no need for a recursive one anyway.

    Match by == operation.

    Args:
        removeList (list): The list of matching elements.
        selfValue (list): The list you remove value from. Usually ``self.value``
    '''
    for removeValue in removeList:
        # Strip every element of selfValue equal (==) to removeValue.
        # (A leftover debug print() of each value was removed here.)
        removeEverything(removeValue, selfValue)
|
Remove elements from a list by matching the elements in the other list.
This method only looks inside current instance's value, not recursive.
There is no need for a recursive one anyway.
Match by == operation.
Args:
removeList (list): The list of matching elements.
selfValue (list): The list you remove value from. Usually ``self.value``
|
def write_compounds(self, stream, compounds, properties=None):
    """Write iterable of compounds as YAML object to stream.

    Args:
        stream: File-like object.
        compounds: Iterable of compound entries.
        properties: Set of compound properties to output (or None to output
            all).
    """
    # Delegate to the generic entry writer with the compound converter.
    converter = self.convert_compound_entry
    self._write_entries(stream, compounds, converter, properties)
|
Write iterable of compounds as YAML object to stream.
Args:
stream: File-like object.
compounds: Iterable of compound entries.
properties: Set of compound properties to output (or None to output
all).
|
def parse_bind(bind):
    """Parses a connection string and creates SQL trace metadata.

    ``bind`` may be a ``Connection`` (its engine is used) or an engine-like
    object (presumably SQLAlchemy -- TODO confirm); the engine's repr
    ``Engine(<url>)`` is parsed to recover the connection URL.
    """
    if isinstance(bind, Connection):
        engine = bind.engine
    else:
        engine = bind
    # Recover the URL from the engine's repr, e.g. "Engine(postgres://...)"
    m = re.match(r"Engine\((.*?)\)", str(engine))
    if m is not None:
        u = urlparse(m.group(1))
        # Add Scheme to uses_netloc or // will be missing from url.
        # NOTE(review): this appends on every call, so uses_netloc grows
        # without bound; consider guarding with an 'in' check.
        uses_netloc.append(u.scheme)
        safe_url = ""
        if u.password is None:
            safe_url = u.geturl()
        else:
            # Strip password from URL
            host_info = u.netloc.rpartition('@')[-1]
            parts = u._replace(netloc='{}@{}'.format(u.username, host_info))
            safe_url = parts.geturl()
        sql = {}
        sql['database_type'] = u.scheme
        sql['url'] = safe_url
        if u.username is not None:
            sql['user'] = "{}".format(u.username)
return sql
|
Parses a connection string and creates SQL trace metadata
|
def draw(self):
    '''
    Draws samples from the `true` distribution.

    Returns:
        `np.ndarray` of samples.
    '''
    samples = self.__create_samples()
    if self.__conditional_flag is True:
        # Conditional mode: pair each sample with an independent second
        # draw along axis 1.
        return np.concatenate((samples, self.__create_samples()), axis=1)
    return samples
|
Draws samples from the `true` distribution.
Returns:
`np.ndarray` of samples.
|
def replace_parameter(self, name, value=None):
    """ Replace a query parameter values with a new value. If a new value does not match current
    specifications, then exception is raised

    :param name: parameter name to replace
    :param value: new parameter value. None is for empty (null) value

    :return: None
    """
    spec = None
    if name in self.__specs:
        spec = self.__specs[name]
    # Unknown parameters are only allowed when extra parameters are enabled.
    if self.extra_parameters() is False and spec is None:
        raise ValueError('Extra parameters are forbidden for this WStrictURIQuery object')
    # A null value is only allowed for nullable specs.
    if spec is not None and spec.nullable() is False and value is None:
        raise ValueError('Nullable values is forbidden for parameter "%s"' % name)
    if spec is not None and value is not None:
        regexp = spec.re_obj()
        if regexp is not None and regexp.match(value) is None:
            raise ValueError('Value does not match regular expression')
    # All checks passed -- delegate the actual replacement to the base class.
    WURIQuery.replace_parameter(self, name, value=value)
|
Replace a query parameter values with a new value. If a new value does not match current
specifications, then exception is raised
:param name: parameter name to replace
:param value: new parameter value. None is for empty (null) value
:return: None
|
def run_analysis(self, argv):
    """Run this analysis.

    Builds an effective-area-corrected residual (dirty minus clean)
    intensity model from the input counts and exposure cubes and writes it
    to ``args.outfile``; with ``full_output`` a set of diagnostic maps is
    written alongside it.
    """
    args = self._parser.parse_args(argv)
    # Read the input maps: counts cubes (SKYMAP) and exposure cubes
    # (HPXEXPOSURES) for both the "dirty" and "clean" event selections
    ccube_dirty = HpxMap.create_from_fits(args.ccube_dirty, hdu='SKYMAP')
    bexpcube_dirty = HpxMap.create_from_fits(args.bexpcube_dirty, hdu='HPXEXPOSURES')
    ccube_clean = HpxMap.create_from_fits(args.ccube_clean, hdu='SKYMAP')
    bexpcube_clean = HpxMap.create_from_fits(args.bexpcube_clean, hdu='HPXEXPOSURES')
    # Decide what HEALPix order to work at (default: the dirty counts cube's)
    if args.hpx_order:
        hpx_order = args.hpx_order
    else:
        hpx_order = ccube_dirty.hpx.order
    # Cast all the input maps to a common binning
    cube_dict = ResidualCR._match_cubes(ccube_clean, ccube_dirty,
                                        bexpcube_clean, bexpcube_dirty, hpx_order)
    # Intensity maps (counts / exposure)
    intensity_clean = ResidualCR._compute_intensity(cube_dict['ccube_clean'],
                                                    cube_dict['bexpcube_clean'])
    intensity_dirty = ResidualCR._compute_intensity(cube_dict['ccube_dirty'],
                                                    cube_dict['bexpcube_dirty'])
    # Mean & ratio of intensity maps
    intensity_mean = ResidualCR._compute_mean(intensity_dirty,
                                              intensity_clean)
    intensity_ratio = ResidualCR._compute_ratio(intensity_dirty,
                                                intensity_clean)
    # Selecting the bright pixels for Aeff correction and to mask when filling output map
    bright_pixel_select = ResidualCR._make_bright_pixel_mask(intensity_mean,
                                                             args.select_factor)
    bright_pixel_mask = ResidualCR._make_bright_pixel_mask(intensity_mean,
                                                           args.mask_factor)
    # Compute the Aeff corrections using the brightest pixels
    aeff_corrections = ResidualCR._get_aeff_corrections(intensity_ratio,
                                                        bright_pixel_select)
    # Apply the Aeff corrections and get the intensity residual
    corrected_dirty = ResidualCR._apply_aeff_corrections(intensity_dirty,
                                                         aeff_corrections)
    corrected_ratio = ResidualCR._compute_ratio(corrected_dirty,
                                                intensity_clean)
    intensity_resid = ResidualCR._compute_diff(corrected_dirty,
                                               intensity_clean)
    # Replace the masked pixels with the map mean to avoid features associated with sources
    filled_resid = ResidualCR._fill_masked_intensity_resid(intensity_resid,
                                                           bright_pixel_mask)
    # Smooth the map
    smooth_resid = ResidualCR._smooth_hpx_map(filled_resid,
                                              args.sigma)
    # Convert to a differential map
    out_model = ResidualCR._intergral_to_differential(smooth_resid)
    # Make the ENERGIES HDU
    out_energies = ccube_dirty.hpx.make_energies_hdu()
    # Write the maps
    cubes = dict(SKYMAP=out_model)
    fits_utils.write_maps(None, cubes,
                          args.outfile, energy_hdu=out_energies)
    if args.full_output:
        # Some diagnostics: round-trip the differential model back to an
        # integral map and compare against the smoothed residual, and
        # compare predicted vs residual counts
        check = ResidualCR._differential_to_integral(out_model)
        check_resid = ResidualCR._compute_diff(smooth_resid, check)
        counts_resid =\
            ResidualCR._compute_counts_from_intensity(intensity_resid,
                                                      cube_dict['bexpcube_dirty'])
        pred_counts\
            = ResidualCR._compute_counts_from_model(out_model,
                                                    cube_dict['bexpcube_dirty'])
        pred_resid = ResidualCR._compute_diff(pred_counts, counts_resid)
        out_ebounds = ccube_dirty.hpx.make_energy_bounds_hdu()
        cubes = dict(INTENSITY_CLEAN=intensity_clean,
                     INTENSITY_DIRTY=intensity_dirty,
                     INTENSITY_RATIO=intensity_ratio,
                     CORRECTED_DIRTY=corrected_dirty,
                     CORRECTED_RATIO=corrected_ratio,
                     INTENSITY_RESID=intensity_resid,
                     PIXEL_SELECT=bright_pixel_select,
                     PIXEL_MASK=bright_pixel_mask,
                     FILLED_RESID=filled_resid,
                     SMOOTH_RESID=smooth_resid,
                     CHECK=check,
                     CHECK_RESID=check_resid,
                     COUNTS_RESID=counts_resid,
                     PRED_COUNTS=pred_counts,
                     PRED_RESID=pred_resid)
        fits_utils.write_maps(None, cubes,
                              args.outfile.replace('.fits', '_full.fits'),
                              energy_hdu=out_ebounds)
|
Run this analysis
|
def import_certificate(
        ctx, slot, management_key, pin, cert, password, verify):
    """
    Import a X.509 certificate.

    Write a certificate to one of the slots on the YubiKey.

    \b
    SLOT       PIV slot to import the certificate to.
    CERTIFICATE   File containing the certificate. Use '-' to use stdin.
    """
    controller = ctx.obj['controller']
    _ensure_authenticated(ctx, controller, pin, management_key)
    data = cert.read()

    # Parse the certificate data, prompting (repeatedly) for a decryption
    # password when one is required and not supplied or wrong.
    while True:
        if password is not None:
            password = password.encode()
        try:
            certs = parse_certificates(data, password)
        except (ValueError, TypeError):
            if password is None:
                # Parsing failed without a password: ask for one and retry.
                password = click.prompt(
                    'Enter password to decrypt certificate',
                    default='', hide_input=True,
                    show_default=False,
                    err=True)
                continue
            else:
                # A password was given but did not work: reset and re-prompt.
                password = None
                click.echo('Wrong password.')
            continue
        break

    if len(certs) > 1:
        # If multiple certs, only import leaf.
        # Leaf is the cert with a subject that is not an issuer in the chain.
        leafs = get_leaf_certificates(certs)
        cert_to_import = leafs[0]
    else:
        cert_to_import = certs[0]

    def do_import(retry=True):
        # Perform the import; on a PIN-required failure, verify the PIN
        # once and retry a single time.
        try:
            controller.import_certificate(
                slot, cert_to_import, verify=verify,
                touch_callback=prompt_for_touch)

        except KeypairMismatch:
            ctx.fail('This certificate is not tied to the private key in the '
                     '{} slot.'.format(slot.name))

        except APDUError as e:
            if e.sw == SW.SECURITY_CONDITION_NOT_SATISFIED and retry:
                _verify_pin(ctx, controller, pin)
                do_import(retry=False)
            else:
                raise

    do_import()
|
Import a X.509 certificate.
Write a certificate to one of the slots on the YubiKey.
\b
SLOT PIV slot to import the certificate to.
CERTIFICATE File containing the certificate. Use '-' to use stdin.
|
def get_num_confirmations(tx_hash, coin_symbol='btc', api_key=None):
    '''
    Given a tx_hash, return the number of confirmations that transactions has.

    Answer is going to be from 0 - current_block_height.
    '''
    # Fetch minimal transaction details and pull out the confirmation count.
    details = get_transaction_details(tx_hash=tx_hash, coin_symbol=coin_symbol,
                                      limit=1, api_key=api_key)
    return details.get('confirmations')
|
Given a tx_hash, return the number of confirmations that transactions has.
Answer is going to be from 0 - current_block_height.
|
def _processMsg(self, type, msg):
    """ Process Debug Messages

    Formats `msg` with the configured message template and appends it to a
    date-named log file via the Storage backend.

    :param type: message severity label, upper-cased into the output
    :param msg: message text; may itself contain platform placeholders
    :return: result of Storage.write (see Storage)
    """
    now = datetime.datetime.now()

    # Check If Path not provided; default to this module's directory
    if self.LOG_FILE_PATH == '':
        self.LOG_FILE_PATH = os.path.dirname(os.path.abspath(__file__)) + '/'

    # Build absolute Path of the date-stamped log file
    log_file = self.LOG_FILE_PATH + now.strftime(self.LOG_FILE_FORMAT) + '.log'

    # Add General Vars (first .format pass fills the template fields)
    msg = self.LOG_MESSAGE_FORMAT.format(
        TYPE=type.upper(),
        DATE=now.strftime(self.DATES_FORMAT),
        DATETIME=now.strftime(self.DATETIME_FORMAT),
        MESSAGE=msg,
    )

    # Check if to add platform data
    if self.PLATFORM_DATA:
        # Add Platform Specific Vars via a second .format pass.
        # NOTE(review): this second pass will raise KeyError/IndexError if
        # the user message itself contains stray '{' braces -- confirm
        # messages are expected to be brace-safe.
        msg = msg.format(
            PL_TYPE=platform.machine(),
            PL_NAME=platform.node(),
            PL_PROCESSOR=platform.processor(),
            PL_PY_BUILD_DATE=platform.python_build()[1],
            PL_PY_COMPILER=platform.python_compiler(),
            PL_PY_RELEASE=platform.release(),
            PL_OS=platform.system(),
            PL_TIMEZONE=strftime("%z", gmtime())
        )

    # Create Storage Instance bound to the target log file
    self._STORAGE = Storage(log_file)

    # Write Storage (append the formatted message)
    return self._STORAGE.write(msg)
|
Process Debug Messages
|
def create_cursor(self, name=None):
    """Creates a cursor. Assumes that a connection is established."""
    # `name` is accepted for backend-interface compatibility but unused.
    cur = self.connection.cursor()
    cur.tzinfo_factory = self.tzinfo_factory
    return cur
|
Creates a cursor. Assumes that a connection is established.
|
def on_drag_data_received(self, widget, context, x, y, data, info, time):
    """Receives state_id from LibraryTree and moves the state to the position of the mouse

    :param widget: emitting widget (unused)
    :param context: drag context (unused)
    :param x: Integer: x-position of mouse
    :param y: Integer: y-position of mouse
    :param data: SelectionData: contains state_id
    :param info: target info code (unused)
    :param time: event timestamp (unused)
    """
    state_id_insert = data.get_text()
    parent_m = self.model.selection.get_selected_state()
    # Only container states can receive a dropped child state.
    if not isinstance(parent_m, ContainerStateModel):
        return
    state_v = self.canvas.get_view_for_model(parent_m.states[state_id_insert])
    pos_start = state_v.model.get_meta_data_editor()['rel_pos']
    # Simulate a move gesture from the state's stored position to the drop
    # point so the canvas updates constraints/handles consistently.
    motion = InMotion(state_v, self.view.editor)
    motion.start_move(self.view.editor.get_matrix_i2v(state_v).transform_point(pos_start[0], pos_start[1]))
    motion.move((x, y))
    motion.stop_move()
    # Persist the final position back into the state's meta data.
    state_v.model.set_meta_data_editor('rel_pos', motion.item.position)
    self.canvas.wait_for_update(trigger_update=True)
    self._meta_data_changed(None, state_v.model, 'append_to_last_change', True)
|
Receives state_id from LibraryTree and moves the state to the position of the mouse
:param widget:
:param context:
:param x: Integer: x-position of mouse
:param y: Integer: y-position of mouse
:param data: SelectionData: contains state_id
:param info:
:param time:
|
def close(self):
    """
    Closes this QEMU VM.

    Stops the VM (without an ACPI shutdown) and releases every UDP port
    allocated for adapter NIOs and for local UDP tunnels.
    """
    # Let the base class close first; bail out if it declines.
    if not (yield from super().close()):
        return False

    # Force a hard stop rather than a (slow) ACPI shutdown on close.
    self.acpi_shutdown = False
    yield from self.stop()

    # Release UDP ports held by NIOs attached to the ethernet adapters.
    for adapter in self._ethernet_adapters:
        if adapter is not None:
            for nio in adapter.ports.values():
                if nio and isinstance(nio, NIOUDP):
                    self.manager.port_manager.release_udp_port(nio.lport, self._project)

    # Release both endpoints of every local UDP tunnel, then forget them.
    for udp_tunnel in self._local_udp_tunnels.values():
        self.manager.port_manager.release_udp_port(udp_tunnel[0].lport, self._project)
        self.manager.port_manager.release_udp_port(udp_tunnel[1].lport, self._project)
    self._local_udp_tunnels = {}
|
Closes this QEMU VM.
|
def select_newest_project(dx_project_ids):
    """
    Given a list of DNAnexus project IDs, returns the one that is newest as determined by creation date.

    Args:
        dx_project_ids: `list` of DNAnexus project IDs.

    Returns:
        `str`.
    """
    if len(dx_project_ids) == 1:
        return dx_project_ids[0]
    # Fixes two Python 3 bugs in the previous implementation: zip() returns
    # an iterator with no .sort() (AttributeError), and the old indexing
    # returned the creation timestamp rather than a project ID.
    newest = max(
        (dxpy.DXProject(x) for x in dx_project_ids),
        key=lambda project: project.describe()["created"],
    )
    return newest.get_id()
|
Given a list of DNAnexus project IDs, returns the one that is newest as determined by creation date.
Args:
dx_project_ids: `list` of DNAnexus project IDs.
Returns:
`str`.
|
def activateRandomLocation(self):
    """
    Set the location to a random point.
    """
    random_phase = np.random.random(2)
    self.activePhases = np.array([random_phase])
    if self.anchoringMethod == "discrete":
        # Snap the phase onto the center of a cell.
        self.activePhases = np.floor(
            self.activePhases * self.cellDimensions) / self.cellDimensions
    self._computeActiveCells()
|
Set the location to a random point.
|
def save(self, output_file, overwrite=False):
    """Save the model to disk"""
    # Refuse to clobber an existing file unless explicitly allowed.
    if os.path.exists(output_file) and overwrite is False:
        raise ModelFileExists("The file %s exists already. If you want to overwrite it, use the 'overwrite=True' "
                              "options as 'model.save(\"%s\", overwrite=True)'. " % (output_file, output_file))

    data = self.to_dict_with_types()

    try:
        # Serialize to YAML, then double every newline for readability.
        yaml_representation = my_yaml.dump(data, default_flow_style=False)
        with open(output_file, "w+") as model_file:
            model_file.write(yaml_representation.replace("\n", "\n\n"))
    except IOError:
        raise CannotWriteModel(os.path.dirname(os.path.abspath(output_file)),
                               "Could not write model file %s. Check your permissions to write or the "
                               "report on the free space which follows: " % output_file)
|
Save the model to disk
|
def urlunparse(data):
    """Put a parsed URL back together again. This may result in a
    slightly different, but equivalent URL, if the URL that was parsed
    originally had redundant delimiters, e.g. a ? with an empty query
    (the draft states that these are equivalent)."""
    scheme, netloc, url, params, query, fragment = data
    # Re-attach the path parameters before delegating to urlunsplit.
    if params:
        url = ';'.join((url, params))
    return urlunsplit((scheme, netloc, url, query, fragment))
|
Put a parsed URL back together again. This may result in a
slightly different, but equivalent URL, if the URL that was parsed
originally had redundant delimiters, e.g. a ? with an empty query
(the draft states that these are equivalent).
|
def get_exchange_rates(self, **params):
    """https://developers.coinbase.com/api/v2#exchange-rates"""
    api_response = self._get('v2', 'exchange-rates', params=params)
    return self._make_api_object(api_response, APIObject)
|
https://developers.coinbase.com/api/v2#exchange-rates
|
def edit_custom_examples(program, config):
    """
    Edit custom examples for the given program, creating the file if it does
    not exist.
    """
    # Bail out early when no usable custom directory is configured.
    if (not config.custom_dir) or (not os.path.exists(config.custom_dir)):
        _inform_cannot_edit_no_custom_dir()
        return

    # Resolve aliases before looking for an existing file.
    resolved_program = get_resolved_program(program, config)
    existing_paths = get_file_paths_for_program(
        resolved_program,
        config.custom_dir
    )

    if existing_paths:
        # Edit the first existing file. Handles the base case.
        path_to_edit = existing_paths[0]
    else:
        # No file yet: create one named after the resolved program.
        path_to_edit = os.path.join(config.custom_dir, resolved_program + '.md')

    subprocess.call([config.editor_cmd, path_to_edit])
|
Edit custom examples for the given program, creating the file if it does
not exist.
|
def _match_net(self, net):
"""Match a query for a specific network/list of networks"""
if self.network:
return match_list(self.network, net)
else:
return True
|
Match a query for a specific network/list of networks
|
def _make_scaled_srcmap(self):
    """Make an exposure cube with the same binning as the counts map.

    Scales every map HDU of the gtlike source map by the ratio of the ROI
    binned exposure map to the gtlike exposure map, and writes the result
    to ``self.files['srcmap']``.
    """
    self.logger.info('Computing scaled source map.')
    bexp0 = fits.open(self.files['bexpmap_roi'])
    bexp1 = fits.open(self.config['gtlike']['bexpmap'])
    srcmap = fits.open(self.config['gtlike']['srcmap'])
    # The two exposure maps must share the same binning for an
    # element-wise ratio to be meaningful.
    if bexp0[0].data.shape != bexp1[0].data.shape:
        raise Exception('Wrong shape for input exposure map file.')
    bexp_ratio = bexp0[0].data / bexp1[0].data
    self.logger.info(
        'Min/Med/Max exposure correction: %f %f %f' % (np.min(bexp_ratio),
                                                       np.median(
                                                           bexp_ratio),
                                                       np.max(bexp_ratio)))
    # Scale the source-map HDUs in place; GTI and EBOUNDS are tables,
    # not maps, so they are skipped.
    for hdu in srcmap[1:]:
        if hdu.name == 'GTI':
            continue
        if hdu.name == 'EBOUNDS':
            continue
        hdu.data *= bexp_ratio
    srcmap.writeto(self.files['srcmap'], overwrite=True)
|
Make an exposure cube with the same binning as the counts map.
|
def removeCallback(cls, eventType, func, record=None):
    """
    Removes a callback from the model's event callbacks.

    Only the first callback entry matching both `func` and `record` is
    removed; missing event types are initialized to an empty list.

    :param eventType: <str>
    :param func: <callable>
    :param record: optional record the callback was registered with
    """
    callbacks = cls.callbacks()
    callbacks.setdefault(eventType, [])
    # enumerate() replaces the Python-2-only xrange() used previously.
    for index, (stored_func, stored_record, _) in enumerate(callbacks[eventType]):
        if stored_func == func and stored_record == record:
            del callbacks[eventType][index]
            break
|
Removes a callback from the model's event callbacks.
:param eventType: <str>
:param func: <callable>
|
def fetch(dbconn, tablename, n=1, uuid=None, end=True):
    """
    Returns `n` rows from the table's start or end

    :param dbconn: database connection
    :param tablename: name of the table
    :param n: number of rows to return from the end of the table
    :param uuid: Optional UUID to select from
    :param end: when True rows come from the end of the table (ROWID DESC)
    :return: a list of up to `n` rows (empty if the table does not exist)
    """
    cur = dbconn.cursor()
    order = 'DESC' if end else 'ASC'
    # The table name cannot be a bound parameter, but `uuid` and `n` are now
    # passed via parameter binding instead of string interpolation, so a
    # malicious uuid can no longer inject SQL.
    try:
        if uuid:
            cur.execute("SELECT * FROM '{}' WHERE UUID=? ORDER BY ROWID {} LIMIT ?;".format(tablename, order),
                        (uuid, n))
        else:
            cur.execute("SELECT * FROM '{}' ORDER BY ROWID {} LIMIT ?;".format(tablename, order), (n,))
    except sqlite3.OperationalError as e:
        # Suppress logging of errors generated when no table exists.
        # str(e) replaces the old `e.message` lookup, which does not exist on
        # Python 3 and therefore logged even expected no-such-table errors.
        if 'no such table' not in str(e):
            logger.error(e)
        return []
    return cur.fetchall()
|
Returns `n` rows from the table's start or end
:param dbconn: database connection
:param tablename: name of the table
:param n: number of rows to return from the end of the table
:param uuid: Optional UUID to select from
:return: If n > 1, a list of rows. If n=1, a single row
|
def _get_fields(self, event, pull, message=None):
"""Constructs a dictionary of fields and replacement values based on the
specified event and the status of the pull request.
:arg event: one of ["start", "error", "finish"].
:arg pull: an instance of PullRequest that has details about the current
status of the pull request testing etc.
:arg message: an additional contextual message to add in the __message__ field.
"""
result = pull.fields_general(event)
if message is not None:
result["__message__"] = message
return result
|
Constructs a dictionary of fields and replacement values based on the
specified event and the status of the pull request.
:arg event: one of ["start", "error", "finish"].
:arg pull: an instance of PullRequest that has details about the current
status of the pull request testing etc.
:arg message: an additional contextual message to add in the __message__ field.
|
def find_ent_endurance_tier_price(package, tier_level):
    """Find the price in the given package with the specified tier level

    :param package: The Enterprise (Endurance) product package
    :param tier_level: The endurance tier for which a price is desired
    :return: Returns the price for the given tier, or an error if not found
    """
    for item in package['items']:
        # Only consider items carrying an attribute that matches the tier.
        matches_tier = any(
            int(attribute['value']) == ENDURANCE_TIERS.get(tier_level)
            for attribute in item.get('attributes', []))
        if not matches_tier:
            continue
        price_id = _find_price_id(item['prices'], 'storage_tier_level')
        if price_id:
            return price_id

    raise ValueError("Could not find price for endurance tier level")
|
Find the price in the given package with the specified tier level
:param package: The Enterprise (Endurance) product package
:param tier_level: The endurance tier for which a price is desired
:return: Returns the price for the given tier, or an error if not found
|
def repr2(x):
    """Analogous to repr(), but will suppress 'u' prefix when repr-ing a unicode string."""
    text = repr(x)
    # Drop a leading unicode marker such as u'...' or u"...".
    if text[:2] in ("u'", 'u"'):
        text = text[1:]
    return text
|
Analogous to repr(), but will suppress 'u' prefix when repr-ing a unicode string.
|
def send_to_redshift(
        instance,
        data,
        replace=True,
        batch_size=1000,
        types=None,
        primary_key=(),
        create_boolean=False):
    """
    Send rows to a Redshift table, creating the table when needed and
    tunnelling through SSH when SSH_<instance>_* environment variables exist.

    data = {
        "table_name" : 'name_of_the_redshift_schema' + '.' + 'name_of_the_redshift_table' #Must already exist,
        "columns_name" : [first_column_name,second_column_name,...,last_column_name],
        "rows" : [[first_raw_value,second_raw_value,...,last_raw_value],...]
    }

    :param instance: name used to look up RED_*/SSH_* credentials in the
        environment.
    :param replace: when True, the table is emptied before inserting.
    :param batch_size: number of rows per INSERT statement.
    :param types: optional column types; forces table creation.
    :param primary_key: optional primary key; forces table creation.
    :param create_boolean: force table creation even if it exists.
    :return: 0 on completion.

    NOTE(review): data["rows"] is consumed with pop(), so rows are inserted
    in reverse order and the caller's list is emptied as a side effect —
    confirm callers do not rely on either.
    """
    connection_kwargs = redshift_credentials.credential(instance)
    print("Initiate send_to_redshift...")
    print("Test to know if the table exists...")
    if (not create.existing_test(instance, data["table_name"])) or (types is not None) or (primary_key != ()):
        create_boolean = True
    print("Test to know if the table exists...OK")
    if create_boolean:
        create.create_table(instance, data, primary_key, types)
    # Create an SSH tunnel
    ssh_host = os.environ.get("SSH_%s_HOST" % instance)
    ssh_user = os.environ.get("SSH_%s_USER" % instance)
    ssh_path_private_key = os.environ.get("SSH_%s_PATH_PRIVATE_KEY" % instance)
    if ssh_host:
        tunnel = SSHTunnelForwarder(
            (ssh_host, 22),
            ssh_username=ssh_user,
            ssh_private_key=ssh_path_private_key,
            remote_bind_address=(
                os.environ.get("RED_%s_HOST" % instance), int(os.environ.get("RED_%s_PORT" % instance))),
            local_bind_address=('localhost', 6543), # could be any available port
        )
        # Start the tunnel
        try:
            tunnel.start()
            print("Tunnel opened!")
        except sshtunnel.HandlerSSHTunnelForwarderError:
            pass
        # Point the DB connection at the local end of the tunnel.
        connection_kwargs["host"] = "localhost"
        connection_kwargs["port"] = 6543
    con = psycopg2.connect(**connection_kwargs)
    cursor = con.cursor()
    if replace:
        # NOTE(review): the table name is interpolated directly into the
        # SQL; it must come from trusted configuration, not user input.
        cleaning_request = '''DELETE FROM ''' + data["table_name"] + ''';'''
        print("Cleaning")
        cursor.execute(cleaning_request)
        print("Cleaning Done")
    boolean = True
    index = 0
    total_nb_batchs = len(data["rows"]) // batch_size + 1
    while boolean:
        temp_row = []
        # Take up to batch_size rows off the END of data["rows"].
        for i in range(batch_size):
            if not data["rows"]:
                boolean = False
                continue
            temp_row.append(data["rows"].pop())
        # Flatten the batch into one parameter list for execute().
        final_data = []
        for x in temp_row:
            for y in x:
                final_data.append(y)
        # One "(%s,...,%s)" placeholder group per row in this batch.
        temp_string = ','.join(map(lambda a: '(' + ','.join(map(lambda b: '%s', a)) + ')', tuple(temp_row)))
        inserting_request = '''INSERT INTO ''' + data["table_name"] + ''' (''' + ", ".join(
            data["columns_name"]) + ''') VALUES ''' + temp_string + ''';'''
        if final_data:
            cursor.execute(inserting_request, final_data)
        index = index + 1
        percent = round(index * 100 / total_nb_batchs, 2)
        if percent < 100:
            print("\r %s / %s (%s %%)" % (str(index), total_nb_batchs, str(percent)), end='\r')
        else:
            print("\r %s / %s (%s %%)" % (str(index), total_nb_batchs, str(percent)))
    con.commit()
    cursor.close()
    con.close()
    if ssh_host:
        tunnel.close()
        print("Tunnel closed!")
    print("data sent to redshift")
    return 0
|
data = {
"table_name" : 'name_of_the_redshift_schema' + '.' + 'name_of_the_redshift_table' #Must already exist,
"columns_name" : [first_column_name,second_column_name,...,last_column_name],
"rows" : [[first_raw_value,second_raw_value,...,last_raw_value],...]
}
|
def embed_snippet(views,
                  drop_defaults=True,
                  state=None,
                  indent=2,
                  embed_url=None,
                  requirejs=True,
                  cors=True
                  ):
    """Return a snippet that can be embedded in an HTML file.

    Parameters
    ----------
    {views_attribute}
    {embed_kwargs}

    Returns
    -------
    A unicode string with an HTML snippet containing several `<script>` tags.
    """
    data = embed_data(views, drop_defaults=drop_defaults, state=state)

    # One <script> tag per widget view.
    view_tags = [
        widget_view_template.format(view_spec=escape_script(json.dumps(spec)))
        for spec in data['view_specs']
    ]
    widget_views = u'\n'.join(view_tags)

    # Pick the default embedder matching the loader flavor when none given.
    if embed_url is None:
        embed_url = DEFAULT_EMBED_REQUIREJS_URL if requirejs else DEFAULT_EMBED_SCRIPT_URL

    loader_template = load_requirejs_template if requirejs else load_template
    cors_attribute = ' crossorigin="anonymous"' if cors else ''

    return snippet_template.format(
        load=loader_template.format(embed_url=embed_url, use_cors=cors_attribute),
        json_data=escape_script(json.dumps(data['manager_state'], indent=indent)),
        widget_views=widget_views,
    )
|
Return a snippet that can be embedded in an HTML file.
Parameters
----------
{views_attribute}
{embed_kwargs}
Returns
-------
A unicode string with an HTML snippet containing several `<script>` tags.
|
def private_vlan_mode(self, **kwargs):
    """Set PVLAN mode (promiscuous, host, trunk).

    Args:
        int_type (str): Type of interface. (gigabitethernet,
            tengigabitethernet, etc)
        name (str): Name of interface. (1/0/5, 1/0/10, etc)
        mode (str): The switchport PVLAN mode.
        callback (function): A function executed upon completion of the
            method.  The only parameter passed to `callback` will be the
            ``ElementTree`` `config`.

    Returns:
        Return value of `callback`.

    Raises:
        KeyError: if `int_type`, `name`, or `mode` is not specified.
        ValueError: if `int_type`, `name`, or `mode` is invalid.

    Examples:
        >>> import pynos.device
        >>> switches = ['10.24.39.211', '10.24.39.203']
        >>> auth = ('admin', 'password')
        >>> int_type = 'tengigabitethernet'
        >>> name = '225/0/38'
        >>> for switch in switches:
        ...     conn = (switch, '22')
        ...     with pynos.device.Device(conn=conn, auth=auth) as dev:
        ...         output = dev.interface.enable_switchport(int_type,
        ...         name)
        ...         output = dev.interface.private_vlan_mode(
        ...         int_type=int_type, name=name, mode='trunk_host')
        ...         dev.interface.private_vlan_mode()
        ...         # doctest: +IGNORE_EXCEPTION_DETAIL
        Traceback (most recent call last):
        KeyError
    """
    # Required arguments raise KeyError when missing (documented above).
    int_type = kwargs.pop('int_type').lower()
    name = kwargs.pop('name')
    mode = kwargs.pop('mode').lower()
    callback = kwargs.pop('callback', self._callback)
    # Interface types that support switchport PVLAN configuration.
    int_types = ['gigabitethernet', 'tengigabitethernet',
                 'fortygigabitethernet', 'hundredgigabitethernet',
                 'port_channel']
    valid_modes = ['host', 'promiscuous', 'trunk_host',
                   'trunk_basic', 'trunk_promiscuous']
    if int_type not in int_types:
        raise ValueError("Incorrect int_type value.")
    if mode not in valid_modes:
        raise ValueError('%s must be one of: %s' % (mode, valid_modes))
    if not pynos.utilities.valid_interface(int_type, name):
        raise ValueError('`name` must be in the format of x/y/z for '
                         'physical interfaces or x for port channel.')
    pvlan_args = dict(name=name)
    # Trunk modes live under a nested 'private-vlan trunk' container in the
    # generated bindings, hence the different attribute name pattern.
    if 'trunk' in mode:
        pvlan_mode = getattr(self._interface,
                             'interface_%s_switchport_mode_'
                             'private_vlan_private_vlan_trunk_%s' %
                             (int_type, mode))
    else:
        pvlan_mode = getattr(self._interface,
                             'interface_%s_switchport_mode_'
                             'private_vlan_%s' % (int_type, mode))
    config = pvlan_mode(**pvlan_args)
    return callback(config)
|
Set PVLAN mode (promiscuous, host, trunk).
Args:
int_type (str): Type of interface. (gigabitethernet,
tengigabitethernet, etc)
name (str): Name of interface. (1/0/5, 1/0/10, etc)
mode (str): The switchport PVLAN mode.
callback (function): A function executed upon completion of the
method. The only parameter passed to `callback` will be the
``ElementTree`` `config`.
Returns:
Return value of `callback`.
Raises:
KeyError: if `int_type`, `name`, or `mode` is not specified.
ValueError: if `int_type`, `name`, or `mode` is invalid.
Examples:
>>> import pynos.device
>>> switches = ['10.24.39.211', '10.24.39.203']
>>> auth = ('admin', 'password')
>>> int_type = 'tengigabitethernet'
>>> name = '225/0/38'
>>> for switch in switches:
... conn = (switch, '22')
... with pynos.device.Device(conn=conn, auth=auth) as dev:
... output = dev.interface.enable_switchport(int_type,
... name)
... output = dev.interface.private_vlan_mode(
... int_type=int_type, name=name, mode='trunk_host')
... dev.interface.private_vlan_mode()
... # doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
KeyError
|
def load(self):
    """Load the projects config data from local path

    Returns:
        Dict: project_name -> project_data
    """
    projects = {}
    path = os.path.expanduser(self.path)
    if not os.path.isdir(path):
        return projects
    logger.debug("Load project configs from %s", path)
    for filename in os.listdir(path):
        name, extension = os.path.splitext(filename)
        if extension[1:] != PROJECT_CONFIG_EXTENSION:
            continue
        project_file_path = os.path.join(path, filename)
        try:
            with open(project_file_path) as f:
                # safe_load prevents arbitrary Python object construction
                # from untrusted config files (yaml.load without a Loader
                # is unsafe and deprecated since PyYAML 5.1).
                data = yaml.safe_load(f)
        except (ValueError, yaml.YAMLError):
            # Malformed YAML raises yaml.YAMLError, which the previous
            # `except ValueError` never caught; skip unreadable configs.
            continue
        projects[name] = data
        logger.debug("Project '{}' config read from {}".format(name, project_file_path))
    return projects
|
Load the projects config data from local path
Returns:
Dict: project_name -> project_data
|
def allow_network_access_grading(self):
    """ Return True if the grading container should have access to the network """
    hook_results = self._hook_manager.call_hook('task_network_grading', course=self.get_course(), task=self, default=self._network_grading)
    # Fall back to the task's own setting when no plugin answered the hook.
    return hook_results[0] if hook_results else self._network_grading
|
Return True if the grading container should have access to the network
|
def create_package(self, output=None):
    """
    Ensure that the package can be properly configured,
    and then create it.

    Builds the Lambda deployment zip (or, when ``slim_handler`` is enabled,
    a separate handler zip plus a project tarball) and appends a generated
    ``zappa_settings.py`` module holding the runtime configuration.

    :param output: optional output path for the resulting archive.
    """
    # Create the Lambda zip package (includes project and virtualenvironment)
    # Also define the path the handler file so it can be copied to the zip
    # root for Lambda.
    current_file = os.path.dirname(os.path.abspath(
        inspect.getfile(inspect.currentframe())))
    handler_file = os.sep.join(current_file.split(os.sep)[0:]) + os.sep + 'handler.py'
    # Create the zip file(s)
    if self.stage_config.get('slim_handler', False):
        # Create two zips. One with the application and the other with just the handler.
        # https://github.com/Miserlou/Zappa/issues/510
        self.zip_path = self.zappa.create_lambda_zip(
            prefix=self.lambda_name,
            use_precompiled_packages=self.stage_config.get('use_precompiled_packages', True),
            exclude=self.stage_config.get('exclude', []),
            disable_progress=self.disable_progress,
            archive_format='tarball'
        )
        # Make sure the normal venv is not included in the handler's zip
        # NOTE(review): `exclude` is mutated with append() below; if
        # stage_config.get() returns a shared list, the venv name leaks
        # back into the config — confirm.
        exclude = self.stage_config.get('exclude', [])
        cur_venv = self.zappa.get_current_venv()
        exclude.append(cur_venv.split('/')[-1])
        self.handler_path = self.zappa.create_lambda_zip(
            prefix='handler_{0!s}'.format(self.lambda_name),
            venv=self.zappa.create_handler_venv(),
            handler_file=handler_file,
            slim_handler=True,
            exclude=exclude,
            output=output,
            disable_progress=self.disable_progress
        )
    else:
        # Custom excludes for different versions.
        # Related: https://github.com/kennethreitz/requests/issues/3985
        if sys.version_info[0] < 3:
            # Exclude packages already builtin to the python lambda environment
            # Related: https://github.com/Miserlou/Zappa/issues/556
            exclude = self.stage_config.get(
                'exclude', [
                    "boto3",
                    "dateutil",
                    "botocore",
                    "s3transfer",
                    "six.py",
                    "jmespath",
                    "concurrent"
                ])
        else:
            # This could be python3.6 optimized.
            exclude = self.stage_config.get(
                'exclude', [
                    "boto3",
                    "dateutil",
                    "botocore",
                    "s3transfer",
                    "concurrent"
                ])
        # Create a single zip that has the handler and application
        self.zip_path = self.zappa.create_lambda_zip(
            prefix=self.lambda_name,
            handler_file=handler_file,
            use_precompiled_packages=self.stage_config.get('use_precompiled_packages', True),
            exclude=exclude,
            output=output,
            disable_progress=self.disable_progress
        )
    # Warn if this is too large for Lambda.
    file_stats = os.stat(self.zip_path)
    if file_stats.st_size > 52428800:  # pragma: no cover
        print('\n\nWarning: Application zip package is likely to be too large for AWS Lambda. '
              'Try setting "slim_handler" to true in your Zappa settings file.\n\n')
    # Throw custom settings into the zip that handles requests
    if self.stage_config.get('slim_handler', False):
        handler_zip = self.handler_path
    else:
        handler_zip = self.zip_path
    with zipfile.ZipFile(handler_zip, 'a') as lambda_zip:
        # Build the source text of the generated zappa_settings.py module.
        settings_s = "# Generated by Zappa\n"
        if self.app_function:
            if '.' not in self.app_function: # pragma: no cover
                raise ClickException("Your " + click.style("app_function", fg='red', bold=True) + " value is not a modular path." +
                                     " It needs to be in the format `" + click.style("your_module.your_app_object", bold=True) + "`.")
            app_module, app_function = self.app_function.rsplit('.', 1)
            settings_s = settings_s + "APP_MODULE='{0!s}'\nAPP_FUNCTION='{1!s}'\n".format(app_module, app_function)
        if self.exception_handler:
            settings_s += "EXCEPTION_HANDLER='{0!s}'\n".format(self.exception_handler)
        else:
            settings_s += "EXCEPTION_HANDLER=None\n"
        if self.debug:
            settings_s = settings_s + "DEBUG=True\n"
        else:
            settings_s = settings_s + "DEBUG=False\n"
        settings_s = settings_s + "LOG_LEVEL='{0!s}'\n".format((self.log_level))
        if self.binary_support:
            settings_s = settings_s + "BINARY_SUPPORT=True\n"
        else:
            settings_s = settings_s + "BINARY_SUPPORT=False\n"
        head_map_dict = {}
        head_map_dict.update(dict(self.context_header_mappings))
        settings_s = settings_s + "CONTEXT_HEADER_MAPPINGS={0}\n".format(
            head_map_dict
        )
        # If we're on a domain, we don't need to define the /<<env>> in
        # the WSGI PATH
        if self.domain:
            settings_s = settings_s + "DOMAIN='{0!s}'\n".format((self.domain))
        else:
            settings_s = settings_s + "DOMAIN=None\n"
        if self.base_path:
            settings_s = settings_s + "BASE_PATH='{0!s}'\n".format((self.base_path))
        else:
            settings_s = settings_s + "BASE_PATH=None\n"
        # Pass through remote config bucket and path
        if self.remote_env:
            settings_s = settings_s + "REMOTE_ENV='{0!s}'\n".format(
                self.remote_env
            )
        # DEPRECATED. use remove_env instead
        elif self.remote_env_bucket and self.remote_env_file:
            settings_s = settings_s + "REMOTE_ENV='s3://{0!s}/{1!s}'\n".format(
                self.remote_env_bucket, self.remote_env_file
            )
        # Local envs
        env_dict = {}
        if self.aws_region:
            env_dict['AWS_REGION'] = self.aws_region
        env_dict.update(dict(self.environment_variables))
        # Environment variable keys must be ascii
        # https://github.com/Miserlou/Zappa/issues/604
        # https://github.com/Miserlou/Zappa/issues/998
        try:
            env_dict = dict((k.encode('ascii').decode('ascii'), v) for (k, v) in env_dict.items())
        except Exception:
            raise ValueError("Environment variable keys must be ascii.")
        settings_s = settings_s + "ENVIRONMENT_VARIABLES={0}\n".format(
            env_dict
        )
        # We can be environment-aware
        settings_s = settings_s + "API_STAGE='{0!s}'\n".format((self.api_stage))
        settings_s = settings_s + "PROJECT_NAME='{0!s}'\n".format((self.project_name))
        if self.settings_file:
            settings_s = settings_s + "SETTINGS_FILE='{0!s}'\n".format((self.settings_file))
        else:
            settings_s = settings_s + "SETTINGS_FILE=None\n"
        if self.django_settings:
            settings_s = settings_s + "DJANGO_SETTINGS='{0!s}'\n".format((self.django_settings))
        else:
            settings_s = settings_s + "DJANGO_SETTINGS=None\n"
        # If slim handler, path to project zip
        if self.stage_config.get('slim_handler', False):
            settings_s += "ARCHIVE_PATH='s3://{0!s}/{1!s}_{2!s}_current_project.tar.gz'\n".format(
                self.s3_bucket_name, self.api_stage, self.project_name)
            # since includes are for slim handler add the setting here by joining arbitrary list from zappa_settings file
            # and tell the handler we are the slim_handler
            # https://github.com/Miserlou/Zappa/issues/776
            settings_s += "SLIM_HANDLER=True\n"
            include = self.stage_config.get('include', [])
            if len(include) >= 1:
                settings_s += "INCLUDE=" + str(include) + '\n'
        # AWS Events function mapping
        event_mapping = {}
        events = self.stage_config.get('events', [])
        for event in events:
            arn = event.get('event_source', {}).get('arn')
            function = event.get('function')
            if arn and function:
                event_mapping[arn] = function
        settings_s = settings_s + "AWS_EVENT_MAPPING={0!s}\n".format(event_mapping)
        # Map Lext bot events
        bot_events = self.stage_config.get('bot_events', [])
        bot_events_mapping = {}
        for bot_event in bot_events:
            event_source = bot_event.get('event_source', {})
            intent = event_source.get('intent')
            invocation_source = event_source.get('invocation_source')
            function = bot_event.get('function')
            if intent and invocation_source and function:
                bot_events_mapping[str(intent) + ':' + str(invocation_source)] = function
        settings_s = settings_s + "AWS_BOT_EVENT_MAPPING={0!s}\n".format(bot_events_mapping)
        # Map cognito triggers
        cognito_trigger_mapping = {}
        cognito_config = self.stage_config.get('cognito', {})
        triggers = cognito_config.get('triggers', [])
        for trigger in triggers:
            source = trigger.get('source')
            function = trigger.get('function')
            if source and function:
                cognito_trigger_mapping[source] = function
        settings_s = settings_s + "COGNITO_TRIGGER_MAPPING={0!s}\n".format(cognito_trigger_mapping)
        # Authorizer config
        authorizer_function = self.authorizer.get('function', None)
        if authorizer_function:
            settings_s += "AUTHORIZER_FUNCTION='{0!s}'\n".format(authorizer_function)
        # Copy our Django app into root of our package.
        # It doesn't work otherwise.
        if self.django_settings:
            base = __file__.rsplit(os.sep, 1)[0]
            django_py = ''.join(os.path.join(base, 'ext', 'django_zappa.py'))
            lambda_zip.write(django_py, 'django_zappa_app.py')
        # async response
        async_response_table = self.stage_config.get('async_response_table', '')
        settings_s += "ASYNC_RESPONSE_TABLE='{0!s}'\n".format(async_response_table)
        # Lambda requires a specific chmod
        temp_settings = tempfile.NamedTemporaryFile(delete=False)
        os.chmod(temp_settings.name, 0o644)
        temp_settings.write(bytes(settings_s, "utf-8"))
        temp_settings.close()
        lambda_zip.write(temp_settings.name, 'zappa_settings.py')
        os.unlink(temp_settings.name)
|
Ensure that the package can be properly configured,
and then create it.
|
def export(self, nidm_version, export_dir):
    """
    Create prov entities and activities.
    """
    # In FSL a single thresholding (extent, height) is applied to all
    # contrasts
    # FIXME: Deal with two-tailed inference?
    attributes = (
        (PROV['type'], self.type),
        (PROV['label'], self.label),
        (NIDM_HAS_ALTERNATIVE_HYPOTHESIS, self.tail))
    if self.partial_degree is not None:
        attributes = attributes + (
            (SPM_PARTIAL_CONJUNCTION_DEGREE, self.partial_degree),)
    self.add_attributes(attributes)
|
Create prov entities and activities.
|
def visit_ClassDef(self, node):  # pylint: disable=invalid-name
    """Visit top-level classes.

    Classes subclassing the process runtime's ``Process`` are parsed into a
    :class:`ProcessDescriptor` and collected in ``self.processes``; any
    other class is ignored.
    """
    # Resolve everything as root scope contains everything from the process module.
    for base in node.bases:
        # Cover `from resolwe.process import ...`.
        if isinstance(base, ast.Name) and isinstance(base.ctx, ast.Load):
            candidate = getattr(runtime, base.id, None)
        # Cover `from resolwe import process`.
        elif isinstance(base, ast.Attribute) and isinstance(base.ctx, ast.Load):
            candidate = getattr(runtime, base.attr, None)
        else:
            continue
        # getattr may have returned None (unknown name) or a non-class
        # attribute; passing either straight to issubclass() would raise
        # TypeError, so guard with isinstance(candidate, type).
        if isinstance(candidate, type) and issubclass(candidate, runtime.Process):
            break
    else:
        return
    descriptor = ProcessDescriptor(source=self.source)
    # Available embedded classes.
    embedded_class_fields = {
        runtime.PROCESS_INPUTS_NAME: descriptor.inputs,
        runtime.PROCESS_OUTPUTS_NAME: descriptor.outputs,
    }
    # Parse metadata in class body.
    for item in node.body:
        if isinstance(item, ast.Assign):
            # Possible metadata.
            if (len(item.targets) == 1 and isinstance(item.targets[0], ast.Name)
                    and isinstance(item.targets[0].ctx, ast.Store)
                    and item.targets[0].id in PROCESS_METADATA):
                # Try to get the metadata value.
                value = PROCESS_METADATA[item.targets[0].id].get_value(item.value)
                setattr(descriptor.metadata, item.targets[0].id, value)
        elif (isinstance(item, ast.Expr) and isinstance(item.value, ast.Str)
                and descriptor.metadata.description is None):
            # Possible description string.
            descriptor.metadata.description = item.value.s
        elif isinstance(item, ast.ClassDef) and item.name in embedded_class_fields.keys():
            # Possible input/output declaration.
            self.visit_field_class(item, descriptor, embedded_class_fields[item.name])
    descriptor.validate()
    self.processes.append(descriptor)
|
Visit top-level classes.
|
def get_users_in_project(self, projectname):
    """ Get list of users in project from MAM. """
    ds_project = self.get_project(projectname)
    if ds_project is None:
        message = "Project '%s' does not exist in MAM" % projectname
        logger.error(message)
        raise RuntimeError(message)
    users_field = ds_project["Users"]
    # An empty Users field means the project has no members.
    if users_field == "":
        return []
    return users_field.lower().split(",")
|
Get list of users in project from MAM.
|
def get_dhcp_options(dhcp_options_name=None, dhcp_options_id=None,
                     region=None, key=None, keyid=None, profile=None):
    '''
    Return a dict with the current values of the requested DHCP options set

    CLI Example:

    .. code-block:: bash

        salt myminion boto_vpc.get_dhcp_options 'myfunnydhcpoptionsname'

    .. versionadded:: 2016.3.0
    '''
    if not any((dhcp_options_name, dhcp_options_id)):
        raise SaltInvocationError('At least one of the following must be specified: '
                                  'dhcp_options_name, dhcp_options_id.')

    # Resolve the name to an ID when only a name was given.
    if not dhcp_options_id and dhcp_options_name:
        dhcp_options_id = _get_resource_id('dhcp_options', dhcp_options_name,
                                           region=region, key=key,
                                           keyid=keyid, profile=profile)
    if not dhcp_options_id:
        return {'dhcp_options': {}}

    try:
        conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
        r = conn.get_all_dhcp_options(dhcp_options_ids=[dhcp_options_id])
    except BotoServerError as e:
        return {'error': __utils__['boto.get_error'](e)}

    if not r:
        return {'dhcp_options': None}

    option_keys = ('domain_name', 'domain_name_servers', 'ntp_servers',
                   'netbios_name_servers', 'netbios_node_type')
    return {'dhcp_options': {k: r[0].options.get(k) for k in option_keys}}
|
Return a dict with the current values of the requested DHCP options set
CLI Example:
.. code-block:: bash
salt myminion boto_vpc.get_dhcp_options 'myfunnydhcpoptionsname'
.. versionadded:: 2016.3.0
|
def present_active_subjunctive(self):
    """
    Strong verbs
    I
    >>> verb = StrongOldNorseVerb()
    >>> verb.set_canonic_forms(["líta", "lítr", "leit", "litu", "litinn"])
    >>> verb.present_active_subjunctive()
    ['líta', 'lítir', 'líti', 'lítim', 'lítið', 'líti']

    II
    >>> verb = StrongOldNorseVerb()
    >>> verb.set_canonic_forms(["bjóða", "býðr", "bauð", "buðu", "boðinn"])
    >>> verb.present_active_subjunctive()
    ['bjóða', 'bjóðir', 'bjóði', 'bjóðim', 'bjóðið', 'bjóði']

    III
    >>> verb = StrongOldNorseVerb()
    >>> verb.set_canonic_forms(["verða", "verðr", "varð", "urðu", "orðinn"])
    >>> verb.present_active_subjunctive()
    ['verða', 'verðir', 'verði', 'verðim', 'verðið', 'verði']

    IV
    >>> verb = StrongOldNorseVerb()
    >>> verb.set_canonic_forms(["bera", "berr", "bar", "báru", "borinn"])
    >>> verb.present_active_subjunctive()
    ['bera', 'berir', 'beri', 'berim', 'berið', 'beri']

    V
    >>> verb = StrongOldNorseVerb()
    >>> verb.set_canonic_forms(["gefa", "gefr", "gaf", "gáfu", "gefinn"])
    >>> verb.present_active_subjunctive()
    ['gefa', 'gefir', 'gefi', 'gefim', 'gefið', 'gefi']

    VI
    >>> verb = StrongOldNorseVerb()
    >>> verb.set_canonic_forms(["fara", "ferr", "fór", "fóru", "farinn"])
    >>> verb.present_active_subjunctive()
    ['fara', 'farir', 'fari', 'farim', 'farið', 'fari']

    VII
    >>> verb = StrongOldNorseVerb()
    >>> verb.set_canonic_forms(["ráða", "ræðr", "réð", "réðu", "ráðinn"])
    >>> verb.present_active_subjunctive()
    ['ráða', 'ráðir', 'ráði', 'ráðim', 'ráðið', 'ráði']

    >>> verb = StrongOldNorseVerb()
    >>> verb.set_canonic_forms(["vera", "a", "a", "a", "a"])
    >>> verb.present_active_subjunctive()
    ['sé', 'sér', 'sé', 'sém', 'séð', 'sé']

    >>> verb = StrongOldNorseVerb()
    >>> verb.set_canonic_forms(["sjá", "a", "a", "a", "a"])
    >>> verb.present_active_subjunctive()
    ['sjá', 'sér', 'sé', 'sém', 'séð', 'sé']

    :return: the six present active subjunctive forms
    """
    # "vera" and "sjá" are fully irregular; return their fixed paradigms.
    if self.sng == "vera":
        return ["sé", "sér", "sé", "sém", "séð", "sé"]
    if self.sng == "sjá":
        return ["sjá", "sér", "sé", "sém", "séð", "sé"]

    # Drop the final -a of the infinitive to obtain the subjunctive stem.
    stem = self.sng[:-1] if self.sng[-1] == "a" else self.sng
    forms = [stem + "a"]
    # A stem-final -j disappears before the remaining endings.
    stem = stem[:-1] if stem[-1] == "j" else stem
    for ending in ("ir", "i", "im", "ið", "i"):
        forms.append(stem + ending)
    return forms
|
Strong verbs
I
>>> verb = StrongOldNorseVerb()
>>> verb.set_canonic_forms(["líta", "lítr", "leit", "litu", "litinn"])
>>> verb.present_active_subjunctive()
['líta', 'lítir', 'líti', 'lítim', 'lítið', 'líti']
II
>>> verb = StrongOldNorseVerb()
>>> verb.set_canonic_forms(["bjóða", "býðr", "bauð", "buðu", "boðinn"])
>>> verb.present_active_subjunctive()
['bjóða', 'bjóðir', 'bjóði', 'bjóðim', 'bjóðið', 'bjóði']
III
>>> verb = StrongOldNorseVerb()
>>> verb.set_canonic_forms(["verða", "verðr", "varð", "urðu", "orðinn"])
>>> verb.present_active_subjunctive()
['verða', 'verðir', 'verði', 'verðim', 'verðið', 'verði']
IV
>>> verb = StrongOldNorseVerb()
>>> verb.set_canonic_forms(["bera", "berr", "bar", "báru", "borinn"])
>>> verb.present_active_subjunctive()
['bera', 'berir', 'beri', 'berim', 'berið', 'beri']
V
>>> verb = StrongOldNorseVerb()
>>> verb.set_canonic_forms(["gefa", "gefr", "gaf", "gáfu", "gefinn"])
>>> verb.present_active_subjunctive()
['gefa', 'gefir', 'gefi', 'gefim', 'gefið', 'gefi']
VI
>>> verb = StrongOldNorseVerb()
>>> verb.set_canonic_forms(["fara", "ferr", "fór", "fóru", "farinn"])
>>> verb.present_active_subjunctive()
['fara', 'farir', 'fari', 'farim', 'farið', 'fari']
VII
>>> verb = StrongOldNorseVerb()
>>> verb.set_canonic_forms(["ráða", "ræðr", "réð", "réðu", "ráðinn"])
>>> verb.present_active_subjunctive()
['ráða', 'ráðir', 'ráði', 'ráðim', 'ráðið', 'ráði']
>>> verb = StrongOldNorseVerb()
>>> verb.set_canonic_forms(["vera", "a", "a", "a", "a"])
>>> verb.present_active_subjunctive()
['sé', 'sér', 'sé', 'sém', 'séð', 'sé']
>>> verb = StrongOldNorseVerb()
>>> verb.set_canonic_forms(["sjá", "a", "a", "a", "a"])
>>> verb.present_active_subjunctive()
['sjá', 'sér', 'sé', 'sém', 'séð', 'sé']
:return:
|
def get_spaces(self, space_key=None, expand=None, start=None, limit=None, callback=None):
        """
        Fetch information about the spaces present in the Confluence instance.

        :param space_key (string): OPTIONAL: A list of space keys to filter on. Default: None.
        :param expand (string): OPTIONAL: A comma separated list of properties to expand on
            the spaces. Default: empty.
        :param start (int): OPTIONAL: The start point of the collection to return. Default: 0.
        :param limit (int): OPTIONAL: Maximum number of spaces to return; this may be capped
            by fixed system limits. Default: 25.
        :param callback: OPTIONAL: The callback to execute on the resulting data, before the
            method returns. Default: None (no callback, raw data returned).
        :return: The JSON data returned from the space endpoint, or the results of the
            callback. Will raise requests.HTTPError on bad input, potentially.
        """
        query = {}
        # Only include string filters when they are truthy (matches server defaults).
        if space_key:
            query["spaceKey"] = space_key
        if expand:
            query["expand"] = expand
        # Paging parameters are coerced to int; 0 is a legitimate value.
        for name, value in (("start", start), ("limit", limit)):
            if value is not None:
                query[name] = int(value)
        return self._service_get_request("rest/api/space", params=query,
                                         callback=callback)
|
Returns information about the spaces present in the Confluence instance.
:param space_key (string): OPTIONAL: A list of space keys to filter on. Default: None.
:param expand (string): OPTIONAL: A comma separated list of properties to expand on the spaces.
Default: Empty
:param start (int): OPTIONAL: The start point of the collection to return. Default: 0.
:param limit (int): OPTIONAL: A limit of the number of spaces to return, this could be restricted by fixed
system limits. Default: 25.
:param callback: OPTIONAL: The callback to execute on the resulting data, before the method returns.
Default: None (no callback, raw data returned).
:return: The JSON data returned from the space endpoint,
or the results of the callback. Will raise requests.HTTPError on bad input, potentially.
|
def ext_pillar(hyper_id, pillar, name, key):
    '''
    Accept the key for the VM on the hyper, if authorized.

    Mutates ``pillar`` in place (adds a ``virtkey`` entry mapping the VM
    name to the acceptance result) and returns an empty dict, contributing
    no additional pillar data itself.
    '''
    virt_key = salt.utils.virt.VirtKey(hyper_id, name, __opts__)
    # Record whether the key was accepted, keyed by the VM name.
    pillar['virtkey'] = {name: virt_key.accept(key)}
    return {}
|
Accept the key for the VM on the hyper, if authorized.
|
def default_logging(grab_log=None,  # '/tmp/grab.log',
                    network_log=None,  # '/tmp/grab.network.log',
                    level=logging.DEBUG, mode='a',
                    propagate_network_logger=False):
    """
    Customize logging output to display all log messages
    except grab network logs.

    Grab network logs (and optionally general grab logs) are redirected
    into files when paths are given.
    """
    logging.basicConfig(level=level)

    def _to_file(logger, path):
        # Route the logger's records into the given file and pin its level.
        logger.addHandler(logging.FileHandler(path, mode))
        logger.setLevel(level)

    network_logger = logging.getLogger('grab.network')
    network_logger.propagate = propagate_network_logger
    if network_log:
        _to_file(network_logger, network_log)

    grab_logger = logging.getLogger('grab')
    if grab_log:
        _to_file(grab_logger, grab_log)
|
Customize logging output to display all log messages
except grab network logs.
Redirect grab network logs into file.
|
def output_xml(self, text):
        """
        Output results in XML format.

        :param text: raw JSON response (string) from the measurement API;
            it is parsed here and translated into pretty-printed XML.
        """
        # Create the main document nodes
        document = Element('results')
        comment = Comment('Generated by TrueSight Pulse measurement-get CLI')
        document.append(comment)
        aggregates = SubElement(document, 'aggregates')
        aggregate = SubElement(aggregates, 'aggregate')
        measurements = SubElement(aggregate, 'measurements')
        # Parse the JSON result so we can translate to XML
        payload = json.loads(text)
        # Currently only supports a single metric; if we move to the batch
        # API then we can handle multiple.
        metric_name = self._metric_name
        # One row per timestamp; each row carries 1 or more source/value pairs.
        for r in payload['result']['aggregates']['key']:
            timestamp = self._format_timestamp(r[0][0])
            for s in r[1]:
                # Each timestamp/metric/source/value tuple becomes a <measure> tag.
                measure_node = SubElement(measurements, 'measure')
                source = s[0]
                value = str(s[1])
                ts_node = SubElement(measure_node, 'timestamp')
                ts_node.text = str(timestamp)
                metric_node = SubElement(measure_node, 'metric')
                metric_node.text = metric_name
                metric_node = SubElement(measure_node, 'aggregate')
                metric_node.text = self.aggregate
                source_node = SubElement(measure_node, 'source')
                source_node.text = source
                value_node = SubElement(measure_node, 'value')
                value_node.text = value
        # Round-trip through minidom purely to get indented (pretty) output.
        rough_string = ElementTree.tostring(document, 'utf-8')
        reparse = minidom.parseString(rough_string)
        output = reparse.toprettyxml(indent="    ")
        print(self.colorize_xml(output))
|
Output results in XML format
|
def make_generic_c_patterns(keywords, builtins,
                            instance=None, define=None, comment=None):
    "Strongly inspired from idlelib.ColorDelegator.make_pat"
    # NOTE: `any` here is the project's named-group regex helper
    # (any(name, alternatives)), not the builtin -- it takes two arguments.
    kw = r"\b" + any("keyword", keywords.split()) + r"\b"
    builtin = r"\b" + any("builtin", builtins.split() + C_TYPES.split()) + r"\b"
    if comment is None:
        comment = any("comment", [r"//[^\n]*", r"\/\*(.*?)\*\/"])
    comment_start = any("comment_start", [r"\/\*"])
    comment_end = any("comment_end", [r"\*\/"])
    if instance is None:
        instance = any("instance", [r"\bthis\b"])
    # Decimal, hex and floating-point literals (optional sign and L suffix).
    number_alternatives = [
        r"\b[+-]?[0-9]+[lL]?\b",
        r"\b[+-]?0[xX][0-9A-Fa-f]+[lL]?\b",
        r"\b[+-]?[0-9]+(?:\.[0-9]+)?(?:[eE][+-]?[0-9]+)?\b",
    ]
    number = any("number", number_alternatives)
    # Single- and double-quoted strings, tolerating an unterminated quote.
    sqstring = r"(\b[rRuU])?'[^'\\\n]*(\\.[^'\\\n]*)*'?"
    dqstring = r'(\b[rRuU])?"[^"\\\n]*(\\.[^"\\\n]*)*"?'
    string = any("string", [sqstring, dqstring])
    if define is None:
        define = any("define", [r"#[^\n]*"])
    parts = [instance, kw, comment, string, number,
             comment_start, comment_end, builtin,
             define, any("SYNC", [r"\n"])]
    return "|".join(parts)
|
Strongly inspired from idlelib.ColorDelegator.make_pat
|
def memberness(context):
    '''The likelihood that the context is a "member".'''
    # No context at all: fall through to the default score.
    if not context:
        return 3
    texts = context.xpath('.//*[local-name()="explicitMember"]/text()').extract()
    text = str(texts).lower()
    # Multiple explicitMember texts, or a country mention, score 2.
    if len(texts) > 1 or 'country' in text:
        return 2
    if 'member' not in text:
        return 0
    if 'successor' in text:
        # 'SuccessorMember' is a rare case that shouldn't be treated as member
        return 1
    if 'parent' in text:
        return 2
    return 3
|
The likelihood that the context is a "member".
|
def _load_model(self):
        """
        Loads the peg and the hole models.

        Builds the MuJoCo world from an empty arena plus the robot, then
        attaches the hole object to the robot's left hand and the cylinder
        (peg) to its right hand.
        """
        super()._load_model()
        self.mujoco_robot.set_base_xpos([0, 0, 0])
        # Add arena and robot
        self.model = MujocoWorldBase()
        self.arena = EmptyArena()
        if self.use_indicator_object:
            self.arena.add_pos_indicator()
        self.model.merge(self.arena)
        self.model.merge(self.mujoco_robot)
        # Load hole object
        self.hole_obj = self.hole.get_collision(name="hole", site=True)
        # Rotate and offset the hole relative to the left-hand body.
        self.hole_obj.set("quat", "0 0 0.707 0.707")
        self.hole_obj.set("pos", "0.11 0 0.18")
        self.model.merge_asset(self.hole)
        self.model.worldbody.find(".//body[@name='left_hand']").append(self.hole_obj)
        # Load cylinder object (offset along the right-hand body's z axis)
        self.cyl_obj = self.cylinder.get_collision(name="cylinder", site=True)
        self.cyl_obj.set("pos", "0 0 0.15")
        self.model.merge_asset(self.cylinder)
        self.model.worldbody.find(".//body[@name='right_hand']").append(self.cyl_obj)
        # Color the cylinder geom green.
        self.model.worldbody.find(".//geom[@name='cylinder']").set("rgba", "0 1 0 1")
|
Loads the peg and the hole models.
|
def set_(device, **kwargs):
    '''
    Calls out to setquota, for a specific user or group

    CLI Example:

    .. code-block:: bash

        salt '*' quota.set /media/data user=larry block-soft-limit=1048576
        salt '*' quota.set /media/data group=painters file-hard-limit=1000
    '''
    # Template of zeroed limits used when the target has no existing quota.
    empty = {'block-soft-limit': 0, 'block-hard-limit': 0,
             'file-soft-limit': 0, 'file-hard-limit': 0}
    current = None
    cmd = 'setquota'
    if 'user' in kwargs:
        cmd += ' -u {0} '.format(kwargs['user'])
        parsed = _parse_quota(device, '-u')
        # NOTE(review): membership is tested against `parsed` but the value
        # is read from parsed['Users'] -- confirm _parse_quota's return
        # shape; this looks like it should test `in parsed.get('Users', {})`.
        if kwargs['user'] in parsed:
            current = parsed['Users'][kwargs['user']]
        else:
            current = empty
        ret = 'User: {0}'.format(kwargs['user'])
    if 'group' in kwargs:
        if 'user' in kwargs:
            raise SaltInvocationError(
                'Please specify a user or group, not both.'
            )
        cmd += ' -g {0} '.format(kwargs['group'])
        parsed = _parse_quota(device, '-g')
        # NOTE(review): same membership/lookup mismatch as above, for Groups.
        if kwargs['group'] in parsed:
            current = parsed['Groups'][kwargs['group']]
        else:
            current = empty
        ret = 'Group: {0}'.format(kwargs['group'])
    # current stays None only when neither user nor group was supplied.
    if not current:
        raise CommandExecutionError('A valid user or group was not found')
    # Overlay any limits supplied by the caller onto the current values.
    for limit in ('block-soft-limit', 'block-hard-limit',
                  'file-soft-limit', 'file-hard-limit'):
        if limit in kwargs:
            current[limit] = kwargs[limit]
    cmd += '{0} {1} {2} {3} {4}'.format(current['block-soft-limit'],
                                        current['block-hard-limit'],
                                        current['file-soft-limit'],
                                        current['file-hard-limit'],
                                        device)
    result = __salt__['cmd.run_all'](cmd, python_shell=False)
    if result['retcode'] != 0:
        raise CommandExecutionError(
            'Unable to set desired quota. Error follows: \n{0}'
            .format(result['stderr'])
        )
    return {ret: current}
|
Calls out to setquota, for a specific user or group
CLI Example:
.. code-block:: bash
salt '*' quota.set /media/data user=larry block-soft-limit=1048576
salt '*' quota.set /media/data group=painters file-hard-limit=1000
|
def _decode_argv(self, argv, enc=None):
        """Decode each element of ``argv`` that is still bytes, using
        ``enc`` (falling back on ``DEFAULT_ENCODING`` when not given).

        Elements that are already unicode are passed through untouched.
        """
        encoding = DEFAULT_ENCODING if enc is None else enc
        # Only decode entries that are not already decoded.
        return [arg if isinstance(arg, unicode) else arg.decode(encoding)
                for arg in argv]
|
decode argv if bytes, using stin.encoding, falling back on default enc
|
def run(self):
    """Main thread for processing messages.

    Pulls messages off the input queue until a ``None`` sentinel arrives,
    dispatching each to ``HandleMessage``.  Per-message failures are
    reported back as GENERIC_ERROR status replies; if the loop ever exits,
    the whole process is killed so it can be restarted externally.
    """
    self.OnStartup()
    try:
      while True:
        message = self._in_queue.get()
        # A message of None is our terminal message.
        if message is None:
          break
        try:
          self.HandleMessage(message)
        # Catch any errors and keep going here
        except Exception as e:  # pylint: disable=broad-except
          logging.warning("%s", e)
          # Report the failure back as a status reply for this request.
          self.SendReply(
              rdf_flows.GrrStatus(
                  status=rdf_flows.GrrStatus.ReturnedStatus.GENERIC_ERROR,
                  error_message=utils.SmartUnicode(e)),
              request_id=message.request_id,
              response_id=1,
              session_id=message.session_id,
              task_id=message.task_id,
              message_type=rdf_flows.GrrMessage.Type.STATUS)
          if flags.FLAGS.pdb_post_mortem:
            pdb.post_mortem()
    except Exception as e:  # pylint: disable=broad-except
      logging.error("Exception outside of the processing loop: %r", e)
    finally:
      # There's no point in running the client if it's broken out of the
      # processing loop and it should be restarted shortly anyway.
      logging.fatal("The client has broken out of its processing loop.")
      # The binary (Python threading library, perhaps) has proven in tests to be
      # very persistent to termination calls, so we kill it with fire.
      os.kill(os.getpid(), signal.SIGKILL)
Main thread for processing messages.
|
def ExecuteCmd(cmd, quiet=False):
    """ Run a command in a shell.

    When ``quiet`` is true, both stdout and stderr are discarded.
    Returns the command's exit status.

    NOTE: ``cmd`` is executed through the shell (shell=True); callers must
    not pass untrusted input.
    """
    if quiet:
        with open(os.devnull, "w") as devnull:
            return subprocess.call(cmd, shell=True, stdout=devnull, stderr=devnull)
    return subprocess.call(cmd, shell=True)
|
Run a command in a shell.
|
def tar_add_bytes(tf, filename, bytestring):
    """ Add an in-memory file to a tar archive

    Args:
        tf (tarfile.TarFile): tarfile to add the file to
        filename (str): path within the tar file
        bytestring (bytes or str): file contents. Must be :class:`bytes` or
            ascii-encodable :class:`str`
    """
    # Encode anything that is not already bytes (ascii only, by contract).
    payload = bytestring if isinstance(bytestring, bytes) else bytestring.encode('ascii')
    member = tarfile.TarInfo(filename)
    member.size = len(payload)
    tf.addfile(member, io.BytesIO(payload))
|
Add a file to a tar archive
Args:
tf (tarfile.TarFile): tarfile to add the file to
filename (str): path within the tar file
bytestring (bytes or str): file contents. Must be :class:`bytes` or
ascii-encodable :class:`str`
|
def perm_by_group_and_perm_name(
        cls, resource_id, group_id, perm_name, db_session=None
    ):
        """
        Fetch the single GroupResourcePermission matching a group,
        permission name and resource (or None if absent).

        :param resource_id:
        :param group_id:
        :param perm_name:
        :param db_session:
        :return:
        """
        db_session = get_db_session(db_session)
        model = cls.models_proxy.GroupResourcePermission
        query = (
            db_session.query(model)
            .filter(model.group_id == group_id)
            .filter(model.perm_name == perm_name)
            .filter(model.resource_id == resource_id)
        )
        return query.first()
|
fetch permissions by group and permission name
:param resource_id:
:param group_id:
:param perm_name:
:param db_session:
:return:
|
def add_user_to_user_groups(self, id, **kwargs):  # noqa: E501
        """Adds specific user groups to the user  # noqa: E501

        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True

        >>> thread = api.add_user_to_user_groups(id, async_req=True)
        >>> result = thread.get()

        :param async_req bool
        :param str id: (required)
        :param list[str] body: The list of user groups that should be added to the user
        :return: UserModel
                 If the method is called asynchronously,
                 returns the request thread.
        """
        # Both the sync and async paths delegate unchanged to the
        # *_with_http_info variant, which already returns either the
        # response data or the request thread.
        kwargs['_return_http_data_only'] = True
        return self.add_user_to_user_groups_with_http_info(id, **kwargs)  # noqa: E501
|
Adds specific user groups to the user # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.add_user_to_user_groups(id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str id: (required)
:param list[str] body: The list of user groups that should be added to the user
:return: UserModel
If the method is called asynchronously,
returns the request thread.
|
def getfile2(url, auth=None, outdir=None):
    """Fetch a file over HTTP(S) using requests, streaming it to disk.

    Works with https authentication; the output filename is taken from the
    last path component of ``url`` (optionally placed under ``outdir``).
    """
    import requests
    print("Retrieving: %s" % url)
    out_name = os.path.split(url)[-1]
    if outdir is not None:
        out_name = os.path.join(outdir, out_name)
    # Stream the response so large files never sit fully in memory.
    if auth is not None:
        response = requests.get(url, stream=True, auth=auth)
    else:
        response = requests.get(url, stream=True)
    chunk_size = 1000000
    with open(out_name, 'wb') as fd:
        for chunk in response.iter_content(chunk_size):
            fd.write(chunk)
|
Function to fetch files using requests
Works with https authentication
|
def get_resource_url(cls, resource, base_url):
        """
        Construct the collection URL for talking to this resource.

        i.e.:

        http://myapi.com/api/resource

        Note that this is NOT the method for calling individual instances i.e.

        http://myapi.com/api/resource/1

        Args:
            resource: The resource class instance
            base_url: The Base URL of this API service.
        returns:
            resource_url: The URL for this resource
        """
        name = resource.Meta.resource_name
        if not name:
            # No explicit resource_name: fall back to the pluralised,
            # lowercased resource name.
            engine = inflect.engine()
            name = engine.plural(resource.Meta.name.lower())
        return cls._parse_url_and_validate('{}/{}'.format(base_url, name))
|
Construct the URL for talking to this resource.
i.e.:
http://myapi.com/api/resource
Note that this is NOT the method for calling individual instances i.e.
http://myapi.com/api/resource/1
Args:
resource: The resource class instance
base_url: The Base URL of this API service.
returns:
resource_url: The URL for this resource
|
def save_loop(filename, framerate=30, time=3.0, axis=np.array([0.,0.,1.]), clf=True, **kwargs):
    """Off-screen save a GIF of one rotation about the scene.

    Parameters
    ----------
    filename : str
        The filename in which to save the output image (should have extension .gif)
    framerate : int
        The frame rate at which to animate motion.
    time : float
        The number of seconds for one rotation.
    axis : (3,) float or None
        If present, the animation will rotate about the given axis in world coordinates.
        Otherwise, the animation will rotate in azimuth.
    clf : bool
        If true, the Visualizer is cleared after rendering the figure.
    kwargs : dict
        Other keyword arguments for the SceneViewer instance.
        NOTE(review): kwargs is currently never forwarded to
        Visualizer3D.save -- confirm whether it should be passed through.
    """
    # NOTE(review): `time` shadows the stdlib module name, and
    # framerate * time is a float when `time` is -- confirm that
    # Visualizer3D.save accepts a non-integer n_frames.
    n_frames = framerate * time
    # Azimuth step per frame: one full revolution spread over all frames.
    az = 2.0 * np.pi / n_frames
    Visualizer3D.save(filename, n_frames=n_frames, axis=axis, clf=clf,
                      animate_rate=framerate, animate_az=az)
    if clf:
        Visualizer3D.clf()
|
Off-screen save a GIF of one rotation about the scene.
Parameters
----------
filename : str
The filename in which to save the output image (should have extension .gif)
framerate : int
The frame rate at which to animate motion.
time : float
The number of seconds for one rotation.
axis : (3,) float or None
If present, the animation will rotate about the given axis in world coordinates.
Otherwise, the animation will rotate in azimuth.
clf : bool
If true, the Visualizer is cleared after rendering the figure.
kwargs : dict
Other keyword arguments for the SceneViewer instance.
|
def _fire_event(self, event_name, *event_args, **event_kwargs):
"""Execute all the handlers associated with given event.
This method executes all handlers associated with the event
`event_name`. Optional positional and keyword arguments can be used to
pass arguments to **all** handlers added with this event. These
aguments updates arguments passed using :meth:`~ignite.engine.Engine.add_event_handler`.
Args:
event_name: event for which the handlers should be executed. Valid
events are from :class:`~ignite.engine.Events` or any `event_name` added by
:meth:`~ignite.engine.Engine.register_events`.
*event_args: optional args to be passed to all handlers.
**event_kwargs: optional keyword args to be passed to all handlers.
"""
if event_name in self._allowed_events:
self._logger.debug("firing handlers for event %s ", event_name)
for func, args, kwargs in self._event_handlers[event_name]:
kwargs.update(event_kwargs)
func(self, *(event_args + args), **kwargs)
|
Execute all the handlers associated with given event.
This method executes all handlers associated with the event
`event_name`. Optional positional and keyword arguments can be used to
pass arguments to **all** handlers added with this event. These
arguments update the arguments passed using :meth:`~ignite.engine.Engine.add_event_handler`.
Args:
event_name: event for which the handlers should be executed. Valid
events are from :class:`~ignite.engine.Events` or any `event_name` added by
:meth:`~ignite.engine.Engine.register_events`.
*event_args: optional args to be passed to all handlers.
**event_kwargs: optional keyword args to be passed to all handlers.
|
def read(self):
        """Read data from serial port and returns a ``bytearray``.

        Drains the port until its input buffer reports empty.
        """
        buffered = bytearray()
        pending = self.comport.inWaiting()
        while pending:
            chunk = self.comport.read(size=pending)
            buffered.extend(bytearray(chunk))
            pending = self.comport.inWaiting()
        return buffered
|
Read data from serial port and returns a ``bytearray``.
|
def wrap(self, starter_cls):
        """
        Normalise ``starter_cls``: ProcessStarter subclasses are returned
        as-is; any other value is treated as a legacy ``preparefunc`` and
        bound to a CompatStarter (after emitting a deprecation warning).
        """
        is_starter = (isinstance(starter_cls, type)
                      and issubclass(starter_cls, ProcessStarter))
        if is_starter:
            return starter_cls
        warnings.warn('Pass a ProcessStarter for preparefunc',
                      DeprecationWarning, stacklevel=3)
        return functools.partial(CompatStarter, starter_cls)
|
If starter_cls is not a ProcessStarter, assume it's the legacy
preparefunc and return it bound to a CompatStarter.
|
def data_to_binary(self):
        """
        Serialise this message to its wire representation.

        :return: bytes -- the command code followed by one packed byte per
            LED group (on, slow-blinking, fast-blinking).
        """
        payload = [
            COMMAND_CODE,
            self.channels_to_byte(self.led_on),
            self.channels_to_byte(self.led_slow_blinking),
            self.channels_to_byte(self.led_fast_blinking),
        ]
        return bytes(payload)
|
:return: bytes
|
def list(self):
        """Get a list of the names of the functions stored in this database."""
        # Only the _id field is projected; each document's _id is the
        # stored function's name.
        cursor = self._db.system.js.find(projection=["_id"])
        return [doc["_id"] for doc in cursor]
|
Get a list of the names of the functions stored in this database.
|
def get_translations_sorted(codes):
    """ Returns a sorted list of (code, translation) tuples for codes """
    # NOTE(review): `self` and `priority` are free variables here -- this
    # reads like a closure extracted from an enclosing method.  Confirm
    # they are bound in the defining scope; as a standalone function this
    # would raise NameError.
    codes = codes or self.codes
    return self._get_priority_translations(priority, codes)
|
Returns a sorted list of (code, translation) tuples for codes
|
def labels(self):
        """All labels present in the match patterns.

        RETURNS (tuple): The string labels.

        DOCS: https://spacy.io/api/entityruler#labels
        """
        # Union of the labels from both token-based and phrase-based patterns.
        all_labels = set(self.token_patterns.keys())
        all_labels.update(self.phrase_patterns.keys())
        # Deduplicated via the set above, returned as a tuple.
        return tuple(all_labels)
|
All labels present in the match patterns.
RETURNS (tuple): The string labels.
DOCS: https://spacy.io/api/entityruler#labels
|
def send(self, node_id, request, wakeup=True):
        """Send a request to a specific node. Bytes are placed on an
        internal per-connection send-queue. Actual network I/O will be
        triggered in a subsequent call to .poll()

        Arguments:
            node_id (int): destination node
            request (Struct): request object (not-encoded)
            wakeup (bool): optional flag to disable thread-wakeup

        Raises:
            AssertionError: if node_id is not in current cluster metadata
                (NOTE(review): no assert is visible in this body -- presumably
                raised inside maybe_connect; confirm.)

        Returns:
            Future: resolves to Response struct or Error
        """
        conn = self._conns.get(node_id)
        # Not connected, or the connection can't take requests yet: kick off
        # a connection attempt and fail this request immediately.
        if not conn or not self._can_send_request(node_id):
            self.maybe_connect(node_id, wakeup=wakeup)
            return Future().failure(Errors.NodeNotReadyError(node_id))

        # conn.send will queue the request internally
        # we will need to call send_pending_requests()
        # to trigger network I/O
        future = conn.send(request, blocking=False)

        # Wakeup signal is useful in case another thread is
        # blocked waiting for incoming network traffic while holding
        # the client lock in poll().
        if wakeup:
            self.wakeup()

        return future
|
Send a request to a specific node. Bytes are placed on an
internal per-connection send-queue. Actual network I/O will be
triggered in a subsequent call to .poll()
Arguments:
node_id (int): destination node
request (Struct): request object (not-encoded)
wakeup (bool): optional flag to disable thread-wakeup
Raises:
AssertionError: if node_id is not in current cluster metadata
Returns:
Future: resolves to Response struct or Error
|
def restoreSettings(self, settings):
        """
        Restores the files for this menu from the settings.

        :param      settings | <QSettings>
        """
        raw = unwrapVariant(settings.value('recent_files'))
        if not raw:
            return
        # Stored as a single pathsep-joined string; split back into paths.
        self.setFilenames(raw.split(os.path.pathsep))
|
Restores the files for this menu from the settings.
:param settings | <QSettings>
|
def run(self):
        """Starts the sender.

        Waits until the next 30-second UTC boundary (so multiple senders
        start in lockstep), runs the async send loop, then closes all
        streams and shuts the worker pool down.
        """
        # Create the thread pool.
        executor = concurrent.futures.ThreadPoolExecutor(
            max_workers=self._config['num_workers'])

        # Wait to ensure multiple senders can be synchronised:
        # round the current UTC time up to the next 30 s boundary.
        now = int(datetime.datetime.utcnow().timestamp())
        start_time = ((now + 29) // 30) * 30
        self._log.info('Waiting until {}'.format(
            datetime.datetime.fromtimestamp(start_time)))
        # Busy-wait in 0.1 s steps until the shared start time.
        while int(datetime.datetime.utcnow().timestamp()) < start_time:
            time.sleep(0.1)

        # Run the event loop.
        loop = asyncio.get_event_loop()
        try:
            loop.run_until_complete(self._run_loop(executor))
        except KeyboardInterrupt:
            pass
        finally:
            # Send the end of stream message to each stream.
            self._log.info('Shutting down, closing streams...')
            tasks = []
            for stream, item_group in self._streams:
                tasks.append(stream.async_send_heap(item_group.get_end()))
            loop.run_until_complete(asyncio.gather(*tasks))
            self._log.info('... finished.')
            executor.shutdown()
|
Starts the sender.
|
def plot(self, channel_names, kind='histogram',
             gates=None, gate_colors=None, gate_lw=1, **kwargs):
        """Plot the flow cytometry data associated with the sample on the current axis.

        To produce the plot, follow up with a call to matplotlib's show() function.

        Parameters
        ----------
        {graph_plotFCM_pars}
        {FCMeasurement_plot_pars}
        {common_plot_ax}
        gates : [None, Gate, list of Gate]
            Gate must be of type {_gate_available_classes}.
        gate_lw: float | iterable
            line width to use when drawing gates
            if float, uses the same line width for all gates
            if iterable, then cycles between the values
        kwargs : dict
            Additional keyword arguments to be passed to graph.plotFCM

        Returns
        -------
        None : if no data is present
        plot_output : output of plot command used to draw (e.g., output of hist)

        Examples
        --------
        >>> sample.plot('Y2-A', bins=100, alpha=0.7, color='green', normed=1)  # 1d histogram
        >>> sample.plot(['B1-A', 'Y2-A'], cmap=cm.Oranges, colorbar=False)  # 2d histogram
        """
        # `collections.Iterable` was removed in Python 3.10; import the ABC
        # from its canonical location with a Python 2 fallback.
        try:
            from collections.abc import Iterable
        except ImportError:  # pragma: no cover - Python 2 fallback
            from collections import Iterable
        ax = kwargs.get('ax')

        channel_names = to_list(channel_names)
        gates = to_list(gates)

        plot_output = graph.plotFCM(self.data, channel_names, kind=kind, **kwargs)

        if gates is not None:
            if gate_colors is None:
                gate_colors = cycle(('b', 'g', 'r', 'm', 'c', 'y'))
            if not isinstance(gate_lw, Iterable):
                gate_lw = [gate_lw]
            gate_lw = cycle(gate_lw)
            for (g, c, lw) in zip(gates, gate_colors, gate_lw):
                g.plot(ax=ax, ax_channels=channel_names, color=c, lw=lw)

        return plot_output
|
Plot the flow cytometry data associated with the sample on the current axis.
To produce the plot, follow up with a call to matplotlib's show() function.
Parameters
----------
{graph_plotFCM_pars}
{FCMeasurement_plot_pars}
{common_plot_ax}
gates : [None, Gate, list of Gate]
Gate must be of type {_gate_available_classes}.
gate_lw: float | iterable
line width to use when drawing gates
if float, uses the same line width for all gates
if iterable, then cycles between the values
kwargs : dict
Additional keyword arguments to be passed to graph.plotFCM
Returns
-------
None : if no data is present
plot_output : output of plot command used to draw (e.g., output of hist)
Examples
--------
>>> sample.plot('Y2-A', bins=100, alpha=0.7, color='green', normed=1) # 1d histogram
>>> sample.plot(['B1-A', 'Y2-A'], cmap=cm.Oranges, colorbar=False) # 2d histogram
|
def get_cursor(cls, cursor_type=_CursorType.PLAIN) -> Cursor:
        """
        Yields:
            new client-side cursor from existing db connection pool

        Note: this is a generator-based coroutine (``yield from``); it must
        be driven by an event loop.  When the pool is not used, the cursor
        is wrapped so closing it also releases the ad-hoc connection.
        """
        _cur = None
        # Acquire a connection: from the shared pool, or ad hoc.
        if cls._use_pool:
            _connection_source = yield from cls.get_pool()
        else:
            _connection_source = yield from aiopg.connect(echo=False, **cls._connection_params)
        # Pick the cursor factory matching the requested cursor type.
        if cursor_type == _CursorType.PLAIN:
            _cur = yield from _connection_source.cursor()
        if cursor_type == _CursorType.NAMEDTUPLE:
            _cur = yield from _connection_source.cursor(cursor_factory=psycopg2.extras.NamedTupleCursor)
        if cursor_type == _CursorType.DICT:
            _cur = yield from _connection_source.cursor(cursor_factory=psycopg2.extras.DictCursor)
        if not cls._use_pool:
            _cur = cursor_context_manager(_connection_source, _cur)
        return _cur
|
Yields:
new client-side cursor from existing db connection pool
|
def apply_T5(word):  # BROKEN
    '''If a (V)VVV-sequence contains a VV-sequence that could be an /i/-final
    diphthong, there is a syllable boundary between it and the third vowel,
    e.g., [raa.ois.sa], [huo.uim.me], [la.eis.sa], [sel.vi.äi.si], [tai.an],
    [säi.e], [oi.om.me].'''
    # Marked BROKEN upstream; comments below flag the suspect spots rather
    # than change behaviour.
    T5 = ''
    WORD = word.split('.')
    for i, v in enumerate(WORD):
        # The generator re-binds `i` locally (generator expressions have
        # their own scope), shadowing the enumerate index inside the
        # condition only -- confusing but not a correctness issue.
        if contains_VVV(v) and any(i for i in i_DIPHTHONGS if i in v):
            # NOTE(review): rfind returning -1 gives -2, which is truthy,
            # so the `or 2` fallback only fires when rfind('i') == 1.
            I = v.rfind('i') - 1 or 2
            I = I + 2 if is_consonant(v[I - 1]) else I
            # Insert the syllable boundary before the third vowel.
            WORD[i] = v[:I] + '.' + v[I:]
            T5 = ' T5'
    word = '.'.join(WORD)
    return word, T5
|
If a (V)VVV-sequence contains a VV-sequence that could be an /i/-final
diphthong, there is a syllable boundary between it and the third vowel,
e.g., [raa.ois.sa], [huo.uim.me], [la.eis.sa], [sel.vi.äi.si], [tai.an],
[säi.e], [oi.om.me].
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.