code stringlengths 75 104k | docstring stringlengths 1 46.9k | text stringlengths 164 112k |
|---|---|---|
def day_display(year, month, all_month_events, day):
"""
Returns the events that occur on the given day.
Works by getting all occurrences for the month, then drilling
down to only those occurring on the given day.
"""
# Get a dict with all of the events for the month
count = CountHandler(year, month, all_month_events).get_count()
pks = [x[1] for x in count[day]] # list of pks for events on given day
# List enables sorting.
# See the comments in EventMonthView in views.py for more info
day_events = list(Event.objects.filter(pk__in=pks).order_by(
'start_date').prefetch_related('cancellations'))
day_events.sort(key=lambda x: x.l_start_date.hour)
return day_events | Returns the events that occur on the given day.
Works by getting all occurrences for the month, then drilling
down to only those occurring on the given day. | Below is the the instruction that describes the task:
### Input:
Returns the events that occur on the given day.
Works by getting all occurrences for the month, then drilling
down to only those occurring on the given day.
### Response:
def day_display(year, month, all_month_events, day):
"""
Returns the events that occur on the given day.
Works by getting all occurrences for the month, then drilling
down to only those occurring on the given day.
"""
# Get a dict with all of the events for the month
count = CountHandler(year, month, all_month_events).get_count()
pks = [x[1] for x in count[day]] # list of pks for events on given day
# List enables sorting.
# See the comments in EventMonthView in views.py for more info
day_events = list(Event.objects.filter(pk__in=pks).order_by(
'start_date').prefetch_related('cancellations'))
day_events.sort(key=lambda x: x.l_start_date.hour)
return day_events |
def reindent(s, numspaces):
""" reinidents a string (s) by the given number of spaces (numspaces) """
leading_space = numspaces * ' '
lines = [leading_space + line.strip()for line in s.splitlines()]
return '\n'.join(lines) | reinidents a string (s) by the given number of spaces (numspaces) | Below is the the instruction that describes the task:
### Input:
reinidents a string (s) by the given number of spaces (numspaces)
### Response:
def reindent(s, numspaces):
""" reinidents a string (s) by the given number of spaces (numspaces) """
leading_space = numspaces * ' '
lines = [leading_space + line.strip()for line in s.splitlines()]
return '\n'.join(lines) |
def update(self, email=None, username=None, first_name=None, last_name=None, country=None):
"""
Update values on an existing user. See the API docs for what kinds of update are possible.
:param email: new email for this user
:param username: new username for this user
:param first_name: new first name for this user
:param last_name: new last name for this user
:param country: new country for this user
:return: the User, so you can do User(...).update(...).add_to_groups(...)
"""
if username and self.id_type != IdentityTypes.federatedID:
raise ArgumentError("You cannot set username except for a federated ID")
if username and '@' in username and not email:
raise ArgumentError("Cannot update email-type username when email is not specified")
if email and username and email.lower() == username.lower():
raise ArgumentError("Specify just email to set both email and username for a federated ID")
updates = {}
for k, v in six.iteritems(dict(email=email, username=username,
firstname=first_name, lastname=last_name,
country=country)):
if v:
updates[k] = v
return self.append(update=updates) | Update values on an existing user. See the API docs for what kinds of update are possible.
:param email: new email for this user
:param username: new username for this user
:param first_name: new first name for this user
:param last_name: new last name for this user
:param country: new country for this user
:return: the User, so you can do User(...).update(...).add_to_groups(...) | Below is the the instruction that describes the task:
### Input:
Update values on an existing user. See the API docs for what kinds of update are possible.
:param email: new email for this user
:param username: new username for this user
:param first_name: new first name for this user
:param last_name: new last name for this user
:param country: new country for this user
:return: the User, so you can do User(...).update(...).add_to_groups(...)
### Response:
def update(self, email=None, username=None, first_name=None, last_name=None, country=None):
"""
Update values on an existing user. See the API docs for what kinds of update are possible.
:param email: new email for this user
:param username: new username for this user
:param first_name: new first name for this user
:param last_name: new last name for this user
:param country: new country for this user
:return: the User, so you can do User(...).update(...).add_to_groups(...)
"""
if username and self.id_type != IdentityTypes.federatedID:
raise ArgumentError("You cannot set username except for a federated ID")
if username and '@' in username and not email:
raise ArgumentError("Cannot update email-type username when email is not specified")
if email and username and email.lower() == username.lower():
raise ArgumentError("Specify just email to set both email and username for a federated ID")
updates = {}
for k, v in six.iteritems(dict(email=email, username=username,
firstname=first_name, lastname=last_name,
country=country)):
if v:
updates[k] = v
return self.append(update=updates) |
def set_url(self, url):
"""Sets the URL referring to a robots.txt file."""
self.url = url
self.host, self.path = urllib.parse.urlparse(url)[1:3] | Sets the URL referring to a robots.txt file. | Below is the the instruction that describes the task:
### Input:
Sets the URL referring to a robots.txt file.
### Response:
def set_url(self, url):
"""Sets the URL referring to a robots.txt file."""
self.url = url
self.host, self.path = urllib.parse.urlparse(url)[1:3] |
def create_assembly(self, did, wid, name='My Assembly'):
'''
Creates a new assembly element in the specified document / workspace.
Args:
- did (str): Document ID
- wid (str): Workspace ID
- name (str, default='My Assembly')
Returns:
- requests.Response: Onshape response data
'''
payload = {
'name': name
}
return self._api.request('post', '/api/assemblies/d/' + did + '/w/' + wid, body=payload) | Creates a new assembly element in the specified document / workspace.
Args:
- did (str): Document ID
- wid (str): Workspace ID
- name (str, default='My Assembly')
Returns:
- requests.Response: Onshape response data | Below is the the instruction that describes the task:
### Input:
Creates a new assembly element in the specified document / workspace.
Args:
- did (str): Document ID
- wid (str): Workspace ID
- name (str, default='My Assembly')
Returns:
- requests.Response: Onshape response data
### Response:
def create_assembly(self, did, wid, name='My Assembly'):
'''
Creates a new assembly element in the specified document / workspace.
Args:
- did (str): Document ID
- wid (str): Workspace ID
- name (str, default='My Assembly')
Returns:
- requests.Response: Onshape response data
'''
payload = {
'name': name
}
return self._api.request('post', '/api/assemblies/d/' + did + '/w/' + wid, body=payload) |
def execute(self, command, args):
""" Event firing and exception conversion around command execution.
Common exceptions are run through our exception handler for
pretty-printing or debugging and then converted to SystemExit so the
interpretor will exit without further ado (or be caught if
interactive). """
self.fire_event('precmd', command, args)
try:
try:
result = command.run_wrap(args)
except BaseException as e:
self.fire_event('postcmd', command, args, exc=e)
raise e
else:
self.fire_event('postcmd', command, args, result=result)
return result
except BrokenPipeError as e:
_vprinterr('<dim><red>...broken pipe...</red></dim>')
raise SystemExit(1) from e
except KeyboardInterrupt as e:
_vprinterr('<dim><red>...interrupted...</red></dim>')
raise SystemExit(1) from e
except SystemExit as e:
if e.args and not isinstance(e.args[0], int):
_vprinterr("<red>%s</red>" % e)
raise SystemExit(1) from e
raise e
except Exception as e:
self.handle_command_error(command, args, e)
raise SystemExit(1) from e | Event firing and exception conversion around command execution.
Common exceptions are run through our exception handler for
pretty-printing or debugging and then converted to SystemExit so the
interpretor will exit without further ado (or be caught if
interactive). | Below is the the instruction that describes the task:
### Input:
Event firing and exception conversion around command execution.
Common exceptions are run through our exception handler for
pretty-printing or debugging and then converted to SystemExit so the
interpretor will exit without further ado (or be caught if
interactive).
### Response:
def execute(self, command, args):
""" Event firing and exception conversion around command execution.
Common exceptions are run through our exception handler for
pretty-printing or debugging and then converted to SystemExit so the
interpretor will exit without further ado (or be caught if
interactive). """
self.fire_event('precmd', command, args)
try:
try:
result = command.run_wrap(args)
except BaseException as e:
self.fire_event('postcmd', command, args, exc=e)
raise e
else:
self.fire_event('postcmd', command, args, result=result)
return result
except BrokenPipeError as e:
_vprinterr('<dim><red>...broken pipe...</red></dim>')
raise SystemExit(1) from e
except KeyboardInterrupt as e:
_vprinterr('<dim><red>...interrupted...</red></dim>')
raise SystemExit(1) from e
except SystemExit as e:
if e.args and not isinstance(e.args[0], int):
_vprinterr("<red>%s</red>" % e)
raise SystemExit(1) from e
raise e
except Exception as e:
self.handle_command_error(command, args, e)
raise SystemExit(1) from e |
def update(self, configuration=values.unset, unique_name=values.unset):
"""
Update the InstalledAddOnInstance
:param dict configuration: The JSON object representing the configuration
:param unicode unique_name: The string that uniquely identifies this Add-on installation
:returns: Updated InstalledAddOnInstance
:rtype: twilio.rest.preview.marketplace.installed_add_on.InstalledAddOnInstance
"""
return self._proxy.update(configuration=configuration, unique_name=unique_name, ) | Update the InstalledAddOnInstance
:param dict configuration: The JSON object representing the configuration
:param unicode unique_name: The string that uniquely identifies this Add-on installation
:returns: Updated InstalledAddOnInstance
:rtype: twilio.rest.preview.marketplace.installed_add_on.InstalledAddOnInstance | Below is the the instruction that describes the task:
### Input:
Update the InstalledAddOnInstance
:param dict configuration: The JSON object representing the configuration
:param unicode unique_name: The string that uniquely identifies this Add-on installation
:returns: Updated InstalledAddOnInstance
:rtype: twilio.rest.preview.marketplace.installed_add_on.InstalledAddOnInstance
### Response:
def update(self, configuration=values.unset, unique_name=values.unset):
"""
Update the InstalledAddOnInstance
:param dict configuration: The JSON object representing the configuration
:param unicode unique_name: The string that uniquely identifies this Add-on installation
:returns: Updated InstalledAddOnInstance
:rtype: twilio.rest.preview.marketplace.installed_add_on.InstalledAddOnInstance
"""
return self._proxy.update(configuration=configuration, unique_name=unique_name, ) |
def parse(self, channel_id, payload):
'''
Parse a header frame for a channel given a Reader payload.
'''
class_id = payload.read_short()
weight = payload.read_short()
size = payload.read_longlong()
properties = {}
# The AMQP spec is overly-complex when it comes to handling header
# frames. The spec says that in addition to the first 16bit field,
# additional ones can follow which /may/ then be in the property list
# (because bit flags aren't in the list). Properly implementing custom
# values requires the ability change the properties and their types,
# which someone is welcome to do, but seriously, what's the point?
# Because the complexity of parsing and writing this frame directly
# impacts the speed at which messages can be processed, there are two
# branches for both a fast parse which assumes no changes to the
# properties and a slow parse. For now it's up to someone using custom
# headers to flip the flag.
if self.DEFAULT_PROPERTIES:
flag_bits = payload.read_short()
for key, proptype, rfunc, wfunc, mask in self.PROPERTIES:
if flag_bits & mask:
properties[key] = rfunc(payload)
else:
flags = []
while True:
flag_bits = payload.read_short()
flags.append(flag_bits)
if flag_bits & 1 == 0:
break
shift = 0
for key, proptype, rfunc, wfunc, mask in self.PROPERTIES:
if shift == 0:
if not flags:
break
flag_bits, flags = flags[0], flags[1:]
shift = 15
if flag_bits & (1 << shift):
properties[key] = rfunc(payload)
shift -= 1
return HeaderFrame(channel_id, class_id, weight, size, properties) | Parse a header frame for a channel given a Reader payload. | Below is the the instruction that describes the task:
### Input:
Parse a header frame for a channel given a Reader payload.
### Response:
def parse(self, channel_id, payload):
'''
Parse a header frame for a channel given a Reader payload.
'''
class_id = payload.read_short()
weight = payload.read_short()
size = payload.read_longlong()
properties = {}
# The AMQP spec is overly-complex when it comes to handling header
# frames. The spec says that in addition to the first 16bit field,
# additional ones can follow which /may/ then be in the property list
# (because bit flags aren't in the list). Properly implementing custom
# values requires the ability change the properties and their types,
# which someone is welcome to do, but seriously, what's the point?
# Because the complexity of parsing and writing this frame directly
# impacts the speed at which messages can be processed, there are two
# branches for both a fast parse which assumes no changes to the
# properties and a slow parse. For now it's up to someone using custom
# headers to flip the flag.
if self.DEFAULT_PROPERTIES:
flag_bits = payload.read_short()
for key, proptype, rfunc, wfunc, mask in self.PROPERTIES:
if flag_bits & mask:
properties[key] = rfunc(payload)
else:
flags = []
while True:
flag_bits = payload.read_short()
flags.append(flag_bits)
if flag_bits & 1 == 0:
break
shift = 0
for key, proptype, rfunc, wfunc, mask in self.PROPERTIES:
if shift == 0:
if not flags:
break
flag_bits, flags = flags[0], flags[1:]
shift = 15
if flag_bits & (1 << shift):
properties[key] = rfunc(payload)
shift -= 1
return HeaderFrame(channel_id, class_id, weight, size, properties) |
def _execute_task(task_id,
verbosity=None,
runmode='run',
sigmode=None,
monitor_interval=5,
resource_monitor_interval=60,
master_runtime={}):
'''A function that execute specified task within a local dictionary
(from SoS env.sos_dict). This function should be self-contained in that
it can be handled by a task manager, be executed locally in a separate
process or remotely on a different machine.'''
# start a monitoring file, which would be killed after the job
# is done (killed etc)
if isinstance(task_id, str):
params, master_runtime = TaskFile(task_id).get_params_and_runtime()
sig_content = TaskFile(task_id).signature
subtask = False
else:
# subtask
subtask = True
task_id, params, sig_content = task_id
if 'TASK' in env.config['SOS_DEBUG'] or 'ALL' in env.config['SOS_DEBUG']:
env.log_to_file('TASK', f'Executing subtask {task_id}')
# update local runtime with master runtime
if '_runtime' in master_runtime:
params.sos_dict['_runtime'].update(master_runtime['_runtime'])
if task_id in master_runtime:
params.sos_dict.update(master_runtime[task_id])
if hasattr(params, 'task_stack'):
return _execute_sub_tasks(task_id, params, sig_content, verbosity,
runmode, sigmode, monitor_interval,
resource_monitor_interval, master_runtime)
global_def, task, sos_dict = params.global_def, params.task, params.sos_dict
# task output
env.sos_dict.set(
'__std_out__',
os.path.join(
os.path.expanduser('~'), '.sos', 'tasks', task_id + '.out'))
env.sos_dict.set(
'__std_err__',
os.path.join(
os.path.expanduser('~'), '.sos', 'tasks', task_id + '.err'))
env.logfile = os.path.join(
os.path.expanduser('~'), '.sos', 'tasks', task_id + '.err')
# clear the content of existing .out and .err file if exists, but do not create one if it does not exist
if os.path.exists(env.sos_dict['__std_out__']):
open(env.sos_dict['__std_out__'], 'w').close()
if os.path.exists(env.sos_dict['__std_err__']):
open(env.sos_dict['__std_err__'], 'w').close()
if verbosity is not None:
env.verbosity = verbosity
prepare_env(global_def[0], global_def[1])
if '_runtime' not in sos_dict:
sos_dict['_runtime'] = {}
# pulse thread
if monitor_interval is not None:
m = ProcessMonitor(
task_id,
monitor_interval=monitor_interval,
resource_monitor_interval=resource_monitor_interval,
max_walltime=sos_dict['_runtime'].get('max_walltime', None),
max_mem=sos_dict['_runtime'].get('max_mem', None),
max_procs=sos_dict['_runtime'].get('max_procs', None),
sos_dict=sos_dict)
m.start()
env.config['run_mode'] = runmode
if runmode == 'dryrun':
env.config['sig_mode'] = 'ignore'
elif sigmode is not None:
env.config['sig_mode'] = sigmode
#
(env.logger.debug if subtask else env.logger.info)(f'{task_id} ``started``')
env.sos_dict.quick_update(sos_dict)
for key in [
'step_input', '_input', 'step_output', '_output', 'step_depends',
'_depends'
]:
if key in sos_dict and isinstance(sos_dict[key], sos_targets):
# resolve remote() target
env.sos_dict.set(
key,
sos_dict[key].remove_targets(type=sos_step).resolve_remote())
# when no output is specified, we just treat the task as having no output (determined)
env.sos_dict['_output']._undetermined = False
sig = None if env.config['sig_mode'] == 'ignore' else InMemorySignature(
env.sos_dict['_input'],
env.sos_dict['_output'],
env.sos_dict['_depends'],
env.sos_dict['__signature_vars__'],
shared_vars=parse_shared_vars(env.sos_dict['_runtime'].get(
'shared', None)))
if sig and _validate_task_signature(sig, sig_content.get(task_id, {}),
task_id, subtask):
#env.logger.info(f'{task_id} ``skipped``')
return collect_task_result(
task_id, sos_dict, skipped=True, signature=sig)
# if we are to really execute the task, touch the task file so that sos status shows correct
# execution duration.
if not subtask:
sos_dict['start_time'] = time.time()
try:
# go to 'workdir'
if '_runtime' in sos_dict and 'workdir' in sos_dict['_runtime']:
if not os.path.isdir(
os.path.expanduser(sos_dict['_runtime']['workdir'])):
try:
os.makedirs(
os.path.expanduser(sos_dict['_runtime']['workdir']))
os.chdir(
os.path.expanduser(sos_dict['_runtime']['workdir']))
except Exception as e:
# sometimes it is not possible to go to a "workdir" because of
# file system differences, but this should be ok if a work_dir
# has been specified.
env.logger.debug(
f'Failed to create workdir {sos_dict["_runtime"]["workdir"]}: {e}'
)
else:
os.chdir(os.path.expanduser(sos_dict['_runtime']['workdir']))
#
orig_dir = os.getcwd()
# we will need to check existence of targets because the task might
# be executed on a remote host where the targets are not available.
for target in (sos_dict['_input'] if isinstance(sos_dict['_input'], list) else []) + \
(sos_dict['_depends'] if isinstance(sos_dict['_depends'], list) else []):
# if the file does not exist (although the signature exists)
# request generation of files
if isinstance(target, str):
if not file_target(target).target_exists('target'):
# remove the signature and regenerate the file
raise RuntimeError(f'{target} not found')
# the sos_step target should not be checked in tasks because tasks are
# independently executable units.
elif not isinstance(
target, sos_step) and not target.target_exists('target'):
raise RuntimeError(f'{target} not found')
# create directory. This usually has been done at the step level but the task can be executed
# on a remote host where the directory does not yet exist.
ofiles = env.sos_dict['_output']
if ofiles.valid():
for ofile in ofiles:
parent_dir = ofile.parent
if not parent_dir.is_dir():
parent_dir.mkdir(parents=True, exist_ok=True)
# go to user specified workdir
if '_runtime' in sos_dict and 'workdir' in sos_dict['_runtime']:
if not os.path.isdir(
os.path.expanduser(sos_dict['_runtime']['workdir'])):
try:
os.makedirs(
os.path.expanduser(sos_dict['_runtime']['workdir']))
except Exception as e:
raise RuntimeError(
f'Failed to create workdir {sos_dict["_runtime"]["workdir"]}: {e}'
)
os.chdir(os.path.expanduser(sos_dict['_runtime']['workdir']))
# set environ ...
# we join PATH because the task might be executed on a different machine
if '_runtime' in sos_dict:
if 'env' in sos_dict['_runtime']:
for key, value in sos_dict['_runtime']['env'].items():
if 'PATH' in key and key in os.environ:
new_path = OrderedDict()
for p in value.split(os.pathsep):
new_path[p] = 1
for p in value.split(os.environ[key]):
new_path[p] = 1
os.environ[key] = os.pathsep.join(new_path.keys())
else:
os.environ[key] = value
if 'prepend_path' in sos_dict['_runtime']:
if isinstance(sos_dict['_runtime']['prepend_path'], str):
os.environ['PATH'] = sos_dict['_runtime']['prepend_path'] + \
os.pathsep + os.environ['PATH']
elif isinstance(env.sos_dict['_runtime']['prepend_path'],
Sequence):
os.environ['PATH'] = os.pathsep.join(
sos_dict['_runtime']
['prepend_path']) + os.pathsep + os.environ['PATH']
else:
raise ValueError(
f'Unacceptable input for option prepend_path: {sos_dict["_runtime"]["prepend_path"]}'
)
# step process
SoS_exec(task)
(env.logger.debug
if subtask else env.logger.info)(f'{task_id} ``completed``')
except StopInputGroup as e:
# task ignored with stop_if exception
if not e.keep_output:
clear_output()
env.sos_dict['_output'] = sos_targets([])
if e.message:
env.logger.info(e.message)
return {
'ret_code': 0,
'task': task_id,
'input': sos_targets([]),
'output': env.sos_dict['_output'],
'depends': sos_targets([]),
'shared': {}
}
except KeyboardInterrupt:
env.logger.error(f'{task_id} ``interrupted``')
raise
except subprocess.CalledProcessError as e:
return {
'ret_code': e.returncode,
'task': task_id,
'shared': {},
'exception': RuntimeError(e.stderr)
}
except ProcessKilled:
env.logger.error(f'{task_id} ``interrupted``')
raise
except Exception as e:
msg = get_traceback_msg(e)
# env.logger.error(f'{task_id} ``failed``: {msg}')
with open(
os.path.join(
os.path.expanduser('~'), '.sos', 'tasks', task_id + '.err'),
'a') as err:
err.write(msg + '\n')
return {
'ret_code': 1,
'exception': RuntimeError(msg),
'task': task_id,
'shared': {}
}
finally:
os.chdir(orig_dir)
return collect_task_result(task_id, sos_dict, signature=sig) | A function that execute specified task within a local dictionary
(from SoS env.sos_dict). This function should be self-contained in that
it can be handled by a task manager, be executed locally in a separate
process or remotely on a different machine. | Below is the the instruction that describes the task:
### Input:
A function that execute specified task within a local dictionary
(from SoS env.sos_dict). This function should be self-contained in that
it can be handled by a task manager, be executed locally in a separate
process or remotely on a different machine.
### Response:
def _execute_task(task_id,
verbosity=None,
runmode='run',
sigmode=None,
monitor_interval=5,
resource_monitor_interval=60,
master_runtime={}):
'''A function that execute specified task within a local dictionary
(from SoS env.sos_dict). This function should be self-contained in that
it can be handled by a task manager, be executed locally in a separate
process or remotely on a different machine.'''
# start a monitoring file, which would be killed after the job
# is done (killed etc)
if isinstance(task_id, str):
params, master_runtime = TaskFile(task_id).get_params_and_runtime()
sig_content = TaskFile(task_id).signature
subtask = False
else:
# subtask
subtask = True
task_id, params, sig_content = task_id
if 'TASK' in env.config['SOS_DEBUG'] or 'ALL' in env.config['SOS_DEBUG']:
env.log_to_file('TASK', f'Executing subtask {task_id}')
# update local runtime with master runtime
if '_runtime' in master_runtime:
params.sos_dict['_runtime'].update(master_runtime['_runtime'])
if task_id in master_runtime:
params.sos_dict.update(master_runtime[task_id])
if hasattr(params, 'task_stack'):
return _execute_sub_tasks(task_id, params, sig_content, verbosity,
runmode, sigmode, monitor_interval,
resource_monitor_interval, master_runtime)
global_def, task, sos_dict = params.global_def, params.task, params.sos_dict
# task output
env.sos_dict.set(
'__std_out__',
os.path.join(
os.path.expanduser('~'), '.sos', 'tasks', task_id + '.out'))
env.sos_dict.set(
'__std_err__',
os.path.join(
os.path.expanduser('~'), '.sos', 'tasks', task_id + '.err'))
env.logfile = os.path.join(
os.path.expanduser('~'), '.sos', 'tasks', task_id + '.err')
# clear the content of existing .out and .err file if exists, but do not create one if it does not exist
if os.path.exists(env.sos_dict['__std_out__']):
open(env.sos_dict['__std_out__'], 'w').close()
if os.path.exists(env.sos_dict['__std_err__']):
open(env.sos_dict['__std_err__'], 'w').close()
if verbosity is not None:
env.verbosity = verbosity
prepare_env(global_def[0], global_def[1])
if '_runtime' not in sos_dict:
sos_dict['_runtime'] = {}
# pulse thread
if monitor_interval is not None:
m = ProcessMonitor(
task_id,
monitor_interval=monitor_interval,
resource_monitor_interval=resource_monitor_interval,
max_walltime=sos_dict['_runtime'].get('max_walltime', None),
max_mem=sos_dict['_runtime'].get('max_mem', None),
max_procs=sos_dict['_runtime'].get('max_procs', None),
sos_dict=sos_dict)
m.start()
env.config['run_mode'] = runmode
if runmode == 'dryrun':
env.config['sig_mode'] = 'ignore'
elif sigmode is not None:
env.config['sig_mode'] = sigmode
#
(env.logger.debug if subtask else env.logger.info)(f'{task_id} ``started``')
env.sos_dict.quick_update(sos_dict)
for key in [
'step_input', '_input', 'step_output', '_output', 'step_depends',
'_depends'
]:
if key in sos_dict and isinstance(sos_dict[key], sos_targets):
# resolve remote() target
env.sos_dict.set(
key,
sos_dict[key].remove_targets(type=sos_step).resolve_remote())
# when no output is specified, we just treat the task as having no output (determined)
env.sos_dict['_output']._undetermined = False
sig = None if env.config['sig_mode'] == 'ignore' else InMemorySignature(
env.sos_dict['_input'],
env.sos_dict['_output'],
env.sos_dict['_depends'],
env.sos_dict['__signature_vars__'],
shared_vars=parse_shared_vars(env.sos_dict['_runtime'].get(
'shared', None)))
if sig and _validate_task_signature(sig, sig_content.get(task_id, {}),
task_id, subtask):
#env.logger.info(f'{task_id} ``skipped``')
return collect_task_result(
task_id, sos_dict, skipped=True, signature=sig)
# if we are to really execute the task, touch the task file so that sos status shows correct
# execution duration.
if not subtask:
sos_dict['start_time'] = time.time()
try:
# go to 'workdir'
if '_runtime' in sos_dict and 'workdir' in sos_dict['_runtime']:
if not os.path.isdir(
os.path.expanduser(sos_dict['_runtime']['workdir'])):
try:
os.makedirs(
os.path.expanduser(sos_dict['_runtime']['workdir']))
os.chdir(
os.path.expanduser(sos_dict['_runtime']['workdir']))
except Exception as e:
# sometimes it is not possible to go to a "workdir" because of
# file system differences, but this should be ok if a work_dir
# has been specified.
env.logger.debug(
f'Failed to create workdir {sos_dict["_runtime"]["workdir"]}: {e}'
)
else:
os.chdir(os.path.expanduser(sos_dict['_runtime']['workdir']))
#
orig_dir = os.getcwd()
# we will need to check existence of targets because the task might
# be executed on a remote host where the targets are not available.
for target in (sos_dict['_input'] if isinstance(sos_dict['_input'], list) else []) + \
(sos_dict['_depends'] if isinstance(sos_dict['_depends'], list) else []):
# if the file does not exist (although the signature exists)
# request generation of files
if isinstance(target, str):
if not file_target(target).target_exists('target'):
# remove the signature and regenerate the file
raise RuntimeError(f'{target} not found')
# the sos_step target should not be checked in tasks because tasks are
# independently executable units.
elif not isinstance(
target, sos_step) and not target.target_exists('target'):
raise RuntimeError(f'{target} not found')
# create directory. This usually has been done at the step level but the task can be executed
# on a remote host where the directory does not yet exist.
ofiles = env.sos_dict['_output']
if ofiles.valid():
for ofile in ofiles:
parent_dir = ofile.parent
if not parent_dir.is_dir():
parent_dir.mkdir(parents=True, exist_ok=True)
# go to user specified workdir
if '_runtime' in sos_dict and 'workdir' in sos_dict['_runtime']:
if not os.path.isdir(
os.path.expanduser(sos_dict['_runtime']['workdir'])):
try:
os.makedirs(
os.path.expanduser(sos_dict['_runtime']['workdir']))
except Exception as e:
raise RuntimeError(
f'Failed to create workdir {sos_dict["_runtime"]["workdir"]}: {e}'
)
os.chdir(os.path.expanduser(sos_dict['_runtime']['workdir']))
# set environ ...
# we join PATH because the task might be executed on a different machine
if '_runtime' in sos_dict:
if 'env' in sos_dict['_runtime']:
for key, value in sos_dict['_runtime']['env'].items():
if 'PATH' in key and key in os.environ:
new_path = OrderedDict()
for p in value.split(os.pathsep):
new_path[p] = 1
for p in value.split(os.environ[key]):
new_path[p] = 1
os.environ[key] = os.pathsep.join(new_path.keys())
else:
os.environ[key] = value
if 'prepend_path' in sos_dict['_runtime']:
if isinstance(sos_dict['_runtime']['prepend_path'], str):
os.environ['PATH'] = sos_dict['_runtime']['prepend_path'] + \
os.pathsep + os.environ['PATH']
elif isinstance(env.sos_dict['_runtime']['prepend_path'],
Sequence):
os.environ['PATH'] = os.pathsep.join(
sos_dict['_runtime']
['prepend_path']) + os.pathsep + os.environ['PATH']
else:
raise ValueError(
f'Unacceptable input for option prepend_path: {sos_dict["_runtime"]["prepend_path"]}'
)
# step process
SoS_exec(task)
(env.logger.debug
if subtask else env.logger.info)(f'{task_id} ``completed``')
except StopInputGroup as e:
# task ignored with stop_if exception
if not e.keep_output:
clear_output()
env.sos_dict['_output'] = sos_targets([])
if e.message:
env.logger.info(e.message)
return {
'ret_code': 0,
'task': task_id,
'input': sos_targets([]),
'output': env.sos_dict['_output'],
'depends': sos_targets([]),
'shared': {}
}
except KeyboardInterrupt:
env.logger.error(f'{task_id} ``interrupted``')
raise
except subprocess.CalledProcessError as e:
return {
'ret_code': e.returncode,
'task': task_id,
'shared': {},
'exception': RuntimeError(e.stderr)
}
except ProcessKilled:
env.logger.error(f'{task_id} ``interrupted``')
raise
except Exception as e:
msg = get_traceback_msg(e)
# env.logger.error(f'{task_id} ``failed``: {msg}')
with open(
os.path.join(
os.path.expanduser('~'), '.sos', 'tasks', task_id + '.err'),
'a') as err:
err.write(msg + '\n')
return {
'ret_code': 1,
'exception': RuntimeError(msg),
'task': task_id,
'shared': {}
}
finally:
os.chdir(orig_dir)
return collect_task_result(task_id, sos_dict, signature=sig) |
def histogram2d(x, y, bins, range, weights=None):
    """
    Compute a 2D histogram assuming equally spaced bins.
    Parameters
    ----------
    x, y : `~numpy.ndarray`
        The position of the points to bin in the 2D histogram
    bins : int or iterable
        The number of bins in each dimension. If given as an integer, the same
        number of bins is used for each dimension.
    range : iterable
        The range to use in each dimension, as an iterable of value pairs, i.e.
        [(xmin, xmax), (ymin, ymax)]
    weights : `~numpy.ndarray`
        The weights of the points in the 1D histogram
    Returns
    -------
    array : `~numpy.ndarray`
        The 2D histogram array
    Raises
    ------
    TypeError
        If `bins` is neither an integer nor a pair of scalars.
    ValueError
        If any range endpoint is non-finite, a range is empty/inverted,
        or a bin count is not strictly positive.
    """
    if isinstance(bins, numbers.Integral):
        nx = ny = bins
    else:
        nx, ny = bins
    if not np.isscalar(nx) or not np.isscalar(ny):
        raise TypeError('bins should be an iterable of two integers')
    (xmin, xmax), (ymin, ymax) = range
    if not np.isfinite(xmin):
        raise ValueError("xmin should be finite")
    if not np.isfinite(xmax):
        raise ValueError("xmax should be finite")
    if not np.isfinite(ymin):
        raise ValueError("ymin should be finite")
    if not np.isfinite(ymax):
        raise ValueError("ymax should be finite")
    if xmax <= xmin:
        raise ValueError("xmax should be greater than xmin")
    if ymax <= ymin:
        # BUG FIX: this message previously (incorrectly) mentioned xmax/xmin
        raise ValueError("ymax should be greater than ymin")
    if nx <= 0:
        raise ValueError("nx should be strictly positive")
    if ny <= 0:
        raise ValueError("ny should be strictly positive")
    # Dispatch to the compiled helpers; weighted variant only when needed.
    if weights is None:
        return _histogram2d(x, y, nx, xmin, xmax, ny, ymin, ymax)
    else:
        return _histogram2d_weighted(x, y, weights, nx, xmin, xmax, ny, ymin, ymax)
Parameters
----------
x, y : `~numpy.ndarray`
The position of the points to bin in the 2D histogram
bins : int or iterable
The number of bins in each dimension. If given as an integer, the same
number of bins is used for each dimension.
range : iterable
        The range to use in each dimension, as an iterable of value pairs, i.e.
[(xmin, xmax), (ymin, ymax)]
weights : `~numpy.ndarray`
The weights of the points in the 1D histogram
Returns
-------
array : `~numpy.ndarray`
        The 2D histogram array | Below is the instruction that describes the task:
### Input:
Compute a 2D histogram assuming equally spaced bins.
Parameters
----------
x, y : `~numpy.ndarray`
The position of the points to bin in the 2D histogram
bins : int or iterable
The number of bins in each dimension. If given as an integer, the same
number of bins is used for each dimension.
range : iterable
        The range to use in each dimension, as an iterable of value pairs, i.e.
[(xmin, xmax), (ymin, ymax)]
weights : `~numpy.ndarray`
The weights of the points in the 1D histogram
Returns
-------
array : `~numpy.ndarray`
The 2D histogram array
### Response:
def histogram2d(x, y, bins, range, weights=None):
    """
    Compute a 2D histogram assuming equally spaced bins.
    Parameters
    ----------
    x, y : `~numpy.ndarray`
        The position of the points to bin in the 2D histogram
    bins : int or iterable
        The number of bins in each dimension. If given as an integer, the same
        number of bins is used for each dimension.
    range : iterable
        The range to use in each dimension, as an iterable of value pairs, i.e.
        [(xmin, xmax), (ymin, ymax)]
    weights : `~numpy.ndarray`
        The weights of the points in the 1D histogram
    Returns
    -------
    array : `~numpy.ndarray`
        The 2D histogram array
    Raises
    ------
    TypeError
        If `bins` is neither an integer nor a pair of scalars.
    ValueError
        If any range endpoint is non-finite, a range is empty/inverted,
        or a bin count is not strictly positive.
    """
    if isinstance(bins, numbers.Integral):
        nx = ny = bins
    else:
        nx, ny = bins
    if not np.isscalar(nx) or not np.isscalar(ny):
        raise TypeError('bins should be an iterable of two integers')
    (xmin, xmax), (ymin, ymax) = range
    if not np.isfinite(xmin):
        raise ValueError("xmin should be finite")
    if not np.isfinite(xmax):
        raise ValueError("xmax should be finite")
    if not np.isfinite(ymin):
        raise ValueError("ymin should be finite")
    if not np.isfinite(ymax):
        raise ValueError("ymax should be finite")
    if xmax <= xmin:
        raise ValueError("xmax should be greater than xmin")
    if ymax <= ymin:
        # BUG FIX: this message previously (incorrectly) mentioned xmax/xmin
        raise ValueError("ymax should be greater than ymin")
    if nx <= 0:
        raise ValueError("nx should be strictly positive")
    if ny <= 0:
        raise ValueError("ny should be strictly positive")
    # Dispatch to the compiled helpers; weighted variant only when needed.
    if weights is None:
        return _histogram2d(x, y, nx, xmin, xmax, ny, ymin, ymax)
    else:
        return _histogram2d_weighted(x, y, weights, nx, xmin, xmax, ny, ymin, ymax)
def release_subnet(self, cidr, direc):
    """Release a subnet back to the DB pool selected by direction.

    'in' releases from the inbound service pool; any other value goes
    to the outbound pool.
    """
    pool = self.service_in_ip if direc == 'in' else self.service_out_ip
    pool.release_subnet(cidr)
### Input:
Routine to release a subnet from the DB.
### Response:
def release_subnet(self, cidr, direc):
    """Release a subnet back to the DB pool selected by direction.

    'in' releases from the inbound service pool; any other value goes
    to the outbound pool.
    """
    pool = self.service_in_ip if direc == 'in' else self.service_out_ip
    pool.release_subnet(cidr)
def evaluate(self, s, value, insert=None):
    """Expression evaluator.
    * For expressions, returns the value of the expression.
    * For Blocks, returns a generator (or the empty list []).
    """
    # NOTE: Python 2 code (`basestring` / `unicode` below).
    # `s` is the scriptable (sprite or stage) the value is evaluated for;
    # `insert` describes the argument slot the value plugs into, if known.
    assert not isinstance(value, kurt.Script)
    # Some inserts take their argument raw (insert.unevaluated); hand the
    # value back untouched so the command can evaluate it lazily itself.
    if insert and insert.unevaluated:
        return value
    if isinstance(value, kurt.Block):
        # Hat blocks only trigger scripts; evaluating one yields nothing.
        if value.type.shape == "hat":
            return []
        if value.type not in self.COMMANDS:
            # A block without a direct implementation may define a
            # `_workaround` that rewrites it into supported blocks.
            if getattr(value.type, '_workaround', None):
                value = value.type._workaround(value)
                if not value:
                    raise kurt.BlockNotSupported(value.type)
            else:
                raise kurt.BlockNotSupported(value.type)
        f = self.COMMANDS[value.type]
        # Recursively evaluate each argument against its insert spec.
        args = [self.evaluate(s, arg, arg_insert)
                for (arg, arg_insert)
                in zip(list(value.args), value.type.inserts)]
        value = f(s, *args)
        # Commands may return (possibly nested) generators; flatten them
        # into a single generator of plain items.
        def flatten_generators(gen):
            for item in gen:
                if inspect.isgenerator(item):
                    for x in flatten_generators(item):
                        yield x
                else:
                    yield item
        if inspect.isgenerator(value):
            value = flatten_generators(value)
        # A command with no result behaves like an empty script.
        if value is None:
            value = []
    if insert:
        # Coerce the raw value to the type the insert slot expects.
        if isinstance(value, basestring):
            value = unicode(value)
        if insert.shape in ("number", "number-menu", "string"):
            # Numeric slots try a float conversion; a pure "number" slot
            # falls back to 0 on failure (string slots keep the string).
            try:
                value = float(value)
            except (TypeError, ValueError):
                if insert.shape == "number":
                    value = 0
            # Collapse whole-number floats to ints (Scratch semantics).
            if isinstance(value, float) and value == int(value):
                value = int(value)
        if insert.kind in ("spriteOrStage", "spriteOrMouse", "stageOrThis",
                           "spriteOnly", "touching"):
            # Resolve sprite/stage names to objects; the special tokens
            # "mouse-pointer" and "edge" stay as strings.
            if value not in ("mouse-pointer", "edge"):
                value = (self.project.stage if value == "Stage"
                         else self.project.get_sprite(value))
        elif insert.kind == "var":
            # Sprite-local variables shadow project-global ones.
            if value in s.variables:
                value = s.variables[value]
            else:
                value = s.project.variables[value]
        elif insert.kind == "list":
            # Sprite-local lists shadow project-global ones.
            if value in s.lists:
                value = s.lists[value]
            else:
                value = s.project.lists[value]
        elif insert.kind == "sound":
            # Resolve a sound name to the sprite's sound object.
            for sound in s.sounds:
                if sound.name == value:
                    value = sound
                    break
    return value | Expression evaluator.
* For expressions, returns the value of the expression.
    * For Blocks, returns a generator (or the empty list []). | Below is the instruction that describes the task:
### Input:
Expression evaluator.
* For expressions, returns the value of the expression.
* For Blocks, returns a generator (or the empty list []).
### Response:
def evaluate(self, s, value, insert=None):
    """Expression evaluator.
    * For expressions, returns the value of the expression.
    * For Blocks, returns a generator (or the empty list []).
    """
    # NOTE: Python 2 code (`basestring` / `unicode` below).
    # `s` is the scriptable (sprite or stage) the value is evaluated for;
    # `insert` describes the argument slot the value plugs into, if known.
    assert not isinstance(value, kurt.Script)
    # Some inserts take their argument raw (insert.unevaluated); hand the
    # value back untouched so the command can evaluate it lazily itself.
    if insert and insert.unevaluated:
        return value
    if isinstance(value, kurt.Block):
        # Hat blocks only trigger scripts; evaluating one yields nothing.
        if value.type.shape == "hat":
            return []
        if value.type not in self.COMMANDS:
            # A block without a direct implementation may define a
            # `_workaround` that rewrites it into supported blocks.
            if getattr(value.type, '_workaround', None):
                value = value.type._workaround(value)
                if not value:
                    raise kurt.BlockNotSupported(value.type)
            else:
                raise kurt.BlockNotSupported(value.type)
        f = self.COMMANDS[value.type]
        # Recursively evaluate each argument against its insert spec.
        args = [self.evaluate(s, arg, arg_insert)
                for (arg, arg_insert)
                in zip(list(value.args), value.type.inserts)]
        value = f(s, *args)
        # Commands may return (possibly nested) generators; flatten them
        # into a single generator of plain items.
        def flatten_generators(gen):
            for item in gen:
                if inspect.isgenerator(item):
                    for x in flatten_generators(item):
                        yield x
                else:
                    yield item
        if inspect.isgenerator(value):
            value = flatten_generators(value)
        # A command with no result behaves like an empty script.
        if value is None:
            value = []
    if insert:
        # Coerce the raw value to the type the insert slot expects.
        if isinstance(value, basestring):
            value = unicode(value)
        if insert.shape in ("number", "number-menu", "string"):
            # Numeric slots try a float conversion; a pure "number" slot
            # falls back to 0 on failure (string slots keep the string).
            try:
                value = float(value)
            except (TypeError, ValueError):
                if insert.shape == "number":
                    value = 0
            # Collapse whole-number floats to ints (Scratch semantics).
            if isinstance(value, float) and value == int(value):
                value = int(value)
        if insert.kind in ("spriteOrStage", "spriteOrMouse", "stageOrThis",
                           "spriteOnly", "touching"):
            # Resolve sprite/stage names to objects; the special tokens
            # "mouse-pointer" and "edge" stay as strings.
            if value not in ("mouse-pointer", "edge"):
                value = (self.project.stage if value == "Stage"
                         else self.project.get_sprite(value))
        elif insert.kind == "var":
            # Sprite-local variables shadow project-global ones.
            if value in s.variables:
                value = s.variables[value]
            else:
                value = s.project.variables[value]
        elif insert.kind == "list":
            # Sprite-local lists shadow project-global ones.
            if value in s.lists:
                value = s.lists[value]
            else:
                value = s.project.lists[value]
        elif insert.kind == "sound":
            # Resolve a sound name to the sprite's sound object.
            for sound in s.sounds:
                if sound.name == value:
                    value = sound
                    break
    return value |
def replace_grid(self, updated_grid):
    """
    Overwrite every cell of the current grid with the corresponding
    value from updated_grid: EMPTY cells are cleared, all others filled.
    """
    for col in range(self.get_grid_width()):
        for row in range(self.get_grid_height()):
            setter = (self.set_empty if updated_grid[row][col] == EMPTY
                      else self.set_full)
            setter(row, col)
### Input:
replace all cells in current grid with updated grid
### Response:
def replace_grid(self, updated_grid):
    """
    Overwrite every cell of the current grid with the corresponding
    value from updated_grid: EMPTY cells are cleared, all others filled.
    """
    for col in range(self.get_grid_width()):
        for row in range(self.get_grid_height()):
            setter = (self.set_empty if updated_grid[row][col] == EMPTY
                      else self.set_full)
            setter(row, col)
def _DoubleDecoder():
    """Build a decoder for a double (64-bit IEEE-754, little-endian) field.

    Works around a struct.unpack bug for not-a-number: some Python
    versions decode certain NaN bit patterns as inf/-inf, so NaN is
    detected by hand and returned explicitly.
    """
    unpack = struct.unpack

    def InnerDecode(buffer, pos):
        # Layout (little-endian): bit 1 sign, bits 2-12 exponent,
        # bits 13-64 significand.
        end = pos + 8
        raw = buffer[pos:end]
        # NaN means: all exponent bits set AND at least one significand
        # bit set. The `in b'\x7F\xFF'` trick tests the top byte against
        # either sign variant via substring membership.
        is_nan = ((raw[7:8] in b'\x7F\xFF')
                  and (raw[6:7] >= b'\xF0')
                  and (raw[0:7] != b'\x00\x00\x00\x00\x00\x00\xF0'))
        if is_nan:
            return (_NAN, end)
        # struct.error is deliberately left for the caller to convert to
        # _DecodeError, avoiding a try/except per decoded value.
        return (unpack('<d', raw)[0], end)

    return _SimpleDecoder(wire_format.WIRETYPE_FIXED64, InnerDecode)
    This code works around a bug in struct.unpack for not-a-number. | Below is the instruction that describes the task:
### Input:
Returns a decoder for a double field.
This code works around a bug in struct.unpack for not-a-number.
### Response:
def _DoubleDecoder():
    """Build a decoder for a double (64-bit IEEE-754, little-endian) field.

    Works around a struct.unpack bug for not-a-number: some Python
    versions decode certain NaN bit patterns as inf/-inf, so NaN is
    detected by hand and returned explicitly.
    """
    unpack = struct.unpack

    def InnerDecode(buffer, pos):
        # Layout (little-endian): bit 1 sign, bits 2-12 exponent,
        # bits 13-64 significand.
        end = pos + 8
        raw = buffer[pos:end]
        # NaN means: all exponent bits set AND at least one significand
        # bit set. The `in b'\x7F\xFF'` trick tests the top byte against
        # either sign variant via substring membership.
        is_nan = ((raw[7:8] in b'\x7F\xFF')
                  and (raw[6:7] >= b'\xF0')
                  and (raw[0:7] != b'\x00\x00\x00\x00\x00\x00\xF0'))
        if is_nan:
            return (_NAN, end)
        # struct.error is deliberately left for the caller to convert to
        # _DecodeError, avoiding a try/except per decoded value.
        return (unpack('<d', raw)[0], end)

    return _SimpleDecoder(wire_format.WIRETYPE_FIXED64, InnerDecode)
def mean_return_by_quantile(factor_data,
                            by_date=False,
                            by_group=False,
                            demeaned=True,
                            group_adjust=False):
    """
    Computes mean returns for factor quantiles across
    provided forward returns columns.
    Parameters
    ----------
    factor_data : pd.DataFrame - MultiIndex
        A MultiIndex DataFrame indexed by date (level 0) and asset (level 1),
        containing the values for a single alpha factor, forward returns for
        each period, the factor quantile/bin that factor value belongs to, and
        (optionally) the group the asset belongs to.
        - See full explanation in utils.get_clean_factor_and_forward_returns
    by_date : bool
        If True, compute quantile bucket returns separately for each date.
    by_group : bool
        If True, compute quantile bucket returns separately for each group.
    demeaned : bool
        Compute demeaned mean returns (long short portfolio)
    group_adjust : bool
        Returns demeaning will occur on the group level.
    Returns
    -------
    mean_ret : pd.DataFrame
        Mean period wise returns by specified factor quantile.
    std_error_ret : pd.DataFrame
        Standard error of returns by specified quantile.
    """
    # Optionally demean forward returns first, so the quantile averages
    # measure performance relative to the (group-wise) cross-sectional mean.
    if group_adjust:
        grouper = [factor_data.index.get_level_values('date')] + ['group']
        factor_data = utils.demean_forward_returns(factor_data, grouper)
    elif demeaned:
        factor_data = utils.demean_forward_returns(factor_data)
    else:
        # Copy so the aggregation below never mutates the caller's frame.
        factor_data = factor_data.copy()
    # First pass: mean/std/count of every forward-returns column per
    # (quantile, date[, group]) bucket.
    grouper = ['factor_quantile', factor_data.index.get_level_values('date')]
    if by_group:
        grouper.append('group')
    group_stats = factor_data.groupby(grouper)[
        utils.get_forward_returns_columns(factor_data.columns)] \
        .agg(['mean', 'std', 'count'])
    # Pull just the 'mean' statistic out of the (column, stat) MultiIndex.
    mean_ret = group_stats.T.xs('mean', level=1).T
    if not by_date:
        # Collapse the date level: average the per-date quantile means down
        # to a single row per quantile (and group).
        grouper = [mean_ret.index.get_level_values('factor_quantile')]
        if by_group:
            grouper.append(mean_ret.index.get_level_values('group'))
        group_stats = mean_ret.groupby(grouper)\
            .agg(['mean', 'std', 'count'])
        mean_ret = group_stats.T.xs('mean', level=1).T
    # Standard error of the mean: std / sqrt(count) of the last aggregation.
    std_error_ret = group_stats.T.xs('std', level=1).T \
        / np.sqrt(group_stats.T.xs('count', level=1).T)
    return mean_ret, std_error_ret | Computes mean returns for factor quantiles across
provided forward returns columns.
Parameters
----------
factor_data : pd.DataFrame - MultiIndex
A MultiIndex DataFrame indexed by date (level 0) and asset (level 1),
containing the values for a single alpha factor, forward returns for
each period, the factor quantile/bin that factor value belongs to, and
(optionally) the group the asset belongs to.
- See full explanation in utils.get_clean_factor_and_forward_returns
by_date : bool
If True, compute quantile bucket returns separately for each date.
by_group : bool
If True, compute quantile bucket returns separately for each group.
demeaned : bool
Compute demeaned mean returns (long short portfolio)
group_adjust : bool
Returns demeaning will occur on the group level.
Returns
-------
mean_ret : pd.DataFrame
Mean period wise returns by specified factor quantile.
std_error_ret : pd.DataFrame
        Standard error of returns by specified quantile. | Below is the instruction that describes the task:
### Input:
Computes mean returns for factor quantiles across
provided forward returns columns.
Parameters
----------
factor_data : pd.DataFrame - MultiIndex
A MultiIndex DataFrame indexed by date (level 0) and asset (level 1),
containing the values for a single alpha factor, forward returns for
each period, the factor quantile/bin that factor value belongs to, and
(optionally) the group the asset belongs to.
- See full explanation in utils.get_clean_factor_and_forward_returns
by_date : bool
If True, compute quantile bucket returns separately for each date.
by_group : bool
If True, compute quantile bucket returns separately for each group.
demeaned : bool
Compute demeaned mean returns (long short portfolio)
group_adjust : bool
Returns demeaning will occur on the group level.
Returns
-------
mean_ret : pd.DataFrame
Mean period wise returns by specified factor quantile.
std_error_ret : pd.DataFrame
Standard error of returns by specified quantile.
### Response:
def mean_return_by_quantile(factor_data,
                            by_date=False,
                            by_group=False,
                            demeaned=True,
                            group_adjust=False):
    """
    Computes mean returns for factor quantiles across
    provided forward returns columns.
    Parameters
    ----------
    factor_data : pd.DataFrame - MultiIndex
        A MultiIndex DataFrame indexed by date (level 0) and asset (level 1),
        containing the values for a single alpha factor, forward returns for
        each period, the factor quantile/bin that factor value belongs to, and
        (optionally) the group the asset belongs to.
        - See full explanation in utils.get_clean_factor_and_forward_returns
    by_date : bool
        If True, compute quantile bucket returns separately for each date.
    by_group : bool
        If True, compute quantile bucket returns separately for each group.
    demeaned : bool
        Compute demeaned mean returns (long short portfolio)
    group_adjust : bool
        Returns demeaning will occur on the group level.
    Returns
    -------
    mean_ret : pd.DataFrame
        Mean period wise returns by specified factor quantile.
    std_error_ret : pd.DataFrame
        Standard error of returns by specified quantile.
    """
    # Optionally demean forward returns first, so the quantile averages
    # measure performance relative to the (group-wise) cross-sectional mean.
    if group_adjust:
        grouper = [factor_data.index.get_level_values('date')] + ['group']
        factor_data = utils.demean_forward_returns(factor_data, grouper)
    elif demeaned:
        factor_data = utils.demean_forward_returns(factor_data)
    else:
        # Copy so the aggregation below never mutates the caller's frame.
        factor_data = factor_data.copy()
    # First pass: mean/std/count of every forward-returns column per
    # (quantile, date[, group]) bucket.
    grouper = ['factor_quantile', factor_data.index.get_level_values('date')]
    if by_group:
        grouper.append('group')
    group_stats = factor_data.groupby(grouper)[
        utils.get_forward_returns_columns(factor_data.columns)] \
        .agg(['mean', 'std', 'count'])
    # Pull just the 'mean' statistic out of the (column, stat) MultiIndex.
    mean_ret = group_stats.T.xs('mean', level=1).T
    if not by_date:
        # Collapse the date level: average the per-date quantile means down
        # to a single row per quantile (and group).
        grouper = [mean_ret.index.get_level_values('factor_quantile')]
        if by_group:
            grouper.append(mean_ret.index.get_level_values('group'))
        group_stats = mean_ret.groupby(grouper)\
            .agg(['mean', 'std', 'count'])
        mean_ret = group_stats.T.xs('mean', level=1).T
    # Standard error of the mean: std / sqrt(count) of the last aggregation.
    std_error_ret = group_stats.T.xs('std', level=1).T \
        / np.sqrt(group_stats.T.xs('count', level=1).T)
    return mean_ret, std_error_ret |
def metadata(sceneid, pmin=2, pmax=98, **kwargs):
    """
    Retrieve image bounds and band statistics.
    Parameters
    ----------
    sceneid : str
        Landsat sceneid. For scenes after May 2017,
        sceneid has to be LANDSAT_PRODUCT_ID.
    pmin : int, optional, (default: 2)
        Histogram minimum cut.
    pmax : int, optional, (default: 98)
        Histogram maximum cut.
    kwargs : optional
        These are passed to 'rio_tiler.landsat8._landsat_stats'
        e.g: histogram_bins=20, dst_crs='epsg:4326'
    Returns
    -------
    out : dict
        Dictionary with bounds and bands statistics.
    """
    scene_params = _landsat_parse_scene_id(sceneid)
    # The MTL file carries per-scene metadata needed by the stats worker.
    meta_data = _landsat_get_mtl(sceneid).get("L1_METADATA_FILE")
    path_prefix = "{}/{}".format(LANDSAT_BUCKET, scene_params["key"])
    info = {"sceneid": sceneid}
    # Bind everything except the band name so each band maps to one call.
    _stats_worker = partial(
        _landsat_stats,
        address_prefix=path_prefix,
        metadata=meta_data,
        overview_level=1,
        percentiles=(pmin, pmax),
        **kwargs
    )
    # Compute stats for all bands concurrently (network/IO-bound work).
    with futures.ThreadPoolExecutor(max_workers=MAX_THREADS) as executor:
        responses = list(executor.map(_stats_worker, LANDSAT_BANDS))
    # Scene bounds are taken from band "8".
    # NOTE(review): presumably any band's bounds would do here - confirm.
    info["bounds"] = [
        r["bounds"] for b, r in zip(LANDSAT_BANDS, responses) if b == "8"
    ][0]
    # NOTE(review): keys are the band ids (`b`), not the inner stats keys
    # (`k`) - this assumes each worker returns a single-entry statistics
    # dict per band; verify against _landsat_stats.
    info["statistics"] = {
        b: v
        for b, d in zip(LANDSAT_BANDS, responses)
        for k, v in d["statistics"].items()
    }
    return info | Retrieve image bounds and band statistics.
Attributes
----------
sceneid : str
Landsat sceneid. For scenes after May 2017,
sceneid have to be LANDSAT_PRODUCT_ID.
pmin : int, optional, (default: 2)
Histogram minimum cut.
pmax : int, optional, (default: 98)
Histogram maximum cut.
kwargs : optional
These are passed to 'rio_tiler.landsat8._landsat_stats'
e.g: histogram_bins=20, dst_crs='epsg:4326'
Returns
-------
out : dict
        Dictionary with bounds and bands statistics. | Below is the instruction that describes the task:
### Input:
Retrieve image bounds and band statistics.
Attributes
----------
sceneid : str
Landsat sceneid. For scenes after May 2017,
sceneid have to be LANDSAT_PRODUCT_ID.
pmin : int, optional, (default: 2)
Histogram minimum cut.
pmax : int, optional, (default: 98)
Histogram maximum cut.
kwargs : optional
These are passed to 'rio_tiler.landsat8._landsat_stats'
e.g: histogram_bins=20, dst_crs='epsg:4326'
Returns
-------
out : dict
Dictionary with bounds and bands statistics.
### Response:
def metadata(sceneid, pmin=2, pmax=98, **kwargs):
    """
    Retrieve image bounds and band statistics.
    Parameters
    ----------
    sceneid : str
        Landsat sceneid. For scenes after May 2017,
        sceneid has to be LANDSAT_PRODUCT_ID.
    pmin : int, optional, (default: 2)
        Histogram minimum cut.
    pmax : int, optional, (default: 98)
        Histogram maximum cut.
    kwargs : optional
        These are passed to 'rio_tiler.landsat8._landsat_stats'
        e.g: histogram_bins=20, dst_crs='epsg:4326'
    Returns
    -------
    out : dict
        Dictionary with bounds and bands statistics.
    """
    scene_params = _landsat_parse_scene_id(sceneid)
    # The MTL file carries per-scene metadata needed by the stats worker.
    meta_data = _landsat_get_mtl(sceneid).get("L1_METADATA_FILE")
    path_prefix = "{}/{}".format(LANDSAT_BUCKET, scene_params["key"])
    info = {"sceneid": sceneid}
    # Bind everything except the band name so each band maps to one call.
    _stats_worker = partial(
        _landsat_stats,
        address_prefix=path_prefix,
        metadata=meta_data,
        overview_level=1,
        percentiles=(pmin, pmax),
        **kwargs
    )
    # Compute stats for all bands concurrently (network/IO-bound work).
    with futures.ThreadPoolExecutor(max_workers=MAX_THREADS) as executor:
        responses = list(executor.map(_stats_worker, LANDSAT_BANDS))
    # Scene bounds are taken from band "8".
    # NOTE(review): presumably any band's bounds would do here - confirm.
    info["bounds"] = [
        r["bounds"] for b, r in zip(LANDSAT_BANDS, responses) if b == "8"
    ][0]
    # NOTE(review): keys are the band ids (`b`), not the inner stats keys
    # (`k`) - this assumes each worker returns a single-entry statistics
    # dict per band; verify against _landsat_stats.
    info["statistics"] = {
        b: v
        for b, d in zip(LANDSAT_BANDS, responses)
        for k, v in d["statistics"].items()
    }
    return info |
def remove_phenotype(self, ind_obj, phenotypes=None):
    """Detach phenotype terms from an individual.

    When `phenotypes` is None, every term linked to the individual is
    deleted in bulk; otherwise only the listed phenotype ids are
    removed. Afterwards the HPO list of every case the individual
    belongs to is refreshed.
    """
    if phenotypes is None:
        # Bulk delete: wipe all phenotype rows tied to this individual.
        logger.info("delete all phenotypes related to %s", ind_obj.ind_id)
        self.query(PhenotypeTerm).filter_by(ind_id=ind_obj.id).delete()
    else:
        doomed = [term for term in ind_obj.phenotypes
                  if term.phenotype_id in phenotypes]
        for term in doomed:
            logger.info("delete phenotype: %s from %s",
                        term.phenotype_id, ind_obj.ind_id)
            self.session.delete(term)
    logger.debug('persist removals')
    self.save()
    for case_obj in ind_obj.cases:
        self.update_hpolist(case_obj)
### Input:
Remove multiple phenotypes from an individual.
### Response:
def remove_phenotype(self, ind_obj, phenotypes=None):
    """Detach phenotype terms from an individual.

    When `phenotypes` is None, every term linked to the individual is
    deleted in bulk; otherwise only the listed phenotype ids are
    removed. Afterwards the HPO list of every case the individual
    belongs to is refreshed.
    """
    if phenotypes is None:
        # Bulk delete: wipe all phenotype rows tied to this individual.
        logger.info("delete all phenotypes related to %s", ind_obj.ind_id)
        self.query(PhenotypeTerm).filter_by(ind_id=ind_obj.id).delete()
    else:
        doomed = [term for term in ind_obj.phenotypes
                  if term.phenotype_id in phenotypes]
        for term in doomed:
            logger.info("delete phenotype: %s from %s",
                        term.phenotype_id, ind_obj.ind_id)
            self.session.delete(term)
    logger.debug('persist removals')
    self.save()
    for case_obj in ind_obj.cases:
        self.update_hpolist(case_obj)
def get_jwt_data_from_app_context():
    """
    Fetches a dict of jwt token data from the top of the flask app's context
    """
    # NOTE: relies on flask's private _app_ctx_stack API.
    top = flask._app_ctx_stack.top
    data = getattr(top, 'jwt_data', None)
    # require_condition raises a PraetorianError when no payload is present.
    PraetorianError.require_condition(
        data is not None,
        """
        No jwt_data found in app context.
        Make sure @auth_required decorator is specified *first* for route
        """,
    )
    return data
### Input:
Fetches a dict of jwt token data from the top of the flask app's context
### Response:
def get_jwt_data_from_app_context():
    """
    Fetches a dict of jwt token data from the top of the flask app's context
    """
    # NOTE: relies on flask's private _app_ctx_stack API.
    top = flask._app_ctx_stack.top
    data = getattr(top, 'jwt_data', None)
    # require_condition raises a PraetorianError when no payload is present.
    PraetorianError.require_condition(
        data is not None,
        """
        No jwt_data found in app context.
        Make sure @auth_required decorator is specified *first* for route
        """,
    )
    return data
def enqueue_mod(self, dn, mod):
    """Queue an LDAP modification entry for the given DN.

    Arguments:
    dn -- the distinguished name of the object to modify
    mod -- an ldap modification entry to enqueue
    """
    pending = self.__pending_mod_dn__
    queue = self.__mod_queue__
    if dn not in pending:
        # First modification for this DN: mark it for update and
        # start its modification queue.
        pending.append(dn)
        queue[dn] = []
    queue[dn].append(mod)
Arguments:
dn -- the distinguished name of the object to modify
    mod -- an ldap modification entry to enqueue | Below is the instruction that describes the task:
### Input:
Enqueue a LDAP modification.
Arguments:
dn -- the distinguished name of the object to modify
mod -- an ldap modfication entry to enqueue
### Response:
def enqueue_mod(self, dn, mod):
    """Queue an LDAP modification entry for the given DN.

    Arguments:
    dn -- the distinguished name of the object to modify
    mod -- an ldap modification entry to enqueue
    """
    pending = self.__pending_mod_dn__
    queue = self.__mod_queue__
    if dn not in pending:
        # First modification for this DN: mark it for update and
        # start its modification queue.
        pending.append(dn)
        queue[dn] = []
    queue[dn].append(mod)
def add_output(self, out_name, type_or_serialize=None, **kwargs):
    """Register `out_name` as a declared output of the engine.

    `type_or_serialize` may be a GenericType instance, a bare callable
    (wrapped as the serializer of a new GenericType), or None for a
    default GenericType.
    """
    if out_name not in self.engine.all_outputs():
        raise ValueError("'%s' is not generated by the engine %s" % (out_name, self.engine.all_outputs()))
    serializer = type_or_serialize
    if serializer is None:
        serializer = GenericType()
    elif not isinstance(serializer, GenericType):
        if callable(serializer):
            serializer = GenericType(serialize=serializer)
        else:
            raise ValueError("the given 'type_or_serialize' is invalid")
    # register outputs
    self._outputs[out_name] = {
        'serializer': serializer,
        'parameters': kwargs if kwargs else {}
    }
### Input:
Declare an output
### Response:
def add_output(self, out_name, type_or_serialize=None, **kwargs):
    """Register `out_name` as a declared output of the engine.

    `type_or_serialize` may be a GenericType instance, a bare callable
    (wrapped as the serializer of a new GenericType), or None for a
    default GenericType.
    """
    if out_name not in self.engine.all_outputs():
        raise ValueError("'%s' is not generated by the engine %s" % (out_name, self.engine.all_outputs()))
    serializer = type_or_serialize
    if serializer is None:
        serializer = GenericType()
    elif not isinstance(serializer, GenericType):
        if callable(serializer):
            serializer = GenericType(serialize=serializer)
        else:
            raise ValueError("the given 'type_or_serialize' is invalid")
    # register outputs
    self._outputs[out_name] = {
        'serializer': serializer,
        'parameters': kwargs if kwargs else {}
    }
def open_args(subparsers):
    """
    The `mp open` command will open a resource with the system application, such as Excel or OpenOffice
    """
    parser = subparsers.add_parser(
        'open',
        # BUG FIX: user-visible help text said 'resoruce'
        help='open a CSV resource with a system application',
        description=open_args.__doc__,
        formatter_class=argparse.RawDescriptionHelpFormatter,
    )
    # Dispatch target invoked by the top-level CLI driver.
    parser.set_defaults(run_command=open_cmd)
    parser.add_argument('metatabfile', nargs='?',
                        help="Path or URL to a metatab file. If not provided, defaults to 'metadata.csv' ")
    return parser
### Input:
The `mp open` command will open a resource with the system application, such as Excel or OpenOffice
### Response:
def open_args(subparsers):
    """
    The `mp open` command will open a resource with the system application, such as Excel or OpenOffice
    """
    parser = subparsers.add_parser(
        'open',
        # BUG FIX: user-visible help text said 'resoruce'
        help='open a CSV resource with a system application',
        description=open_args.__doc__,
        formatter_class=argparse.RawDescriptionHelpFormatter,
    )
    # Dispatch target invoked by the top-level CLI driver.
    parser.set_defaults(run_command=open_cmd)
    parser.add_argument('metatabfile', nargs='?',
                        help="Path or URL to a metatab file. If not provided, defaults to 'metadata.csv' ")
    return parser
def height(self, value):
    """
    Set the height of the vowel.
    :param str value: the value to be set
    :raises ValueError: if value is neither None nor a recognized height
    """
    # `value not in` is the idiomatic form of the original `not value in`.
    if (value is not None) and (value not in DG_V_HEIGHT):
        raise ValueError("Unrecognized value for height: '%s'" % value)
    self.__height = value
    :param str value: the value to be set | Below is the instruction that describes the task:
### Input:
Set the height of the vowel.
:param str value: the value to be set
### Response:
def height(self, value):
    """
    Set the height of the vowel.
    :param str value: the value to be set
    :raises ValueError: if value is neither None nor a recognized height
    """
    # `value not in` is the idiomatic form of the original `not value in`.
    if (value is not None) and (value not in DG_V_HEIGHT):
        raise ValueError("Unrecognized value for height: '%s'" % value)
    self.__height = value
def regenerate(location='http://www.iana.org/assignments/language-subtag-registry',
               filename=None, default_encoding='utf-8'):
    """
    Generate the languages Python module.
    """
    # NOTE: Python 2 code (urllib2; u"" literal in the generated lines).
    # Matches parenthesised notes so they can be stripped from descriptions.
    paren = re.compile('\([^)]*\)')
    # Get the language list.
    data = urllib2.urlopen(location)
    # Prefer the charset advertised in the Content-Type header, if any.
    if ('content-type' in data.headers and
        'charset=' in data.headers['content-type']):
        encoding = data.headers['content-type'].split('charset=')[-1]
    else:
        encoding = default_encoding
    content = data.read().decode(encoding)
    languages = []
    info = {}
    p = None
    # The registry is a sequence of "Key: Value" records separated by "%%"
    # lines. A line without a colon continues the previous field, hence the
    # stateful `p` (last partition result) carried across iterations.
    for line in content.splitlines():
        if line == '%%':
            # Record separator: keep only records describing languages.
            if 'Type' in info and info['Type'] == 'language':
                languages.append(info)
            info = {}
        elif ':' not in line and p:
            # Continuation line: re-build the last field with it appended.
            info[p[0]] = paren.sub('', p[2]+line).strip()
        else:
            p = line.partition(':')
            if not p[0] in info: # Keep the first description as it should be the most common
                info[p[0]] = paren.sub('', p[2]).strip()
    # One generated tuple line per language: ("subtag", _(u"Description")),
    languages_lines = map(lambda x:'("%s", _(u"%s")),'%(x['Subtag'],x['Description']), languages)
    # Generate and save the file.
    if not filename:
        filename = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'languages.py')
    # TODO: first make a backup of the file if it exists already.
    f = codecs.open(filename, 'w', 'utf-8')
    f.write(TEMPLATE % {
        'languages': '\n '.join(languages_lines),
    })
    f.close() | Generate the languages Python module. | Below is the the instruction that describes the task:
### Input:
Generate the languages Python module.
### Response:
def regenerate(location='http://www.iana.org/assignments/language-subtag-registry',
               filename=None, default_encoding='utf-8'):
    """
    Generate the languages Python module.
    """
    # NOTE: Python 2 code (urllib2; u"" literal in the generated lines).
    # Matches parenthesised notes so they can be stripped from descriptions.
    paren = re.compile('\([^)]*\)')
    # Get the language list.
    data = urllib2.urlopen(location)
    # Prefer the charset advertised in the Content-Type header, if any.
    if ('content-type' in data.headers and
        'charset=' in data.headers['content-type']):
        encoding = data.headers['content-type'].split('charset=')[-1]
    else:
        encoding = default_encoding
    content = data.read().decode(encoding)
    languages = []
    info = {}
    p = None
    # The registry is a sequence of "Key: Value" records separated by "%%"
    # lines. A line without a colon continues the previous field, hence the
    # stateful `p` (last partition result) carried across iterations.
    for line in content.splitlines():
        if line == '%%':
            # Record separator: keep only records describing languages.
            if 'Type' in info and info['Type'] == 'language':
                languages.append(info)
            info = {}
        elif ':' not in line and p:
            # Continuation line: re-build the last field with it appended.
            info[p[0]] = paren.sub('', p[2]+line).strip()
        else:
            p = line.partition(':')
            if not p[0] in info: # Keep the first description as it should be the most common
                info[p[0]] = paren.sub('', p[2]).strip()
    # One generated tuple line per language: ("subtag", _(u"Description")),
    languages_lines = map(lambda x:'("%s", _(u"%s")),'%(x['Subtag'],x['Description']), languages)
    # Generate and save the file.
    if not filename:
        filename = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'languages.py')
    # TODO: first make a backup of the file if it exists already.
    f = codecs.open(filename, 'w', 'utf-8')
    f.write(TEMPLATE % {
        'languages': '\n '.join(languages_lines),
    })
    f.close() |
def find(self, node_label):
    """Return the label of the set (root) that contains `node_label`.

    Applies path compression: every node visited on the way up is
    re-pointed directly at the root.
    """
    trail = []
    node = node_label
    # Non-negative forest entries are parent links; roots hold a
    # negative marker.
    while self.__forest[node] >= 0:
        trail.append(node)
        node = self.__forest[node]
    # `node` is now the root; compress the walked path.
    for visited in trail:
        self.__forest[visited] = node
    return node
    Returns the set label. | Below is the instruction that describes the task:
### Input:
Finds the set containing the node_label.
Returns the set label.
### Response:
def find(self, node_label):
    """Return the label of the set (root) that contains `node_label`.

    Applies path compression: every node visited on the way up is
    re-pointed directly at the root.
    """
    trail = []
    node = node_label
    # Non-negative forest entries are parent links; roots hold a
    # negative marker.
    while self.__forest[node] >= 0:
        trail.append(node)
        node = self.__forest[node]
    # `node` is now the root; compress the walked path.
    for visited in trail:
        self.__forest[visited] = node
    return node
def wait_until_element_value_is(self, locator, expected, strip=False, timeout=None):
    """Waits until the element identified by `locator` has exactly the
    expected value. You might want to use `Element Value Should Be` instead.
    | *Argument* | *Description* | *Example* |
    | locator | Selenium 2 element locator | id=my_id |
    | expected | expected value | My Name Is Slim Shady |
    | strip | boolean, strip the field value before comparing | ${True} / ${False} |
    | timeout | maximum time to wait before raising an element not found error (default=None) | 5s |"""
    message = "Waiting for '%s' value to be '%s'" % (locator, expected)
    self._info(message)
    # timeout is passed twice: once for the retry loop, once to the checker.
    self._wait_until_no_error(timeout, self._check_element_value_exp, False,
                              locator, expected, strip, timeout)
expected value. You might want to use `Element Value Should Be` instead.
| *Argument* | *Description* | *Example* |
| locator | Selenium 2 element locator | id=my_id |
| expected | expected value | My Name Is Slim Shady |
| strip | boolean, determines whether it should strip the value of the field before comparison | ${True} / ${False} |
| timeout | maximum time to wait before the function throws an element not found error (default=None) | 5s | | Below is the the instruction that describes the task:
### Input:
Waits until the element identified by `locator` value is exactly the
expected value. You might want to use `Element Value Should Be` instead.
| *Argument* | *Description* | *Example* |
| locator | Selenium 2 element locator | id=my_id |
| expected | expected value | My Name Is Slim Shady |
| strip | boolean, determines whether it should strip the value of the field before comparison | ${True} / ${False} |
| timeout | maximum time to wait before the function throws an element not found error (default=None) | 5s |
### Response:
def wait_until_element_value_is(self, locator, expected, strip=False, timeout=None):
"""Waits until the element identified by `locator` value is exactly the
expected value. You might want to use `Element Value Should Be` instead.
| *Argument* | *Description* | *Example* |
| locator | Selenium 2 element locator | id=my_id |
| expected | expected value | My Name Is Slim Shady |
| strip | boolean, determines whether it should strip the value of the field before comparison | ${True} / ${False} |
| timeout | maximum time to wait before the function throws an element not found error (default=None) | 5s |"""
self._info("Waiting for '%s' value to be '%s'" % (locator, expected))
self._wait_until_no_error(timeout, self._check_element_value_exp, False, locator, expected, strip, timeout) |
def determine_file_type(self, z):
    """Infer the document type from the archive's ``mimetype`` entry and
    store it on ``self.type`` (looked up through the MIMEMAP table)."""
    declared = z.read('mimetype').decode('utf-8').strip()
    self.type = MIMEMAP[declared]
### Input:
Determine file type.
### Response:
def determine_file_type(self, z):
"""Determine file type."""
mimetype = z.read('mimetype').decode('utf-8').strip()
self.type = MIMEMAP[mimetype] |
def joint(letters, marks):
    """ joint the letters with the marks
    the length ot letters and marks must be equal
    return word
    @param letters: the word letters
    @type letters: unicode
    @param marks: the word marks (exactly one mark per letter)
    @type marks: unicode
    @return: the voweled word, or "" when the lengths differ
    @rtype: unicode
    """
    # The length of letters and marks must be equal
    if len(letters) != len(marks):
        return ""
    word = []
    for letter, mark in zip(letters, marks):
        if letter == SHADDA:
            # A shadda supersedes an immediately preceding short vowel:
            # drop that vowel before appending the shadda itself.
            # (Guarding on a non-empty buffer also fixes the original
            # crash when the word started with a shadda.)
            if word and word[-1] in HARAKAT:
                word.pop()
        word.append(letter)
        # NOT_DEF_HARAKA means "no mark given": emit the letter alone.
        if mark != NOT_DEF_HARAKA:
            word.append(mark)
    return ''.join(word)
the length ot letters and marks must be equal
return word
@param letters: the word letters
@type letters: unicode
@param marks: the word marks
@type marks: unicode
@return: word
@rtype: unicode | Below is the the instruction that describes the task:
### Input:
joint the letters with the marks
the length ot letters and marks must be equal
return word
@param letters: the word letters
@type letters: unicode
@param marks: the word marks
@type marks: unicode
@return: word
@rtype: unicode
### Response:
def joint(letters, marks):
""" joint the letters with the marks
the length ot letters and marks must be equal
return word
@param letters: the word letters
@type letters: unicode
@param marks: the word marks
@type marks: unicode
@return: word
@rtype: unicode
"""
# The length ot letters and marks must be equal
if len(letters) != len(marks):
return ""
stack_letter = stack.Stack(letters)
stack_letter.items.reverse()
stack_mark = stack.Stack(marks)
stack_mark.items.reverse()
word_stack = stack.Stack()
last_letter = stack_letter.pop()
last_mark = stack_mark.pop()
vowels = HARAKAT
while last_letter != None and last_mark != None:
if last_letter == SHADDA:
top = word_stack.pop()
if top not in vowels:
word_stack.push(top)
word_stack.push(last_letter)
if last_mark != NOT_DEF_HARAKA:
word_stack.push(last_mark)
else:
word_stack.push(last_letter)
if last_mark != NOT_DEF_HARAKA:
word_stack.push(last_mark)
last_letter = stack_letter.pop()
last_mark = stack_mark.pop()
if not (stack_letter.is_empty() and stack_mark.is_empty()):
return False
else:
return ''.join(word_stack.items) |
def with_prefix(self, prefix, strict=False):
    """
    decorator to handle commands with prefixes

    Parameters
    ----------
    prefix : str
        the prefix of the command
    strict : bool, optional
        when True, the command must appear at the very beginning of the
        message. Defaults to False.

    Returns
    -------
    function
        a decorator that returns an :class:`EventHandler` instance
    """
    def wrapper(func):
        return EventHandler(func=func, event=self.event,
                            prefix=prefix, strict=strict)
    return wrapper
Parameters
----------
prefix : str
the prefix of the command
strict : bool, optional
If set to True the command must be at the beginning
of the message. Defaults to False.
Returns
-------
function
a decorator that returns an :class:`EventHandler` instance | Below is the the instruction that describes the task:
### Input:
decorator to handle commands with prefixes
Parameters
----------
prefix : str
the prefix of the command
strict : bool, optional
If set to True the command must be at the beginning
of the message. Defaults to False.
Returns
-------
function
a decorator that returns an :class:`EventHandler` instance
### Response:
def with_prefix(self, prefix, strict=False):
"""
decorator to handle commands with prefixes
Parameters
----------
prefix : str
the prefix of the command
strict : bool, optional
If set to True the command must be at the beginning
of the message. Defaults to False.
Returns
-------
function
a decorator that returns an :class:`EventHandler` instance
"""
def decorated(func):
return EventHandler(func=func, event=self.event,
prefix=prefix, strict=strict)
return decorated |
def get_instance(self, payload):
    """
    Wrap a raw API payload in a TaskQueueCumulativeStatisticsInstance.

    :param dict payload: Payload response from the API
    :rtype: twilio.rest.taskrouter.v1.workspace.task_queue.task_queue_cumulative_statistics.TaskQueueCumulativeStatisticsInstance
    """
    solution = self._solution
    return TaskQueueCumulativeStatisticsInstance(
        self._version,
        payload,
        workspace_sid=solution['workspace_sid'],
        task_queue_sid=solution['task_queue_sid'],
    )
:param dict payload: Payload response from the API
:returns: twilio.rest.taskrouter.v1.workspace.task_queue.task_queue_cumulative_statistics.TaskQueueCumulativeStatisticsInstance
:rtype: twilio.rest.taskrouter.v1.workspace.task_queue.task_queue_cumulative_statistics.TaskQueueCumulativeStatisticsInstance | Below is the the instruction that describes the task:
### Input:
Build an instance of TaskQueueCumulativeStatisticsInstance
:param dict payload: Payload response from the API
:returns: twilio.rest.taskrouter.v1.workspace.task_queue.task_queue_cumulative_statistics.TaskQueueCumulativeStatisticsInstance
:rtype: twilio.rest.taskrouter.v1.workspace.task_queue.task_queue_cumulative_statistics.TaskQueueCumulativeStatisticsInstance
### Response:
def get_instance(self, payload):
"""
Build an instance of TaskQueueCumulativeStatisticsInstance
:param dict payload: Payload response from the API
:returns: twilio.rest.taskrouter.v1.workspace.task_queue.task_queue_cumulative_statistics.TaskQueueCumulativeStatisticsInstance
:rtype: twilio.rest.taskrouter.v1.workspace.task_queue.task_queue_cumulative_statistics.TaskQueueCumulativeStatisticsInstance
"""
return TaskQueueCumulativeStatisticsInstance(
self._version,
payload,
workspace_sid=self._solution['workspace_sid'],
task_queue_sid=self._solution['task_queue_sid'],
) |
def is_standalone(text, start, end):
    """Check whether text[start:end] stands alone, i.e. is preceded only
    by blank space back to the start of its line and followed by
    whitespace.

    :returns: (index after the preceding whitespace run, index after the
        trailing whitespace run) when standalone, otherwise None
    """
    i = start - 1
    # Scan left over spaces/tabs; hitting a newline or the start of the
    # text means the span begins its line.
    while i >= 0 and text[i] in spaces_not_newline:
        i -= 1
    starts_line = i < 0 or text[i] == '\n'
    trailing = re_space.match(text, end)
    if starts_line and trailing:
        return (i + 1, trailing.end())
    return None
and backwards for blankspaces
:text: TODO
:(start, end): TODO
:returns: the start of next index after text[start:end] | Below is the the instruction that describes the task:
### Input:
check if the string text[start:end] is standalone by checking forwards
and backwards for blankspaces
:text: TODO
:(start, end): TODO
:returns: the start of next index after text[start:end]
### Response:
def is_standalone(text, start, end):
"""check if the string text[start:end] is standalone by checking forwards
and backwards for blankspaces
:text: TODO
:(start, end): TODO
:returns: the start of next index after text[start:end]
"""
left = False
start -= 1
while start >= 0 and text[start] in spaces_not_newline:
start -= 1
if start < 0 or text[start] == '\n':
left = True
right = re_space.match(text, end)
return (start+1, right.end()) if left and right else None |
def assembly(self, value):
    """Set the assembly property.

    Args:
        value (string). the property value.
    """
    is_default = value == self._defaults['assembly']
    if is_default and 'assembly' in self._values:
        # Reverting to the default: drop the stored override entirely.
        del self._values['assembly']
    else:
        self._values['assembly'] = value
Args:
value (string). the property value. | Below is the the instruction that describes the task:
### Input:
The assembly property.
Args:
value (string). the property value.
### Response:
def assembly(self, value):
"""The assembly property.
Args:
value (string). the property value.
"""
if value == self._defaults['assembly'] and 'assembly' in self._values:
del self._values['assembly']
else:
self._values['assembly'] = value |
def _get_responses(cls, requests, dispatcher):
    """ Response to each single JSON-RPC Request.

    Looks each request's method up in *dispatcher*, invokes it, and
    yields one response object per non-notification request.

    :return iterator(JSONRPC20Response):

    .. versionadded: 1.9.0
       TypeError inside the function is distinguished from Invalid Params.
    """
    for request in requests:
        # Response factory bound to this request's JSON-RPC version and
        # id. The late-bound closure over ``request`` is safe because
        # ``response`` is always called before the next loop iteration.
        def response(**kwargs):
            return cls.RESPONSE_CLASS_MAP[request.JSONRPC_VERSION](
                _id=request._id, **kwargs)
        try:
            method = dispatcher[request.method]
        except KeyError:
            # No such method registered.
            output = response(error=JSONRPCMethodNotFound()._data)
        else:
            try:
                result = method(*request.args, **request.kwargs)
            except JSONRPCDispatchException as e:
                # The handler raised a ready-made JSON-RPC error.
                output = response(error=e.error._data)
            except Exception as e:
                data = {
                    "type": e.__class__.__name__,
                    "args": e.args,
                    "message": str(e),
                }
                # A TypeError from a mismatched argument list maps to
                # "Invalid params"; any other failure is a server error.
                if isinstance(e, TypeError) and is_invalid_params(
                        method, *request.args, **request.kwargs):
                    output = response(
                        error=JSONRPCInvalidParams(data=data)._data)
                else:
                    # logger.exception("API Exception: {0}".format(data))
                    print("API Exception: {0}".format(data))
                    output = response(
                        error=JSONRPCServerError(data=data)._data)
            else:
                output = response(result=result)
        finally:
            # Notifications (requests without an id) get no reply.
            if not request.is_notification:
                yield output
:return iterator(JSONRPC20Response):
.. versionadded: 1.9.0
TypeError inside the function is distinguished from Invalid Params. | Below is the the instruction that describes the task:
### Input:
Response to each single JSON-RPC Request.
:return iterator(JSONRPC20Response):
.. versionadded: 1.9.0
TypeError inside the function is distinguished from Invalid Params.
### Response:
def _get_responses(cls, requests, dispatcher):
""" Response to each single JSON-RPC Request.
:return iterator(JSONRPC20Response):
.. versionadded: 1.9.0
TypeError inside the function is distinguished from Invalid Params.
"""
for request in requests:
def response(**kwargs):
return cls.RESPONSE_CLASS_MAP[request.JSONRPC_VERSION](
_id=request._id, **kwargs)
try:
method = dispatcher[request.method]
except KeyError:
output = response(error=JSONRPCMethodNotFound()._data)
else:
try:
result = method(*request.args, **request.kwargs)
except JSONRPCDispatchException as e:
output = response(error=e.error._data)
except Exception as e:
data = {
"type": e.__class__.__name__,
"args": e.args,
"message": str(e),
}
if isinstance(e, TypeError) and is_invalid_params(
method, *request.args, **request.kwargs):
output = response(
error=JSONRPCInvalidParams(data=data)._data)
else:
# logger.exception("API Exception: {0}".format(data))
print("API Exception: {0}".format(data))
output = response(
error=JSONRPCServerError(data=data)._data)
else:
output = response(result=result)
finally:
if not request.is_notification:
yield output |
def calcEL(self, **kwargs):
    """
    NAME:
       calcEL
    PURPOSE:
       compute the energy and angular momentum of this orbit
    INPUT:
       scipy.integrate.quadrature keywords
    OUTPUT:
       (E,L)
    HISTORY:
       2012-11-27 - Written - Bovy (IAS)
    """
    energy, angmom = calcELStaeckel(self._R, self._vR, self._vT,
                                    self._z, self._vz, self._pot)
    return (energy, angmom)
calcEL
PURPOSE:
calculate the energy and angular momentum
INPUT:
scipy.integrate.quadrature keywords
OUTPUT:
(E,L)
HISTORY:
2012-11-27 - Written - Bovy (IAS) | Below is the the instruction that describes the task:
### Input:
NAME:
calcEL
PURPOSE:
calculate the energy and angular momentum
INPUT:
scipy.integrate.quadrature keywords
OUTPUT:
(E,L)
HISTORY:
2012-11-27 - Written - Bovy (IAS)
### Response:
def calcEL(self,**kwargs):
"""
NAME:
calcEL
PURPOSE:
calculate the energy and angular momentum
INPUT:
scipy.integrate.quadrature keywords
OUTPUT:
(E,L)
HISTORY:
2012-11-27 - Written - Bovy (IAS)
"""
E,L= calcELStaeckel(self._R,self._vR,self._vT,self._z,self._vz,
self._pot)
return (E,L) |
def reset_logformat(logger: logging.Logger,
                    fmt: str,
                    datefmt: str = '%Y-%m-%d %H:%M:%S') -> None:
    """
    Create a new formatter and apply it to the logger.

    :func:`logging.basicConfig` won't reset the formatter if another module
    has called it, so always set the formatter like this.

    Args:
        logger: logger to modify
        fmt: passed to the ``fmt=`` argument of :class:`logging.Formatter`
        datefmt: passed to the ``datefmt=`` argument of
            :class:`logging.Formatter`
    """
    stream_handler = logging.StreamHandler()
    stream_handler.setFormatter(logging.Formatter(fmt=fmt, datefmt=datefmt))
    # Drop any handlers installed elsewhere so ours is the only one.
    remove_all_logger_handlers(logger)
    logger.addHandler(stream_handler)
    logger.propagate = False
:func:`logging.basicConfig` won't reset the formatter if another module
has called it, so always set the formatter like this.
Args:
logger: logger to modify
fmt: passed to the ``fmt=`` argument of :class:`logging.Formatter`
datefmt: passed to the ``datefmt=`` argument of
:class:`logging.Formatter` | Below is the the instruction that describes the task:
### Input:
Create a new formatter and apply it to the logger.
:func:`logging.basicConfig` won't reset the formatter if another module
has called it, so always set the formatter like this.
Args:
logger: logger to modify
fmt: passed to the ``fmt=`` argument of :class:`logging.Formatter`
datefmt: passed to the ``datefmt=`` argument of
:class:`logging.Formatter`
### Response:
def reset_logformat(logger: logging.Logger,
fmt: str,
datefmt: str = '%Y-%m-%d %H:%M:%S') -> None:
"""
Create a new formatter and apply it to the logger.
:func:`logging.basicConfig` won't reset the formatter if another module
has called it, so always set the formatter like this.
Args:
logger: logger to modify
fmt: passed to the ``fmt=`` argument of :class:`logging.Formatter`
datefmt: passed to the ``datefmt=`` argument of
:class:`logging.Formatter`
"""
handler = logging.StreamHandler()
formatter = logging.Formatter(fmt=fmt, datefmt=datefmt)
handler.setFormatter(formatter)
remove_all_logger_handlers(logger)
logger.addHandler(handler)
logger.propagate = False |
def concat_padded(base, *args):
    """
    Concatenate *base* with further components, separated by underscores.

    String components are appended verbatim; numeric components are
    treated as 0-based indices and rendered as 1-based, zero-padded
    4-digit numbers (e.g. ``0`` -> ``"0001"``).
    """
    ret = base
    for n in args:
        # isinstance replaces the external is_string() helper; on
        # Python 3 the check is equivalent.
        if isinstance(n, str):
            ret = "%s_%s" % (ret, n)
        else:
            # n is a 0-based index; render it 1-based and zero-padded.
            ret = "%s_%04i" % (ret, n + 1)
    return ret
### Input:
Concatenate string and zero-padded 4 digit number
### Response:
def concat_padded(base, *args):
"""
Concatenate string and zero-padded 4 digit number
"""
ret = base
for n in args:
if is_string(n):
ret = "%s_%s" % (ret, n)
else:
ret = "%s_%04i" % (ret, n + 1)
return ret |
def parse_vmnet_range(start, end):
    """
    Build an argparse action that validates a two-number vmnet range
    lying within [start, end] (inclusive).
    """
    class Range(argparse.Action):
        def __call__(self, parser, args, values, option_string=None):
            if len(values) != 2:
                raise argparse.ArgumentTypeError("vmnet range must consist of 2 numbers")
            low, high = values
            if low < start or high > end:
                raise argparse.ArgumentTypeError("vmnet range must be between {} and {}".format(start, end))
            setattr(args, self.dest, values)
    return Range
### Input:
Parse the vmnet range on the command line.
### Response:
def parse_vmnet_range(start, end):
"""
Parse the vmnet range on the command line.
"""
class Range(argparse.Action):
def __call__(self, parser, args, values, option_string=None):
if len(values) != 2:
raise argparse.ArgumentTypeError("vmnet range must consist of 2 numbers")
if not start <= values[0] or not values[1] <= end:
raise argparse.ArgumentTypeError("vmnet range must be between {} and {}".format(start, end))
setattr(args, self.dest, values)
return Range |
def log_create(self, instance, **kwargs):
    """
    Helper method to create a new log entry. This method automatically populates some fields when no explicit value
    is given.
    :param instance: The model instance to log a change for.
    :type instance: Model
    :param kwargs: Field overrides for the :py:class:`LogEntry` object.
    :return: The new log entry or `None` if there were no changes.
    :rtype: LogEntry
    """
    changes = kwargs.get('changes', None)
    pk = self._get_pk_value(instance)
    if changes is not None:
        # Fill in the audit fields the caller did not supply explicitly.
        kwargs.setdefault('content_type', ContentType.objects.get_for_model(instance))
        kwargs.setdefault('object_pk', pk)
        kwargs.setdefault('object_repr', smart_text(instance))
        # object_id only fits integer primary keys; other pk types rely
        # on the string object_pk column instead.
        if isinstance(pk, integer_types):
            kwargs.setdefault('object_id', pk)
        # Allow the model to attach arbitrary extra data via an optional
        # get_additional_data() hook.
        get_additional_data = getattr(instance, 'get_additional_data', None)
        if callable(get_additional_data):
            kwargs.setdefault('additional_data', get_additional_data())
        # Delete log entries with the same pk as a newly created model. This should only be necessary when a pk is
        # used twice.
        if kwargs.get('action', None) is LogEntry.Action.CREATE:
            if kwargs.get('object_id', None) is not None and self.filter(content_type=kwargs.get('content_type'), object_id=kwargs.get('object_id')).exists():
                self.filter(content_type=kwargs.get('content_type'), object_id=kwargs.get('object_id')).delete()
            else:
                self.filter(content_type=kwargs.get('content_type'), object_pk=kwargs.get('object_pk', '')).delete()
        # Save the LogEntry on the same database the instance lives on.
        db = instance._state.db
        return self.create(**kwargs) if db is None or db == '' else self.using(db).create(**kwargs)
    return None
is given.
:param instance: The model instance to log a change for.
:type instance: Model
:param kwargs: Field overrides for the :py:class:`LogEntry` object.
:return: The new log entry or `None` if there were no changes.
:rtype: LogEntry | Below is the the instruction that describes the task:
### Input:
Helper method to create a new log entry. This method automatically populates some fields when no explicit value
is given.
:param instance: The model instance to log a change for.
:type instance: Model
:param kwargs: Field overrides for the :py:class:`LogEntry` object.
:return: The new log entry or `None` if there were no changes.
:rtype: LogEntry
### Response:
def log_create(self, instance, **kwargs):
"""
Helper method to create a new log entry. This method automatically populates some fields when no explicit value
is given.
:param instance: The model instance to log a change for.
:type instance: Model
:param kwargs: Field overrides for the :py:class:`LogEntry` object.
:return: The new log entry or `None` if there were no changes.
:rtype: LogEntry
"""
changes = kwargs.get('changes', None)
pk = self._get_pk_value(instance)
if changes is not None:
kwargs.setdefault('content_type', ContentType.objects.get_for_model(instance))
kwargs.setdefault('object_pk', pk)
kwargs.setdefault('object_repr', smart_text(instance))
if isinstance(pk, integer_types):
kwargs.setdefault('object_id', pk)
get_additional_data = getattr(instance, 'get_additional_data', None)
if callable(get_additional_data):
kwargs.setdefault('additional_data', get_additional_data())
# Delete log entries with the same pk as a newly created model. This should only be necessary when an pk is
# used twice.
if kwargs.get('action', None) is LogEntry.Action.CREATE:
if kwargs.get('object_id', None) is not None and self.filter(content_type=kwargs.get('content_type'), object_id=kwargs.get('object_id')).exists():
self.filter(content_type=kwargs.get('content_type'), object_id=kwargs.get('object_id')).delete()
else:
self.filter(content_type=kwargs.get('content_type'), object_pk=kwargs.get('object_pk', '')).delete()
# save LogEntry to same database instance is using
db = instance._state.db
return self.create(**kwargs) if db is None or db == '' else self.using(db).create(**kwargs)
return None |
def _tidy_repr(self, max_vals=10, footer=True):
    """Build a shortened repr showing at most *max_vals* entries (half
    from each end, joined by an ellipsis), with an optional footer line.
    """
    n_head = max_vals // 2
    head_repr = self[:n_head]._get_repr(length=False, footer=False)
    tail_repr = self[-(max_vals - n_head):]._get_repr(length=False, footer=False)
    # Splice the two halves: drop head's closing and tail's opening char.
    combined = '{head}, ..., {tail}'.format(head=head_repr[:-1],
                                            tail=tail_repr[1:])
    if footer:
        combined = '{result}\n{footer}'.format(result=combined,
                                               footer=self._repr_footer())
    return str(combined)
footer) | Below is the the instruction that describes the task:
### Input:
a short repr displaying only max_vals and an optional (but default
footer)
### Response:
def _tidy_repr(self, max_vals=10, footer=True):
""" a short repr displaying only max_vals and an optional (but default
footer)
"""
num = max_vals // 2
head = self[:num]._get_repr(length=False, footer=False)
tail = self[-(max_vals - num):]._get_repr(length=False, footer=False)
result = '{head}, ..., {tail}'.format(head=head[:-1], tail=tail[1:])
if footer:
result = '{result}\n{footer}'.format(
result=result, footer=self._repr_footer())
return str(result) |
def _init_model(self):
    """
    Builds the 2D CNN -- two conv/conv/pool stages followed by a dense
    head -- and stores it on ``self.model``.
    """
    conv_opts = dict(filters=self.filters, kernel_size=self.kernel_size,
                     activation='relu', data_format='channels_last')
    model = Sequential()
    # Only the first layer needs the explicit input shape.
    model.add(Conv2D(input_shape=list(self.image_size) + [self.channels],
                     **conv_opts))
    model.add(Conv2D(**conv_opts))
    model.add(MaxPooling2D())
    model.add(Conv2D(**conv_opts))
    model.add(Conv2D(**conv_opts))
    model.add(MaxPooling2D())
    model.add(Flatten())
    # NOTE(review): 'relu' on the final layer is unusual for a
    # classification head -- confirm 'softmax' was not intended.
    model.add(Dense(units=len(self.emotion_map.keys()), activation="relu"))
    if self.verbose:
        model.summary()
    self.model = model
### Input:
Composes all layers of 2D CNN.
### Response:
def _init_model(self):
"""
Composes all layers of 2D CNN.
"""
model = Sequential()
model.add(Conv2D(input_shape=list(self.image_size) + [self.channels], filters=self.filters,
kernel_size=self.kernel_size, activation='relu', data_format='channels_last'))
model.add(
Conv2D(filters=self.filters, kernel_size=self.kernel_size, activation='relu', data_format='channels_last'))
model.add(MaxPooling2D())
model.add(
Conv2D(filters=self.filters, kernel_size=self.kernel_size, activation='relu', data_format='channels_last'))
model.add(
Conv2D(filters=self.filters, kernel_size=self.kernel_size, activation='relu', data_format='channels_last'))
model.add(MaxPooling2D())
model.add(Flatten())
model.add(Dense(units=len(self.emotion_map.keys()), activation="relu"))
if self.verbose:
model.summary()
self.model = model |
def _mean_prediction(self, lmda, Y, scores, h, t_params, X_oos):
    """ Creates a h-step ahead mean prediction

    Parameters
    ----------
    lmda : np.array
        The past predicted values
    Y : np.array
        The past data
    scores : np.array
        The past scores
    h : int
        How many steps ahead for the prediction
    t_params : np.array
        A vector of (transformed) latent variables; the tail layout
        assumed here is [..., premium, len(X_names) vol betas,
        len(X_names) mean betas] -- TODO confirm against the model's
        latent-variable ordering.
    X_oos : np.array
        Out of sample predictors

    Returns
    ----------
    h-length vector of mean predictions

    NOTE(review): the function returns the extended ``lmda`` series
    (past values plus the h new ones), not the ``Y_exp`` mean series it
    also builds -- confirm this is the intended return value.
    """
    # Create arrays to iterate over (copies: the inputs are not mutated)
    lmda_exp = lmda.copy()
    scores_exp = scores.copy()
    Y_exp = Y.copy()
    # Loop over h time periods
    for t in range(0,h):
        new_lambda_value = 0
        # Autoregressive terms on the most recent lambdas
        if self.p != 0:
            for j in range(self.p):
                new_lambda_value += t_params[j]*lmda_exp[-j-1]
        # Score-driven terms on the most recent scores
        if self.q != 0:
            for k in range(self.q):
                new_lambda_value += t_params[k+self.p]*scores_exp[-k-1]
        # No leverage term for mean (should be zero in expectation?)
        new_lambda_value += np.dot(X_oos[t],t_params[-len(self.X_names)*2:-len(self.X_names)])
        new_theta_value = np.dot(X_oos[t],t_params[-len(self.X_names):]) + t_params[-(len(self.X_names)*2)-1]*np.exp(new_lambda_value/2.0)
        lmda_exp = np.append(lmda_exp,[new_lambda_value]) # For indexing consistency
        scores_exp = np.append(scores_exp,[0]) # expectation of score is zero
        Y_exp = np.append(Y_exp,new_theta_value)
    return lmda_exp
Parameters
----------
lmda : np.array
The past predicted values
Y : np.array
The past data
scores : np.array
The past scores
h : int
How many steps ahead for the prediction
t_params : np.array
A vector of (transformed) latent variables
X_oos : np.array
Out of sample predictors
Returns
----------
h-length vector of mean predictions | Below is the the instruction that describes the task:
### Input:
Creates a h-step ahead mean prediction
Parameters
----------
lmda : np.array
The past predicted values
Y : np.array
The past data
scores : np.array
The past scores
h : int
How many steps ahead for the prediction
t_params : np.array
A vector of (transformed) latent variables
X_oos : np.array
Out of sample predictors
Returns
----------
h-length vector of mean predictions
### Response:
def _mean_prediction(self, lmda, Y, scores, h, t_params, X_oos):
""" Creates a h-step ahead mean prediction
Parameters
----------
lmda : np.array
The past predicted values
Y : np.array
The past data
scores : np.array
The past scores
h : int
How many steps ahead for the prediction
t_params : np.array
A vector of (transformed) latent variables
X_oos : np.array
Out of sample predictors
Returns
----------
h-length vector of mean predictions
"""
# Create arrays to iteratre over
lmda_exp = lmda.copy()
scores_exp = scores.copy()
Y_exp = Y.copy()
# Loop over h time periods
for t in range(0,h):
new_lambda_value = 0
if self.p != 0:
for j in range(self.p):
new_lambda_value += t_params[j]*lmda_exp[-j-1]
if self.q != 0:
for k in range(self.q):
new_lambda_value += t_params[k+self.p]*scores_exp[-k-1]
# No leverage term for mean (should be zero in expectation?)
new_lambda_value += np.dot(X_oos[t],t_params[-len(self.X_names)*2:-len(self.X_names)])
new_theta_value = np.dot(X_oos[t],t_params[-len(self.X_names):]) + t_params[-(len(self.X_names)*2)-1]*np.exp(new_lambda_value/2.0)
lmda_exp = np.append(lmda_exp,[new_lambda_value]) # For indexing consistency
scores_exp = np.append(scores_exp,[0]) # expectation of score is zero
Y_exp = np.append(Y_exp,new_theta_value)
return lmda_exp |
def ReadRaster(self, *args, **kwargs):
    """Return raster data bytes for a partial or full extent.

    Unlike gdal.Dataset.ReadRaster(), this defaults to the dataset's
    full width and height when no window is given.
    """
    window = args if args else (0, 0, self.ds.RasterXSize, self.ds.RasterYSize)
    return self.ds.ReadRaster(*window, **kwargs)
Overrides gdal.Dataset.ReadRaster() with the full raster size by
default. | Below is the the instruction that describes the task:
### Input:
Returns raster data bytes for partial or full extent.
Overrides gdal.Dataset.ReadRaster() with the full raster size by
default.
### Response:
def ReadRaster(self, *args, **kwargs):
"""Returns raster data bytes for partial or full extent.
Overrides gdal.Dataset.ReadRaster() with the full raster size by
default.
"""
args = args or (0, 0, self.ds.RasterXSize, self.ds.RasterYSize)
return self.ds.ReadRaster(*args, **kwargs) |
def read_tsv(self):
    """
    Load each sample's MOB-recon contig report (.tsv) with pandas and
    fold it into a nested ``{row_index: {column_header: value}}`` dict
    on the sample's report_dict.
    """
    logging.info('Parsing MOB-recon outputs')
    for sample in self.metadata:
        report_path = sample[self.analysistype].contig_report
        if not os.path.isfile(report_path):
            continue
        frame = pd.read_csv(report_path, delimiter='\t')
        for column in frame:
            # Column labels may carry stray whitespace; normalise them.
            label = column.strip()
            for row_index, cell in frame[column].items():
                row = sample[self.analysistype].report_dict.setdefault(row_index, dict())
                row[label] = cell
### Input:
Read in the .tsv contig report file with pandas, and create a dictionary of all the headers: values
### Response:
def read_tsv(self):
"""
Read in the .tsv contig report file with pandas, and create a dictionary of all the headers: values
"""
logging.info('Parsing MOB-recon outputs')
for sample in self.metadata:
if os.path.isfile(sample[self.analysistype].contig_report):
# Read in the .tsv file with pandas. Skip the comment lines
df = pd.read_csv(sample[self.analysistype].contig_report, delimiter='\t')
for header in df:
# Remove any unwanted whitespace
clean_header = header.lstrip().rstrip()
# primary_key is the primary key, and value is the value of the cell for that key + header combo
for primary_key, value in df[header].items():
# Update the dictionary with the new data
try:
sample[self.analysistype].report_dict[primary_key].update({clean_header: value})
# Create the nested dictionary if it hasn't been created yet
except KeyError:
sample[self.analysistype].report_dict[primary_key] = dict()
sample[self.analysistype].report_dict[primary_key].update({clean_header: value}) |
def runtime_values(self):
"""
All of the concrete values used by this function at runtime (i.e., including passed-in arguments and global
values).
"""
constants = set()
for b in self.block_addrs:
for sirsb in self._function_manager._cfg.get_all_irsbs(b):
for s in sirsb.successors + sirsb.unsat_successors:
for a in s.history.recent_actions:
for ao in a.all_objects:
if not isinstance(ao.ast, claripy.ast.Base):
constants.add(ao.ast)
elif not ao.ast.symbolic:
constants.add(s.solver.eval(ao.ast))
return constants | All of the concrete values used by this function at runtime (i.e., including passed-in arguments and global
values). | Below is the the instruction that describes the task:
### Input:
All of the concrete values used by this function at runtime (i.e., including passed-in arguments and global
values).
### Response:
def runtime_values(self):
"""
All of the concrete values used by this function at runtime (i.e., including passed-in arguments and global
values).
"""
constants = set()
for b in self.block_addrs:
for sirsb in self._function_manager._cfg.get_all_irsbs(b):
for s in sirsb.successors + sirsb.unsat_successors:
for a in s.history.recent_actions:
for ao in a.all_objects:
if not isinstance(ao.ast, claripy.ast.Base):
constants.add(ao.ast)
elif not ao.ast.symbolic:
constants.add(s.solver.eval(ao.ast))
return constants |
def process_request(self, request, response):
"""Get session ID from cookie, load corresponding session data from coupled store and inject session data into
the request context.
"""
sid = request.cookies.get(self.cookie_name, None)
data = {}
if sid is not None:
if self.store.exists(sid):
data = self.store.get(sid)
request.context.update({self.context_name: data}) | Get session ID from cookie, load corresponding session data from coupled store and inject session data into
the request context. | Below is the the instruction that describes the task:
### Input:
Get session ID from cookie, load corresponding session data from coupled store and inject session data into
the request context.
### Response:
def process_request(self, request, response):
"""Get session ID from cookie, load corresponding session data from coupled store and inject session data into
the request context.
"""
sid = request.cookies.get(self.cookie_name, None)
data = {}
if sid is not None:
if self.store.exists(sid):
data = self.store.get(sid)
request.context.update({self.context_name: data}) |
def multi_session(self):
''' convert the multi_session param a number '''
_val = 0
if "multi_session" in self._dict:
_val = self._dict["multi_session"]
if str(_val).lower() == 'all':
_val = -1
return int(_val) | convert the multi_session param a number | Below is the the instruction that describes the task:
### Input:
convert the multi_session param a number
### Response:
def multi_session(self):
''' convert the multi_session param a number '''
_val = 0
if "multi_session" in self._dict:
_val = self._dict["multi_session"]
if str(_val).lower() == 'all':
_val = -1
return int(_val) |
def items(self, section=_UNSET, raw=False, vars=None):
"""Return a list of (name, value) tuples for each option in a section.
All % interpolations are expanded in the return values, based on the
defaults passed into the constructor, unless the optional argument
`raw' is true. Additional substitutions may be provided using the
`vars' argument, which must be a dictionary whose contents overrides
any pre-existing defaults.
The section DEFAULT is special.
"""
if section is _UNSET:
return super(RawConfigParser, self).items()
d = self._defaults.copy()
try:
d.update(self._sections[section])
except KeyError:
if section != self.default_section:
raise NoSectionError(section)
# Update with the entry specific variables
if vars:
for key, value in vars.items():
d[self.optionxform(key)] = value
value_getter = lambda option: self._interpolation.before_get(self,
section, option, d[option], d)
if raw:
value_getter = lambda option: d[option]
return [(option, value_getter(option)) for option in d.keys()] | Return a list of (name, value) tuples for each option in a section.
All % interpolations are expanded in the return values, based on the
defaults passed into the constructor, unless the optional argument
`raw' is true. Additional substitutions may be provided using the
`vars' argument, which must be a dictionary whose contents overrides
any pre-existing defaults.
The section DEFAULT is special. | Below is the the instruction that describes the task:
### Input:
Return a list of (name, value) tuples for each option in a section.
All % interpolations are expanded in the return values, based on the
defaults passed into the constructor, unless the optional argument
`raw' is true. Additional substitutions may be provided using the
`vars' argument, which must be a dictionary whose contents overrides
any pre-existing defaults.
The section DEFAULT is special.
### Response:
def items(self, section=_UNSET, raw=False, vars=None):
"""Return a list of (name, value) tuples for each option in a section.
All % interpolations are expanded in the return values, based on the
defaults passed into the constructor, unless the optional argument
`raw' is true. Additional substitutions may be provided using the
`vars' argument, which must be a dictionary whose contents overrides
any pre-existing defaults.
The section DEFAULT is special.
"""
if section is _UNSET:
return super(RawConfigParser, self).items()
d = self._defaults.copy()
try:
d.update(self._sections[section])
except KeyError:
if section != self.default_section:
raise NoSectionError(section)
# Update with the entry specific variables
if vars:
for key, value in vars.items():
d[self.optionxform(key)] = value
value_getter = lambda option: self._interpolation.before_get(self,
section, option, d[option], d)
if raw:
value_getter = lambda option: d[option]
return [(option, value_getter(option)) for option in d.keys()] |
def generate_legacy_webfinger(template=None, *args, **kwargs):
"""Generate a legacy webfinger XRD document.
Template specific key-value pairs need to be passed as ``kwargs``, see classes.
:arg template: Ready template to fill with args, for example "diaspora" (optional)
:returns: Rendered XRD document (str)
"""
if template == "diaspora":
webfinger = DiasporaWebFinger(*args, **kwargs)
else:
webfinger = BaseLegacyWebFinger(*args, **kwargs)
return webfinger.render() | Generate a legacy webfinger XRD document.
Template specific key-value pairs need to be passed as ``kwargs``, see classes.
:arg template: Ready template to fill with args, for example "diaspora" (optional)
:returns: Rendered XRD document (str) | Below is the the instruction that describes the task:
### Input:
Generate a legacy webfinger XRD document.
Template specific key-value pairs need to be passed as ``kwargs``, see classes.
:arg template: Ready template to fill with args, for example "diaspora" (optional)
:returns: Rendered XRD document (str)
### Response:
def generate_legacy_webfinger(template=None, *args, **kwargs):
"""Generate a legacy webfinger XRD document.
Template specific key-value pairs need to be passed as ``kwargs``, see classes.
:arg template: Ready template to fill with args, for example "diaspora" (optional)
:returns: Rendered XRD document (str)
"""
if template == "diaspora":
webfinger = DiasporaWebFinger(*args, **kwargs)
else:
webfinger = BaseLegacyWebFinger(*args, **kwargs)
return webfinger.render() |
def process_request(self, request):
"""
Store memory data to log later.
"""
if self._is_enabled():
self._cache.set(self.guid_key, six.text_type(uuid4()))
log_prefix = self._log_prefix(u"Before", request)
self._cache.set(self.memory_data_key, self._memory_data(log_prefix)) | Store memory data to log later. | Below is the the instruction that describes the task:
### Input:
Store memory data to log later.
### Response:
def process_request(self, request):
"""
Store memory data to log later.
"""
if self._is_enabled():
self._cache.set(self.guid_key, six.text_type(uuid4()))
log_prefix = self._log_prefix(u"Before", request)
self._cache.set(self.memory_data_key, self._memory_data(log_prefix)) |
def is_valid(cls, arg):
"""Return True if arg is valid value for the class."""
return isinstance(arg, (int, long)) and (not isinstance(arg, bool)) | Return True if arg is valid value for the class. | Below is the the instruction that describes the task:
### Input:
Return True if arg is valid value for the class.
### Response:
def is_valid(cls, arg):
"""Return True if arg is valid value for the class."""
return isinstance(arg, (int, long)) and (not isinstance(arg, bool)) |
def reset(self):
"""
Reset the videostream by restarting ffmpeg
"""
if self.ffmpeg_process is not None:
# Close the previous stream
try:
self.ffmpeg_process.send_signal(signal.SIGINT)
except OSError:
pass
command = []
command.extend([
self.ffmpeg_binary,
'-loglevel', 'verbose',
'-y', # overwrite previous file/stream
# '-re', # native frame-rate
'-analyzeduration', '1',
'-f', 'rawvideo',
'-r', '%d' % self.fps, # set a fixed frame rate
'-vcodec', 'rawvideo',
# size of one frame
'-s', '%dx%d' % (self.width, self.height),
'-pix_fmt', 'rgb24', # The input are raw bytes
'-thread_queue_size', '1024',
'-i', '/tmp/videopipe', # The input comes from a pipe
# Twitch needs to receive sound in their streams!
# '-an', # Tells FFMPEG not to expect any audio
])
if self.audio_enabled:
command.extend([
'-ar', '%d' % AUDIORATE,
'-ac', '2',
'-f', 's16le',
'-thread_queue_size', '1024',
'-i', '/tmp/audiopipe'
])
else:
command.extend([
'-ar', '8000',
'-ac', '1',
'-f', 's16le',
'-i', '/dev/zero', # silence alternative, works forever
# '-i','http://stream1.radiostyle.ru:8001/tunguska',
# '-filter_complex',
# '[0:1][1:0]amix=inputs=2:duration=first[all_audio]'
])
command.extend([
# VIDEO CODEC PARAMETERS
'-vcodec', 'libx264',
'-r', '%d' % self.fps,
'-b:v', '3000k',
'-s', '%dx%d' % (self.width, self.height),
'-preset', 'faster', '-tune', 'zerolatency',
'-crf', '23',
'-pix_fmt', 'yuv420p',
# '-force_key_frames', r'expr:gte(t,n_forced*2)',
'-minrate', '3000k', '-maxrate', '3000k',
'-bufsize', '12000k',
'-g', '60', # key frame distance
'-keyint_min', '1',
# '-filter:v "setpts=0.25*PTS"'
# '-vsync','passthrough',
# AUDIO CODEC PARAMETERS
'-acodec', 'libmp3lame', '-ar', '44100', '-b:a', '160k',
# '-bufsize', '8192k',
'-ac', '1',
# '-acodec', 'aac', '-strict', 'experimental',
# '-ab', '128k', '-ar', '44100', '-ac', '1',
# '-async','44100',
# '-filter_complex', 'asplit', #for audio sync?
# STORE THE VIDEO PARAMETERS
# '-vcodec', 'libx264', '-s', '%dx%d'%(width, height),
# '-preset', 'libx264-fast',
# 'my_output_videofile2.avi'
# MAP THE STREAMS
# use only video from first input and only audio from second
'-map', '0:v', '-map', '1:a',
# NUMBER OF THREADS
'-threads', '2',
# STREAM TO TWITCH
'-f', 'flv', 'rtmp://live-ams.twitch.tv/app/%s' %
self.twitch_stream_key
])
devnullpipe = open("/dev/null", "w") # Throw away stream
if self.verbose:
devnullpipe = None
self.ffmpeg_process = subprocess.Popen(
command,
stdin=subprocess.PIPE,
stderr=devnullpipe,
stdout=devnullpipe) | Reset the videostream by restarting ffmpeg | Below is the the instruction that describes the task:
### Input:
Reset the videostream by restarting ffmpeg
### Response:
def reset(self):
"""
Reset the videostream by restarting ffmpeg
"""
if self.ffmpeg_process is not None:
# Close the previous stream
try:
self.ffmpeg_process.send_signal(signal.SIGINT)
except OSError:
pass
command = []
command.extend([
self.ffmpeg_binary,
'-loglevel', 'verbose',
'-y', # overwrite previous file/stream
# '-re', # native frame-rate
'-analyzeduration', '1',
'-f', 'rawvideo',
'-r', '%d' % self.fps, # set a fixed frame rate
'-vcodec', 'rawvideo',
# size of one frame
'-s', '%dx%d' % (self.width, self.height),
'-pix_fmt', 'rgb24', # The input are raw bytes
'-thread_queue_size', '1024',
'-i', '/tmp/videopipe', # The input comes from a pipe
# Twitch needs to receive sound in their streams!
# '-an', # Tells FFMPEG not to expect any audio
])
if self.audio_enabled:
command.extend([
'-ar', '%d' % AUDIORATE,
'-ac', '2',
'-f', 's16le',
'-thread_queue_size', '1024',
'-i', '/tmp/audiopipe'
])
else:
command.extend([
'-ar', '8000',
'-ac', '1',
'-f', 's16le',
'-i', '/dev/zero', # silence alternative, works forever
# '-i','http://stream1.radiostyle.ru:8001/tunguska',
# '-filter_complex',
# '[0:1][1:0]amix=inputs=2:duration=first[all_audio]'
])
command.extend([
# VIDEO CODEC PARAMETERS
'-vcodec', 'libx264',
'-r', '%d' % self.fps,
'-b:v', '3000k',
'-s', '%dx%d' % (self.width, self.height),
'-preset', 'faster', '-tune', 'zerolatency',
'-crf', '23',
'-pix_fmt', 'yuv420p',
# '-force_key_frames', r'expr:gte(t,n_forced*2)',
'-minrate', '3000k', '-maxrate', '3000k',
'-bufsize', '12000k',
'-g', '60', # key frame distance
'-keyint_min', '1',
# '-filter:v "setpts=0.25*PTS"'
# '-vsync','passthrough',
# AUDIO CODEC PARAMETERS
'-acodec', 'libmp3lame', '-ar', '44100', '-b:a', '160k',
# '-bufsize', '8192k',
'-ac', '1',
# '-acodec', 'aac', '-strict', 'experimental',
# '-ab', '128k', '-ar', '44100', '-ac', '1',
# '-async','44100',
# '-filter_complex', 'asplit', #for audio sync?
# STORE THE VIDEO PARAMETERS
# '-vcodec', 'libx264', '-s', '%dx%d'%(width, height),
# '-preset', 'libx264-fast',
# 'my_output_videofile2.avi'
# MAP THE STREAMS
# use only video from first input and only audio from second
'-map', '0:v', '-map', '1:a',
# NUMBER OF THREADS
'-threads', '2',
# STREAM TO TWITCH
'-f', 'flv', 'rtmp://live-ams.twitch.tv/app/%s' %
self.twitch_stream_key
])
devnullpipe = open("/dev/null", "w") # Throw away stream
if self.verbose:
devnullpipe = None
self.ffmpeg_process = subprocess.Popen(
command,
stdin=subprocess.PIPE,
stderr=devnullpipe,
stdout=devnullpipe) |
def tile(imgs, cmap='gray', bar=False, nans=True, clim=None, grid=None, size=9, axis=0, fig=None):
"""
Display a collection of images as a grid of tiles
Parameters
----------
img : list or ndarray (2D or 3D)
The images to display. Can be a list of either 2D, 3D,
or a mix of 2D and 3D numpy arrays. Can also be a single
numpy array, in which case the axis parameter will be assumed
to index the image list, e.g. a (10, 512, 512) array with axis=0
will be treated as 10 different (512,512) images, and the same
array with axis=1 would be treated as 512 (10,512) images.
cmap : str or Colormap or list, optional, default = 'gray'
A colormap to use, for non RGB images, a list can be used to
specify a different colormap for each image
bar : boolean, optional, default = False
Whether to append a colorbar to each image
nans : boolean, optional, deafult = True
Whether to replace NaNs, if True, will replace with 0s
clim : tuple or list of tuples, optional, default = None
Limits for scaling image, a list can be used to
specify different limits for each image
grid : tuple, optional, default = None
Dimensions of image tile grid, if None, will use a square grid
large enough to include all images
size : scalar, optional, deafult = 11
Size of the figure
axis : int, optional, default = 0
Which axis of array indexes images
fig : matplotlib figure, optional, default = None
An existing figure to plot on
"""
from matplotlib.pyplot import figure, colorbar
from mpl_toolkits.axes_grid1 import ImageGrid
if not isinstance(imgs, list):
if not isinstance(imgs, ndarray):
imgs = asarray(imgs)
if (axis < 0) | (axis >= imgs.ndim):
raise ValueError("Must specify a valid axis to index the images")
imgs = list(rollaxis(imgs, axis, 0))
imgs = [asarray(im) for im in imgs]
if (nans is True) and (imgs[0].dtype != bool):
imgs = [nan_to_num(im) for im in imgs]
if fig is None:
fig = figure(figsize=(size, size))
if bar is True:
axes_pad = 0.4
if sum([im.ndim == 3 for im in imgs]):
raise ValueError("Cannot show meaningful colorbar for RGB image")
cbar_mode = "each"
else:
axes_pad = 0.2
cbar_mode = None
nimgs = len(imgs)
if not isinstance(cmap, list):
cmap = [cmap for _ in range(nimgs)]
if not isinstance(clim, list):
clim = [clim for _ in range(nimgs)]
if len(clim) < nimgs:
raise ValueError("Number of clim specifications %g too small for number of images %g"
% (len(clim), nimgs))
if len(cmap) < nimgs:
raise ValueError("Number of cmap specifications %g too small for number of images %g"
% (len(cmap), nimgs))
if grid is None:
c = int(ceil(sqrt(nimgs)))
grid = (c, c)
ngrid = grid[0] * grid[1]
if ngrid < nimgs:
raise ValueError("Total grid count %g too small for number of images %g" % (ngrid, nimgs))
g = ImageGrid(fig, 111, nrows_ncols=grid, axes_pad=axes_pad,
cbar_mode=cbar_mode, cbar_size="5%", cbar_pad="5%")
axes = []
for i, im in enumerate(imgs):
ax = g[i].imshow(im, cmap=cmap[i], interpolation='nearest', clim=clim[i])
g[i].axis('off')
if bar:
cb = colorbar(ax, g[i].cax)
rng = abs(cb.vmax - cb.vmin) * 0.05
cb.set_ticks([around(cb.vmin + rng, 1), around(cb.vmax - rng, 1)])
cb.outline.set_visible(False)
axes.append(ax)
if nimgs < ngrid:
for i in range(nimgs, ngrid):
g[i].axis('off')
g[i].cax.axis('off')
return axes, g | Display a collection of images as a grid of tiles
Parameters
----------
img : list or ndarray (2D or 3D)
The images to display. Can be a list of either 2D, 3D,
or a mix of 2D and 3D numpy arrays. Can also be a single
numpy array, in which case the axis parameter will be assumed
to index the image list, e.g. a (10, 512, 512) array with axis=0
will be treated as 10 different (512,512) images, and the same
array with axis=1 would be treated as 512 (10,512) images.
cmap : str or Colormap or list, optional, default = 'gray'
A colormap to use, for non RGB images, a list can be used to
specify a different colormap for each image
bar : boolean, optional, default = False
Whether to append a colorbar to each image
nans : boolean, optional, deafult = True
Whether to replace NaNs, if True, will replace with 0s
clim : tuple or list of tuples, optional, default = None
Limits for scaling image, a list can be used to
specify different limits for each image
grid : tuple, optional, default = None
Dimensions of image tile grid, if None, will use a square grid
large enough to include all images
size : scalar, optional, deafult = 11
Size of the figure
axis : int, optional, default = 0
Which axis of array indexes images
fig : matplotlib figure, optional, default = None
An existing figure to plot on | Below is the the instruction that describes the task:
### Input:
Display a collection of images as a grid of tiles
Parameters
----------
img : list or ndarray (2D or 3D)
The images to display. Can be a list of either 2D, 3D,
or a mix of 2D and 3D numpy arrays. Can also be a single
numpy array, in which case the axis parameter will be assumed
to index the image list, e.g. a (10, 512, 512) array with axis=0
will be treated as 10 different (512,512) images, and the same
array with axis=1 would be treated as 512 (10,512) images.
cmap : str or Colormap or list, optional, default = 'gray'
A colormap to use, for non RGB images, a list can be used to
specify a different colormap for each image
bar : boolean, optional, default = False
Whether to append a colorbar to each image
nans : boolean, optional, deafult = True
Whether to replace NaNs, if True, will replace with 0s
clim : tuple or list of tuples, optional, default = None
Limits for scaling image, a list can be used to
specify different limits for each image
grid : tuple, optional, default = None
Dimensions of image tile grid, if None, will use a square grid
large enough to include all images
size : scalar, optional, deafult = 11
Size of the figure
axis : int, optional, default = 0
Which axis of array indexes images
fig : matplotlib figure, optional, default = None
An existing figure to plot on
### Response:
def tile(imgs, cmap='gray', bar=False, nans=True, clim=None, grid=None, size=9, axis=0, fig=None):
"""
Display a collection of images as a grid of tiles
Parameters
----------
img : list or ndarray (2D or 3D)
The images to display. Can be a list of either 2D, 3D,
or a mix of 2D and 3D numpy arrays. Can also be a single
numpy array, in which case the axis parameter will be assumed
to index the image list, e.g. a (10, 512, 512) array with axis=0
will be treated as 10 different (512,512) images, and the same
array with axis=1 would be treated as 512 (10,512) images.
cmap : str or Colormap or list, optional, default = 'gray'
A colormap to use, for non RGB images, a list can be used to
specify a different colormap for each image
bar : boolean, optional, default = False
Whether to append a colorbar to each image
nans : boolean, optional, deafult = True
Whether to replace NaNs, if True, will replace with 0s
clim : tuple or list of tuples, optional, default = None
Limits for scaling image, a list can be used to
specify different limits for each image
grid : tuple, optional, default = None
Dimensions of image tile grid, if None, will use a square grid
large enough to include all images
size : scalar, optional, deafult = 11
Size of the figure
axis : int, optional, default = 0
Which axis of array indexes images
fig : matplotlib figure, optional, default = None
An existing figure to plot on
"""
from matplotlib.pyplot import figure, colorbar
from mpl_toolkits.axes_grid1 import ImageGrid
if not isinstance(imgs, list):
if not isinstance(imgs, ndarray):
imgs = asarray(imgs)
if (axis < 0) | (axis >= imgs.ndim):
raise ValueError("Must specify a valid axis to index the images")
imgs = list(rollaxis(imgs, axis, 0))
imgs = [asarray(im) for im in imgs]
if (nans is True) and (imgs[0].dtype != bool):
imgs = [nan_to_num(im) for im in imgs]
if fig is None:
fig = figure(figsize=(size, size))
if bar is True:
axes_pad = 0.4
if sum([im.ndim == 3 for im in imgs]):
raise ValueError("Cannot show meaningful colorbar for RGB image")
cbar_mode = "each"
else:
axes_pad = 0.2
cbar_mode = None
nimgs = len(imgs)
if not isinstance(cmap, list):
cmap = [cmap for _ in range(nimgs)]
if not isinstance(clim, list):
clim = [clim for _ in range(nimgs)]
if len(clim) < nimgs:
raise ValueError("Number of clim specifications %g too small for number of images %g"
% (len(clim), nimgs))
if len(cmap) < nimgs:
raise ValueError("Number of cmap specifications %g too small for number of images %g"
% (len(cmap), nimgs))
if grid is None:
c = int(ceil(sqrt(nimgs)))
grid = (c, c)
ngrid = grid[0] * grid[1]
if ngrid < nimgs:
raise ValueError("Total grid count %g too small for number of images %g" % (ngrid, nimgs))
g = ImageGrid(fig, 111, nrows_ncols=grid, axes_pad=axes_pad,
cbar_mode=cbar_mode, cbar_size="5%", cbar_pad="5%")
axes = []
for i, im in enumerate(imgs):
ax = g[i].imshow(im, cmap=cmap[i], interpolation='nearest', clim=clim[i])
g[i].axis('off')
if bar:
cb = colorbar(ax, g[i].cax)
rng = abs(cb.vmax - cb.vmin) * 0.05
cb.set_ticks([around(cb.vmin + rng, 1), around(cb.vmax - rng, 1)])
cb.outline.set_visible(False)
axes.append(ax)
if nimgs < ngrid:
for i in range(nimgs, ngrid):
g[i].axis('off')
g[i].cax.axis('off')
return axes, g |
def get_query(self, query):
"""Make a GET request, including a query, to the endpoint.
The path of the request is to the base URL assigned to the endpoint.
Parameters
----------
query : DataQuery
The query to pass when making the request
Returns
-------
resp : requests.Response
The server's response to the request
See Also
--------
get_path, get
"""
url = self._base[:-1] if self._base[-1] == '/' else self._base
return self.get(url, query) | Make a GET request, including a query, to the endpoint.
The path of the request is to the base URL assigned to the endpoint.
Parameters
----------
query : DataQuery
The query to pass when making the request
Returns
-------
resp : requests.Response
The server's response to the request
See Also
--------
get_path, get | Below is the the instruction that describes the task:
### Input:
Make a GET request, including a query, to the endpoint.
The path of the request is to the base URL assigned to the endpoint.
Parameters
----------
query : DataQuery
The query to pass when making the request
Returns
-------
resp : requests.Response
The server's response to the request
See Also
--------
get_path, get
### Response:
def get_query(self, query):
"""Make a GET request, including a query, to the endpoint.
The path of the request is to the base URL assigned to the endpoint.
Parameters
----------
query : DataQuery
The query to pass when making the request
Returns
-------
resp : requests.Response
The server's response to the request
See Also
--------
get_path, get
"""
url = self._base[:-1] if self._base[-1] == '/' else self._base
return self.get(url, query) |
def get_parent(self):
""" the parent of this DriveItem
:return: Parent of this item
:rtype: Drive or drive.Folder
"""
if self._parent and self._parent.object_id == self.parent_id:
return self._parent
else:
if self.parent_id:
return self.drive.get_item(self.parent_id)
else:
# return the drive
return self.drive | the parent of this DriveItem
:return: Parent of this item
:rtype: Drive or drive.Folder | Below is the the instruction that describes the task:
### Input:
the parent of this DriveItem
:return: Parent of this item
:rtype: Drive or drive.Folder
### Response:
def get_parent(self):
""" the parent of this DriveItem
:return: Parent of this item
:rtype: Drive or drive.Folder
"""
if self._parent and self._parent.object_id == self.parent_id:
return self._parent
else:
if self.parent_id:
return self.drive.get_item(self.parent_id)
else:
# return the drive
return self.drive |
def add_assembly_names(opts):
"""add assembly names as aliases to existing sequences
Specifically, associate aliases like GRCh37.p9:1 with existing
refseq accessions
```
[{'aliases': ['chr19'],
'assembly_unit': 'Primary Assembly',
'genbank_ac': 'CM000681.2',
'length': 58617616,
'name': '19',
'refseq_ac': 'NC_000019.10',
'relationship': '=',
'sequence_role': 'assembled-molecule'}]
```
For the above sample record, this function adds the following aliases:
* genbank:CM000681.2
* GRCh38:19
* GRCh38:chr19
to the sequence referred to by refseq:NC_000019.10.
"""
seqrepo_dir = os.path.join(opts.root_directory, opts.instance_name)
sr = SeqRepo(seqrepo_dir, writeable=True)
assemblies = bioutils.assemblies.get_assemblies()
if opts.reload_all:
assemblies_to_load = sorted(assemblies)
else:
namespaces = [r["namespace"] for r in sr.aliases._db.execute("select distinct namespace from seqalias")]
assemblies_to_load = sorted(k for k in assemblies if k not in namespaces)
_logger.info("{} assemblies to load".format(len(assemblies_to_load)))
ncbi_alias_map = {r["alias"]: r["seq_id"] for r in sr.aliases.find_aliases(namespace="NCBI", current_only=False)}
for assy_name in tqdm.tqdm(assemblies_to_load, unit="assembly"):
_logger.debug("loading " + assy_name)
sequences = assemblies[assy_name]["sequences"]
eq_sequences = [s for s in sequences if s["relationship"] in ("=", "<>")]
if not eq_sequences:
_logger.info("No '=' sequences to load for {an}; skipping".format(an=assy_name))
continue
# all assembled-molecules (1..22, X, Y, MT) have ncbi aliases in seqrepo
not_in_seqrepo = [s["refseq_ac"] for s in eq_sequences if s["refseq_ac"] not in ncbi_alias_map]
if not_in_seqrepo:
_logger.warning("Assembly {an} references {n} accessions not in SeqRepo instance {opts.instance_name} (e.g., {acs})".format(
an=assy_name, n=len(not_in_seqrepo), opts=opts, acs=", ".join(not_in_seqrepo[:5]+["..."]), seqrepo_dir=seqrepo_dir))
if not opts.partial_load:
_logger.warning("Skipping {an} (-p to enable partial loading)".format(an=assy_name))
continue
eq_sequences = [es for es in eq_sequences if es["refseq_ac"] in ncbi_alias_map]
_logger.info("Loading {n} new accessions for assembly {an}".format(an=assy_name, n=len(eq_sequences)))
for s in eq_sequences:
seq_id = ncbi_alias_map[s["refseq_ac"]]
aliases = [{"namespace": assy_name, "alias": a} for a in [s["name"]] + s["aliases"]]
if "genbank_ac" in s and s["genbank_ac"]:
aliases += [{"namespace": "genbank", "alias": s["genbank_ac"]}]
for alias in aliases:
sr.aliases.store_alias(seq_id=seq_id, **alias)
_logger.debug("Added assembly alias {a[namespace]}:{a[alias]} for {seq_id}".format(a=alias, seq_id=seq_id))
sr.commit() | add assembly names as aliases to existing sequences
Specifically, associate aliases like GRCh37.p9:1 with existing
refseq accessions
```
[{'aliases': ['chr19'],
'assembly_unit': 'Primary Assembly',
'genbank_ac': 'CM000681.2',
'length': 58617616,
'name': '19',
'refseq_ac': 'NC_000019.10',
'relationship': '=',
'sequence_role': 'assembled-molecule'}]
```
For the above sample record, this function adds the following aliases:
* genbank:CM000681.2
* GRCh38:19
* GRCh38:chr19
to the sequence referred to by refseq:NC_000019.10. | Below is the the instruction that describes the task:
### Input:
add assembly names as aliases to existing sequences
Specifically, associate aliases like GRCh37.p9:1 with existing
refseq accessions
```
[{'aliases': ['chr19'],
'assembly_unit': 'Primary Assembly',
'genbank_ac': 'CM000681.2',
'length': 58617616,
'name': '19',
'refseq_ac': 'NC_000019.10',
'relationship': '=',
'sequence_role': 'assembled-molecule'}]
```
For the above sample record, this function adds the following aliases:
* genbank:CM000681.2
* GRCh38:19
* GRCh38:chr19
to the sequence referred to by refseq:NC_000019.10.
### Response:
def add_assembly_names(opts):
"""add assembly names as aliases to existing sequences
Specifically, associate aliases like GRCh37.p9:1 with existing
refseq accessions
```
[{'aliases': ['chr19'],
'assembly_unit': 'Primary Assembly',
'genbank_ac': 'CM000681.2',
'length': 58617616,
'name': '19',
'refseq_ac': 'NC_000019.10',
'relationship': '=',
'sequence_role': 'assembled-molecule'}]
```
For the above sample record, this function adds the following aliases:
* genbank:CM000681.2
* GRCh38:19
* GRCh38:chr19
to the sequence referred to by refseq:NC_000019.10.
"""
seqrepo_dir = os.path.join(opts.root_directory, opts.instance_name)
sr = SeqRepo(seqrepo_dir, writeable=True)
assemblies = bioutils.assemblies.get_assemblies()
if opts.reload_all:
assemblies_to_load = sorted(assemblies)
else:
namespaces = [r["namespace"] for r in sr.aliases._db.execute("select distinct namespace from seqalias")]
assemblies_to_load = sorted(k for k in assemblies if k not in namespaces)
_logger.info("{} assemblies to load".format(len(assemblies_to_load)))
ncbi_alias_map = {r["alias"]: r["seq_id"] for r in sr.aliases.find_aliases(namespace="NCBI", current_only=False)}
for assy_name in tqdm.tqdm(assemblies_to_load, unit="assembly"):
_logger.debug("loading " + assy_name)
sequences = assemblies[assy_name]["sequences"]
eq_sequences = [s for s in sequences if s["relationship"] in ("=", "<>")]
if not eq_sequences:
_logger.info("No '=' sequences to load for {an}; skipping".format(an=assy_name))
continue
# all assembled-molecules (1..22, X, Y, MT) have ncbi aliases in seqrepo
not_in_seqrepo = [s["refseq_ac"] for s in eq_sequences if s["refseq_ac"] not in ncbi_alias_map]
if not_in_seqrepo:
_logger.warning("Assembly {an} references {n} accessions not in SeqRepo instance {opts.instance_name} (e.g., {acs})".format(
an=assy_name, n=len(not_in_seqrepo), opts=opts, acs=", ".join(not_in_seqrepo[:5]+["..."]), seqrepo_dir=seqrepo_dir))
if not opts.partial_load:
_logger.warning("Skipping {an} (-p to enable partial loading)".format(an=assy_name))
continue
eq_sequences = [es for es in eq_sequences if es["refseq_ac"] in ncbi_alias_map]
_logger.info("Loading {n} new accessions for assembly {an}".format(an=assy_name, n=len(eq_sequences)))
for s in eq_sequences:
seq_id = ncbi_alias_map[s["refseq_ac"]]
aliases = [{"namespace": assy_name, "alias": a} for a in [s["name"]] + s["aliases"]]
if "genbank_ac" in s and s["genbank_ac"]:
aliases += [{"namespace": "genbank", "alias": s["genbank_ac"]}]
for alias in aliases:
sr.aliases.store_alias(seq_id=seq_id, **alias)
_logger.debug("Added assembly alias {a[namespace]}:{a[alias]} for {seq_id}".format(a=alias, seq_id=seq_id))
sr.commit() |
def get_brain_by_uid(self, uid):
"""Lookup brain from the right catalog
"""
if uid == "0":
return api.get_portal()
# ensure we have the primary catalog
if self._catalog is None:
uid_catalog = api.get_tool("uid_catalog")
results = uid_catalog({"UID": uid})
if len(results) != 1:
raise ValueError("No object found for UID '{}'".format(uid))
brain = results[0]
self._catalog = self.get_catalog_for(brain)
# Fetch the brain with the primary catalog
results = self.catalog({"UID": uid})
if not results:
raise ValueError("No results found for UID '{}'".format(uid))
if len(results) != 1:
raise ValueError("Found more than one object for UID '{}'"
.format(uid))
return results[0] | Lookup brain from the right catalog | Below is the the instruction that describes the task:
### Input:
Lookup brain from the right catalog
### Response:
def get_brain_by_uid(self, uid):
"""Lookup brain from the right catalog
"""
if uid == "0":
return api.get_portal()
# ensure we have the primary catalog
if self._catalog is None:
uid_catalog = api.get_tool("uid_catalog")
results = uid_catalog({"UID": uid})
if len(results) != 1:
raise ValueError("No object found for UID '{}'".format(uid))
brain = results[0]
self._catalog = self.get_catalog_for(brain)
# Fetch the brain with the primary catalog
results = self.catalog({"UID": uid})
if not results:
raise ValueError("No results found for UID '{}'".format(uid))
if len(results) != 1:
raise ValueError("Found more than one object for UID '{}'"
.format(uid))
return results[0] |
def convert_to_ascii(statement):
"""
Converts unicode characters to ASCII character equivalents.
For example: "på fédéral" becomes "pa federal".
"""
import unicodedata
text = unicodedata.normalize('NFKD', statement.text)
text = text.encode('ascii', 'ignore').decode('utf-8')
statement.text = str(text)
return statement | Converts unicode characters to ASCII character equivalents.
For example: "på fédéral" becomes "pa federal". | Below is the the instruction that describes the task:
### Input:
Converts unicode characters to ASCII character equivalents.
For example: "på fédéral" becomes "pa federal".
### Response:
def convert_to_ascii(statement):
"""
Converts unicode characters to ASCII character equivalents.
For example: "på fédéral" becomes "pa federal".
"""
import unicodedata
text = unicodedata.normalize('NFKD', statement.text)
text = text.encode('ascii', 'ignore').decode('utf-8')
statement.text = str(text)
return statement |
def _extract_features(self):
"""
Extracts and sets the feature data from the log file necessary for a reduction
"""
for parsed_line in self.parsed_lines:
# If it's ssh, we can handle it
if parsed_line.get('program') == 'sshd':
result = self._parse_auth_message(parsed_line['message'])
# Add the ip if we have it
if 'ip' in result:
self.features['ips'].append(result['ip'])
# If we haven't seen the ip, add it
if result['ip'] not in self.ips_to_pids:
# Make the value a list of pids
self.ips_to_pids[result['ip']] = [parsed_line['processid']]
else:
# If we have seen the ip before, add the pid if it's a new one
if parsed_line['processid'] not in self.ips_to_pids[result['ip']]:
self.ips_to_pids[result['ip']].append(parsed_line['processid']) | Extracts and sets the feature data from the log file necessary for a reduction | Below is the the instruction that describes the task:
### Input:
Extracts and sets the feature data from the log file necessary for a reduction
### Response:
def _extract_features(self):
"""
Extracts and sets the feature data from the log file necessary for a reduction
"""
for parsed_line in self.parsed_lines:
# If it's ssh, we can handle it
if parsed_line.get('program') == 'sshd':
result = self._parse_auth_message(parsed_line['message'])
# Add the ip if we have it
if 'ip' in result:
self.features['ips'].append(result['ip'])
# If we haven't seen the ip, add it
if result['ip'] not in self.ips_to_pids:
# Make the value a list of pids
self.ips_to_pids[result['ip']] = [parsed_line['processid']]
else:
# If we have seen the ip before, add the pid if it's a new one
if parsed_line['processid'] not in self.ips_to_pids[result['ip']]:
self.ips_to_pids[result['ip']].append(parsed_line['processid']) |
def enforce_types(key, val):
'''
Force params to be strings unless they should remain a different type
'''
non_string_params = {
'ssl_verify': bool,
'insecure_auth': bool,
'disable_saltenv_mapping': bool,
'env_whitelist': 'stringlist',
'env_blacklist': 'stringlist',
'saltenv_whitelist': 'stringlist',
'saltenv_blacklist': 'stringlist',
'refspecs': 'stringlist',
'ref_types': 'stringlist',
'update_interval': int,
}
def _find_global(key):
for item in non_string_params:
try:
if key.endswith('_' + item):
ret = item
break
except TypeError:
if key.endswith('_' + six.text_type(item)):
ret = item
break
else:
ret = None
return ret
if key not in non_string_params:
key = _find_global(key)
if key is None:
return six.text_type(val)
expected = non_string_params[key]
if expected == 'stringlist':
if not isinstance(val, (six.string_types, list)):
val = six.text_type(val)
if isinstance(val, six.string_types):
return [x.strip() for x in val.split(',')]
return [six.text_type(x) for x in val]
else:
try:
return expected(val)
except Exception as exc:
log.error(
'Failed to enforce type for key=%s with val=%s, falling back '
'to a string', key, val
)
return six.text_type(val) | Force params to be strings unless they should remain a different type | Below is the the instruction that describes the task:
### Input:
Force params to be strings unless they should remain a different type
### Response:
def enforce_types(key, val):
'''
Force params to be strings unless they should remain a different type
'''
non_string_params = {
'ssl_verify': bool,
'insecure_auth': bool,
'disable_saltenv_mapping': bool,
'env_whitelist': 'stringlist',
'env_blacklist': 'stringlist',
'saltenv_whitelist': 'stringlist',
'saltenv_blacklist': 'stringlist',
'refspecs': 'stringlist',
'ref_types': 'stringlist',
'update_interval': int,
}
def _find_global(key):
for item in non_string_params:
try:
if key.endswith('_' + item):
ret = item
break
except TypeError:
if key.endswith('_' + six.text_type(item)):
ret = item
break
else:
ret = None
return ret
if key not in non_string_params:
key = _find_global(key)
if key is None:
return six.text_type(val)
expected = non_string_params[key]
if expected == 'stringlist':
if not isinstance(val, (six.string_types, list)):
val = six.text_type(val)
if isinstance(val, six.string_types):
return [x.strip() for x in val.split(',')]
return [six.text_type(x) for x in val]
else:
try:
return expected(val)
except Exception as exc:
log.error(
'Failed to enforce type for key=%s with val=%s, falling back '
'to a string', key, val
)
return six.text_type(val) |
def stop_tracking(self, end_time = None):
"""Stop tracking current activity. end_time can be passed in if the
activity should have other end time than the current moment"""
end_time = timegm((end_time or dt.datetime.now()).timetuple())
return self.conn.StopTracking(end_time) | Stop tracking current activity. end_time can be passed in if the
activity should have other end time than the current moment | Below is the the instruction that describes the task:
### Input:
Stop tracking current activity. end_time can be passed in if the
activity should have other end time than the current moment
### Response:
def stop_tracking(self, end_time = None):
"""Stop tracking current activity. end_time can be passed in if the
activity should have other end time than the current moment"""
end_time = timegm((end_time or dt.datetime.now()).timetuple())
return self.conn.StopTracking(end_time) |
def get_routertypes(self, context, filters=None, fields=None,
sorts=None, limit=None, marker=None,
page_reverse=False):
"""Lists defined router types."""
pass | Lists defined router types. | Below is the the instruction that describes the task:
### Input:
Lists defined router types.
### Response:
def get_routertypes(self, context, filters=None, fields=None,
sorts=None, limit=None, marker=None,
page_reverse=False):
"""Lists defined router types."""
pass |
def _add_slice(seq, slc):
""" Our textwrap routine deals in slices. This function will concat
contiguous slices as an optimization so lookup performance is faster.
It expects a sequence (probably a list) to add slice to or will extend
the last slice of the sequence if it ends where the new slice begins. """
if seq and seq[-1].stop == slc.start:
seq[-1] = slice(seq[-1].start, slc.stop)
else:
seq.append(slc) | Our textwrap routine deals in slices. This function will concat
contiguous slices as an optimization so lookup performance is faster.
It expects a sequence (probably a list) to add slice to or will extend
the last slice of the sequence if it ends where the new slice begins. | Below is the the instruction that describes the task:
### Input:
Our textwrap routine deals in slices. This function will concat
contiguous slices as an optimization so lookup performance is faster.
It expects a sequence (probably a list) to add slice to or will extend
the last slice of the sequence if it ends where the new slice begins.
### Response:
def _add_slice(seq, slc):
""" Our textwrap routine deals in slices. This function will concat
contiguous slices as an optimization so lookup performance is faster.
It expects a sequence (probably a list) to add slice to or will extend
the last slice of the sequence if it ends where the new slice begins. """
if seq and seq[-1].stop == slc.start:
seq[-1] = slice(seq[-1].start, slc.stop)
else:
seq.append(slc) |
def decode_qp_numpy(msg, return_matrix=True):
"""Decode SAPI response, results in a `qp` format, explicitly using numpy.
If numpy is not installed, the method will fail.
To use numpy for decoding, but return the results a lists (instead of
numpy matrices), set `return_matrix=False`.
"""
import numpy as np
result = msg['answer']
# Build some little endian type encodings
double_type = np.dtype(np.double)
double_type = double_type.newbyteorder('<')
int_type = np.dtype(np.int32)
int_type = int_type.newbyteorder('<')
# Decode the simple buffers
result['energies'] = np.frombuffer(base64.b64decode(result['energies']),
dtype=double_type)
if 'num_occurrences' in result:
result['num_occurrences'] = \
np.frombuffer(base64.b64decode(result['num_occurrences']),
dtype=int_type)
result['active_variables'] = \
np.frombuffer(base64.b64decode(result['active_variables']),
dtype=int_type)
# Measure out the binary data size
num_solutions = len(result['energies'])
active_variables = result['active_variables']
num_variables = len(active_variables)
total_variables = result['num_variables']
# Decode the solutions, which will be a continuous run of bits
byte_type = np.dtype(np.uint8)
byte_type = byte_type.newbyteorder('<')
bits = np.unpackbits(np.frombuffer(base64.b64decode(result['solutions']),
dtype=byte_type))
# Clip off the extra bits from encoding
if num_solutions:
bits = np.reshape(bits, (num_solutions, bits.size // num_solutions))
bits = np.delete(bits, range(num_variables, bits.shape[1]), 1)
# Switch from bits to spins
default = 3
if msg['type'] == 'ising':
bits = bits.astype(np.int8)
bits *= 2
bits -= 1
default = 0
# Fill in the missing variables
solutions = np.full((num_solutions, total_variables), default, dtype=np.int8)
solutions[:, active_variables] = bits
result['solutions'] = solutions
# If the final result shouldn't be numpy formats switch back to python objects
if not return_matrix:
result['energies'] = result['energies'].tolist()
if 'num_occurrences' in result:
result['num_occurrences'] = result['num_occurrences'].tolist()
result['active_variables'] = result['active_variables'].tolist()
result['solutions'] = result['solutions'].tolist()
return result | Decode SAPI response, results in a `qp` format, explicitly using numpy.
If numpy is not installed, the method will fail.
To use numpy for decoding, but return the results a lists (instead of
numpy matrices), set `return_matrix=False`. | Below is the the instruction that describes the task:
### Input:
Decode SAPI response, results in a `qp` format, explicitly using numpy.
If numpy is not installed, the method will fail.
To use numpy for decoding, but return the results a lists (instead of
numpy matrices), set `return_matrix=False`.
### Response:
def decode_qp_numpy(msg, return_matrix=True):
"""Decode SAPI response, results in a `qp` format, explicitly using numpy.
If numpy is not installed, the method will fail.
To use numpy for decoding, but return the results a lists (instead of
numpy matrices), set `return_matrix=False`.
"""
import numpy as np
result = msg['answer']
# Build some little endian type encodings
double_type = np.dtype(np.double)
double_type = double_type.newbyteorder('<')
int_type = np.dtype(np.int32)
int_type = int_type.newbyteorder('<')
# Decode the simple buffers
result['energies'] = np.frombuffer(base64.b64decode(result['energies']),
dtype=double_type)
if 'num_occurrences' in result:
result['num_occurrences'] = \
np.frombuffer(base64.b64decode(result['num_occurrences']),
dtype=int_type)
result['active_variables'] = \
np.frombuffer(base64.b64decode(result['active_variables']),
dtype=int_type)
# Measure out the binary data size
num_solutions = len(result['energies'])
active_variables = result['active_variables']
num_variables = len(active_variables)
total_variables = result['num_variables']
# Decode the solutions, which will be a continuous run of bits
byte_type = np.dtype(np.uint8)
byte_type = byte_type.newbyteorder('<')
bits = np.unpackbits(np.frombuffer(base64.b64decode(result['solutions']),
dtype=byte_type))
# Clip off the extra bits from encoding
if num_solutions:
bits = np.reshape(bits, (num_solutions, bits.size // num_solutions))
bits = np.delete(bits, range(num_variables, bits.shape[1]), 1)
# Switch from bits to spins
default = 3
if msg['type'] == 'ising':
bits = bits.astype(np.int8)
bits *= 2
bits -= 1
default = 0
# Fill in the missing variables
solutions = np.full((num_solutions, total_variables), default, dtype=np.int8)
solutions[:, active_variables] = bits
result['solutions'] = solutions
# If the final result shouldn't be numpy formats switch back to python objects
if not return_matrix:
result['energies'] = result['energies'].tolist()
if 'num_occurrences' in result:
result['num_occurrences'] = result['num_occurrences'].tolist()
result['active_variables'] = result['active_variables'].tolist()
result['solutions'] = result['solutions'].tolist()
return result |
def to_json(df, state_index, color_index, fills):
"""Transforms dataframe to json response"""
records = {}
for i, row in df.iterrows():
records[row[state_index]] = {
"fillKey": row[color_index]
}
return {
"data": records,
"fills": fills
} | Transforms dataframe to json response | Below is the the instruction that describes the task:
### Input:
Transforms dataframe to json response
### Response:
def to_json(df, state_index, color_index, fills):
"""Transforms dataframe to json response"""
records = {}
for i, row in df.iterrows():
records[row[state_index]] = {
"fillKey": row[color_index]
}
return {
"data": records,
"fills": fills
} |
def spherical_to_cartesian(lons, lats, depths=None):
"""
Return the position vectors (in Cartesian coordinates) of list of spherical
coordinates.
For equations see: http://mathworld.wolfram.com/SphericalCoordinates.html.
Parameters are components of spherical coordinates in a form of scalars,
lists or numpy arrays. ``depths`` can be ``None`` in which case it's
considered zero for all points.
:returns:
``numpy.array`` of 3d vectors representing points' coordinates in
Cartesian space in km. The array has shape `lons.shape + (3,)`.
In particular, if ``lons`` and ``lats`` are scalars the result is a
3D vector and if they are vectors the result is a matrix of shape
(N, 3).
See also :func:`cartesian_to_spherical`.
"""
phi = numpy.radians(lons)
theta = numpy.radians(lats)
if depths is None:
rr = EARTH_RADIUS
else:
rr = EARTH_RADIUS - numpy.array(depths)
cos_theta_r = rr * numpy.cos(theta)
try:
shape = lons.shape
except AttributeError: # a list/tuple was passed
try:
shape = (len(lons),)
except TypeError: # a scalar was passed
shape = ()
arr = numpy.zeros(shape + (3,))
arr[..., 0] = cos_theta_r * numpy.cos(phi)
arr[..., 1] = cos_theta_r * numpy.sin(phi)
arr[..., 2] = rr * numpy.sin(theta)
return arr | Return the position vectors (in Cartesian coordinates) of list of spherical
coordinates.
For equations see: http://mathworld.wolfram.com/SphericalCoordinates.html.
Parameters are components of spherical coordinates in a form of scalars,
lists or numpy arrays. ``depths`` can be ``None`` in which case it's
considered zero for all points.
:returns:
``numpy.array`` of 3d vectors representing points' coordinates in
Cartesian space in km. The array has shape `lons.shape + (3,)`.
In particular, if ``lons`` and ``lats`` are scalars the result is a
3D vector and if they are vectors the result is a matrix of shape
(N, 3).
See also :func:`cartesian_to_spherical`. | Below is the the instruction that describes the task:
### Input:
Return the position vectors (in Cartesian coordinates) of list of spherical
coordinates.
For equations see: http://mathworld.wolfram.com/SphericalCoordinates.html.
Parameters are components of spherical coordinates in a form of scalars,
lists or numpy arrays. ``depths`` can be ``None`` in which case it's
considered zero for all points.
:returns:
``numpy.array`` of 3d vectors representing points' coordinates in
Cartesian space in km. The array has shape `lons.shape + (3,)`.
In particular, if ``lons`` and ``lats`` are scalars the result is a
3D vector and if they are vectors the result is a matrix of shape
(N, 3).
See also :func:`cartesian_to_spherical`.
### Response:
def spherical_to_cartesian(lons, lats, depths=None):
"""
Return the position vectors (in Cartesian coordinates) of list of spherical
coordinates.
For equations see: http://mathworld.wolfram.com/SphericalCoordinates.html.
Parameters are components of spherical coordinates in a form of scalars,
lists or numpy arrays. ``depths`` can be ``None`` in which case it's
considered zero for all points.
:returns:
``numpy.array`` of 3d vectors representing points' coordinates in
Cartesian space in km. The array has shape `lons.shape + (3,)`.
In particular, if ``lons`` and ``lats`` are scalars the result is a
3D vector and if they are vectors the result is a matrix of shape
(N, 3).
See also :func:`cartesian_to_spherical`.
"""
phi = numpy.radians(lons)
theta = numpy.radians(lats)
if depths is None:
rr = EARTH_RADIUS
else:
rr = EARTH_RADIUS - numpy.array(depths)
cos_theta_r = rr * numpy.cos(theta)
try:
shape = lons.shape
except AttributeError: # a list/tuple was passed
try:
shape = (len(lons),)
except TypeError: # a scalar was passed
shape = ()
arr = numpy.zeros(shape + (3,))
arr[..., 0] = cos_theta_r * numpy.cos(phi)
arr[..., 1] = cos_theta_r * numpy.sin(phi)
arr[..., 2] = rr * numpy.sin(theta)
return arr |
def energy(self, sample_like, dtype=np.float):
"""The energy of the given sample.
Args:
sample_like (samples_like):
A raw sample. `sample_like` is an extension of
NumPy's array_like structure. See :func:`.as_samples`.
dtype (:class:`numpy.dtype`, optional):
The data type of the returned energies. Defaults to float.
Returns:
The energy.
"""
energy, = self.energies(sample_like, dtype=dtype)
return energy | The energy of the given sample.
Args:
sample_like (samples_like):
A raw sample. `sample_like` is an extension of
NumPy's array_like structure. See :func:`.as_samples`.
dtype (:class:`numpy.dtype`, optional):
The data type of the returned energies. Defaults to float.
Returns:
The energy. | Below is the the instruction that describes the task:
### Input:
The energy of the given sample.
Args:
sample_like (samples_like):
A raw sample. `sample_like` is an extension of
NumPy's array_like structure. See :func:`.as_samples`.
dtype (:class:`numpy.dtype`, optional):
The data type of the returned energies. Defaults to float.
Returns:
The energy.
### Response:
def energy(self, sample_like, dtype=np.float):
"""The energy of the given sample.
Args:
sample_like (samples_like):
A raw sample. `sample_like` is an extension of
NumPy's array_like structure. See :func:`.as_samples`.
dtype (:class:`numpy.dtype`, optional):
The data type of the returned energies. Defaults to float.
Returns:
The energy.
"""
energy, = self.energies(sample_like, dtype=dtype)
return energy |
def r_cts(self):
""" Actual main route of CTS APIs. Transfer typical requests through the ?request=REQUESTNAME route
:return: Response
"""
_request = request.args.get("request", None)
if _request is not None:
try:
if _request.lower() == "getcapabilities":
return self._get_capabilities(
urn=request.args.get("urn", None)
)
elif _request.lower() == "getpassage":
return self._get_passage(
urn=request.args.get("urn", None)
)
elif _request.lower() == "getpassageplus":
return self._get_passage_plus(
urn=request.args.get("urn", None)
)
elif _request.lower() == "getlabel":
return self._get_label(
urn=request.args.get("urn", None)
)
elif _request.lower() == "getfirsturn":
return self._get_first_urn(
urn=request.args.get("urn", None)
)
elif _request.lower() == "getprevnexturn":
return self._get_prev_next(
urn=request.args.get("urn", None)
)
elif _request.lower() == "getvalidreff":
return self._get_valid_reff(
urn=request.args.get("urn", None),
level=request.args.get("level", 1, type=int)
)
except NautilusError as E:
return self.cts_error(error_name=E.__class__.__name__, message=E.__doc__)
return self.cts_error(MissingParameter.__name__, message=MissingParameter.__doc__) | Actual main route of CTS APIs. Transfer typical requests through the ?request=REQUESTNAME route
:return: Response | Below is the the instruction that describes the task:
### Input:
Actual main route of CTS APIs. Transfer typical requests through the ?request=REQUESTNAME route
:return: Response
### Response:
def r_cts(self):
""" Actual main route of CTS APIs. Transfer typical requests through the ?request=REQUESTNAME route
:return: Response
"""
_request = request.args.get("request", None)
if _request is not None:
try:
if _request.lower() == "getcapabilities":
return self._get_capabilities(
urn=request.args.get("urn", None)
)
elif _request.lower() == "getpassage":
return self._get_passage(
urn=request.args.get("urn", None)
)
elif _request.lower() == "getpassageplus":
return self._get_passage_plus(
urn=request.args.get("urn", None)
)
elif _request.lower() == "getlabel":
return self._get_label(
urn=request.args.get("urn", None)
)
elif _request.lower() == "getfirsturn":
return self._get_first_urn(
urn=request.args.get("urn", None)
)
elif _request.lower() == "getprevnexturn":
return self._get_prev_next(
urn=request.args.get("urn", None)
)
elif _request.lower() == "getvalidreff":
return self._get_valid_reff(
urn=request.args.get("urn", None),
level=request.args.get("level", 1, type=int)
)
except NautilusError as E:
return self.cts_error(error_name=E.__class__.__name__, message=E.__doc__)
return self.cts_error(MissingParameter.__name__, message=MissingParameter.__doc__) |
def ordinal_float(dt):
"""Like datetime.ordinal, but rather than integer allows fractional days (so float not ordinal at all)
Similar to the Microsoft Excel numerical representation of a datetime object
>>> ordinal_float(datetime.datetime(1970, 1, 1))
719163.0
>>> ordinal_float(datetime.datetime(1, 2, 3, 4, 5, 6, 7)) # doctest: +ELLIPSIS
34.1702083334143...
"""
try:
return dt.toordinal() + ((((dt.microsecond / 1000000.) + dt.second) / 60. + dt.minute) / 60 + dt.hour) / 24.
except AttributeError:
try:
return ordinal_float(make_datetime(dt))
except ValueError:
pass
dt = list(make_datetime(val) for val in dt)
assert(all(isinstance(val, datetime.datetime) for val in dt))
return [ordinal_float(val) for val in dt] | Like datetime.ordinal, but rather than integer allows fractional days (so float not ordinal at all)
Similar to the Microsoft Excel numerical representation of a datetime object
>>> ordinal_float(datetime.datetime(1970, 1, 1))
719163.0
>>> ordinal_float(datetime.datetime(1, 2, 3, 4, 5, 6, 7)) # doctest: +ELLIPSIS
34.1702083334143... | Below is the the instruction that describes the task:
### Input:
Like datetime.ordinal, but rather than integer allows fractional days (so float not ordinal at all)
Similar to the Microsoft Excel numerical representation of a datetime object
>>> ordinal_float(datetime.datetime(1970, 1, 1))
719163.0
>>> ordinal_float(datetime.datetime(1, 2, 3, 4, 5, 6, 7)) # doctest: +ELLIPSIS
34.1702083334143...
### Response:
def ordinal_float(dt):
"""Like datetime.ordinal, but rather than integer allows fractional days (so float not ordinal at all)
Similar to the Microsoft Excel numerical representation of a datetime object
>>> ordinal_float(datetime.datetime(1970, 1, 1))
719163.0
>>> ordinal_float(datetime.datetime(1, 2, 3, 4, 5, 6, 7)) # doctest: +ELLIPSIS
34.1702083334143...
"""
try:
return dt.toordinal() + ((((dt.microsecond / 1000000.) + dt.second) / 60. + dt.minute) / 60 + dt.hour) / 24.
except AttributeError:
try:
return ordinal_float(make_datetime(dt))
except ValueError:
pass
dt = list(make_datetime(val) for val in dt)
assert(all(isinstance(val, datetime.datetime) for val in dt))
return [ordinal_float(val) for val in dt] |
def help(context, command):
'''Get command help'''
if command:
cmd = cli.commands.get(command, None)
if cmd:
context.info_name = command
click.echo(cmd.get_help(context))
else:
raise click.ClickException('no command: %s' % command)
else:
click.echo(cli.get_help(context)) | Get command help | Below is the the instruction that describes the task:
### Input:
Get command help
### Response:
def help(context, command):
'''Get command help'''
if command:
cmd = cli.commands.get(command, None)
if cmd:
context.info_name = command
click.echo(cmd.get_help(context))
else:
raise click.ClickException('no command: %s' % command)
else:
click.echo(cli.get_help(context)) |
def sign(user_id, user_type=None, today=None, session=None):
"""Check user id for validity, then sign user in if they are signed
out, or out if they are signed in.
:param user_id: The ID of the user to sign in or out.
:param user_type: (optional) Specify whether user is signing in as a `'student'` or `'tutor'`.
:param today: (optional) The current date as a `datetime.date` object. Used for testing.
:param session: (optional) SQLAlchemy session through which to access the database.
:return: `Status` named tuple object. Information about the sign attempt.
""" # noqa
if session is None:
session = Session()
else:
session = session
if today is None:
today = date.today()
else:
today = today
user = (
session
.query(User)
.filter(User.user_id == user_id)
.one_or_none()
)
if user:
signed_in_entries = (
user
.entries
.filter(Entry.date == today)
.filter(Entry.time_out.is_(None))
.all()
)
if not signed_in_entries:
new_entry = sign_in(user, user_type=user_type)
session.add(new_entry)
status = Status(
valid=True,
in_or_out='in',
user_name=get_user_name(user),
user_type=new_entry.user_type,
entry=new_entry
)
else:
for entry in signed_in_entries:
signed_out_entry = sign_out(entry)
session.add(signed_out_entry)
status = Status(
valid=True,
in_or_out='out',
user_name=get_user_name(user),
user_type=signed_out_entry.user_type,
entry=signed_out_entry
)
session.commit()
else:
raise UnregisteredUser(
'{} not registered. Please register at the front desk.'.format(
user_id
)
)
logger.debug(status)
return status | Check user id for validity, then sign user in if they are signed
out, or out if they are signed in.
:param user_id: The ID of the user to sign in or out.
:param user_type: (optional) Specify whether user is signing in as a `'student'` or `'tutor'`.
:param today: (optional) The current date as a `datetime.date` object. Used for testing.
:param session: (optional) SQLAlchemy session through which to access the database.
:return: `Status` named tuple object. Information about the sign attempt. | Below is the the instruction that describes the task:
### Input:
Check user id for validity, then sign user in if they are signed
out, or out if they are signed in.
:param user_id: The ID of the user to sign in or out.
:param user_type: (optional) Specify whether user is signing in as a `'student'` or `'tutor'`.
:param today: (optional) The current date as a `datetime.date` object. Used for testing.
:param session: (optional) SQLAlchemy session through which to access the database.
:return: `Status` named tuple object. Information about the sign attempt.
### Response:
def sign(user_id, user_type=None, today=None, session=None):
"""Check user id for validity, then sign user in if they are signed
out, or out if they are signed in.
:param user_id: The ID of the user to sign in or out.
:param user_type: (optional) Specify whether user is signing in as a `'student'` or `'tutor'`.
:param today: (optional) The current date as a `datetime.date` object. Used for testing.
:param session: (optional) SQLAlchemy session through which to access the database.
:return: `Status` named tuple object. Information about the sign attempt.
""" # noqa
if session is None:
session = Session()
else:
session = session
if today is None:
today = date.today()
else:
today = today
user = (
session
.query(User)
.filter(User.user_id == user_id)
.one_or_none()
)
if user:
signed_in_entries = (
user
.entries
.filter(Entry.date == today)
.filter(Entry.time_out.is_(None))
.all()
)
if not signed_in_entries:
new_entry = sign_in(user, user_type=user_type)
session.add(new_entry)
status = Status(
valid=True,
in_or_out='in',
user_name=get_user_name(user),
user_type=new_entry.user_type,
entry=new_entry
)
else:
for entry in signed_in_entries:
signed_out_entry = sign_out(entry)
session.add(signed_out_entry)
status = Status(
valid=True,
in_or_out='out',
user_name=get_user_name(user),
user_type=signed_out_entry.user_type,
entry=signed_out_entry
)
session.commit()
else:
raise UnregisteredUser(
'{} not registered. Please register at the front desk.'.format(
user_id
)
)
logger.debug(status)
return status |
def update_bounds(self, bounds):
'''Update cylinders start and end positions
'''
starts = bounds[:,0,:]
ends = bounds[:,1,:]
self.bounds = bounds
self.lengths = np.sqrt(((ends - starts)**2).sum(axis=1))
vertices, normals, colors = self._process_reference()
self.tr.update_vertices(vertices)
self.tr.update_normals(normals) | Update cylinders start and end positions | Below is the the instruction that describes the task:
### Input:
Update cylinders start and end positions
### Response:
def update_bounds(self, bounds):
'''Update cylinders start and end positions
'''
starts = bounds[:,0,:]
ends = bounds[:,1,:]
self.bounds = bounds
self.lengths = np.sqrt(((ends - starts)**2).sum(axis=1))
vertices, normals, colors = self._process_reference()
self.tr.update_vertices(vertices)
self.tr.update_normals(normals) |
def split_leading_indent(line, max_indents=None):
"""Split line into leading indent and main."""
indent = ""
while (
(max_indents is None or max_indents > 0)
and line.startswith((openindent, closeindent))
) or line.lstrip() != line:
if max_indents is not None and line.startswith((openindent, closeindent)):
max_indents -= 1
indent += line[0]
line = line[1:]
return indent, line | Split line into leading indent and main. | Below is the the instruction that describes the task:
### Input:
Split line into leading indent and main.
### Response:
def split_leading_indent(line, max_indents=None):
"""Split line into leading indent and main."""
indent = ""
while (
(max_indents is None or max_indents > 0)
and line.startswith((openindent, closeindent))
) or line.lstrip() != line:
if max_indents is not None and line.startswith((openindent, closeindent)):
max_indents -= 1
indent += line[0]
line = line[1:]
return indent, line |
def set_until(self, frame, lineno=None):
"""Stop when the current line number in frame is greater than lineno or
when returning from frame."""
if lineno is None:
lineno = frame.f_lineno + 1
self._set_stopinfo(frame, lineno) | Stop when the current line number in frame is greater than lineno or
when returning from frame. | Below is the the instruction that describes the task:
### Input:
Stop when the current line number in frame is greater than lineno or
when returning from frame.
### Response:
def set_until(self, frame, lineno=None):
"""Stop when the current line number in frame is greater than lineno or
when returning from frame."""
if lineno is None:
lineno = frame.f_lineno + 1
self._set_stopinfo(frame, lineno) |
def _parse_sequences(ilines, expect_qlen):
"""Parse the sequences in the current block.
Sequence looks like:
$3=227(209):
>gi|15606894|ref|NP_214275.1| {|2(244)|<Aquificae(B)>}DNA polymerase III gamma subunit [Aquifex aeolicus VF5] >gi|2984127|gb|AAC07663.1| DNA polymerase III gamma subunit [Aquifex aeolicus VF5] >gi|75
{()YVPFARKYRPKFFREVIGQEAPVRILKNAIKNDRVAHaYLFAGPRGVGKTTIARILAKALNcknpskgepcgecencreiDRGVFPDLIEMDAASNRGIDDVRA-LKEAVNYKPIKG-KYKVYIIDEAHMLTKEAFNALLKTLEEPPPRTVFVLCTTEYDKILPTILSRCQRIIFSKVRKEKVIEYLKKICEKEGIECEEGALEVLAHASEGCMRDAASLLDQASVYGE()}*
"""
while True:
first = next(ilines)
if first.startswith('_') and first.endswith('].'):
# End of sequences & end of block
break
# ENH: handle wrapped lines?
try:
index, this_len, query_len = _parse_seq_preheader(first)
except ValueError:
logging.warn('Unparseable line (SKIPPING):\n%s', first)
continue
(rec_id, dbxrefs, headlen, taillen, phylum, taxchar, description
) = _parse_seq_header(next(ilines))
try:
headseq, molseq, tailseq = _parse_seq_body(next(ilines))
except ValueError:
logging.warn('Unparseable sequence: %s -- SKIPPING', rec_id)
continue
# Validation
if expect_qlen != query_len:
logging.warn("Query length in %s given as %d; expected %d",
rec_id, query_len, expect_qlen)
if not headseq and not headlen:
headlen = 0
if not tailseq and not taillen:
taillen = 0
if headseq:
if headlen is None:
headlen = len(headseq)
elif headlen != len(headseq):
logging.warn("Conflicting head flank lengths in %s: %d, %d",
rec_id, headlen, len(headseq))
if tailseq:
if taillen is None:
taillen = len(tailseq)
elif taillen != len(tailseq):
logging.warn("Conflicting tail flank lengths in %s: %d, %d",
rec_id, taillen, len(tailseq))
yield {'index': index,
'id': rec_id,
'description': description,
'dbxrefs': dbxrefs,
'phylum': phylum,
'taxchar': taxchar,
'head_len': headlen,
'tail_len': taillen,
'head_seq': headseq,
'tail_seq': tailseq,
'length': this_len,
'seq': molseq,
} | Parse the sequences in the current block.
Sequence looks like:
$3=227(209):
>gi|15606894|ref|NP_214275.1| {|2(244)|<Aquificae(B)>}DNA polymerase III gamma subunit [Aquifex aeolicus VF5] >gi|2984127|gb|AAC07663.1| DNA polymerase III gamma subunit [Aquifex aeolicus VF5] >gi|75
{()YVPFARKYRPKFFREVIGQEAPVRILKNAIKNDRVAHaYLFAGPRGVGKTTIARILAKALNcknpskgepcgecencreiDRGVFPDLIEMDAASNRGIDDVRA-LKEAVNYKPIKG-KYKVYIIDEAHMLTKEAFNALLKTLEEPPPRTVFVLCTTEYDKILPTILSRCQRIIFSKVRKEKVIEYLKKICEKEGIECEEGALEVLAHASEGCMRDAASLLDQASVYGE()}* | Below is the the instruction that describes the task:
### Input:
Parse the sequences in the current block.
Sequence looks like:
$3=227(209):
>gi|15606894|ref|NP_214275.1| {|2(244)|<Aquificae(B)>}DNA polymerase III gamma subunit [Aquifex aeolicus VF5] >gi|2984127|gb|AAC07663.1| DNA polymerase III gamma subunit [Aquifex aeolicus VF5] >gi|75
{()YVPFARKYRPKFFREVIGQEAPVRILKNAIKNDRVAHaYLFAGPRGVGKTTIARILAKALNcknpskgepcgecencreiDRGVFPDLIEMDAASNRGIDDVRA-LKEAVNYKPIKG-KYKVYIIDEAHMLTKEAFNALLKTLEEPPPRTVFVLCTTEYDKILPTILSRCQRIIFSKVRKEKVIEYLKKICEKEGIECEEGALEVLAHASEGCMRDAASLLDQASVYGE()}*
### Response:
def _parse_sequences(ilines, expect_qlen):
"""Parse the sequences in the current block.
Sequence looks like:
$3=227(209):
>gi|15606894|ref|NP_214275.1| {|2(244)|<Aquificae(B)>}DNA polymerase III gamma subunit [Aquifex aeolicus VF5] >gi|2984127|gb|AAC07663.1| DNA polymerase III gamma subunit [Aquifex aeolicus VF5] >gi|75
{()YVPFARKYRPKFFREVIGQEAPVRILKNAIKNDRVAHaYLFAGPRGVGKTTIARILAKALNcknpskgepcgecencreiDRGVFPDLIEMDAASNRGIDDVRA-LKEAVNYKPIKG-KYKVYIIDEAHMLTKEAFNALLKTLEEPPPRTVFVLCTTEYDKILPTILSRCQRIIFSKVRKEKVIEYLKKICEKEGIECEEGALEVLAHASEGCMRDAASLLDQASVYGE()}*
"""
while True:
first = next(ilines)
if first.startswith('_') and first.endswith('].'):
# End of sequences & end of block
break
# ENH: handle wrapped lines?
try:
index, this_len, query_len = _parse_seq_preheader(first)
except ValueError:
logging.warn('Unparseable line (SKIPPING):\n%s', first)
continue
(rec_id, dbxrefs, headlen, taillen, phylum, taxchar, description
) = _parse_seq_header(next(ilines))
try:
headseq, molseq, tailseq = _parse_seq_body(next(ilines))
except ValueError:
logging.warn('Unparseable sequence: %s -- SKIPPING', rec_id)
continue
# Validation
if expect_qlen != query_len:
logging.warn("Query length in %s given as %d; expected %d",
rec_id, query_len, expect_qlen)
if not headseq and not headlen:
headlen = 0
if not tailseq and not taillen:
taillen = 0
if headseq:
if headlen is None:
headlen = len(headseq)
elif headlen != len(headseq):
logging.warn("Conflicting head flank lengths in %s: %d, %d",
rec_id, headlen, len(headseq))
if tailseq:
if taillen is None:
taillen = len(tailseq)
elif taillen != len(tailseq):
logging.warn("Conflicting tail flank lengths in %s: %d, %d",
rec_id, taillen, len(tailseq))
yield {'index': index,
'id': rec_id,
'description': description,
'dbxrefs': dbxrefs,
'phylum': phylum,
'taxchar': taxchar,
'head_len': headlen,
'tail_len': taillen,
'head_seq': headseq,
'tail_seq': tailseq,
'length': this_len,
'seq': molseq,
} |
def metric(self, name, count, elapsed):
"""A metric function that writes a single CSV file
:arg str name: name of the metric
:arg int count: number of items
:arg float elapsed: time in seconds
"""
if name is None:
warnings.warn("Ignoring unnamed metric", stacklevel=3)
return
with self.lock:
self.writer.writerow((name, count, "%f"%elapsed)) | A metric function that writes a single CSV file
:arg str name: name of the metric
:arg int count: number of items
:arg float elapsed: time in seconds | Below is the the instruction that describes the task:
### Input:
A metric function that writes a single CSV file
:arg str name: name of the metric
:arg int count: number of items
:arg float elapsed: time in seconds
### Response:
def metric(self, name, count, elapsed):
"""A metric function that writes a single CSV file
:arg str name: name of the metric
:arg int count: number of items
:arg float elapsed: time in seconds
"""
if name is None:
warnings.warn("Ignoring unnamed metric", stacklevel=3)
return
with self.lock:
self.writer.writerow((name, count, "%f"%elapsed)) |
def increment(version, major=False, minor=False, patch=True):
"""
Increment a semantic version
:param version: str of the version to increment
:param major: bool specifying major level version increment
:param minor: bool specifying minor level version increment
:param patch: bool specifying patch level version increment
:return: str of the incremented version
"""
version = semantic_version.Version(version)
if major:
version.major += 1
version.minor = 0
version.patch = 0
elif minor:
version.minor += 1
version.patch = 0
elif patch:
version.patch += 1
return str(version) | Increment a semantic version
:param version: str of the version to increment
:param major: bool specifying major level version increment
:param minor: bool specifying minor level version increment
:param patch: bool specifying patch level version increment
:return: str of the incremented version | Below is the the instruction that describes the task:
### Input:
Increment a semantic version
:param version: str of the version to increment
:param major: bool specifying major level version increment
:param minor: bool specifying minor level version increment
:param patch: bool specifying patch level version increment
:return: str of the incremented version
### Response:
def increment(version, major=False, minor=False, patch=True):
"""
Increment a semantic version
:param version: str of the version to increment
:param major: bool specifying major level version increment
:param minor: bool specifying minor level version increment
:param patch: bool specifying patch level version increment
:return: str of the incremented version
"""
version = semantic_version.Version(version)
if major:
version.major += 1
version.minor = 0
version.patch = 0
elif minor:
version.minor += 1
version.patch = 0
elif patch:
version.patch += 1
return str(version) |
def set_keep_alive(self, sock, after_idle_sec=5, interval_sec=60,
max_fails=5):
"""
This function instructs the TCP socket to send a heart beat every n
seconds to detect dead connections. It's the TCP equivalent of the
IRC ping-pong protocol and allows for better cleanup / detection
of dead TCP connections.
It activates after 1 second (after_idle_sec) of idleness, then sends
a keepalive ping once every 3 seconds(interval_sec), and closes the
connection after 5 failed ping (max_fails), or 15 seconds
"""
# OSX
if platform.system() == "Darwin":
# scraped from /usr/include, not exported by python's socket module
TCP_KEEPALIVE = 0x10
sock.setsockopt(socket.SOL_SOCKET, socket.SO_KEEPALIVE, 1)
sock.setsockopt(socket.IPPROTO_TCP, TCP_KEEPALIVE, interval_sec)
if platform.system() == "Windows":
sock.ioctl(socket.SIO_KEEPALIVE_VALS, (1, 10000, 3000))
if platform.system() == "Linux":
sock.setsockopt(socket.SOL_SOCKET, socket.SO_KEEPALIVE, 1)
sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_KEEPIDLE,
after_idle_sec)
sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_KEEPINTVL,
interval_sec)
sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_KEEPCNT, max_fails) | This function instructs the TCP socket to send a heart beat every n
seconds to detect dead connections. It's the TCP equivalent of the
IRC ping-pong protocol and allows for better cleanup / detection
of dead TCP connections.
It activates after 1 second (after_idle_sec) of idleness, then sends
a keepalive ping once every 3 seconds(interval_sec), and closes the
connection after 5 failed ping (max_fails), or 15 seconds | Below is the the instruction that describes the task:
### Input:
This function instructs the TCP socket to send a heart beat every n
seconds to detect dead connections. It's the TCP equivalent of the
IRC ping-pong protocol and allows for better cleanup / detection
of dead TCP connections.
It activates after 1 second (after_idle_sec) of idleness, then sends
a keepalive ping once every 3 seconds(interval_sec), and closes the
connection after 5 failed ping (max_fails), or 15 seconds
### Response:
def set_keep_alive(self, sock, after_idle_sec=5, interval_sec=60,
max_fails=5):
"""
This function instructs the TCP socket to send a heart beat every n
seconds to detect dead connections. It's the TCP equivalent of the
IRC ping-pong protocol and allows for better cleanup / detection
of dead TCP connections.
It activates after 1 second (after_idle_sec) of idleness, then sends
a keepalive ping once every 3 seconds(interval_sec), and closes the
connection after 5 failed ping (max_fails), or 15 seconds
"""
# OSX
if platform.system() == "Darwin":
# scraped from /usr/include, not exported by python's socket module
TCP_KEEPALIVE = 0x10
sock.setsockopt(socket.SOL_SOCKET, socket.SO_KEEPALIVE, 1)
sock.setsockopt(socket.IPPROTO_TCP, TCP_KEEPALIVE, interval_sec)
if platform.system() == "Windows":
sock.ioctl(socket.SIO_KEEPALIVE_VALS, (1, 10000, 3000))
if platform.system() == "Linux":
sock.setsockopt(socket.SOL_SOCKET, socket.SO_KEEPALIVE, 1)
sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_KEEPIDLE,
after_idle_sec)
sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_KEEPINTVL,
interval_sec)
sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_KEEPCNT, max_fails) |
def setup_ipython(self):
"""Monkey patch shell's error handler.
This method is to monkey-patch the showtraceback method of
IPython's InteractiveShell to
__IPYTHON__ is not detected when starting an IPython kernel,
so this method is called from start_kernel in spyder-modelx.
"""
if self.is_ipysetup:
return
from ipykernel.kernelapp import IPKernelApp
self.shell = IPKernelApp.instance().shell # None in PyCharm console
if not self.shell and is_ipython():
self.shell = get_ipython()
if self.shell:
shell_class = type(self.shell)
shell_class.default_showtraceback = shell_class.showtraceback
shell_class.showtraceback = custom_showtraceback
self.is_ipysetup = True
else:
raise RuntimeError("IPython shell not found.") | Monkey patch shell's error handler.
This method is to monkey-patch the showtraceback method of
IPython's InteractiveShell to
__IPYTHON__ is not detected when starting an IPython kernel,
so this method is called from start_kernel in spyder-modelx. | Below is the the instruction that describes the task:
### Input:
Monkey patch shell's error handler.
This method is to monkey-patch the showtraceback method of
IPython's InteractiveShell to
__IPYTHON__ is not detected when starting an IPython kernel,
so this method is called from start_kernel in spyder-modelx.
### Response:
def setup_ipython(self):
"""Monkey patch shell's error handler.
This method is to monkey-patch the showtraceback method of
IPython's InteractiveShell to
__IPYTHON__ is not detected when starting an IPython kernel,
so this method is called from start_kernel in spyder-modelx.
"""
if self.is_ipysetup:
return
from ipykernel.kernelapp import IPKernelApp
self.shell = IPKernelApp.instance().shell # None in PyCharm console
if not self.shell and is_ipython():
self.shell = get_ipython()
if self.shell:
shell_class = type(self.shell)
shell_class.default_showtraceback = shell_class.showtraceback
shell_class.showtraceback = custom_showtraceback
self.is_ipysetup = True
else:
raise RuntimeError("IPython shell not found.") |
def save(self, *args, **kwargs):
"""
Create formatted version of body text.
"""
self.body_formatted = sanetize_text(self.body)
super(Contact, self).save() | Create formatted version of body text. | Below is the the instruction that describes the task:
### Input:
Create formatted version of body text.
### Response:
def save(self, *args, **kwargs):
"""
Create formatted version of body text.
"""
self.body_formatted = sanetize_text(self.body)
super(Contact, self).save() |
def handle_wiki(msg):
""" Given a wiki message, return the FAS username. """
if 'wiki.article.edit' in msg.topic:
username = msg.msg['user']
elif 'wiki.upload.complete' in msg.topic:
username = msg.msg['user_text']
else:
raise ValueError("Unhandled topic.")
return username | Given a wiki message, return the FAS username. | Below is the the instruction that describes the task:
### Input:
Given a wiki message, return the FAS username.
### Response:
def handle_wiki(msg):
""" Given a wiki message, return the FAS username. """
if 'wiki.article.edit' in msg.topic:
username = msg.msg['user']
elif 'wiki.upload.complete' in msg.topic:
username = msg.msg['user_text']
else:
raise ValueError("Unhandled topic.")
return username |
def transform_frame(frame, transform, columns=None, direction='forward',
return_all=True, args=(), **kwargs):
"""
Apply transform to specified columns.
direction: 'forward' | 'inverse'
return_all: bool
True - return all columns, with specified ones transformed.
False - return only specified columns.
.. warning:: deprecated
"""
tfun, tname = parse_transform(transform, direction)
columns = to_list(columns)
if columns is None:
columns = frame.columns
if return_all:
transformed = frame.copy()
for c in columns:
transformed[c] = tfun(frame[c], *args, **kwargs)
else:
transformed = frame.filter(columns).apply(tfun, *args, **kwargs)
return transformed | Apply transform to specified columns.
direction: 'forward' | 'inverse'
return_all: bool
True - return all columns, with specified ones transformed.
False - return only specified columns.
.. warning:: deprecated | Below is the the instruction that describes the task:
### Input:
Apply transform to specified columns.
direction: 'forward' | 'inverse'
return_all: bool
True - return all columns, with specified ones transformed.
False - return only specified columns.
.. warning:: deprecated
### Response:
def transform_frame(frame, transform, columns=None, direction='forward',
return_all=True, args=(), **kwargs):
"""
Apply transform to specified columns.
direction: 'forward' | 'inverse'
return_all: bool
True - return all columns, with specified ones transformed.
False - return only specified columns.
.. warning:: deprecated
"""
tfun, tname = parse_transform(transform, direction)
columns = to_list(columns)
if columns is None:
columns = frame.columns
if return_all:
transformed = frame.copy()
for c in columns:
transformed[c] = tfun(frame[c], *args, **kwargs)
else:
transformed = frame.filter(columns).apply(tfun, *args, **kwargs)
return transformed |
def generate_data_key(self, name, key_type, context="", nonce="", bits=256, mount_point=DEFAULT_MOUNT_POINT):
"""Generates a new high-entropy key and the value encrypted with the named key.
Optionally return the plaintext of the key as well. Whether plaintext is returned depends on the path; as a
result, you can use Vault ACL policies to control whether a user is allowed to retrieve the plaintext value of a
key. This is useful if you want an untrusted user or operation to generate keys that are then made available to
trusted users.
Supported methods:
POST: /{mount_point}/datakey/{key_type}/{name}. Produces: 200 application/json
:param name: Specifies the name of the encryption key to use to encrypt the datakey. This is specified as part
of the URL.
:type name: str | unicode
:param key_type: Specifies the type of key to generate. If plaintext, the plaintext key will be returned along
with the ciphertext. If wrapped, only the ciphertext value will be returned. This is specified as part of
the URL.
:type key_type: str | unicode
:param context: Specifies the key derivation context, provided as a base64-encoded string. This must be provided
if derivation is enabled.
:type context: str | unicode
:param nonce: Specifies a nonce value, provided as base64 encoded. Must be provided if convergent encryption is
enabled for this key and the key was generated with Vault 0.6.1. Not required for keys created in 0.6.2+.
The value must be exactly 96 bits (12 bytes) long and the user must ensure that for any given context (and
thus, any given encryption key) this nonce value is never reused.
:type nonce: str | unicode
:param bits: Specifies the number of bits in the desired key. Can be 128, 256, or 512.
:type bits: int
:param mount_point: The "path" the method/backend was mounted on.
:type mount_point: str | unicode
:return: The JSON response of the request.
:rtype: requests.Response
"""
if key_type not in transit_constants.ALLOWED_DATA_KEY_TYPES:
error_msg = 'invalid key_type argument provided "{arg}", supported types: "{allowed_types}"'
raise exceptions.ParamValidationError(error_msg.format(
arg=key_type,
allowed_types=', '.join(transit_constants.ALLOWED_DATA_KEY_TYPES),
))
if bits not in transit_constants.ALLOWED_DATA_KEY_BITS:
error_msg = 'invalid bits argument provided "{arg}", supported values: "{allowed_values}"'
raise exceptions.ParamValidationError(error_msg.format(
arg=bits,
allowed_values=', '.join([str(b) for b in transit_constants.ALLOWED_DATA_KEY_BITS]),
))
params = {
'context': context,
'nonce': nonce,
'bits': bits,
}
api_path = '/v1/{mount_point}/datakey/{key_type}/{name}'.format(
mount_point=mount_point,
key_type=key_type,
name=name,
)
response = self._adapter.post(
url=api_path,
json=params,
)
return response.json() | Generates a new high-entropy key and the value encrypted with the named key.
Optionally return the plaintext of the key as well. Whether plaintext is returned depends on the path; as a
result, you can use Vault ACL policies to control whether a user is allowed to retrieve the plaintext value of a
key. This is useful if you want an untrusted user or operation to generate keys that are then made available to
trusted users.
Supported methods:
POST: /{mount_point}/datakey/{key_type}/{name}. Produces: 200 application/json
:param name: Specifies the name of the encryption key to use to encrypt the datakey. This is specified as part
of the URL.
:type name: str | unicode
:param key_type: Specifies the type of key to generate. If plaintext, the plaintext key will be returned along
with the ciphertext. If wrapped, only the ciphertext value will be returned. This is specified as part of
the URL.
:type key_type: str | unicode
:param context: Specifies the key derivation context, provided as a base64-encoded string. This must be provided
if derivation is enabled.
:type context: str | unicode
:param nonce: Specifies a nonce value, provided as base64 encoded. Must be provided if convergent encryption is
enabled for this key and the key was generated with Vault 0.6.1. Not required for keys created in 0.6.2+.
The value must be exactly 96 bits (12 bytes) long and the user must ensure that for any given context (and
thus, any given encryption key) this nonce value is never reused.
:type nonce: str | unicode
:param bits: Specifies the number of bits in the desired key. Can be 128, 256, or 512.
:type bits: int
:param mount_point: The "path" the method/backend was mounted on.
:type mount_point: str | unicode
:return: The JSON response of the request.
:rtype: requests.Response | Below is the the instruction that describes the task:
### Input:
Generates a new high-entropy key and the value encrypted with the named key.
Optionally return the plaintext of the key as well. Whether plaintext is returned depends on the path; as a
result, you can use Vault ACL policies to control whether a user is allowed to retrieve the plaintext value of a
key. This is useful if you want an untrusted user or operation to generate keys that are then made available to
trusted users.
Supported methods:
POST: /{mount_point}/datakey/{key_type}/{name}. Produces: 200 application/json
:param name: Specifies the name of the encryption key to use to encrypt the datakey. This is specified as part
of the URL.
:type name: str | unicode
:param key_type: Specifies the type of key to generate. If plaintext, the plaintext key will be returned along
with the ciphertext. If wrapped, only the ciphertext value will be returned. This is specified as part of
the URL.
:type key_type: str | unicode
:param context: Specifies the key derivation context, provided as a base64-encoded string. This must be provided
if derivation is enabled.
:type context: str | unicode
:param nonce: Specifies a nonce value, provided as base64 encoded. Must be provided if convergent encryption is
enabled for this key and the key was generated with Vault 0.6.1. Not required for keys created in 0.6.2+.
The value must be exactly 96 bits (12 bytes) long and the user must ensure that for any given context (and
thus, any given encryption key) this nonce value is never reused.
:type nonce: str | unicode
:param bits: Specifies the number of bits in the desired key. Can be 128, 256, or 512.
:type bits: int
:param mount_point: The "path" the method/backend was mounted on.
:type mount_point: str | unicode
:return: The JSON response of the request.
:rtype: requests.Response
### Response:
def generate_data_key(self, name, key_type, context="", nonce="", bits=256, mount_point=DEFAULT_MOUNT_POINT):
"""Generates a new high-entropy key and the value encrypted with the named key.
Optionally return the plaintext of the key as well. Whether plaintext is returned depends on the path; as a
result, you can use Vault ACL policies to control whether a user is allowed to retrieve the plaintext value of a
key. This is useful if you want an untrusted user or operation to generate keys that are then made available to
trusted users.
Supported methods:
POST: /{mount_point}/datakey/{key_type}/{name}. Produces: 200 application/json
:param name: Specifies the name of the encryption key to use to encrypt the datakey. This is specified as part
of the URL.
:type name: str | unicode
:param key_type: Specifies the type of key to generate. If plaintext, the plaintext key will be returned along
with the ciphertext. If wrapped, only the ciphertext value will be returned. This is specified as part of
the URL.
:type key_type: str | unicode
:param context: Specifies the key derivation context, provided as a base64-encoded string. This must be provided
if derivation is enabled.
:type context: str | unicode
:param nonce: Specifies a nonce value, provided as base64 encoded. Must be provided if convergent encryption is
enabled for this key and the key was generated with Vault 0.6.1. Not required for keys created in 0.6.2+.
The value must be exactly 96 bits (12 bytes) long and the user must ensure that for any given context (and
thus, any given encryption key) this nonce value is never reused.
:type nonce: str | unicode
:param bits: Specifies the number of bits in the desired key. Can be 128, 256, or 512.
:type bits: int
:param mount_point: The "path" the method/backend was mounted on.
:type mount_point: str | unicode
:return: The JSON response of the request.
:rtype: requests.Response
"""
if key_type not in transit_constants.ALLOWED_DATA_KEY_TYPES:
error_msg = 'invalid key_type argument provided "{arg}", supported types: "{allowed_types}"'
raise exceptions.ParamValidationError(error_msg.format(
arg=key_type,
allowed_types=', '.join(transit_constants.ALLOWED_DATA_KEY_TYPES),
))
if bits not in transit_constants.ALLOWED_DATA_KEY_BITS:
error_msg = 'invalid bits argument provided "{arg}", supported values: "{allowed_values}"'
raise exceptions.ParamValidationError(error_msg.format(
arg=bits,
allowed_values=', '.join([str(b) for b in transit_constants.ALLOWED_DATA_KEY_BITS]),
))
params = {
'context': context,
'nonce': nonce,
'bits': bits,
}
api_path = '/v1/{mount_point}/datakey/{key_type}/{name}'.format(
mount_point=mount_point,
key_type=key_type,
name=name,
)
response = self._adapter.post(
url=api_path,
json=params,
)
return response.json() |
def all(self):
" execute query, get all list of lists"
query,inputs = self._toedn()
return self.db.q(query,
inputs = inputs,
limit = self._limit,
offset = self._offset,
history = self._history) | execute query, get all list of lists | Below is the the instruction that describes the task:
### Input:
execute query, get all list of lists
### Response:
def all(self):
" execute query, get all list of lists"
query,inputs = self._toedn()
return self.db.q(query,
inputs = inputs,
limit = self._limit,
offset = self._offset,
history = self._history) |
def _create_bundle(self, data):
"""Return a bundle initialised by the given dict."""
kwargs = {}
filters = None
if isinstance(data, dict):
kwargs.update(
filters=data.get('filters', None),
output=data.get('output', None),
debug=data.get('debug', None),
extra=data.get('extra', {}),
config=data.get('config', {}),
depends=data.get('depends', None))
bundle = Bundle(*list(self._yield_bundle_contents(data)), **kwargs)
return self._auto_filter_bundle(bundle) | Return a bundle initialised by the given dict. | Below is the the instruction that describes the task:
### Input:
Return a bundle initialised by the given dict.
### Response:
def _create_bundle(self, data):
"""Return a bundle initialised by the given dict."""
kwargs = {}
filters = None
if isinstance(data, dict):
kwargs.update(
filters=data.get('filters', None),
output=data.get('output', None),
debug=data.get('debug', None),
extra=data.get('extra', {}),
config=data.get('config', {}),
depends=data.get('depends', None))
bundle = Bundle(*list(self._yield_bundle_contents(data)), **kwargs)
return self._auto_filter_bundle(bundle) |
def create_vm(self, userid, cpu, memory, disk_list,
user_profile, max_cpu, max_mem, ipl_from,
ipl_param, ipl_loadparam):
"""Create z/VM userid into user directory for a z/VM instance."""
LOG.info("Creating the user directory for vm %s", userid)
info = self._smtclient.create_vm(userid, cpu, memory,
disk_list, user_profile,
max_cpu, max_mem, ipl_from,
ipl_param, ipl_loadparam)
# add userid into smapi namelist
self._smtclient.namelist_add(self._namelist, userid)
return info | Create z/VM userid into user directory for a z/VM instance. | Below is the the instruction that describes the task:
### Input:
Create z/VM userid into user directory for a z/VM instance.
### Response:
def create_vm(self, userid, cpu, memory, disk_list,
user_profile, max_cpu, max_mem, ipl_from,
ipl_param, ipl_loadparam):
"""Create z/VM userid into user directory for a z/VM instance."""
LOG.info("Creating the user directory for vm %s", userid)
info = self._smtclient.create_vm(userid, cpu, memory,
disk_list, user_profile,
max_cpu, max_mem, ipl_from,
ipl_param, ipl_loadparam)
# add userid into smapi namelist
self._smtclient.namelist_add(self._namelist, userid)
return info |
def interrupt_guard(msg='', reraise=True):
"""
context for guard keyboardinterrupt
ex)
with interrupt_guard('need long time'):
critical_work_to_prevent()
:param str msg: message to print when interrupted
:param reraise: re-raise or not when exit
:return: context
"""
def echo():
print(msg)
return on_interrupt(echo, reraise=reraise) | context for guard keyboardinterrupt
ex)
with interrupt_guard('need long time'):
critical_work_to_prevent()
:param str msg: message to print when interrupted
:param reraise: re-raise or not when exit
:return: context | Below is the the instruction that describes the task:
### Input:
context for guard keyboardinterrupt
ex)
with interrupt_guard('need long time'):
critical_work_to_prevent()
:param str msg: message to print when interrupted
:param reraise: re-raise or not when exit
:return: context
### Response:
def interrupt_guard(msg='', reraise=True):
"""
context for guard keyboardinterrupt
ex)
with interrupt_guard('need long time'):
critical_work_to_prevent()
:param str msg: message to print when interrupted
:param reraise: re-raise or not when exit
:return: context
"""
def echo():
print(msg)
return on_interrupt(echo, reraise=reraise) |
def download_image(self, image_type, image):
"""
Read file of a project and download it
:param image_type: Image type
:param image: The path of the image
:returns: A file stream
"""
url = self._getUrl("/{}/images/{}".format(image_type, image))
response = yield from self._session().request("GET", url, auth=self._auth)
if response.status == 404:
raise aiohttp.web.HTTPNotFound(text="{} not found on compute".format(image))
return response | Read file of a project and download it
:param image_type: Image type
:param image: The path of the image
:returns: A file stream | Below is the the instruction that describes the task:
### Input:
Read file of a project and download it
:param image_type: Image type
:param image: The path of the image
:returns: A file stream
### Response:
def download_image(self, image_type, image):
"""
Read file of a project and download it
:param image_type: Image type
:param image: The path of the image
:returns: A file stream
"""
url = self._getUrl("/{}/images/{}".format(image_type, image))
response = yield from self._session().request("GET", url, auth=self._auth)
if response.status == 404:
raise aiohttp.web.HTTPNotFound(text="{} not found on compute".format(image))
return response |
def position_half_h(pslit, cpix, backw=4):
"""Find the position where the value is half of the peak"""
# Find the first peak to the right of cpix
next_peak = simple_prot(pslit, cpix)
if next_peak is None:
raise ValueError
dis_peak = next_peak - cpix
wpos2 = cpix - dis_peak
wpos1 = wpos2 - backw
# Compute background in a window of width backw
# in a position simetrical to the peak
# around cpix
left_background = pslit[wpos1:wpos2].min()
# height of the peak
height = pslit[next_peak] - left_background
half_height = left_background + 0.5 * height
# Position at halg peak, linear interpolation
vv = pslit[wpos1:next_peak+1] - half_height
res1, = numpy.nonzero(numpy.diff(vv > 0))
i1 = res1[0]
xint = wpos1 + i1 + (0 - vv[i1]) / (vv[i1+1] - vv[i1])
return xint, next_peak, wpos1, wpos2, left_background, half_height | Find the position where the value is half of the peak | Below is the the instruction that describes the task:
### Input:
Find the position where the value is half of the peak
### Response:
def position_half_h(pslit, cpix, backw=4):
"""Find the position where the value is half of the peak"""
# Find the first peak to the right of cpix
next_peak = simple_prot(pslit, cpix)
if next_peak is None:
raise ValueError
dis_peak = next_peak - cpix
wpos2 = cpix - dis_peak
wpos1 = wpos2 - backw
# Compute background in a window of width backw
# in a position simetrical to the peak
# around cpix
left_background = pslit[wpos1:wpos2].min()
# height of the peak
height = pslit[next_peak] - left_background
half_height = left_background + 0.5 * height
# Position at halg peak, linear interpolation
vv = pslit[wpos1:next_peak+1] - half_height
res1, = numpy.nonzero(numpy.diff(vv > 0))
i1 = res1[0]
xint = wpos1 + i1 + (0 - vv[i1]) / (vv[i1+1] - vv[i1])
return xint, next_peak, wpos1, wpos2, left_background, half_height |
def parse_diaspora_webfinger(document):
"""
Parse Diaspora webfinger which is either in JSON format (new) or XRD (old).
https://diaspora.github.io/diaspora_federation/discovery/webfinger.html
"""
webfinger = {
"hcard_url": None,
}
try:
doc = json.loads(document)
for link in doc["links"]:
if link["rel"] == "http://microformats.org/profile/hcard":
webfinger["hcard_url"] = link["href"]
break
else:
logger.warning("parse_diaspora_webfinger: found JSON webfinger but it has no hcard href")
raise ValueError
except Exception:
try:
xrd = XRD.parse_xrd(document)
webfinger["hcard_url"] = xrd.find_link(rels="http://microformats.org/profile/hcard").href
except xml.parsers.expat.ExpatError:
logger.warning("parse_diaspora_webfinger: found XML webfinger but it fails to parse (ExpatError)")
pass
return webfinger | Parse Diaspora webfinger which is either in JSON format (new) or XRD (old).
https://diaspora.github.io/diaspora_federation/discovery/webfinger.html | Below is the instruction that describes the task:
### Input:
Parse Diaspora webfinger which is either in JSON format (new) or XRD (old).
https://diaspora.github.io/diaspora_federation/discovery/webfinger.html
### Response:
def parse_diaspora_webfinger(document):
"""
Parse Diaspora webfinger which is either in JSON format (new) or XRD (old).
https://diaspora.github.io/diaspora_federation/discovery/webfinger.html
"""
webfinger = {
"hcard_url": None,
}
try:
doc = json.loads(document)
for link in doc["links"]:
if link["rel"] == "http://microformats.org/profile/hcard":
webfinger["hcard_url"] = link["href"]
break
else:
logger.warning("parse_diaspora_webfinger: found JSON webfinger but it has no hcard href")
raise ValueError
except Exception:
try:
xrd = XRD.parse_xrd(document)
webfinger["hcard_url"] = xrd.find_link(rels="http://microformats.org/profile/hcard").href
except xml.parsers.expat.ExpatError:
logger.warning("parse_diaspora_webfinger: found XML webfinger but it fails to parse (ExpatError)")
pass
return webfinger |
def calculate_border_width(self):
"""
Calculate the width of the menu border. This will be the width of the maximum allowable
dimensions (usually the screen size), minus the left and right margins and the newline character.
For example, given a maximum width of 80 characters, with left and right margins both
set to 1, the border width would be 77 (80 - 1 - 1 - 1 = 77).
Returns:
int: the menu border width in columns.
"""
return self.max_dimension.width - self.margins.left - self.margins.right - 1 | Calculate the width of the menu border. This will be the width of the maximum allowable
dimensions (usually the screen size), minus the left and right margins and the newline character.
For example, given a maximum width of 80 characters, with left and right margins both
set to 1, the border width would be 77 (80 - 1 - 1 - 1 = 77).
Returns:
int: the menu border width in columns. | Below is the instruction that describes the task:
### Input:
Calculate the width of the menu border. This will be the width of the maximum allowable
dimensions (usually the screen size), minus the left and right margins and the newline character.
For example, given a maximum width of 80 characters, with left and right margins both
set to 1, the border width would be 77 (80 - 1 - 1 - 1 = 77).
Returns:
int: the menu border width in columns.
### Response:
def calculate_border_width(self):
"""
Calculate the width of the menu border. This will be the width of the maximum allowable
dimensions (usually the screen size), minus the left and right margins and the newline character.
For example, given a maximum width of 80 characters, with left and right margins both
set to 1, the border width would be 77 (80 - 1 - 1 - 1 = 77).
Returns:
int: the menu border width in columns.
"""
return self.max_dimension.width - self.margins.left - self.margins.right - 1 |
def get_attrs(obj):
"""Helper for dir2 implementation."""
if not hasattr(obj, '__dict__'):
return [] # slots only
proxy_type = types.MappingProxyType if six.PY3 else types.DictProxyType
if not isinstance(obj.__dict__, (dict, proxy_type)):
print(type(obj.__dict__), obj)
raise TypeError("%s.__dict__ is not a dictionary" % obj.__name__)
    return obj.__dict__.keys() | Helper for dir2 implementation. | Below is the instruction that describes the task:
### Input:
Helper for dir2 implementation.
### Response:
def get_attrs(obj):
"""Helper for dir2 implementation."""
if not hasattr(obj, '__dict__'):
return [] # slots only
proxy_type = types.MappingProxyType if six.PY3 else types.DictProxyType
if not isinstance(obj.__dict__, (dict, proxy_type)):
print(type(obj.__dict__), obj)
raise TypeError("%s.__dict__ is not a dictionary" % obj.__name__)
return obj.__dict__.keys() |
def get_image_name(self):
"""
@rtype: int
@return: Filename of the process main module.
This method does it's best to retrieve the filename.
However sometimes this is not possible, so C{None} may
be returned instead.
"""
# Method 1: Module.fileName
# It's cached if the filename was already found by the other methods,
# if it came with the corresponding debug event, or it was found by the
# toolhelp API.
mainModule = None
try:
mainModule = self.get_main_module()
name = mainModule.fileName
if not name:
name = None
except (KeyError, AttributeError, WindowsError):
## traceback.print_exc() # XXX DEBUG
name = None
# Method 2: QueryFullProcessImageName()
# Not implemented until Windows Vista.
if not name:
try:
hProcess = self.get_handle(
win32.PROCESS_QUERY_LIMITED_INFORMATION)
name = win32.QueryFullProcessImageName(hProcess)
except (AttributeError, WindowsError):
## traceback.print_exc() # XXX DEBUG
name = None
# Method 3: GetProcessImageFileName()
#
# Not implemented until Windows XP.
# For more info see:
# https://voidnish.wordpress.com/2005/06/20/getprocessimagefilenamequerydosdevice-trivia/
if not name:
try:
hProcess = self.get_handle(win32.PROCESS_QUERY_INFORMATION)
name = win32.GetProcessImageFileName(hProcess)
if name:
name = PathOperations.native_to_win32_pathname(name)
else:
name = None
except (AttributeError, WindowsError):
## traceback.print_exc() # XXX DEBUG
if not name:
name = None
# Method 4: GetModuleFileNameEx()
# Not implemented until Windows 2000.
#
# May be spoofed by malware, since this information resides
# in usermode space (see http://www.ragestorm.net/blogs/?p=163).
if not name:
try:
hProcess = self.get_handle( win32.PROCESS_VM_READ |
win32.PROCESS_QUERY_INFORMATION )
try:
name = win32.GetModuleFileNameEx(hProcess)
except WindowsError:
## traceback.print_exc() # XXX DEBUG
name = win32.GetModuleFileNameEx(
hProcess, self.get_image_base())
if name:
name = PathOperations.native_to_win32_pathname(name)
else:
name = None
except (AttributeError, WindowsError):
## traceback.print_exc() # XXX DEBUG
if not name:
name = None
# Method 5: PEB.ProcessParameters->ImagePathName
#
# May fail since it's using an undocumented internal structure.
#
# May be spoofed by malware, since this information resides
# in usermode space (see http://www.ragestorm.net/blogs/?p=163).
if not name:
try:
peb = self.get_peb()
pp = self.read_structure(peb.ProcessParameters,
win32.RTL_USER_PROCESS_PARAMETERS)
s = pp.ImagePathName
name = self.peek_string(s.Buffer,
dwMaxSize=s.MaximumLength, fUnicode=True)
if name:
name = PathOperations.native_to_win32_pathname(name)
else:
name = None
except (AttributeError, WindowsError):
## traceback.print_exc() # XXX DEBUG
name = None
# Method 6: Module.get_filename()
# It tries to get the filename from the file handle.
#
# There are currently some problems due to the strange way the API
# works - it returns the pathname without the drive letter, and I
# couldn't figure out a way to fix it.
if not name and mainModule is not None:
try:
name = mainModule.get_filename()
if not name:
name = None
except (AttributeError, WindowsError):
## traceback.print_exc() # XXX DEBUG
name = None
# Remember the filename.
if name and mainModule is not None:
mainModule.fileName = name
# Return the image filename, or None on error.
return name | @rtype: int
@return: Filename of the process main module.
This method does it's best to retrieve the filename.
However sometimes this is not possible, so C{None} may
be returned instead. | Below is the instruction that describes the task:
### Input:
@rtype: int
@return: Filename of the process main module.
This method does it's best to retrieve the filename.
However sometimes this is not possible, so C{None} may
be returned instead.
### Response:
def get_image_name(self):
"""
@rtype: int
@return: Filename of the process main module.
This method does it's best to retrieve the filename.
However sometimes this is not possible, so C{None} may
be returned instead.
"""
# Method 1: Module.fileName
# It's cached if the filename was already found by the other methods,
# if it came with the corresponding debug event, or it was found by the
# toolhelp API.
mainModule = None
try:
mainModule = self.get_main_module()
name = mainModule.fileName
if not name:
name = None
except (KeyError, AttributeError, WindowsError):
## traceback.print_exc() # XXX DEBUG
name = None
# Method 2: QueryFullProcessImageName()
# Not implemented until Windows Vista.
if not name:
try:
hProcess = self.get_handle(
win32.PROCESS_QUERY_LIMITED_INFORMATION)
name = win32.QueryFullProcessImageName(hProcess)
except (AttributeError, WindowsError):
## traceback.print_exc() # XXX DEBUG
name = None
# Method 3: GetProcessImageFileName()
#
# Not implemented until Windows XP.
# For more info see:
# https://voidnish.wordpress.com/2005/06/20/getprocessimagefilenamequerydosdevice-trivia/
if not name:
try:
hProcess = self.get_handle(win32.PROCESS_QUERY_INFORMATION)
name = win32.GetProcessImageFileName(hProcess)
if name:
name = PathOperations.native_to_win32_pathname(name)
else:
name = None
except (AttributeError, WindowsError):
## traceback.print_exc() # XXX DEBUG
if not name:
name = None
# Method 4: GetModuleFileNameEx()
# Not implemented until Windows 2000.
#
# May be spoofed by malware, since this information resides
# in usermode space (see http://www.ragestorm.net/blogs/?p=163).
if not name:
try:
hProcess = self.get_handle( win32.PROCESS_VM_READ |
win32.PROCESS_QUERY_INFORMATION )
try:
name = win32.GetModuleFileNameEx(hProcess)
except WindowsError:
## traceback.print_exc() # XXX DEBUG
name = win32.GetModuleFileNameEx(
hProcess, self.get_image_base())
if name:
name = PathOperations.native_to_win32_pathname(name)
else:
name = None
except (AttributeError, WindowsError):
## traceback.print_exc() # XXX DEBUG
if not name:
name = None
# Method 5: PEB.ProcessParameters->ImagePathName
#
# May fail since it's using an undocumented internal structure.
#
# May be spoofed by malware, since this information resides
# in usermode space (see http://www.ragestorm.net/blogs/?p=163).
if not name:
try:
peb = self.get_peb()
pp = self.read_structure(peb.ProcessParameters,
win32.RTL_USER_PROCESS_PARAMETERS)
s = pp.ImagePathName
name = self.peek_string(s.Buffer,
dwMaxSize=s.MaximumLength, fUnicode=True)
if name:
name = PathOperations.native_to_win32_pathname(name)
else:
name = None
except (AttributeError, WindowsError):
## traceback.print_exc() # XXX DEBUG
name = None
# Method 6: Module.get_filename()
# It tries to get the filename from the file handle.
#
# There are currently some problems due to the strange way the API
# works - it returns the pathname without the drive letter, and I
# couldn't figure out a way to fix it.
if not name and mainModule is not None:
try:
name = mainModule.get_filename()
if not name:
name = None
except (AttributeError, WindowsError):
## traceback.print_exc() # XXX DEBUG
name = None
# Remember the filename.
if name and mainModule is not None:
mainModule.fileName = name
# Return the image filename, or None on error.
return name |
def bookmark_create(endpoint_plus_path, bookmark_name):
"""
Executor for `globus bookmark create`
"""
endpoint_id, path = endpoint_plus_path
client = get_client()
submit_data = {"endpoint_id": str(endpoint_id), "path": path, "name": bookmark_name}
res = client.create_bookmark(submit_data)
    formatted_print(res, simple_text="Bookmark ID: {}".format(res["id"])) | Executor for `globus bookmark create` | Below is the instruction that describes the task:
### Input:
Executor for `globus bookmark create`
### Response:
def bookmark_create(endpoint_plus_path, bookmark_name):
"""
Executor for `globus bookmark create`
"""
endpoint_id, path = endpoint_plus_path
client = get_client()
submit_data = {"endpoint_id": str(endpoint_id), "path": path, "name": bookmark_name}
res = client.create_bookmark(submit_data)
formatted_print(res, simple_text="Bookmark ID: {}".format(res["id"])) |
def get_sun_times(dates, lon, lat, time_zone):
"""Computes the times of sunrise, solar noon, and sunset for each day.
Parameters
----
dates: datetime
lat : latitude in DecDeg
lon : longitude in DecDeg
time_zone : timezone
Returns
----
DataFrame: [sunrise, sunnoon, sunset, day length] in dec hours
"""
df = pd.DataFrame(index=dates, columns=['sunrise', 'sunnoon', 'sunset', 'daylength'])
doy = np.array([(d - d.replace(day=1, month=1)).days + 1 for d in df.index]) # day of year
# Day angle and declination after Bourges (1985):
day_angle_b = np.deg2rad((360. / 365.25) * (doy - 79.346))
declination = np.deg2rad(
0.3723 + 23.2567 * np.sin(day_angle_b) - 0.7580 * np.cos(day_angle_b)
+ 0.1149 * np.sin(2*day_angle_b) + 0.3656 * np.cos(2*day_angle_b)
- 0.1712 * np.sin(3*day_angle_b) + 0.0201 * np.cos(3*day_angle_b)
)
# Equation of time with day angle after Spencer (1971):
day_angle_s = 2 * np.pi * (doy - 1) / 365.
eq_time = 12. / np.pi * (
0.000075 +
0.001868 * np.cos( day_angle_s) - 0.032077 * np.sin( day_angle_s) -
0.014615 * np.cos(2*day_angle_s) - 0.040849 * np.sin(2*day_angle_s)
)
#
standard_meridian = time_zone * 15.
delta_lat_time = (lon - standard_meridian) * 24. / 360.
omega_nul_arg = -np.tan(np.deg2rad(lat)) * np.tan(declination)
omega_nul = np.arccos(omega_nul_arg)
sunrise = 12. * (1. - (omega_nul) / np.pi) - delta_lat_time - eq_time
sunset = 12. * (1. + (omega_nul) / np.pi) - delta_lat_time - eq_time
# as an approximation, solar noon is independent of the below mentioned
# cases:
sunnoon = 12. * (1.) - delta_lat_time - eq_time
# $kf 2015-11-13: special case midnight sun and polar night
# CASE 1: MIDNIGHT SUN
# set sunrise and sunset to values that would yield the maximum day
# length even though this a crude assumption
pos = omega_nul_arg < -1
sunrise[pos] = sunnoon[pos] - 12
sunset[pos] = sunnoon[pos] + 12
# CASE 2: POLAR NIGHT
# set sunrise and sunset to values that would yield the minmum day
# length even though this a crude assumption
pos = omega_nul_arg > 1
sunrise[pos] = sunnoon[pos]
sunset[pos] = sunnoon[pos]
daylength = sunset - sunrise
# adjust if required
sunrise[sunrise < 0] += 24
sunset[sunset > 24] -= 24
df.sunrise = sunrise
df.sunnoon = sunnoon
df.sunset = sunset
df.daylength = daylength
return df | Computes the times of sunrise, solar noon, and sunset for each day.
Parameters
----
dates: datetime
lat : latitude in DecDeg
lon : longitude in DecDeg
time_zone : timezone
Returns
----
DataFrame: [sunrise, sunnoon, sunset, day length] in dec hours | Below is the instruction that describes the task:
### Input:
Computes the times of sunrise, solar noon, and sunset for each day.
Parameters
----
dates: datetime
lat : latitude in DecDeg
lon : longitude in DecDeg
time_zone : timezone
Returns
----
DataFrame: [sunrise, sunnoon, sunset, day length] in dec hours
### Response:
def get_sun_times(dates, lon, lat, time_zone):
"""Computes the times of sunrise, solar noon, and sunset for each day.
Parameters
----
dates: datetime
lat : latitude in DecDeg
lon : longitude in DecDeg
time_zone : timezone
Returns
----
DataFrame: [sunrise, sunnoon, sunset, day length] in dec hours
"""
df = pd.DataFrame(index=dates, columns=['sunrise', 'sunnoon', 'sunset', 'daylength'])
doy = np.array([(d - d.replace(day=1, month=1)).days + 1 for d in df.index]) # day of year
# Day angle and declination after Bourges (1985):
day_angle_b = np.deg2rad((360. / 365.25) * (doy - 79.346))
declination = np.deg2rad(
0.3723 + 23.2567 * np.sin(day_angle_b) - 0.7580 * np.cos(day_angle_b)
+ 0.1149 * np.sin(2*day_angle_b) + 0.3656 * np.cos(2*day_angle_b)
- 0.1712 * np.sin(3*day_angle_b) + 0.0201 * np.cos(3*day_angle_b)
)
# Equation of time with day angle after Spencer (1971):
day_angle_s = 2 * np.pi * (doy - 1) / 365.
eq_time = 12. / np.pi * (
0.000075 +
0.001868 * np.cos( day_angle_s) - 0.032077 * np.sin( day_angle_s) -
0.014615 * np.cos(2*day_angle_s) - 0.040849 * np.sin(2*day_angle_s)
)
#
standard_meridian = time_zone * 15.
delta_lat_time = (lon - standard_meridian) * 24. / 360.
omega_nul_arg = -np.tan(np.deg2rad(lat)) * np.tan(declination)
omega_nul = np.arccos(omega_nul_arg)
sunrise = 12. * (1. - (omega_nul) / np.pi) - delta_lat_time - eq_time
sunset = 12. * (1. + (omega_nul) / np.pi) - delta_lat_time - eq_time
# as an approximation, solar noon is independent of the below mentioned
# cases:
sunnoon = 12. * (1.) - delta_lat_time - eq_time
# $kf 2015-11-13: special case midnight sun and polar night
# CASE 1: MIDNIGHT SUN
# set sunrise and sunset to values that would yield the maximum day
# length even though this a crude assumption
pos = omega_nul_arg < -1
sunrise[pos] = sunnoon[pos] - 12
sunset[pos] = sunnoon[pos] + 12
# CASE 2: POLAR NIGHT
# set sunrise and sunset to values that would yield the minmum day
# length even though this a crude assumption
pos = omega_nul_arg > 1
sunrise[pos] = sunnoon[pos]
sunset[pos] = sunnoon[pos]
daylength = sunset - sunrise
# adjust if required
sunrise[sunrise < 0] += 24
sunset[sunset > 24] -= 24
df.sunrise = sunrise
df.sunnoon = sunnoon
df.sunset = sunset
df.daylength = daylength
return df |
def from_irafpath(irafpath):
"""Resolve IRAF path like ``jref$`` into actual file path.
Parameters
----------
irafpath : str
Path containing IRAF syntax.
Returns
-------
realpath : str
Actual file path. If input does not follow ``path$filename``
format, then this is the same as input.
Raises
------
ValueError
The required environment variable is undefined.
"""
s = irafpath.split('$')
if len(s) != 2:
return irafpath
if len(s[0]) == 0:
return irafpath
try:
refdir = os.environ[s[0]]
except KeyError:
raise ValueError('{0} environment variable undefined'.format(s[0]))
return os.path.join(refdir, s[1]) | Resolve IRAF path like ``jref$`` into actual file path.
Parameters
----------
irafpath : str
Path containing IRAF syntax.
Returns
-------
realpath : str
Actual file path. If input does not follow ``path$filename``
format, then this is the same as input.
Raises
------
ValueError
The required environment variable is undefined. | Below is the instruction that describes the task:
### Input:
Resolve IRAF path like ``jref$`` into actual file path.
Parameters
----------
irafpath : str
Path containing IRAF syntax.
Returns
-------
realpath : str
Actual file path. If input does not follow ``path$filename``
format, then this is the same as input.
Raises
------
ValueError
The required environment variable is undefined.
### Response:
def from_irafpath(irafpath):
"""Resolve IRAF path like ``jref$`` into actual file path.
Parameters
----------
irafpath : str
Path containing IRAF syntax.
Returns
-------
realpath : str
Actual file path. If input does not follow ``path$filename``
format, then this is the same as input.
Raises
------
ValueError
The required environment variable is undefined.
"""
s = irafpath.split('$')
if len(s) != 2:
return irafpath
if len(s[0]) == 0:
return irafpath
try:
refdir = os.environ[s[0]]
except KeyError:
raise ValueError('{0} environment variable undefined'.format(s[0]))
return os.path.join(refdir, s[1]) |
def extract(pcmiter, samplerate, channels, duration = -1):
"""Given a PCM data stream, extract fingerprint data from the
audio. Returns a byte string of fingerprint data. Raises an
ExtractionError if fingerprinting fails.
"""
extractor = _fplib.Extractor(samplerate, channels, duration)
# Get first block.
try:
next_block = next(pcmiter)
except StopIteration:
raise ExtractionError()
# Get and process subsequent blocks.
while True:
# Shift over blocks.
cur_block = next_block
try:
next_block = next(pcmiter)
except StopIteration:
next_block = None
done = next_block is None
# Process the block.
try:
if extractor.process(cur_block, done):
# Success!
break
except RuntimeError as exc:
# Exception from fplib. Most likely the file is too short.
raise ExtractionError(exc.args[0])
# End of file but processor never became ready?
if done:
raise ExtractionError()
# Get resulting fingerprint data.
out = extractor.result()
if out is None:
raise ExtractionError()
# Free extractor memory.
extractor.free()
return out | Given a PCM data stream, extract fingerprint data from the
audio. Returns a byte string of fingerprint data. Raises an
ExtractionError if fingerprinting fails. | Below is the instruction that describes the task:
### Input:
Given a PCM data stream, extract fingerprint data from the
audio. Returns a byte string of fingerprint data. Raises an
ExtractionError if fingerprinting fails.
### Response:
def extract(pcmiter, samplerate, channels, duration = -1):
"""Given a PCM data stream, extract fingerprint data from the
audio. Returns a byte string of fingerprint data. Raises an
ExtractionError if fingerprinting fails.
"""
extractor = _fplib.Extractor(samplerate, channels, duration)
# Get first block.
try:
next_block = next(pcmiter)
except StopIteration:
raise ExtractionError()
# Get and process subsequent blocks.
while True:
# Shift over blocks.
cur_block = next_block
try:
next_block = next(pcmiter)
except StopIteration:
next_block = None
done = next_block is None
# Process the block.
try:
if extractor.process(cur_block, done):
# Success!
break
except RuntimeError as exc:
# Exception from fplib. Most likely the file is too short.
raise ExtractionError(exc.args[0])
# End of file but processor never became ready?
if done:
raise ExtractionError()
# Get resulting fingerprint data.
out = extractor.result()
if out is None:
raise ExtractionError()
# Free extractor memory.
extractor.free()
return out |
def whitelist_method_generator(base, klass, whitelist):
"""
Yields all GroupBy member defs for DataFrame/Series names in whitelist.
Parameters
----------
base : class
base class
klass : class
class where members are defined.
Should be Series or DataFrame
whitelist : list
list of names of klass methods to be constructed
Returns
-------
The generator yields a sequence of strings, each suitable for exec'ing,
that define implementations of the named methods for DataFrameGroupBy
or SeriesGroupBy.
Since we don't want to override methods explicitly defined in the
base class, any such name is skipped.
"""
method_wrapper_template = \
"""def %(name)s(%(sig)s) :
\"""
%(doc)s
\"""
f = %(self)s.__getattr__('%(name)s')
return f(%(args)s)"""
property_wrapper_template = \
"""@property
def %(name)s(self) :
\"""%(doc)s\"""
return self.__getattr__('%(name)s')"""
for name in whitelist:
# don't override anything that was explicitly defined
# in the base class
if hasattr(base, name):
continue
# ugly, but we need the name string itself in the method.
f = getattr(klass, name)
doc = f.__doc__
doc = doc if type(doc) == str else ''
if isinstance(f, types.MethodType):
wrapper_template = method_wrapper_template
decl, args = make_signature(f)
# pass args by name to f because otherwise
# GroupBy._make_wrapper won't know whether
# we passed in an axis parameter.
args_by_name = ['{0}={0}'.format(arg) for arg in args[1:]]
params = {'name': name,
'doc': doc,
'sig': ','.join(decl),
'self': args[0],
'args': ','.join(args_by_name)}
else:
wrapper_template = property_wrapper_template
params = {'name': name, 'doc': doc}
yield wrapper_template % params | Yields all GroupBy member defs for DataFrame/Series names in whitelist.
Parameters
----------
base : class
base class
klass : class
class where members are defined.
Should be Series or DataFrame
whitelist : list
list of names of klass methods to be constructed
Returns
-------
The generator yields a sequence of strings, each suitable for exec'ing,
that define implementations of the named methods for DataFrameGroupBy
or SeriesGroupBy.
Since we don't want to override methods explicitly defined in the
base class, any such name is skipped. | Below is the instruction that describes the task:
### Input:
Yields all GroupBy member defs for DataFrame/Series names in whitelist.
Parameters
----------
base : class
base class
klass : class
class where members are defined.
Should be Series or DataFrame
whitelist : list
list of names of klass methods to be constructed
Returns
-------
The generator yields a sequence of strings, each suitable for exec'ing,
that define implementations of the named methods for DataFrameGroupBy
or SeriesGroupBy.
Since we don't want to override methods explicitly defined in the
base class, any such name is skipped.
### Response:
def whitelist_method_generator(base, klass, whitelist):
"""
Yields all GroupBy member defs for DataFrame/Series names in whitelist.
Parameters
----------
base : class
base class
klass : class
class where members are defined.
Should be Series or DataFrame
whitelist : list
list of names of klass methods to be constructed
Returns
-------
The generator yields a sequence of strings, each suitable for exec'ing,
that define implementations of the named methods for DataFrameGroupBy
or SeriesGroupBy.
Since we don't want to override methods explicitly defined in the
base class, any such name is skipped.
"""
method_wrapper_template = \
"""def %(name)s(%(sig)s) :
\"""
%(doc)s
\"""
f = %(self)s.__getattr__('%(name)s')
return f(%(args)s)"""
property_wrapper_template = \
"""@property
def %(name)s(self) :
\"""%(doc)s\"""
return self.__getattr__('%(name)s')"""
for name in whitelist:
# don't override anything that was explicitly defined
# in the base class
if hasattr(base, name):
continue
# ugly, but we need the name string itself in the method.
f = getattr(klass, name)
doc = f.__doc__
doc = doc if type(doc) == str else ''
if isinstance(f, types.MethodType):
wrapper_template = method_wrapper_template
decl, args = make_signature(f)
# pass args by name to f because otherwise
# GroupBy._make_wrapper won't know whether
# we passed in an axis parameter.
args_by_name = ['{0}={0}'.format(arg) for arg in args[1:]]
params = {'name': name,
'doc': doc,
'sig': ','.join(decl),
'self': args[0],
'args': ','.join(args_by_name)}
else:
wrapper_template = property_wrapper_template
params = {'name': name, 'doc': doc}
yield wrapper_template % params |
def get_placeholder_formats_list(self, format_string):
"""
Parses the format_string and returns a list of tuples
(placeholder, format).
"""
placeholders = []
# Tokenize the format string and process them
for token in self.tokens(format_string):
if token.group("placeholder"):
placeholders.append((token.group("key"), token.group("format")))
return placeholders | Parses the format_string and returns a list of tuples
(placeholder, format). | Below is the instruction that describes the task:
### Input:
Parses the format_string and returns a list of tuples
(placeholder, format).
### Response:
def get_placeholder_formats_list(self, format_string):
"""
Parses the format_string and returns a list of tuples
(placeholder, format).
"""
placeholders = []
# Tokenize the format string and process them
for token in self.tokens(format_string):
if token.group("placeholder"):
placeholders.append((token.group("key"), token.group("format")))
return placeholders |
def on_resize(self, event):
"""Resize handler
Parameters
----------
event : instance of Event
The resize event.
"""
self._update_transforms()
if self._central_widget is not None:
self._central_widget.size = self.size
if len(self._vp_stack) == 0:
self.context.set_viewport(0, 0, *self.physical_size) | Resize handler
Parameters
----------
event : instance of Event
The resize event. | Below is the instruction that describes the task:
### Input:
Resize handler
Parameters
----------
event : instance of Event
The resize event.
### Response:
def on_resize(self, event):
"""Resize handler
Parameters
----------
event : instance of Event
The resize event.
"""
self._update_transforms()
if self._central_widget is not None:
self._central_widget.size = self.size
if len(self._vp_stack) == 0:
self.context.set_viewport(0, 0, *self.physical_size) |
def get_asset_notification_session(self, asset_receiver):
"""Gets the notification session for notifications pertaining to asset changes.
arg: asset_receiver (osid.repository.AssetReceiver): the
notification callback
return: (osid.repository.AssetNotificationSession) - an
``AssetNotificationSession``
raise: NullArgument - ``asset_receiver`` is ``null``
raise: OperationFailed - unable to complete request
raise: Unimplemented - ``supports_asset_notification()`` is
``false``
*compliance: optional -- This method must be implemented if
``supports_asset_notification()`` is ``true``.*
"""
if not self.supports_asset_notification():
raise errors.Unimplemented()
# pylint: disable=no-member
return sessions.AssetNotificationSession(runtime=self._runtime, receiver=asset_receiver) | Gets the notification session for notifications pertaining to asset changes.
arg: asset_receiver (osid.repository.AssetReceiver): the
notification callback
return: (osid.repository.AssetNotificationSession) - an
``AssetNotificationSession``
raise: NullArgument - ``asset_receiver`` is ``null``
raise: OperationFailed - unable to complete request
raise: Unimplemented - ``supports_asset_notification()`` is
``false``
*compliance: optional -- This method must be implemented if
``supports_asset_notification()`` is ``true``.* | Below is the instruction that describes the task:
### Input:
Gets the notification session for notifications pertaining to asset changes.
arg: asset_receiver (osid.repository.AssetReceiver): the
notification callback
return: (osid.repository.AssetNotificationSession) - an
``AssetNotificationSession``
raise: NullArgument - ``asset_receiver`` is ``null``
raise: OperationFailed - unable to complete request
raise: Unimplemented - ``supports_asset_notification()`` is
``false``
*compliance: optional -- This method must be implemented if
``supports_asset_notification()`` is ``true``.*
### Response:
def get_asset_notification_session(self, asset_receiver):
"""Gets the notification session for notifications pertaining to asset changes.
arg: asset_receiver (osid.repository.AssetReceiver): the
notification callback
return: (osid.repository.AssetNotificationSession) - an
``AssetNotificationSession``
raise: NullArgument - ``asset_receiver`` is ``null``
raise: OperationFailed - unable to complete request
raise: Unimplemented - ``supports_asset_notification()`` is
``false``
*compliance: optional -- This method must be implemented if
``supports_asset_notification()`` is ``true``.*
"""
if not self.supports_asset_notification():
raise errors.Unimplemented()
# pylint: disable=no-member
return sessions.AssetNotificationSession(runtime=self._runtime, receiver=asset_receiver) |
def _chk_docopt_kws(self, docdict, exp):
"""Check for common user errors when running from the command-line."""
for key, val in docdict.items():
if isinstance(val, str):
assert '=' not in val, self._err("'=' FOUND IN VALUE", key, val, exp)
elif key != 'help' and key not in self.exp_keys and key not in self.exp_elems:
            raise RuntimeError(self._err("UNKNOWN KEY", key, val, exp)) | Check for common user errors when running from the command-line. | Below is the instruction that describes the task:
### Input:
Check for common user errors when running from the command-line.
### Response:
def _chk_docopt_kws(self, docdict, exp):
"""Check for common user errors when running from the command-line."""
for key, val in docdict.items():
if isinstance(val, str):
assert '=' not in val, self._err("'=' FOUND IN VALUE", key, val, exp)
elif key != 'help' and key not in self.exp_keys and key not in self.exp_elems:
raise RuntimeError(self._err("UNKNOWN KEY", key, val, exp)) |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.