code stringlengths 75 104k | docstring stringlengths 1 46.9k | text stringlengths 164 112k |
|---|---|---|
def info(gandi, resource, stat):
"""Display information about a PaaS instance.
Resource can be a vhost, a hostname, or an ID
Cache statistics are based on 24 hours data.
"""
output_keys = ['name', 'type', 'size', 'memory', 'console', 'vhost',
'dc', 'sftp_server', 'git_server', 'snapshot']
paas = gandi.paas.info(resource)
paas_hosts = []
list_vhost = gandi.vhost.list({'paas_id': paas['id']})
df = gandi.paas.quota(paas['id'])
paas.update({'df': df})
if stat:
cache = gandi.paas.cache(paas['id'])
paas.update({'cache': cache})
for host in list_vhost:
paas_hosts.append(host['name'])
output_paas(gandi, paas, [], paas_hosts, output_keys)
return paas | Display information about a PaaS instance.
Resource can be a vhost, a hostname, or an ID
Cache statistics are based on 24 hours data. | Below is the instruction that describes the task:
### Input:
Display information about a PaaS instance.
Resource can be a vhost, a hostname, or an ID
Cache statistics are based on 24 hours data.
### Response:
def info(gandi, resource, stat):
    """Display information about a PaaS instance.
    Resource can be a vhost, a hostname, or an ID
    Cache statistics are based on 24 hours data.
    """
    output_keys = ['name', 'type', 'size', 'memory', 'console', 'vhost',
                   'dc', 'sftp_server', 'git_server', 'snapshot']
    paas = gandi.paas.info(resource)
    vhosts = gandi.vhost.list({'paas_id': paas['id']})
    # Attach disk-usage (quota) data to the instance record.
    paas['df'] = gandi.paas.quota(paas['id'])
    if stat:
        # Cache statistics are only fetched on request.
        paas['cache'] = gandi.paas.cache(paas['id'])
    paas_hosts = [vhost['name'] for vhost in vhosts]
    output_paas(gandi, paas, [], paas_hosts, output_keys)
    return paas
def _univariate_handler(df, cols, stat=None, w=None, inplace=True,
pvalue = 'sim', outvals = None, swapname='', **kwargs):
"""
Compute a univariate descriptive statistic `stat` over columns `cols` in
`df`.
Parameters
----------
df : pandas.DataFrame
the dataframe containing columns to compute the descriptive
statistics
cols : string or list of strings
one or more names of columns in `df` to use to compute
exploratory descriptive statistics.
stat : callable
a function that takes data as a first argument and any number
of configuration keyword arguments and returns an object
encapsulating the exploratory statistic results
w : pysal.weights.W
the spatial weights object corresponding to the dataframe
inplace : bool
a flag denoting whether to add the statistic to the dataframe
in memory, or to construct a copy of the dataframe and append
the results to the copy
pvalue : string
the name of the pvalue on the results object wanted
outvals : list of strings
names of attributes of the dataframe to attempt to flatten
into a column
swapname : string
suffix to replace generic identifier with. Each caller of this
function should set this to a unique column suffix
**kwargs : optional keyword arguments
options that are passed directly to the statistic
"""
### Preprocess
if not inplace:
new_df = df.copy()
_univariate_handler(new_df, cols, stat=stat, w=w, pvalue=pvalue,
inplace=True, outvals=outvals,
swapname=swapname, **kwargs)
return new_df
if w is None:
for name in df._metadata:
this_obj = df.__dict__.get(name)
if isinstance(this_obj, W):
w = this_obj
if w is None:
raise Exception('Weights not provided and no weights attached to frame!'
' Please provide a weight or attach a weight to the'
' dataframe')
### Prep indexes
if outvals is None:
outvals = []
outvals.insert(0,'_statistic')
if pvalue.lower() in ['all', 'both', '*']:
raise NotImplementedError("If you want more than one type of PValue,add"
" the targeted pvalue type to outvals. For example:"
" Geary(df, cols=['HOVAL'], w=w, outvals=['p_z_sim', "
"'p_rand']")
# this is nontrivial, since we
# can't know which p_value types are on the object without computing it.
# This is because we don't flag them with @properties, so they're just
# arbitrarily assigned post-facto. One solution might be to post-process the
# objects, determine which pvalue types are available, and then grab them
# all if needed.
if pvalue is not '':
outvals.append('p_'+pvalue.lower())
if isinstance(cols, str):
cols = [cols]
### Make closure around weights & apply columnwise
def column_stat(column):
return stat(column.values, w=w, **kwargs)
stat_objs = df[cols].apply(column_stat)
### Assign into dataframe
for col in cols:
stat_obj = stat_objs[col]
y = kwargs.get('y')
if y is not None:
col += '-' + y.name
outcols = ['_'.join((col, val)) for val in outvals]
for colname, attname in zip(outcols, outvals):
df[colname] = stat_obj.__getattribute__(attname)
if swapname is not '':
df.columns = [_swap_ending(col, swapname) if col.endswith('_statistic') else col
for col in df.columns] | Compute a univariate descriptive statistic `stat` over columns `cols` in
`df`.
Parameters
----------
df : pandas.DataFrame
the dataframe containing columns to compute the descriptive
statistics
cols : string or list of strings
one or more names of columns in `df` to use to compute
exploratory descriptive statistics.
stat : callable
a function that takes data as a first argument and any number
of configuration keyword arguments and returns an object
encapsulating the exploratory statistic results
w : pysal.weights.W
the spatial weights object corresponding to the dataframe
inplace : bool
a flag denoting whether to add the statistic to the dataframe
in memory, or to construct a copy of the dataframe and append
the results to the copy
pvalue : string
the name of the pvalue on the results object wanted
outvals : list of strings
names of attributes of the dataframe to attempt to flatten
into a column
swapname : string
suffix to replace generic identifier with. Each caller of this
function should set this to a unique column suffix
**kwargs : optional keyword arguments
options that are passed directly to the statistic | Below is the instruction that describes the task:
### Input:
Compute a univariate descriptive statistic `stat` over columns `cols` in
`df`.
Parameters
----------
df : pandas.DataFrame
the dataframe containing columns to compute the descriptive
statistics
cols : string or list of strings
one or more names of columns in `df` to use to compute
exploratory descriptive statistics.
stat : callable
a function that takes data as a first argument and any number
of configuration keyword arguments and returns an object
encapsulating the exploratory statistic results
w : pysal.weights.W
the spatial weights object corresponding to the dataframe
inplace : bool
a flag denoting whether to add the statistic to the dataframe
in memory, or to construct a copy of the dataframe and append
the results to the copy
pvalue : string
the name of the pvalue on the results object wanted
outvals : list of strings
names of attributes of the dataframe to attempt to flatten
into a column
swapname : string
suffix to replace generic identifier with. Each caller of this
function should set this to a unique column suffix
**kwargs : optional keyword arguments
options that are passed directly to the statistic
### Response:
def _univariate_handler(df, cols, stat=None, w=None, inplace=True,
                        pvalue='sim', outvals=None, swapname='', **kwargs):
    """
    Compute a univariate descriptive statistic `stat` over columns `cols` in
    `df`.

    Parameters
    ----------
    df : pandas.DataFrame
        the dataframe containing columns to compute the descriptive
        statistics
    cols : string or list of strings
        one or more names of columns in `df` to use to compute
        exploratory descriptive statistics.
    stat : callable
        a function that takes data as a first argument and any number
        of configuration keyword arguments and returns an object
        encapsulating the exploratory statistic results
    w : pysal.weights.W
        the spatial weights object corresponding to the dataframe
    inplace : bool
        a flag denoting whether to add the statistic to the dataframe
        in memory, or to construct a copy of the dataframe and append
        the results to the copy
    pvalue : string
        the name of the pvalue on the results object wanted
    outvals : list of strings
        names of attributes of the dataframe to attempt to flatten
        into a column
    swapname : string
        suffix to replace generic identifier with. Each caller of this
        function should set this to a unique column suffix
    **kwargs : optional keyword arguments
        options that are passed directly to the statistic
    """
    ### Preprocess
    if not inplace:
        # Recurse on a copy so the caller's frame is left untouched.
        new_df = df.copy()
        _univariate_handler(new_df, cols, stat=stat, w=w, pvalue=pvalue,
                            inplace=True, outvals=outvals,
                            swapname=swapname, **kwargs)
        return new_df
    if w is None:
        # Fall back to a weights object attached to the dataframe, if any.
        for name in df._metadata:
            this_obj = df.__dict__.get(name)
            if isinstance(this_obj, W):
                w = this_obj
    if w is None:
        raise Exception('Weights not provided and no weights attached to frame!'
                        ' Please provide a weight or attach a weight to the'
                        ' dataframe')
    ### Prep indexes
    # Bug fix: operate on a copy so a caller-supplied ``outvals`` list is
    # not mutated in place by the insert/append below.
    outvals = [] if outvals is None else list(outvals)
    outvals.insert(0, '_statistic')
    if pvalue.lower() in ['all', 'both', '*']:
        raise NotImplementedError("If you want more than one type of PValue,add"
                                  " the targeted pvalue type to outvals. For example:"
                                  " Geary(df, cols=['HOVAL'], w=w, outvals=['p_z_sim', "
                                  "'p_rand']")
    # this is nontrivial, since we
    # can't know which p_value types are on the object without computing it.
    # This is because we don't flag them with @properties, so they're just
    # arbitrarily assigned post-facto. One solution might be to post-process the
    # objects, determine which pvalue types are available, and then grab them
    # all if needed.
    if pvalue != '':  # bug fix: `is not ''` compared identity, not equality
        outvals.append('p_' + pvalue.lower())
    if isinstance(cols, str):
        cols = [cols]
    ### Make closure around weights & apply columnwise
    def column_stat(column):
        return stat(column.values, w=w, **kwargs)
    stat_objs = df[cols].apply(column_stat)
    ### Assign into dataframe
    for col in cols:
        stat_obj = stat_objs[col]
        y = kwargs.get('y')
        if y is not None:
            # Bivariate statistics get a `<col>-<y>` compound column name.
            col += '-' + y.name
        outcols = ['_'.join((col, val)) for val in outvals]
        for colname, attname in zip(outcols, outvals):
            df[colname] = getattr(stat_obj, attname)
    if swapname != '':  # bug fix: `is not ''` compared identity, not equality
        df.columns = [_swap_ending(col, swapname) if col.endswith('_statistic')
                      else col for col in df.columns]
def delete(self, key):
"""See memcache.Client."""
if self.servers:
key = self._qualified_key(key)
hashed_key = self.key_hasher(key)
self.client.delete(hashed_key) | See memcache.Client. | Below is the instruction that describes the task:
### Input:
See memcache.Client.
### Response:
def delete(self, key):
    """See memcache.Client."""
    if not self.servers:
        # Nothing configured: deleting is a no-op.
        return
    qualified = self._qualified_key(key)
    self.client.delete(self.key_hasher(qualified))
def solve_prop(self, goal, reset_method=True):
r'''Method to solve for the temperature at which a property is at a
specified value. `T_dependent_property` is used to calculate the value
of the property as a function of temperature; if `reset_method` is True,
the best method is used at each temperature as the solver seeks a
solution. This slows the solution moderately.
Checks the given property value with `test_property_validity` first
and raises an exception if it is not valid. Requires that Tmin and
Tmax have been set to know what range to search within.
Search is performed with the brenth solver from SciPy.
Parameters
----------
goal : float
Property value desired, [`units`]
reset_method : bool
Whether or not to reset the method as the solver searches
Returns
-------
T : float
Temperature at which the property is the specified value [K]
'''
if self.Tmin is None or self.Tmax is None:
raise Exception('Both a minimum and a maximum value are not present indicating there is not enough data for temperature dependency.')
if not self.test_property_validity(goal):
raise Exception('Input property is not considered plausible; no method would calculate it.')
def error(T):
if reset_method:
self.method = None
return self.T_dependent_property(T) - goal
try:
return brenth(error, self.Tmin, self.Tmax)
except ValueError:
raise Exception('To within the implemented temperature range, it is not possible to calculate the desired value.') | r'''Method to solve for the temperature at which a property is at a
specified value. `T_dependent_property` is used to calculate the value
of the property as a function of temperature; if `reset_method` is True,
the best method is used at each temperature as the solver seeks a
solution. This slows the solution moderately.
Checks the given property value with `test_property_validity` first
and raises an exception if it is not valid. Requires that Tmin and
Tmax have been set to know what range to search within.
Search is performed with the brenth solver from SciPy.
Parameters
----------
goal : float
Property value desired, [`units`]
reset_method : bool
Whether or not to reset the method as the solver searches
Returns
-------
T : float
Temperature at which the property is the specified value [K] | Below is the instruction that describes the task:
### Input:
r'''Method to solve for the temperature at which a property is at a
specified value. `T_dependent_property` is used to calculate the value
of the property as a function of temperature; if `reset_method` is True,
the best method is used at each temperature as the solver seeks a
solution. This slows the solution moderately.
Checks the given property value with `test_property_validity` first
and raises an exception if it is not valid. Requires that Tmin and
Tmax have been set to know what range to search within.
Search is performed with the brenth solver from SciPy.
Parameters
----------
goal : float
Property value desired, [`units`]
reset_method : bool
Whether or not to reset the method as the solver searches
Returns
-------
T : float
Temperature at which the property is the specified value [K]
### Response:
def solve_prop(self, goal, reset_method=True):
r'''Method to solve for the temperature at which a property is at a
specified value. `T_dependent_property` is used to calculate the value
of the property as a function of temperature; if `reset_method` is True,
the best method is used at each temperature as the solver seeks a
solution. This slows the solution moderately.
Checks the given property value with `test_property_validity` first
and raises an exception if it is not valid. Requires that Tmin and
Tmax have been set to know what range to search within.
Search is performed with the brenth solver from SciPy.
Parameters
----------
goal : float
Propoerty value desired, [`units`]
reset_method : bool
Whether or not to reset the method as the solver searches
Returns
-------
T : float
Temperature at which the property is the specified value [K]
'''
if self.Tmin is None or self.Tmax is None:
raise Exception('Both a minimum and a maximum value are not present indicating there is not enough data for temperature dependency.')
if not self.test_property_validity(goal):
raise Exception('Input property is not considered plausible; no method would calculate it.')
def error(T):
if reset_method:
self.method = None
return self.T_dependent_property(T) - goal
try:
return brenth(error, self.Tmin, self.Tmax)
except ValueError:
raise Exception('To within the implemented temperature range, it is not possible to calculate the desired value.') |
def run(self, commands=None, default_command=None, context=None):
"""
Context: A dict of namespaces as the key, and their objects as the
value. Used to easily inject code into the shell's runtime env.
"""
if commands:
self._commands.update(commands)
# HACK: Overriding the old shell isn't cool.
# Should do it by default.
from alchemist.commands import Shell
self._commands['shell'] = Shell(context=context)
if default_command is not None and len(sys.argv) == 1:
sys.argv.append(default_command)
try:
result = self.handle(sys.argv[0], sys.argv[1:])
except SystemExit as e:
result = e.code
sys.exit(result or 0) | Context: A dict of namespaces as the key, and their objects as the
value. Used to easily inject code into the shell's runtime env. | Below is the the instruction that describes the task:
### Input:
Context: A dict of namespaces as the key, and their objects as the
value. Used to easily inject code into the shell's runtime env.
### Response:
def run(self, commands=None, default_command=None, context=None):
    """
    Context: A dict of namespaces as the key, and their objects as the
    value. Used to easily inject code into the shell's runtime env.
    """
    if commands:
        self._commands.update(commands)

    # HACK: Overriding the old shell isn't cool.
    #       Should do it by default.
    from alchemist.commands import Shell
    self._commands['shell'] = Shell(context=context)

    # With no CLI arguments at all, fall back to the default command.
    if default_command is not None and len(sys.argv) == 1:
        sys.argv.append(default_command)

    try:
        exit_code = self.handle(sys.argv[0], sys.argv[1:])
    except SystemExit as exc:
        exit_code = exc.code
    sys.exit(exit_code or 0)
def request_port_forward(self, address, port, handler=None):
"""
Ask the server to forward TCP connections from a listening port on
the server, across this SSH session.
If a handler is given, that handler is called from a different thread
whenever a forwarded connection arrives. The handler parameters are::
handler(channel, (origin_addr, origin_port), (server_addr, server_port))
where C{server_addr} and C{server_port} are the address and port that
the server was listening on.
If no handler is set, the default behavior is to send new incoming
forwarded connections into the accept queue, to be picked up via
L{accept}.
@param address: the address to bind when forwarding
@type address: str
@param port: the port to forward, or 0 to ask the server to allocate
any port
@type port: int
@param handler: optional handler for incoming forwarded connections
@type handler: function(Channel, (str, int), (str, int))
@return: the port # allocated by the server
@rtype: int
@raise SSHException: if the server refused the TCP forward request
"""
if not self.active:
raise SSHException('SSH session not active')
address = str(address)
port = int(port)
response = self.global_request('tcpip-forward', (address, port), wait=True)
if response is None:
raise SSHException('TCP forwarding request denied')
if port == 0:
port = response.get_int()
if handler is None:
def default_handler(channel, (src_addr, src_port), (dest_addr, dest_port)):
self._queue_incoming_channel(channel)
handler = default_handler
self._tcp_handler = handler
return port | Ask the server to forward TCP connections from a listening port on
the server, across this SSH session.
If a handler is given, that handler is called from a different thread
whenever a forwarded connection arrives. The handler parameters are::
handler(channel, (origin_addr, origin_port), (server_addr, server_port))
where C{server_addr} and C{server_port} are the address and port that
the server was listening on.
If no handler is set, the default behavior is to send new incoming
forwarded connections into the accept queue, to be picked up via
L{accept}.
@param address: the address to bind when forwarding
@type address: str
@param port: the port to forward, or 0 to ask the server to allocate
any port
@type port: int
@param handler: optional handler for incoming forwarded connections
@type handler: function(Channel, (str, int), (str, int))
@return: the port # allocated by the server
@rtype: int
@raise SSHException: if the server refused the TCP forward request | Below is the the instruction that describes the task:
### Input:
Ask the server to forward TCP connections from a listening port on
the server, across this SSH session.
If a handler is given, that handler is called from a different thread
whenever a forwarded connection arrives. The handler parameters are::
handler(channel, (origin_addr, origin_port), (server_addr, server_port))
where C{server_addr} and C{server_port} are the address and port that
the server was listening on.
If no handler is set, the default behavior is to send new incoming
forwarded connections into the accept queue, to be picked up via
L{accept}.
@param address: the address to bind when forwarding
@type address: str
@param port: the port to forward, or 0 to ask the server to allocate
any port
@type port: int
@param handler: optional handler for incoming forwarded connections
@type handler: function(Channel, (str, int), (str, int))
@return: the port # allocated by the server
@rtype: int
@raise SSHException: if the server refused the TCP forward request
### Response:
def request_port_forward(self, address, port, handler=None):
    """
    Ask the server to forward TCP connections from a listening port on
    the server, across this SSH session.

    If a handler is given, that handler is called from a different thread
    whenever a forwarded connection arrives. The handler parameters are::
        handler(channel, (origin_addr, origin_port), (server_addr, server_port))
    where C{server_addr} and C{server_port} are the address and port that
    the server was listening on.

    If no handler is set, the default behavior is to send new incoming
    forwarded connections into the accept queue, to be picked up via
    L{accept}.

    @param address: the address to bind when forwarding
    @type address: str
    @param port: the port to forward, or 0 to ask the server to allocate
        any port
    @type port: int
    @param handler: optional handler for incoming forwarded connections
    @type handler: function(Channel, (str, int), (str, int))
    @return: the port # allocated by the server
    @rtype: int
    @raise SSHException: if the server refused the TCP forward request
    """
    if not self.active:
        raise SSHException('SSH session not active')
    address = str(address)
    port = int(port)
    response = self.global_request('tcpip-forward', (address, port), wait=True)
    if response is None:
        raise SSHException('TCP forwarding request denied')
    if port == 0:
        # Server chose the port: read the allocation from the reply.
        port = response.get_int()
    if handler is None:
        # Bug fix: the original used Python 2 tuple parameter unpacking
        # (removed by PEP 3113), which is a SyntaxError on Python 3.
        # Accept the (addr, port) pairs as plain tuple arguments instead.
        def default_handler(channel, origin, destination):
            self._queue_incoming_channel(channel)
        handler = default_handler
    self._tcp_handler = handler
    return port
def _priority_range(policy=None):
"""Determine the priority range (min,max) for the given scheduler policy.
If no policy is specified, the current default policy is used.
"""
if policy is None:
policy = libc.sched_getscheduler(0)
if policy < 0:
raise OSError(get_errno(),"sched_getscheduler")
max = libc.sched_get_priority_max(policy)
if max < 0:
raise OSError(get_errno(),"sched_get_priority_max")
min = libc.sched_get_priority_min(policy)
if min < 0:
raise OSError(get_errno(),"sched_get_priority_min")
return (min,max) | Determine the priority range (min,max) for the given scheduler policy.
If no policy is specified, the current default policy is used. | Below is the the instruction that describes the task:
### Input:
Determine the priority range (min,max) for the given scheduler policy.
If no policy is specified, the current default policy is used.
### Response:
def _priority_range(policy=None):
    """Determine the priority range (min, max) for the given scheduler policy.

    If no policy is specified, the current default policy is used.

    Raises OSError (with the current errno) if any of the underlying
    sched_* libc calls fails.
    """
    if policy is None:
        policy = libc.sched_getscheduler(0)
        if policy < 0:
            raise OSError(get_errno(), "sched_getscheduler")
    # Renamed from `max`/`min`: the original shadowed the builtins.
    hi = libc.sched_get_priority_max(policy)
    if hi < 0:
        raise OSError(get_errno(), "sched_get_priority_max")
    lo = libc.sched_get_priority_min(policy)
    if lo < 0:
        raise OSError(get_errno(), "sched_get_priority_min")
    return (lo, hi)
def save_data(self, trigger_id, **data):
"""
get the data from the service
:param trigger_id: id of the trigger
:params data, dict
:rtype: dict
"""
status = False
taiga = Taiga.objects.get(trigger_id=trigger_id)
title = self.set_title(data)
body = self.set_content(data)
# add a 'story' to the project
if taiga.project_name:
api = self.taiga_api()
new_project = api.projects.get_by_slug(taiga.project_name)
userstory = new_project.add_user_story(title, description=body)
if userstory:
status = True
return status | get the data from the service
:param trigger_id: id of the trigger
:params data, dict
:rtype: dict | Below is the the instruction that describes the task:
### Input:
get the data from the service
:param trigger_id: id of the trigger
:params data, dict
:rtype: dict
### Response:
def save_data(self, trigger_id, **data):
    """
    get the data from the service

    :param trigger_id: id of the trigger
    :params data, dict
    :rtype: dict
    """
    status = False
    taiga = Taiga.objects.get(trigger_id=trigger_id)
    title = self.set_title(data)
    body = self.set_content(data)
    if taiga.project_name:
        # add a 'story' to the project
        api = self.taiga_api()
        project = api.projects.get_by_slug(taiga.project_name)
        # A truthy return means the user story was created successfully.
        if project.add_user_story(title, description=body):
            status = True
    return status
def ToJsonString(self):
"""Converts Timestamp to RFC 3339 date string format.
Returns:
A string converted from timestamp. The string is always Z-normalized
and uses 3, 6 or 9 fractional digits as required to represent the
exact time. Example of the return format: '1972-01-01T10:00:20.021Z'
"""
nanos = self.nanos % _NANOS_PER_SECOND
total_sec = self.seconds + (self.nanos - nanos) // _NANOS_PER_SECOND
seconds = total_sec % _SECONDS_PER_DAY
days = (total_sec - seconds) // _SECONDS_PER_DAY
dt = datetime(1970, 1, 1) + timedelta(days, seconds)
result = dt.isoformat()
if (nanos % 1e9) == 0:
# If there are 0 fractional digits, the fractional
# point '.' should be omitted when serializing.
return result + 'Z'
if (nanos % 1e6) == 0:
# Serialize 3 fractional digits.
return result + '.%03dZ' % (nanos / 1e6)
if (nanos % 1e3) == 0:
# Serialize 6 fractional digits.
return result + '.%06dZ' % (nanos / 1e3)
# Serialize 9 fractional digits.
return result + '.%09dZ' % nanos | Converts Timestamp to RFC 3339 date string format.
Returns:
A string converted from timestamp. The string is always Z-normalized
and uses 3, 6 or 9 fractional digits as required to represent the
exact time. Example of the return format: '1972-01-01T10:00:20.021Z' | Below is the the instruction that describes the task:
### Input:
Converts Timestamp to RFC 3339 date string format.
Returns:
A string converted from timestamp. The string is always Z-normalized
and uses 3, 6 or 9 fractional digits as required to represent the
exact time. Example of the return format: '1972-01-01T10:00:20.021Z'
### Response:
def ToJsonString(self):
    """Converts Timestamp to RFC 3339 date string format.

    Returns:
      A string converted from timestamp. The string is always Z-normalized
      and uses 3, 6 or 9 fractional digits as required to represent the
      exact time. Example of the return format: '1972-01-01T10:00:20.021Z'
    """
    nanos = self.nanos % _NANOS_PER_SECOND
    total_sec = self.seconds + (self.nanos - nanos) // _NANOS_PER_SECOND
    seconds = total_sec % _SECONDS_PER_DAY
    days = (total_sec - seconds) // _SECONDS_PER_DAY
    base = (datetime(1970, 1, 1) + timedelta(days, seconds)).isoformat()
    if nanos % 1e9 == 0:
        # Whole seconds: the fractional point '.' is omitted entirely.
        return base + 'Z'
    if nanos % 1e6 == 0:
        # Millisecond precision: 3 fractional digits.
        return base + '.%03dZ' % (nanos / 1e6)
    if nanos % 1e3 == 0:
        # Microsecond precision: 6 fractional digits.
        return base + '.%06dZ' % (nanos / 1e3)
    # Nanosecond precision: 9 fractional digits.
    return base + '.%09dZ' % nanos
def verify_precompiled_checksums(self, precompiled_path: Path) -> None:
""" Compare source code checksums with those from a precompiled file. """
# We get the precompiled file data
contracts_precompiled = ContractManager(precompiled_path)
# Silence mypy
assert self.contracts_checksums is not None
# Compare each contract source code checksum with the one from the precompiled file
for contract, checksum in self.contracts_checksums.items():
try:
# Silence mypy
assert contracts_precompiled.contracts_checksums is not None
precompiled_checksum = contracts_precompiled.contracts_checksums[contract]
except KeyError:
raise ContractSourceManagerVerificationError(
f'No checksum for {contract}',
)
if precompiled_checksum != checksum:
raise ContractSourceManagerVerificationError(
f'checksum of {contract} does not match {precompiled_checksum} != {checksum}',
)
# Compare the overall source code checksum with the one from the precompiled file
if self.overall_checksum != contracts_precompiled.overall_checksum:
raise ContractSourceManagerVerificationError(
f'overall checksum does not match '
f'{self.overall_checksum} != {contracts_precompiled.overall_checksum}',
) | Compare source code checksums with those from a precompiled file. | Below is the the instruction that describes the task:
### Input:
Compare source code checksums with those from a precompiled file.
### Response:
def verify_precompiled_checksums(self, precompiled_path: Path) -> None:
    """ Compare source code checksums with those from a precompiled file. """
    # Load the precompiled metadata to compare against.
    precompiled = ContractManager(precompiled_path)
    # Silence mypy
    assert self.contracts_checksums is not None
    assert precompiled.contracts_checksums is not None
    # Every contract's source checksum must match the precompiled one.
    for contract, checksum in self.contracts_checksums.items():
        if contract not in precompiled.contracts_checksums:
            raise ContractSourceManagerVerificationError(
                f'No checksum for {contract}',
            )
        precompiled_checksum = precompiled.contracts_checksums[contract]
        if precompiled_checksum != checksum:
            raise ContractSourceManagerVerificationError(
                f'checksum of {contract} does not match {precompiled_checksum} != {checksum}',
            )
    # Finally, the overall source checksum must match as well.
    if self.overall_checksum != precompiled.overall_checksum:
        raise ContractSourceManagerVerificationError(
            f'overall checksum does not match '
            f'{self.overall_checksum} != {precompiled.overall_checksum}',
        )
def current_state(self):
"""Return a dictionary with the current value of the variables defining
the state of the step method."""
state = {}
for s in self._state:
state[s] = getattr(self, s)
return state | Return a dictionary with the current value of the variables defining
the state of the step method. | Below is the the instruction that describes the task:
### Input:
Return a dictionary with the current value of the variables defining
the state of the step method.
### Response:
def current_state(self):
    """Return a dictionary with the current value of the variables defining
    the state of the step method."""
    return {name: getattr(self, name) for name in self._state}
def _send_method(self, method_sig, args=bytes(), content=None):
"""
Send a method for our channel.
"""
if isinstance(args, AMQPWriter):
args = args.getvalue()
self.connection.method_writer.write_method(self.channel_id,
method_sig, args, content) | Send a method for our channel. | Below is the the instruction that describes the task:
### Input:
Send a method for our channel.
### Response:
def _send_method(self, method_sig, args=bytes(), content=None):
    """
    Send a method for our channel.
    """
    # An AMQPWriter argument is serialized to its raw byte payload first.
    payload = args.getvalue() if isinstance(args, AMQPWriter) else args
    self.connection.method_writer.write_method(
        self.channel_id, method_sig, payload, content)
def run(self, loopinfo=None, batch_size=1):
""" Run consumer
"""
logger.info("{}.Starting...".format(self.__class__.__name__))
if loopinfo:
while True:
for topic in self.topics:
self.call_kafka(topic, batch_size)
time.sleep(loopinfo.sleep)
else:
for topic in self.topics:
self.call_kafka(topic, batch_size) | Run consumer | Below is the the instruction that describes the task:
### Input:
Run consumer
### Response:
def run(self, loopinfo=None, batch_size=1):
    """ Run consumer
    """
    logger.info("{}.Starting...".format(self.__class__.__name__))
    if not loopinfo:
        # One-shot mode: a single pass over every subscribed topic.
        for topic in self.topics:
            self.call_kafka(topic, batch_size)
        return
    # Loop mode: poll forever, sleeping between full passes.
    while True:
        for topic in self.topics:
            self.call_kafka(topic, batch_size)
        time.sleep(loopinfo.sleep)
def run(self):
'''Run loop'''
logger.info("result_worker starting...")
while not self._quit:
try:
task, result = self.inqueue.get(timeout=1)
self.on_result(task, result)
except Queue.Empty as e:
continue
except KeyboardInterrupt:
break
except AssertionError as e:
logger.error(e)
continue
except Exception as e:
logger.exception(e)
continue
logger.info("result_worker exiting...") | Run loop | Below is the the instruction that describes the task:
### Input:
Run loop
### Response:
def run(self):
'''Run loop'''
logger.info("result_worker starting...")
while not self._quit:
try:
task, result = self.inqueue.get(timeout=1)
self.on_result(task, result)
except Queue.Empty as e:
continue
except KeyboardInterrupt:
break
except AssertionError as e:
logger.error(e)
continue
except Exception as e:
logger.exception(e)
continue
logger.info("result_worker exiting...") |
def _formatter_class(name, value):
"""Format the "klass" variable and value on class methods.
"""
__mname = value.__module__
if __mname != '__main__':
return "%s = <type '%s.%s'>" % (name, __mname, value.__name__)
else:
return "%s = <type '%s'>" % (name, value.__name__) | Format the "klass" variable and value on class methods. | Below is the instruction that describes the task:
### Input:
Format the "klass" variable and value on class methods.
### Response:
def _formatter_class(name, value):
"""Format the "klass" variable and value on class methods.
"""
__mname = value.__module__
if __mname != '__main__':
return "%s = <type '%s.%s'>" % (name, __mname, value.__name__)
else:
return "%s = <type '%s'>" % (name, value.__name__) |
def dispatch(self, request, *args, **kwargs):
"""
Overrides Django's default dispatch to provide caching.
If the should_cache method returns True, this will call
two functions get_cache_version and get_cache_prefix
the results of those two functions are combined and passed to
the standard django caching middleware.
"""
self.request = request
self.args = args
self.kwargs = kwargs
self.cache_middleware = None
response = None
if self.should_cache():
prefix = "%s:%s" % (self.get_cache_version(),
self.get_cache_prefix())
# Using middleware here since that is what the decorator uses
# internally and it avoids making this code all complicated with
# all sorts of wrappers.
self.set_cache_middleware(self.cache_time, prefix)
response = self.cache_middleware.process_request(self.request)
else:
self.set_do_not_cache()
if not response:
response = super(CacheView, self).dispatch(self.request, *args,
**kwargs)
return self._finalize_cached_response(request, response) | Overrides Django's default dispatch to provide caching.
If the should_cache method returns True, this will call
two functions get_cache_version and get_cache_prefix
the results of those two functions are combined and passed to
the standard django caching middleware. | Below is the instruction that describes the task:
### Input:
Overrides Django's default dispatch to provide caching.
If the should_cache method returns True, this will call
two functions get_cache_version and get_cache_prefix
the results of those two functions are combined and passed to
the standard django caching middleware.
### Response:
def dispatch(self, request, *args, **kwargs):
"""
Overrides Django's default dispatch to provide caching.
If the should_cache method returns True, this will call
two functions get_cache_version and get_cache_prefix
the results of those two functions are combined and passed to
the standard django caching middleware.
"""
self.request = request
self.args = args
self.kwargs = kwargs
self.cache_middleware = None
response = None
if self.should_cache():
prefix = "%s:%s" % (self.get_cache_version(),
self.get_cache_prefix())
# Using middleware here since that is what the decorator uses
# internally and it avoids making this code all complicated with
# all sorts of wrappers.
self.set_cache_middleware(self.cache_time, prefix)
response = self.cache_middleware.process_request(self.request)
else:
self.set_do_not_cache()
if not response:
response = super(CacheView, self).dispatch(self.request, *args,
**kwargs)
return self._finalize_cached_response(request, response) |
def round_sig_error(x, ex, n, paren=False):
'''Find ex rounded to n sig-figs and make the floating point x
match the number of decimals. If [paren], the string is
returned as quantity(error) format'''
stex = round_sig(ex,n)
if stex.find('.') < 0:
extra_zeros = len(stex) - n
sigfigs = len(str(int(x))) - extra_zeros
stx = round_sig(x,sigfigs)
else:
num_after_dec = len(string.split(stex,'.')[1])
stx = ("%%.%df" % num_after_dec) % (x)
if paren:
if stex.find('.') >= 0:
stex = stex[stex.find('.')+1:]
return "%s(%s)" % (stx,stex)
return stx,stex | Find ex rounded to n sig-figs and make the floating point x
match the number of decimals. If [paren], the string is
returned as quantity(error) format | Below is the instruction that describes the task:
### Input:
Find ex rounded to n sig-figs and make the floating point x
match the number of decimals. If [paren], the string is
returned as quantity(error) format
### Response:
def round_sig_error(x, ex, n, paren=False):
'''Find ex rounded to n sig-figs and make the floating point x
match the number of decimals. If [paren], the string is
returned as quantity(error) format'''
stex = round_sig(ex,n)
if stex.find('.') < 0:
extra_zeros = len(stex) - n
sigfigs = len(str(int(x))) - extra_zeros
stx = round_sig(x,sigfigs)
else:
num_after_dec = len(string.split(stex,'.')[1])
stx = ("%%.%df" % num_after_dec) % (x)
if paren:
if stex.find('.') >= 0:
stex = stex[stex.find('.')+1:]
return "%s(%s)" % (stx,stex)
return stx,stex |
def index_model_field(model, field, value_field='pk', key_formatter=str.strip, value_formatter=str.strip, batch_len=10000, limit=10000000, verbosity=1):
'''Create dict {obj.<field>: obj.pk} for all field_values in a model or queryset.
'''
try:
qs = model.objects
except:
qs = model
N = qs.count()
if verbosity > 0:
print 'Indexing %d rows to aid in finding %s.%s values using %s.%s.' % (N, qs.model.__name__, value_field, qs.model.__name__, field)
index, dupes, rownum = {}, {}, 0
pbar, rownum = None, 0
if verbosity and N > min(1000000, max(0, 100000**(1./verbosity))):
widgets = [pb.Counter(), '/%d rows: ' % N, pb.Percentage(), ' ', pb.RotatingMarker(), ' ', pb.Bar(),' ', pb.ETA()]
pbar = pb.ProgressBar(widgets=widgets, maxval=N).start()
# to determine the type of the field value and decide whether to strip() or normalize in any way
#obj0 = qs.filter(**{field + '__isnull': False}).all()[0]
for obj in qs.all():
field_value = getattr(obj, field)
try:
field_value = key_formatter(field_value)
except:
pass
if value_field:
entry_value = getattr(obj, value_field)
else:
entry_value = obj
try:
entry_value = value_formatter(entry_value)
except:
pass
if field_value in index:
dupes[field_value] = dupes.get(field_value, []) + [entry_value]
else:
index[field_value] = entry_value
rownum += 1
if rownum >= limit:
break
if pbar:
pbar.update(rownum)
if pbar:
pbar.finish()
if verbosity > 0:
print 'Found %d duplicate %s values among the %d records or %g%%' % (len(dupes), field, len(index), len(dupes)*100./(len(index) or 1.))
return index, dupes | Create dict {obj.<field>: obj.pk} for all field_values in a model or queryset. | Below is the instruction that describes the task:
### Input:
Create dict {obj.<field>: obj.pk} for all field_values in a model or queryset.
### Response:
def index_model_field(model, field, value_field='pk', key_formatter=str.strip, value_formatter=str.strip, batch_len=10000, limit=10000000, verbosity=1):
'''Create dict {obj.<field>: obj.pk} for all field_values in a model or queryset.
'''
try:
qs = model.objects
except:
qs = model
N = qs.count()
if verbosity > 0:
print 'Indexing %d rows to aid in finding %s.%s values using %s.%s.' % (N, qs.model.__name__, value_field, qs.model.__name__, field)
index, dupes, rownum = {}, {}, 0
pbar, rownum = None, 0
if verbosity and N > min(1000000, max(0, 100000**(1./verbosity))):
widgets = [pb.Counter(), '/%d rows: ' % N, pb.Percentage(), ' ', pb.RotatingMarker(), ' ', pb.Bar(),' ', pb.ETA()]
pbar = pb.ProgressBar(widgets=widgets, maxval=N).start()
# to determine the type of the field value and decide whether to strip() or normalize in any way
#obj0 = qs.filter(**{field + '__isnull': False}).all()[0]
for obj in qs.all():
field_value = getattr(obj, field)
try:
field_value = key_formatter(field_value)
except:
pass
if value_field:
entry_value = getattr(obj, value_field)
else:
entry_value = obj
try:
entry_value = value_formatter(entry_value)
except:
pass
if field_value in index:
dupes[field_value] = dupes.get(field_value, []) + [entry_value]
else:
index[field_value] = entry_value
rownum += 1
if rownum >= limit:
break
if pbar:
pbar.update(rownum)
if pbar:
pbar.finish()
if verbosity > 0:
print 'Found %d duplicate %s values among the %d records or %g%%' % (len(dupes), field, len(index), len(dupes)*100./(len(index) or 1.))
return index, dupes |
def str_or_unicode(text):
""" handle python 3 unicode and python 2.7 byte strings """
encoding = sys.stdout.encoding
if sys.version_info > (3, 0):
return text.encode(encoding).decode(encoding)
return text.encode(encoding) | handle python 3 unicode and python 2.7 byte strings | Below is the instruction that describes the task:
### Input:
handle python 3 unicode and python 2.7 byte strings
### Response:
def str_or_unicode(text):
""" handle python 3 unicode and python 2.7 byte strings """
encoding = sys.stdout.encoding
if sys.version_info > (3, 0):
return text.encode(encoding).decode(encoding)
return text.encode(encoding) |
def as_text(bytes_or_text, encoding='utf-8'):
"""Returns the given argument as a unicode string.
Args:
bytes_or_text: A `bytes`, `str, or `unicode` object.
encoding: A string indicating the charset for decoding unicode.
Returns:
A `unicode` (Python 2) or `str` (Python 3) object.
Raises:
TypeError: If `bytes_or_text` is not a binary or unicode string.
"""
if isinstance(bytes_or_text, _six.text_type):
return bytes_or_text
elif isinstance(bytes_or_text, bytes):
return bytes_or_text.decode(encoding)
else:
raise TypeError('Expected binary or unicode string, got %r' % bytes_or_text) | Returns the given argument as a unicode string.
Args:
bytes_or_text: A `bytes`, `str, or `unicode` object.
encoding: A string indicating the charset for decoding unicode.
Returns:
A `unicode` (Python 2) or `str` (Python 3) object.
Raises:
TypeError: If `bytes_or_text` is not a binary or unicode string. | Below is the instruction that describes the task:
### Input:
Returns the given argument as a unicode string.
Args:
bytes_or_text: A `bytes`, `str, or `unicode` object.
encoding: A string indicating the charset for decoding unicode.
Returns:
A `unicode` (Python 2) or `str` (Python 3) object.
Raises:
TypeError: If `bytes_or_text` is not a binary or unicode string.
### Response:
def as_text(bytes_or_text, encoding='utf-8'):
"""Returns the given argument as a unicode string.
Args:
bytes_or_text: A `bytes`, `str, or `unicode` object.
encoding: A string indicating the charset for decoding unicode.
Returns:
A `unicode` (Python 2) or `str` (Python 3) object.
Raises:
TypeError: If `bytes_or_text` is not a binary or unicode string.
"""
if isinstance(bytes_or_text, _six.text_type):
return bytes_or_text
elif isinstance(bytes_or_text, bytes):
return bytes_or_text.decode(encoding)
else:
raise TypeError('Expected binary or unicode string, got %r' % bytes_or_text) |
def fit(self, X, y=None, **kwargs):
"""Fit encoder according to X and y.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Training vectors, where n_samples is the number of samples
and n_features is the number of features.
y : array-like, shape = [n_samples]
Target values.
Returns
-------
self : encoder
Returns self.
"""
# if the input dataset isn't already a dataframe, convert it to one (using default column names)
# first check the type
X = util.convert_input(X)
self._dim = X.shape[1]
# if columns aren't passed, just use every string column
if self.cols is None:
self.cols = util.get_obj_cols(X)
else:
self.cols = util.convert_cols_to_list(self.cols)
if self.handle_missing == 'error':
if X[self.cols].isnull().any().bool():
raise ValueError('Columns to be encoded can not contain null')
# train an ordinal pre-encoder
self.ordinal_encoder = OrdinalEncoder(
verbose=self.verbose,
cols=self.cols,
handle_unknown='value',
handle_missing='value'
)
self.ordinal_encoder = self.ordinal_encoder.fit(X)
ordinal_mapping = self.ordinal_encoder.category_mapping
mappings_out = []
for switch in ordinal_mapping:
values = switch.get('mapping')
col = switch.get('col')
column_mapping = self.fit_sum_coding(col, values, self.handle_missing, self.handle_unknown)
mappings_out.append({'col': switch.get('col'), 'mapping': column_mapping, })
self.mapping = mappings_out
X_temp = self.transform(X, override_return_df=True)
self.feature_names = X_temp.columns.tolist()
# drop all output columns with 0 variance.
if self.drop_invariant:
self.drop_cols = []
generated_cols = util.get_generated_cols(X, X_temp, self.cols)
self.drop_cols = [x for x in generated_cols if X_temp[x].var() <= 10e-5]
try:
[self.feature_names.remove(x) for x in self.drop_cols]
except KeyError as e:
if self.verbose > 0:
print("Could not remove column from feature names."
"Not found in generated cols.\n{}".format(e))
return self | Fit encoder according to X and y.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Training vectors, where n_samples is the number of samples
and n_features is the number of features.
y : array-like, shape = [n_samples]
Target values.
Returns
-------
self : encoder
Returns self. | Below is the instruction that describes the task:
### Input:
Fit encoder according to X and y.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Training vectors, where n_samples is the number of samples
and n_features is the number of features.
y : array-like, shape = [n_samples]
Target values.
Returns
-------
self : encoder
Returns self.
### Response:
def fit(self, X, y=None, **kwargs):
"""Fit encoder according to X and y.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Training vectors, where n_samples is the number of samples
and n_features is the number of features.
y : array-like, shape = [n_samples]
Target values.
Returns
-------
self : encoder
Returns self.
"""
# if the input dataset isn't already a dataframe, convert it to one (using default column names)
# first check the type
X = util.convert_input(X)
self._dim = X.shape[1]
# if columns aren't passed, just use every string column
if self.cols is None:
self.cols = util.get_obj_cols(X)
else:
self.cols = util.convert_cols_to_list(self.cols)
if self.handle_missing == 'error':
if X[self.cols].isnull().any().bool():
raise ValueError('Columns to be encoded can not contain null')
# train an ordinal pre-encoder
self.ordinal_encoder = OrdinalEncoder(
verbose=self.verbose,
cols=self.cols,
handle_unknown='value',
handle_missing='value'
)
self.ordinal_encoder = self.ordinal_encoder.fit(X)
ordinal_mapping = self.ordinal_encoder.category_mapping
mappings_out = []
for switch in ordinal_mapping:
values = switch.get('mapping')
col = switch.get('col')
column_mapping = self.fit_sum_coding(col, values, self.handle_missing, self.handle_unknown)
mappings_out.append({'col': switch.get('col'), 'mapping': column_mapping, })
self.mapping = mappings_out
X_temp = self.transform(X, override_return_df=True)
self.feature_names = X_temp.columns.tolist()
# drop all output columns with 0 variance.
if self.drop_invariant:
self.drop_cols = []
generated_cols = util.get_generated_cols(X, X_temp, self.cols)
self.drop_cols = [x for x in generated_cols if X_temp[x].var() <= 10e-5]
try:
[self.feature_names.remove(x) for x in self.drop_cols]
except KeyError as e:
if self.verbose > 0:
print("Could not remove column from feature names."
"Not found in generated cols.\n{}".format(e))
return self |
def _seconds_str(self):
"""
Returns:
str: human readable text
Example:
>>> self = Timerit(num=100, bestof=10, verbose=0)
>>> self.call(lambda : sum(range(100)))
>>> print(self._seconds_str())
... 'best=3.423 µs, ave=3.451 ± 0.027 µs'
"""
mean = self.mean()
unit, mag = _choose_unit(mean, self.unit, self._asciimode)
unit_min = self.min() / mag
unit_mean = mean / mag
# Is showing the std useful? It probably doesn't hurt.
std = self.std()
unit_std = std / mag
pm = _trychar('±', '+-', self._asciimode)
fmtstr = ('best={min:.{pr1}{t}} {unit}, '
'mean={mean:.{pr1}{t}} {pm} {std:.{pr2}{t}} {unit}')
pr1 = pr2 = self._precision
if isinstance(self._precision, int): # pragma: nobranch
pr2 = max(self._precision - 2, 1)
unit_str = fmtstr.format(min=unit_min, unit=unit, mean=unit_mean,
t=self._precision_type, pm=pm, std=unit_std,
pr1=pr1, pr2=pr2)
return unit_str | Returns:
str: human readable text
Example:
>>> self = Timerit(num=100, bestof=10, verbose=0)
>>> self.call(lambda : sum(range(100)))
>>> print(self._seconds_str())
... 'best=3.423 µs, ave=3.451 ± 0.027 µs' | Below is the instruction that describes the task:
### Input:
Returns:
str: human readable text
Example:
>>> self = Timerit(num=100, bestof=10, verbose=0)
>>> self.call(lambda : sum(range(100)))
>>> print(self._seconds_str())
... 'best=3.423 µs, ave=3.451 ± 0.027 µs'
### Response:
def _seconds_str(self):
"""
Returns:
str: human readable text
Example:
>>> self = Timerit(num=100, bestof=10, verbose=0)
>>> self.call(lambda : sum(range(100)))
>>> print(self._seconds_str())
... 'best=3.423 µs, ave=3.451 ± 0.027 µs'
"""
mean = self.mean()
unit, mag = _choose_unit(mean, self.unit, self._asciimode)
unit_min = self.min() / mag
unit_mean = mean / mag
# Is showing the std useful? It probably doesn't hurt.
std = self.std()
unit_std = std / mag
pm = _trychar('±', '+-', self._asciimode)
fmtstr = ('best={min:.{pr1}{t}} {unit}, '
'mean={mean:.{pr1}{t}} {pm} {std:.{pr2}{t}} {unit}')
pr1 = pr2 = self._precision
if isinstance(self._precision, int): # pragma: nobranch
pr2 = max(self._precision - 2, 1)
unit_str = fmtstr.format(min=unit_min, unit=unit, mean=unit_mean,
t=self._precision_type, pm=pm, std=unit_std,
pr1=pr1, pr2=pr2)
return unit_str |
def json(self,attribs =None, recurse=True, ignorelist=False):
"""See :meth:`AbstractElement.json`"""
if not attribs: attribs = {}
if self.idref:
attribs['id'] = self.idref
return super(AbstractTextMarkup,self).json(attribs,recurse, ignorelist) | See :meth:`AbstractElement.json` | Below is the instruction that describes the task:
### Input:
See :meth:`AbstractElement.json`
### Response:
def json(self,attribs =None, recurse=True, ignorelist=False):
"""See :meth:`AbstractElement.json`"""
if not attribs: attribs = {}
if self.idref:
attribs['id'] = self.idref
return super(AbstractTextMarkup,self).json(attribs,recurse, ignorelist) |
def singlerose(self, Width=1, Color=['red']):
'''
draw the rose map of single sample with different items~
'''
self.chooser_label.setText(self.ChooseItems[self.chooser.value() - 1])
self.MultipleRoseName = self.ChooseItems[self.chooser.value() - 1]
self.SingleRoseName = [(self.ChooseItems[self.chooser.value() - 1])]
Name = self.SingleRoseName
self.axes.clear()
# self.axes.set_xlim(-90, 450)
# self.axes.set_ylim(0, 90)
titles = list('NWSE')
titles = ['N', '330', '300', 'W', '240', '210', 'S', '150', '120', 'E', '60', '30']
self.n = len(titles)
self.angles = np.arange(90, 90 + 360, 360.0 / self.n)
self.angles = np.array([90., 120., 150., 180., 210., 240., 270., 300., 330.,
360., 30., 60.])
self.axes.set_thetagrids(self.angles, labels=titles, fontsize=14)
self.raw = self._df
real_max = []
for k in range(len(Name)):
Data = []
S = []
R = []
for i in range(len(self.raw)):
S.append(self.raw.at[i, Name[k]])
s = np.linspace(0, 360, 360 / self.Gap + 1)
t = tuple(s.tolist())
count = []
for i in range(len(t)):
tmp_count = 0
for j in S:
if i < len(t) - 1:
if t[i] < j <= t[i + 1]:
tmp_count += 1
count.append(tmp_count)
count_max = max(count)
real_max.append(count_max)
maxuse = max(real_max)
for k in range(len(Name)):
Data = []
S = []
R = []
for i in range(len(self.raw)):
S.append(self.raw.at[i, Name[k]])
s = np.linspace(0, 360, 360 / self.Gap + 1)
t = tuple(s.tolist())
count = []
for i in range(len(t)):
tmp_count = 0
for j in S:
if i < len(t) - 1:
if t[i] < j <= t[i + 1]:
tmp_count += 1
count.append(tmp_count)
s = np.linspace(0, 360, 360 / self.Gap + 1)
t = tuple(s.tolist())
R_factor = 90 / maxuse
for i in count:
TMP = 90 - i * R_factor
R.append(TMP)
m, n = self.Trans(t, R)
self.axes.plot(m, n, color=Color[k], linewidth=1, alpha=0.6, marker='')
self.axes.fill(m, n, Color=Color[k], Alpha=0.6, )
if (self.Type_cb.isChecked()):
self.Type_cb.setText('Wulf')
list1 = [self.eqan(x) for x in range(15, 90, 15)]
else:
self.Type_cb.setText('Schmidt')
list1 = [self.eqar(x) for x in range(15, 90, 15)]
list2= list1
print(maxuse + 1)
try:
list2 = [str(x) for x in range(0, int(maxuse + 1), int((maxuse + 1.0) / 7.0))]
except(ValueError):
pass
list2.reverse()
self.axes.set_rgrids(list1, list2)
#self.axes.set_thetagrids(range(360 + 90, 0 + 90, -15), [str(x) for x in range(0, 360, 15)])
if (self.legend_cb.isChecked()):
self.axes.legend(bbox_to_anchor=(1.5, 1), loc=2, borderaxespad=0, prop=fontprop) | draw the rose map of single sample with different items~ | Below is the instruction that describes the task:
### Input:
draw the rose map of single sample with different items~
### Response:
def singlerose(self, Width=1, Color=['red']):
'''
draw the rose map of single sample with different items~
'''
self.chooser_label.setText(self.ChooseItems[self.chooser.value() - 1])
self.MultipleRoseName = self.ChooseItems[self.chooser.value() - 1]
self.SingleRoseName = [(self.ChooseItems[self.chooser.value() - 1])]
Name = self.SingleRoseName
self.axes.clear()
# self.axes.set_xlim(-90, 450)
# self.axes.set_ylim(0, 90)
titles = list('NWSE')
titles = ['N', '330', '300', 'W', '240', '210', 'S', '150', '120', 'E', '60', '30']
self.n = len(titles)
self.angles = np.arange(90, 90 + 360, 360.0 / self.n)
self.angles = np.array([90., 120., 150., 180., 210., 240., 270., 300., 330.,
360., 30., 60.])
self.axes.set_thetagrids(self.angles, labels=titles, fontsize=14)
self.raw = self._df
real_max = []
for k in range(len(Name)):
Data = []
S = []
R = []
for i in range(len(self.raw)):
S.append(self.raw.at[i, Name[k]])
s = np.linspace(0, 360, 360 / self.Gap + 1)
t = tuple(s.tolist())
count = []
for i in range(len(t)):
tmp_count = 0
for j in S:
if i < len(t) - 1:
if t[i] < j <= t[i + 1]:
tmp_count += 1
count.append(tmp_count)
count_max = max(count)
real_max.append(count_max)
maxuse = max(real_max)
for k in range(len(Name)):
Data = []
S = []
R = []
for i in range(len(self.raw)):
S.append(self.raw.at[i, Name[k]])
s = np.linspace(0, 360, 360 / self.Gap + 1)
t = tuple(s.tolist())
count = []
for i in range(len(t)):
tmp_count = 0
for j in S:
if i < len(t) - 1:
if t[i] < j <= t[i + 1]:
tmp_count += 1
count.append(tmp_count)
s = np.linspace(0, 360, 360 / self.Gap + 1)
t = tuple(s.tolist())
R_factor = 90 / maxuse
for i in count:
TMP = 90 - i * R_factor
R.append(TMP)
m, n = self.Trans(t, R)
self.axes.plot(m, n, color=Color[k], linewidth=1, alpha=0.6, marker='')
self.axes.fill(m, n, Color=Color[k], Alpha=0.6, )
if (self.Type_cb.isChecked()):
self.Type_cb.setText('Wulf')
list1 = [self.eqan(x) for x in range(15, 90, 15)]
else:
self.Type_cb.setText('Schmidt')
list1 = [self.eqar(x) for x in range(15, 90, 15)]
list2= list1
print(maxuse + 1)
try:
list2 = [str(x) for x in range(0, int(maxuse + 1), int((maxuse + 1.0) / 7.0))]
except(ValueError):
pass
list2.reverse()
self.axes.set_rgrids(list1, list2)
#self.axes.set_thetagrids(range(360 + 90, 0 + 90, -15), [str(x) for x in range(0, 360, 15)])
if (self.legend_cb.isChecked()):
self.axes.legend(bbox_to_anchor=(1.5, 1), loc=2, borderaxespad=0, prop=fontprop) |
def from_string(cls, string):
"""Convert a string into a Dimension"""
# Note: There is some ambiguity as to whether the string passed is intended
# to become a unit looked up by name or suffix, or a Dimension descriptor.
if string in units.UNITS_BY_ALL:
return cls(description=string, unit=units.Unit(string))
else:
return cls(description=string) | Convert a string into a Dimension | Below is the instruction that describes the task:
### Input:
Convert a string into a Dimension
### Response:
def from_string(cls, string):
"""Convert a string into a Dimension"""
# Note: There is some ambiguity as to whether the string passed is intended
# to become a unit looked up by name or suffix, or a Dimension descriptor.
if string in units.UNITS_BY_ALL:
return cls(description=string, unit=units.Unit(string))
else:
return cls(description=string) |
def register(self, username, password, attr_map=None):
"""
Register the user. Other base attributes from AWS Cognito User Pools
are address, birthdate, email, family_name (last name), gender,
given_name (first name), locale, middle_name, name, nickname,
phone_number, picture, preferred_username, profile, zoneinfo,
updated at, website
:param username: User Pool username
:param password: User Pool password
:param attr_map: Attribute map to Cognito's attributes
:return response: Response from Cognito
Example response::
{
'UserConfirmed': True|False,
'CodeDeliveryDetails': {
'Destination': 'string', # This value will be obfuscated
'DeliveryMedium': 'SMS'|'EMAIL',
'AttributeName': 'string'
}
}
"""
attributes = self.base_attributes.copy()
if self.custom_attributes:
attributes.update(self.custom_attributes)
cognito_attributes = dict_to_cognito(attributes, attr_map)
params = {
'ClientId': self.client_id,
'Username': username,
'Password': password,
'UserAttributes': cognito_attributes
}
self._add_secret_hash(params, 'SecretHash')
response = self.client.sign_up(**params)
attributes.update(username=username, password=password)
self._set_attributes(response, attributes)
response.pop('ResponseMetadata')
return response | Register the user. Other base attributes from AWS Cognito User Pools
are address, birthdate, email, family_name (last name), gender,
given_name (first name), locale, middle_name, name, nickname,
phone_number, picture, preferred_username, profile, zoneinfo,
updated at, website
:param username: User Pool username
:param password: User Pool password
:param attr_map: Attribute map to Cognito's attributes
:return response: Response from Cognito
Example response::
{
'UserConfirmed': True|False,
'CodeDeliveryDetails': {
'Destination': 'string', # This value will be obfuscated
'DeliveryMedium': 'SMS'|'EMAIL',
'AttributeName': 'string'
}
} | Below is the instruction that describes the task:
### Input:
Register the user. Other base attributes from AWS Cognito User Pools
are address, birthdate, email, family_name (last name), gender,
given_name (first name), locale, middle_name, name, nickname,
phone_number, picture, preferred_username, profile, zoneinfo,
updated at, website
:param username: User Pool username
:param password: User Pool password
:param attr_map: Attribute map to Cognito's attributes
:return response: Response from Cognito
Example response::
{
'UserConfirmed': True|False,
'CodeDeliveryDetails': {
'Destination': 'string', # This value will be obfuscated
'DeliveryMedium': 'SMS'|'EMAIL',
'AttributeName': 'string'
}
}
### Response:
def register(self, username, password, attr_map=None):
"""
Register the user. Other base attributes from AWS Cognito User Pools
are address, birthdate, email, family_name (last name), gender,
given_name (first name), locale, middle_name, name, nickname,
phone_number, picture, preferred_username, profile, zoneinfo,
updated at, website
:param username: User Pool username
:param password: User Pool password
:param attr_map: Attribute map to Cognito's attributes
:return response: Response from Cognito
Example response::
{
'UserConfirmed': True|False,
'CodeDeliveryDetails': {
'Destination': 'string', # This value will be obfuscated
'DeliveryMedium': 'SMS'|'EMAIL',
'AttributeName': 'string'
}
}
"""
attributes = self.base_attributes.copy()
if self.custom_attributes:
attributes.update(self.custom_attributes)
cognito_attributes = dict_to_cognito(attributes, attr_map)
params = {
'ClientId': self.client_id,
'Username': username,
'Password': password,
'UserAttributes': cognito_attributes
}
self._add_secret_hash(params, 'SecretHash')
response = self.client.sign_up(**params)
attributes.update(username=username, password=password)
self._set_attributes(response, attributes)
response.pop('ResponseMetadata')
return response |
def uniorbytes(s, result=str, enc="utf-8", err="strict"):
"""
This function was made to avoid byte / str type errors received in
packages like base64. Accepts all input types and will recursively
encode entire lists and dicts.
@s: the #bytes or #str item you are attempting to encode or decode
@result: the desired output, either #str or #bytes
@enc: the desired encoding
@err: passed to :meth:bytes.decode, tells the decoder what to do about
errors, e.g. 'replace'
-> type specified in @result
"""
if isinstance(s, result):
# the input is the desired one, return as is
return s
if isinstance(s, (bytes, str)):
# the input is either a byte or a string, convert to desired
# result (result=bytes or str)
if isinstance(s, bytes) and result == str:
return s.decode(enc, err)
elif isinstance(s, str) and result == bytes:
return s.encode(enc)
else:
return str(s or ("" if s is None else s), enc)
elif isinstance(s, (float, int, decimal.Decimal)):
return uniorbytes(str(s), result, enc, err)
elif isinstance(s, dict):
# the input is a dict {}
for k, item in list(s.items()):
s[k] = uniorbytes(item, result=result, enc=enc, err=err)
return s
elif hasattr(s, '__iter__'):
# the input is iterable
for i, item in enumerate(s):
s[i] = uniorbytes(item, result=result, enc=enc, err=err)
return s
return s | This function was made to avoid byte / str type errors received in
packages like base64. Accepts all input types and will recursively
encode entire lists and dicts.
@s: the #bytes or #str item you are attempting to encode or decode
@result: the desired output, either #str or #bytes
@enc: the desired encoding
@err: passed to :meth:bytes.decode, tells the decoder what to do about
errors, e.g. 'replace'
-> type specified in @result | Below is the instruction that describes the task:
### Input:
This function was made to avoid byte / str type errors received in
packages like base64. Accepts all input types and will recursively
encode entire lists and dicts.
@s: the #bytes or #str item you are attempting to encode or decode
@result: the desired output, either #str or #bytes
@enc: the desired encoding
@err: passed to :meth:bytes.decode, tells the decoder what to do about
errors, e.g. 'replace'
-> type specified in @result
### Response:
def uniorbytes(s, result=str, enc="utf-8", err="strict"):
"""
This function was made to avoid byte / str type errors received in
packages like base64. Accepts all input types and will recursively
encode entire lists and dicts.
@s: the #bytes or #str item you are attempting to encode or decode
@result: the desired output, either #str or #bytes
@enc: the desired encoding
@err: passed to :meth:bytes.decode, tells the decoder what to do about
errors, e.g. 'replace'
-> type specified in @result
"""
if isinstance(s, result):
# the input is the desired one, return as is
return s
if isinstance(s, (bytes, str)):
# the input is either a byte or a string, convert to desired
# result (result=bytes or str)
if isinstance(s, bytes) and result == str:
return s.decode(enc, err)
elif isinstance(s, str) and result == bytes:
return s.encode(enc)
else:
return str(s or ("" if s is None else s), enc)
elif isinstance(s, (float, int, decimal.Decimal)):
return uniorbytes(str(s), result, enc, err)
elif isinstance(s, dict):
# the input is a dict {}
for k, item in list(s.items()):
s[k] = uniorbytes(item, result=result, enc=enc, err=err)
return s
elif hasattr(s, '__iter__'):
# the input is iterable
for i, item in enumerate(s):
s[i] = uniorbytes(item, result=result, enc=enc, err=err)
return s
return s |
def _set_default_vrf(self, v, load=False):
"""
Setter method for default_vrf, mapped from YANG variable /rbridge_id/router/router_bgp/address_family/ipv6/ipv6_unicast/default_vrf (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_default_vrf is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_default_vrf() directly.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=default_vrf.default_vrf, is_container='container', presence=False, yang_name="default-vrf", rest_name="", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'IPv4 unicast address Family', u'cli-suppress-no': None, u'cli-add-mode': None, u'cli-drop-node-name': None, u'cli-full-command': None, u'callpoint': u'AfIpv6Ucast', u'cli-mode-name': u'config-bgp-ipv6u'}}, namespace='urn:brocade.com:mgmt:brocade-bgp', defining_module='brocade-bgp', yang_type='container', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """default_vrf must be of a type compatible with container""",
'defined-type': "container",
'generated-type': """YANGDynClass(base=default_vrf.default_vrf, is_container='container', presence=False, yang_name="default-vrf", rest_name="", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'IPv4 unicast address Family', u'cli-suppress-no': None, u'cli-add-mode': None, u'cli-drop-node-name': None, u'cli-full-command': None, u'callpoint': u'AfIpv6Ucast', u'cli-mode-name': u'config-bgp-ipv6u'}}, namespace='urn:brocade.com:mgmt:brocade-bgp', defining_module='brocade-bgp', yang_type='container', is_config=True)""",
})
self.__default_vrf = t
if hasattr(self, '_set'):
self._set() | Setter method for default_vrf, mapped from YANG variable /rbridge_id/router/router_bgp/address_family/ipv6/ipv6_unicast/default_vrf (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_default_vrf is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_default_vrf() directly. | Below is the the instruction that describes the task:
### Input:
Setter method for default_vrf, mapped from YANG variable /rbridge_id/router/router_bgp/address_family/ipv6/ipv6_unicast/default_vrf (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_default_vrf is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_default_vrf() directly.
### Response:
def _set_default_vrf(self, v, load=False):
"""
Setter method for default_vrf, mapped from YANG variable /rbridge_id/router/router_bgp/address_family/ipv6/ipv6_unicast/default_vrf (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_default_vrf is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_default_vrf() directly.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=default_vrf.default_vrf, is_container='container', presence=False, yang_name="default-vrf", rest_name="", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'IPv4 unicast address Family', u'cli-suppress-no': None, u'cli-add-mode': None, u'cli-drop-node-name': None, u'cli-full-command': None, u'callpoint': u'AfIpv6Ucast', u'cli-mode-name': u'config-bgp-ipv6u'}}, namespace='urn:brocade.com:mgmt:brocade-bgp', defining_module='brocade-bgp', yang_type='container', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """default_vrf must be of a type compatible with container""",
'defined-type': "container",
'generated-type': """YANGDynClass(base=default_vrf.default_vrf, is_container='container', presence=False, yang_name="default-vrf", rest_name="", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'IPv4 unicast address Family', u'cli-suppress-no': None, u'cli-add-mode': None, u'cli-drop-node-name': None, u'cli-full-command': None, u'callpoint': u'AfIpv6Ucast', u'cli-mode-name': u'config-bgp-ipv6u'}}, namespace='urn:brocade.com:mgmt:brocade-bgp', defining_module='brocade-bgp', yang_type='container', is_config=True)""",
})
self.__default_vrf = t
if hasattr(self, '_set'):
self._set() |
def _file_model_from_path(self, path, content=False, format=None):
"""
Build a file model from database record.
"""
model = base_model(path)
model["type"] = "file"
if self.fs.isfile(path):
model["last_modified"] = model["created"] = self.fs.lstat(path)["ST_MTIME"]
else:
model["last_modified"] = model["created"] = DUMMY_CREATED_DATE
if content:
try:
content = self.fs.read(path)
except NoSuchFile as e:
self.no_such_entity(e.path)
except GenericFSError as e:
self.do_error(str(e), 500)
model["format"] = format or "text"
model["content"] = content
model["mimetype"] = mimetypes.guess_type(path)[0] or "text/plain"
if format == "base64":
model["format"] = format or "base64"
from base64 import b64decode
model["content"] = b64decode(content)
return model | Build a file model from database record. | Below is the the instruction that describes the task:
### Input:
Build a file model from database record.
### Response:
def _file_model_from_path(self, path, content=False, format=None):
"""
Build a file model from database record.
"""
model = base_model(path)
model["type"] = "file"
if self.fs.isfile(path):
model["last_modified"] = model["created"] = self.fs.lstat(path)["ST_MTIME"]
else:
model["last_modified"] = model["created"] = DUMMY_CREATED_DATE
if content:
try:
content = self.fs.read(path)
except NoSuchFile as e:
self.no_such_entity(e.path)
except GenericFSError as e:
self.do_error(str(e), 500)
model["format"] = format or "text"
model["content"] = content
model["mimetype"] = mimetypes.guess_type(path)[0] or "text/plain"
if format == "base64":
model["format"] = format or "base64"
from base64 import b64decode
model["content"] = b64decode(content)
return model |
def _create_parser(cls):
"""
Need to check the specific symbol "/" in attr_value part as well.
I checked some multipath configuraion files from the sosreport and got
although there are some more specific symbols like "-%", it is enclosed
in double quotes and will be accepted. Furthermore, I also checked the
source code of "device-mapper-multipath" and got if the attr_value in
"multipath.conf" include a "whitespace", it must be enclosed in double
quotation marks. So, we could just add one more specific symbol "/" to
check.
----------------------------------------------------------
udev_dir /dev
getuid_callout "/sbin/scsi_id -g -u -s /block/%n"
----------------------------------------------------------
"""
section_name = p.Word(p.alphas + "_")
attr_name = attr_value = p.Word(p.alphanums + "_/")
LBRACE, RBRACE = map(p.Suppress, "{}")
attr = p.Group(attr_name + (attr_value | p.quotedString.setParseAction(p.removeQuotes)))
attr_list = p.Dict(p.ZeroOrMore(attr))
simple_section = p.Group(section_name + LBRACE + attr_list + RBRACE)
complex_section = p.Group(section_name + LBRACE + p.OneOrMore(simple_section) + RBRACE)
simple_or_complex = p.Dict(simple_section | complex_section)
my_conf = p.Group(p.ZeroOrMore(simple_or_complex))
my_conf.ignore("#" + p.restOfLine)
return my_conf | Need to check the specific symbol "/" in attr_value part as well.
I checked some multipath configuraion files from the sosreport and got
although there are some more specific symbols like "-%", it is enclosed
in double quotes and will be accepted. Furthermore, I also checked the
source code of "device-mapper-multipath" and got if the attr_value in
"multipath.conf" include a "whitespace", it must be enclosed in double
quotation marks. So, we could just add one more specific symbol "/" to
check.
----------------------------------------------------------
udev_dir /dev
getuid_callout "/sbin/scsi_id -g -u -s /block/%n"
---------------------------------------------------------- | Below is the the instruction that describes the task:
### Input:
Need to check the specific symbol "/" in attr_value part as well.
I checked some multipath configuraion files from the sosreport and got
although there are some more specific symbols like "-%", it is enclosed
in double quotes and will be accepted. Furthermore, I also checked the
source code of "device-mapper-multipath" and got if the attr_value in
"multipath.conf" include a "whitespace", it must be enclosed in double
quotation marks. So, we could just add one more specific symbol "/" to
check.
----------------------------------------------------------
udev_dir /dev
getuid_callout "/sbin/scsi_id -g -u -s /block/%n"
----------------------------------------------------------
### Response:
def _create_parser(cls):
"""
Need to check the specific symbol "/" in attr_value part as well.
I checked some multipath configuraion files from the sosreport and got
although there are some more specific symbols like "-%", it is enclosed
in double quotes and will be accepted. Furthermore, I also checked the
source code of "device-mapper-multipath" and got if the attr_value in
"multipath.conf" include a "whitespace", it must be enclosed in double
quotation marks. So, we could just add one more specific symbol "/" to
check.
----------------------------------------------------------
udev_dir /dev
getuid_callout "/sbin/scsi_id -g -u -s /block/%n"
----------------------------------------------------------
"""
section_name = p.Word(p.alphas + "_")
attr_name = attr_value = p.Word(p.alphanums + "_/")
LBRACE, RBRACE = map(p.Suppress, "{}")
attr = p.Group(attr_name + (attr_value | p.quotedString.setParseAction(p.removeQuotes)))
attr_list = p.Dict(p.ZeroOrMore(attr))
simple_section = p.Group(section_name + LBRACE + attr_list + RBRACE)
complex_section = p.Group(section_name + LBRACE + p.OneOrMore(simple_section) + RBRACE)
simple_or_complex = p.Dict(simple_section | complex_section)
my_conf = p.Group(p.ZeroOrMore(simple_or_complex))
my_conf.ignore("#" + p.restOfLine)
return my_conf |
def stderrHandler(level, object, category, file, line, message):
"""
A log handler that writes to stderr.
@type level: string
@type object: string (or None)
@type category: string
@type message: string
"""
o = ""
if object:
o = '"' + object + '"'
where = "(%s:%d)" % (file, line)
# level pid object cat time
# 5 + 1 + 7 + 1 + 32 + 1 + 17 + 1 + 15 == 80
safeprintf(sys.stderr, '%s [%5d] %-32s %-17s %-15s ',
getFormattedLevelName(level), os.getpid(), o, category,
time.strftime("%b %d %H:%M:%S"))
try:
safeprintf(sys.stderr, '%-4s %s %s\n', "", message, where)
except UnicodeEncodeError:
# this can happen if message is a unicode object, convert it back into
# a string using the UTF-8 encoding
message = message.encode('UTF-8')
safeprintf(sys.stderr, '%-4s %s %s\n', "", message, where)
sys.stderr.flush() | A log handler that writes to stderr.
@type level: string
@type object: string (or None)
@type category: string
@type message: string | Below is the the instruction that describes the task:
### Input:
A log handler that writes to stderr.
@type level: string
@type object: string (or None)
@type category: string
@type message: string
### Response:
def stderrHandler(level, object, category, file, line, message):
"""
A log handler that writes to stderr.
@type level: string
@type object: string (or None)
@type category: string
@type message: string
"""
o = ""
if object:
o = '"' + object + '"'
where = "(%s:%d)" % (file, line)
# level pid object cat time
# 5 + 1 + 7 + 1 + 32 + 1 + 17 + 1 + 15 == 80
safeprintf(sys.stderr, '%s [%5d] %-32s %-17s %-15s ',
getFormattedLevelName(level), os.getpid(), o, category,
time.strftime("%b %d %H:%M:%S"))
try:
safeprintf(sys.stderr, '%-4s %s %s\n', "", message, where)
except UnicodeEncodeError:
# this can happen if message is a unicode object, convert it back into
# a string using the UTF-8 encoding
message = message.encode('UTF-8')
safeprintf(sys.stderr, '%-4s %s %s\n', "", message, where)
sys.stderr.flush() |
def main():
"""Starting point for the program execution."""
# Create command line parser.
parser = argparse.ArgumentParser()
# Adding command line arguments.
parser.add_argument("-o", "--out", help="Output file", default=None)
parser.add_argument(
"pyfile", help="Python file to be profiled", default=None
)
# Parse command line arguments.
arguments = parser.parse_args()
if arguments.pyfile is not None:
# Core functionality.
pyheat = PyHeat(arguments.pyfile)
pyheat.create_heatmap()
pyheat.show_heatmap(output_file=arguments.out, enable_scroll=True)
pyheat.close_heatmap()
else:
# Print command help
parser.print_help() | Starting point for the program execution. | Below is the the instruction that describes the task:
### Input:
Starting point for the program execution.
### Response:
def main():
"""Starting point for the program execution."""
# Create command line parser.
parser = argparse.ArgumentParser()
# Adding command line arguments.
parser.add_argument("-o", "--out", help="Output file", default=None)
parser.add_argument(
"pyfile", help="Python file to be profiled", default=None
)
# Parse command line arguments.
arguments = parser.parse_args()
if arguments.pyfile is not None:
# Core functionality.
pyheat = PyHeat(arguments.pyfile)
pyheat.create_heatmap()
pyheat.show_heatmap(output_file=arguments.out, enable_scroll=True)
pyheat.close_heatmap()
else:
# Print command help
parser.print_help() |
def validator_for(schema, default=_LATEST_VERSION):
"""
Retrieve the validator class appropriate for validating the given schema.
Uses the :validator:`$schema` property that should be present in the given
schema to look up the appropriate validator class.
Arguments:
schema (collections.Mapping or bool):
the schema to look at
default:
the default to return if the appropriate validator class cannot be
determined.
If unprovided, the default is to return
the latest supported draft.
"""
if schema is True or schema is False or u"$schema" not in schema:
return default
if schema[u"$schema"] not in meta_schemas:
warn(
(
"The metaschema specified by $schema was not found. "
"Using the latest draft to validate, but this will raise "
"an error in the future."
),
DeprecationWarning,
stacklevel=2,
)
return meta_schemas.get(schema[u"$schema"], _LATEST_VERSION) | Retrieve the validator class appropriate for validating the given schema.
Uses the :validator:`$schema` property that should be present in the given
schema to look up the appropriate validator class.
Arguments:
schema (collections.Mapping or bool):
the schema to look at
default:
the default to return if the appropriate validator class cannot be
determined.
If unprovided, the default is to return
the latest supported draft. | Below is the the instruction that describes the task:
### Input:
Retrieve the validator class appropriate for validating the given schema.
Uses the :validator:`$schema` property that should be present in the given
schema to look up the appropriate validator class.
Arguments:
schema (collections.Mapping or bool):
the schema to look at
default:
the default to return if the appropriate validator class cannot be
determined.
If unprovided, the default is to return
the latest supported draft.
### Response:
def validator_for(schema, default=_LATEST_VERSION):
"""
Retrieve the validator class appropriate for validating the given schema.
Uses the :validator:`$schema` property that should be present in the given
schema to look up the appropriate validator class.
Arguments:
schema (collections.Mapping or bool):
the schema to look at
default:
the default to return if the appropriate validator class cannot be
determined.
If unprovided, the default is to return
the latest supported draft.
"""
if schema is True or schema is False or u"$schema" not in schema:
return default
if schema[u"$schema"] not in meta_schemas:
warn(
(
"The metaschema specified by $schema was not found. "
"Using the latest draft to validate, but this will raise "
"an error in the future."
),
DeprecationWarning,
stacklevel=2,
)
return meta_schemas.get(schema[u"$schema"], _LATEST_VERSION) |
def dot_product_unmasked_self_attention_relative_v2(
q, k, v, bias, max_relative_position=None, dropout_rate=0.0,
image_shapes=None, name=None, make_image_summary=True,
dropout_broadcast_dims=None, heads_share_relative_embedding=False,
add_relative_to_values=False):
"""Calculate relative position-aware dot-product self-attention.
The attention calculation is augmented with learned representations for the
relative position between each element in q and each element in k and v.
Args:
q: a Tensor with shape [batch, heads, length, depth].
k: a Tensor with shape [batch, heads, length, depth].
v: a Tensor with shape [batch, heads, length, depth].
bias: bias Tensor.
max_relative_position: an integer the max relative embedding considered.
Changing this invalidates checkpoints.
dropout_rate: a floating point number.
image_shapes: optional tuple of integer scalars.
name: an optional string.
make_image_summary: Whether to make an attention image summary.
dropout_broadcast_dims: an optional list of integers less than 4
specifying in which dimensions to broadcast the dropout decisions.
saves memory.
heads_share_relative_embedding: a boolean indicating wheather to share
relative embeddings between attention heads.
add_relative_to_values: a boolean for whether to add relative component to
values.
Returns:
A Tensor.
Raises:
ValueError: if max_relative_position is not > 0.
"""
if not max_relative_position:
raise ValueError("Max relative position (%s) should be > 0 when using "
"relative self attention." % (max_relative_position))
with tf.variable_scope(
name,
default_name="dot_product_unmasked_self_attention_relative_v2",
values=[q, k, v]):
# This calculation only works for self attention.
# q, k and v must therefore have the same shape.
q.get_shape().assert_is_compatible_with(k.get_shape())
q.get_shape().assert_is_compatible_with(v.get_shape())
# [batch, num_heads, query_length, memory_length]
logits = tf.matmul(q, k, transpose_b=True)
length = common_layers.shape_list(q)[2]
k_shape = common_layers.shape_list(k)
num_heads = k_shape[1]
depth_k = k_shape[-1]
key_relative_embeddings = get_relative_embeddings_left_right(
max_relative_position, length, depth_k, num_heads,
heads_share_relative_embedding,
"key_relative_embeddings")
unmasked_rel_logits = matmul_with_relative_keys(
q, key_relative_embeddings, heads_share_relative_embedding)
unmasked_rel_logits = _relative_position_to_absolute_position_unmasked(
unmasked_rel_logits)
logits += unmasked_rel_logits
if bias is not None:
logits += bias
weights = tf.nn.softmax(logits, name="attention_weights")
# dropping out the attention links for each of the heads
weights = common_layers.dropout_with_broadcast_dims(
weights, 1.0 - dropout_rate, broadcast_dims=dropout_broadcast_dims)
# relative_weights.set_shape([None, None, None, max_length])
if common_layers.should_generate_summaries() and make_image_summary:
attention_image_summary(weights, image_shapes)
ret = tf.matmul(weights, v)
if add_relative_to_values:
# Adds the contribution of the weighted relative embeddings to the values.
# [batch, num_heads, query_length, 2*memory_length-1]
relative_weights = _absolute_position_to_relative_position_unmasked(
weights)
depth_v = common_layers.shape_list(v)[3]
value_relative_embeddings = get_relative_embeddings_left_right(
max_relative_position, length, depth_v, num_heads,
heads_share_relative_embedding, "value_relative_embeddings")
ret += matmul_with_relative_values(
relative_weights, value_relative_embeddings,
heads_share_relative_embedding)
return ret | Calculate relative position-aware dot-product self-attention.
The attention calculation is augmented with learned representations for the
relative position between each element in q and each element in k and v.
Args:
q: a Tensor with shape [batch, heads, length, depth].
k: a Tensor with shape [batch, heads, length, depth].
v: a Tensor with shape [batch, heads, length, depth].
bias: bias Tensor.
max_relative_position: an integer the max relative embedding considered.
Changing this invalidates checkpoints.
dropout_rate: a floating point number.
image_shapes: optional tuple of integer scalars.
name: an optional string.
make_image_summary: Whether to make an attention image summary.
dropout_broadcast_dims: an optional list of integers less than 4
specifying in which dimensions to broadcast the dropout decisions.
saves memory.
heads_share_relative_embedding: a boolean indicating wheather to share
relative embeddings between attention heads.
add_relative_to_values: a boolean for whether to add relative component to
values.
Returns:
A Tensor.
Raises:
ValueError: if max_relative_position is not > 0. | Below is the the instruction that describes the task:
### Input:
Calculate relative position-aware dot-product self-attention.
The attention calculation is augmented with learned representations for the
relative position between each element in q and each element in k and v.
Args:
q: a Tensor with shape [batch, heads, length, depth].
k: a Tensor with shape [batch, heads, length, depth].
v: a Tensor with shape [batch, heads, length, depth].
bias: bias Tensor.
max_relative_position: an integer the max relative embedding considered.
Changing this invalidates checkpoints.
dropout_rate: a floating point number.
image_shapes: optional tuple of integer scalars.
name: an optional string.
make_image_summary: Whether to make an attention image summary.
dropout_broadcast_dims: an optional list of integers less than 4
specifying in which dimensions to broadcast the dropout decisions.
saves memory.
heads_share_relative_embedding: a boolean indicating wheather to share
relative embeddings between attention heads.
add_relative_to_values: a boolean for whether to add relative component to
values.
Returns:
A Tensor.
Raises:
ValueError: if max_relative_position is not > 0.
### Response:
def dot_product_unmasked_self_attention_relative_v2(
q, k, v, bias, max_relative_position=None, dropout_rate=0.0,
image_shapes=None, name=None, make_image_summary=True,
dropout_broadcast_dims=None, heads_share_relative_embedding=False,
add_relative_to_values=False):
"""Calculate relative position-aware dot-product self-attention.
The attention calculation is augmented with learned representations for the
relative position between each element in q and each element in k and v.
Args:
q: a Tensor with shape [batch, heads, length, depth].
k: a Tensor with shape [batch, heads, length, depth].
v: a Tensor with shape [batch, heads, length, depth].
bias: bias Tensor.
max_relative_position: an integer the max relative embedding considered.
Changing this invalidates checkpoints.
dropout_rate: a floating point number.
image_shapes: optional tuple of integer scalars.
name: an optional string.
make_image_summary: Whether to make an attention image summary.
dropout_broadcast_dims: an optional list of integers less than 4
specifying in which dimensions to broadcast the dropout decisions.
saves memory.
heads_share_relative_embedding: a boolean indicating wheather to share
relative embeddings between attention heads.
add_relative_to_values: a boolean for whether to add relative component to
values.
Returns:
A Tensor.
Raises:
ValueError: if max_relative_position is not > 0.
"""
if not max_relative_position:
raise ValueError("Max relative position (%s) should be > 0 when using "
"relative self attention." % (max_relative_position))
with tf.variable_scope(
name,
default_name="dot_product_unmasked_self_attention_relative_v2",
values=[q, k, v]):
# This calculation only works for self attention.
# q, k and v must therefore have the same shape.
q.get_shape().assert_is_compatible_with(k.get_shape())
q.get_shape().assert_is_compatible_with(v.get_shape())
# [batch, num_heads, query_length, memory_length]
logits = tf.matmul(q, k, transpose_b=True)
length = common_layers.shape_list(q)[2]
k_shape = common_layers.shape_list(k)
num_heads = k_shape[1]
depth_k = k_shape[-1]
key_relative_embeddings = get_relative_embeddings_left_right(
max_relative_position, length, depth_k, num_heads,
heads_share_relative_embedding,
"key_relative_embeddings")
unmasked_rel_logits = matmul_with_relative_keys(
q, key_relative_embeddings, heads_share_relative_embedding)
unmasked_rel_logits = _relative_position_to_absolute_position_unmasked(
unmasked_rel_logits)
logits += unmasked_rel_logits
if bias is not None:
logits += bias
weights = tf.nn.softmax(logits, name="attention_weights")
# dropping out the attention links for each of the heads
weights = common_layers.dropout_with_broadcast_dims(
weights, 1.0 - dropout_rate, broadcast_dims=dropout_broadcast_dims)
# relative_weights.set_shape([None, None, None, max_length])
if common_layers.should_generate_summaries() and make_image_summary:
attention_image_summary(weights, image_shapes)
ret = tf.matmul(weights, v)
if add_relative_to_values:
# Adds the contribution of the weighted relative embeddings to the values.
# [batch, num_heads, query_length, 2*memory_length-1]
relative_weights = _absolute_position_to_relative_position_unmasked(
weights)
depth_v = common_layers.shape_list(v)[3]
value_relative_embeddings = get_relative_embeddings_left_right(
max_relative_position, length, depth_v, num_heads,
heads_share_relative_embedding, "value_relative_embeddings")
ret += matmul_with_relative_values(
relative_weights, value_relative_embeddings,
heads_share_relative_embedding)
return ret |
def removeAttributeNode(self, attr: Attr) -> Optional[Attr]:
"""Remove ``Attr`` node from this node."""
return self.attributes.removeNamedItem(attr) | Remove ``Attr`` node from this node. | Below is the the instruction that describes the task:
### Input:
Remove ``Attr`` node from this node.
### Response:
def removeAttributeNode(self, attr: Attr) -> Optional[Attr]:
"""Remove ``Attr`` node from this node."""
return self.attributes.removeNamedItem(attr) |
def equals(self, rhs):
"""Check to see if RHS is almost equal to float_value
Args:
rhs: the value to compare to float_value
Returns:
bool
"""
try:
return round(rhs-self._float_value, self._places) == 0
except TypeError:
# This is probably because either float_value or rhs is not a number.
return False | Check to see if RHS is almost equal to float_value
Args:
rhs: the value to compare to float_value
Returns:
bool | Below is the the instruction that describes the task:
### Input:
Check to see if RHS is almost equal to float_value
Args:
rhs: the value to compare to float_value
Returns:
bool
### Response:
def equals(self, rhs):
"""Check to see if RHS is almost equal to float_value
Args:
rhs: the value to compare to float_value
Returns:
bool
"""
try:
return round(rhs-self._float_value, self._places) == 0
except TypeError:
# This is probably because either float_value or rhs is not a number.
return False |
def validate(self, value):
"""
Check if ``value`` is valid.
:returns: [errors] If ``value`` is invalid, otherwise [].
"""
errors = []
# Make sure the type validates first.
valid = self._is_valid(value)
if not valid:
errors.append(self.fail(value))
return errors
# Then validate all the constraints second.
for constraint in self._constraints_inst:
error = constraint.is_valid(value)
if error:
errors.append(error)
return errors | Check if ``value`` is valid.
:returns: [errors] If ``value`` is invalid, otherwise []. | Below is the the instruction that describes the task:
### Input:
Check if ``value`` is valid.
:returns: [errors] If ``value`` is invalid, otherwise [].
### Response:
def validate(self, value):
"""
Check if ``value`` is valid.
:returns: [errors] If ``value`` is invalid, otherwise [].
"""
errors = []
# Make sure the type validates first.
valid = self._is_valid(value)
if not valid:
errors.append(self.fail(value))
return errors
# Then validate all the constraints second.
for constraint in self._constraints_inst:
error = constraint.is_valid(value)
if error:
errors.append(error)
return errors |
def description_for_valid_number(numobj, lang, script=None, region=None):
"""Return a text description of a PhoneNumber object, in the language
provided.
The description might consist of the name of the country where the phone
number is from and/or the name of the geographical area the phone number
is from if more detailed information is available.
If the phone number is from the same region as the user, only a
lower-level description will be returned, if one exists. Otherwise, the
phone number's region will be returned, with optionally some more detailed
information.
For example, for a user from the region "US" (United States), we would
show "Mountain View, CA" for a particular number, omitting the United
States from the description. For a user from the United Kingdom (region
"GB"), for the same number we may show "Mountain View, CA, United States"
or even just "United States".
This function assumes the validity of the number passed in has already
been checked, and that the number is suitable for geocoding. We consider
fixed-line and mobile numbers possible candidates for geocoding.
Arguments:
numobj -- A valid PhoneNumber object for which we want to get a text
description.
lang -- A 2-letter lowercase ISO 639-1 language code for the language in
which the description should be returned (e.g. "en")
script -- A 4-letter titlecase (first letter uppercase, rest lowercase)
ISO script code as defined in ISO 15924, separated by an
underscore (e.g. "Hant")
region -- The region code for a given user. This region will be omitted
from the description if the phone number comes from this
region. It should be a two-letter upper-case CLDR region
code.
Returns a text description in the given language code, for the given phone
number, or an empty string if the number could come from multiple countries,
or the country code is in fact invalid."""
number_region = region_code_for_number(numobj)
if region is None or region == number_region:
mobile_token = country_mobile_token(numobj.country_code)
national_number = national_significant_number(numobj)
if mobile_token != U_EMPTY_STRING and national_number.startswith(mobile_token):
# In some countries, eg. Argentina, mobile numbers have a mobile token
# before the national destination code, this should be removed before
# geocoding.
national_number = national_number[len(mobile_token):]
region = region_code_for_country_code(numobj.country_code)
try:
copied_numobj = parse(national_number, region)
except NumberParseException:
# If this happens, just re-use what we had.
copied_numobj = numobj
area_description = _prefix_description_for_number(GEOCODE_DATA, GEOCODE_LONGEST_PREFIX,
copied_numobj, lang, script, region)
else:
area_description = _prefix_description_for_number(GEOCODE_DATA, GEOCODE_LONGEST_PREFIX,
numobj, lang, script, region)
if area_description != "":
return area_description
else:
# Fall back to the description of the number's region
return country_name_for_number(numobj, lang, script, region)
else:
# Otherwise, we just show the region(country) name for now.
return _region_display_name(number_region, lang, script, region) | Return a text description of a PhoneNumber object, in the language
provided.
The description might consist of the name of the country where the phone
number is from and/or the name of the geographical area the phone number
is from if more detailed information is available.
If the phone number is from the same region as the user, only a
lower-level description will be returned, if one exists. Otherwise, the
phone number's region will be returned, with optionally some more detailed
information.
For example, for a user from the region "US" (United States), we would
show "Mountain View, CA" for a particular number, omitting the United
States from the description. For a user from the United Kingdom (region
"GB"), for the same number we may show "Mountain View, CA, United States"
or even just "United States".
This function assumes the validity of the number passed in has already
been checked, and that the number is suitable for geocoding. We consider
fixed-line and mobile numbers possible candidates for geocoding.
Arguments:
numobj -- A valid PhoneNumber object for which we want to get a text
description.
lang -- A 2-letter lowercase ISO 639-1 language code for the language in
which the description should be returned (e.g. "en")
script -- A 4-letter titlecase (first letter uppercase, rest lowercase)
ISO script code as defined in ISO 15924, separated by an
underscore (e.g. "Hant")
region -- The region code for a given user. This region will be omitted
from the description if the phone number comes from this
region. It should be a two-letter upper-case CLDR region
code.
Returns a text description in the given language code, for the given phone
number, or an empty string if the number could come from multiple countries,
or the country code is in fact invalid. | Below is the instruction that describes the task:
### Input:
Return a text description of a PhoneNumber object, in the language
provided.
The description might consist of the name of the country where the phone
number is from and/or the name of the geographical area the phone number
is from if more detailed information is available.
If the phone number is from the same region as the user, only a
lower-level description will be returned, if one exists. Otherwise, the
phone number's region will be returned, with optionally some more detailed
information.
For example, for a user from the region "US" (United States), we would
show "Mountain View, CA" for a particular number, omitting the United
States from the description. For a user from the United Kingdom (region
"GB"), for the same number we may show "Mountain View, CA, United States"
or even just "United States".
This function assumes the validity of the number passed in has already
been checked, and that the number is suitable for geocoding. We consider
fixed-line and mobile numbers possible candidates for geocoding.
Arguments:
numobj -- A valid PhoneNumber object for which we want to get a text
description.
lang -- A 2-letter lowercase ISO 639-1 language code for the language in
which the description should be returned (e.g. "en")
script -- A 4-letter titlecase (first letter uppercase, rest lowercase)
ISO script code as defined in ISO 15924, separated by an
underscore (e.g. "Hant")
region -- The region code for a given user. This region will be omitted
from the description if the phone number comes from this
region. It should be a two-letter upper-case CLDR region
code.
Returns a text description in the given language code, for the given phone
number, or an empty string if the number could come from multiple countries,
or the country code is in fact invalid.
### Response:
def description_for_valid_number(numobj, lang, script=None, region=None):
"""Return a text description of a PhoneNumber object, in the language
provided.
The description might consist of the name of the country where the phone
number is from and/or the name of the geographical area the phone number
is from if more detailed information is available.
If the phone number is from the same region as the user, only a
lower-level description will be returned, if one exists. Otherwise, the
phone number's region will be returned, with optionally some more detailed
information.
For example, for a user from the region "US" (United States), we would
show "Mountain View, CA" for a particular number, omitting the United
States from the description. For a user from the United Kingdom (region
"GB"), for the same number we may show "Mountain View, CA, United States"
or even just "United States".
This function assumes the validity of the number passed in has already
been checked, and that the number is suitable for geocoding. We consider
fixed-line and mobile numbers possible candidates for geocoding.
Arguments:
numobj -- A valid PhoneNumber object for which we want to get a text
description.
lang -- A 2-letter lowercase ISO 639-1 language code for the language in
which the description should be returned (e.g. "en")
script -- A 4-letter titlecase (first letter uppercase, rest lowercase)
ISO script code as defined in ISO 15924, separated by an
underscore (e.g. "Hant")
region -- The region code for a given user. This region will be omitted
from the description if the phone number comes from this
region. It should be a two-letter upper-case CLDR region
code.
Returns a text description in the given language code, for the given phone
number, or an empty string if the number could come from multiple countries,
or the country code is in fact invalid."""
number_region = region_code_for_number(numobj)
if region is None or region == number_region:
mobile_token = country_mobile_token(numobj.country_code)
national_number = national_significant_number(numobj)
if mobile_token != U_EMPTY_STRING and national_number.startswith(mobile_token):
# In some countries, eg. Argentina, mobile numbers have a mobile token
# before the national destination code, this should be removed before
# geocoding.
national_number = national_number[len(mobile_token):]
region = region_code_for_country_code(numobj.country_code)
try:
copied_numobj = parse(national_number, region)
except NumberParseException:
# If this happens, just re-use what we had.
copied_numobj = numobj
area_description = _prefix_description_for_number(GEOCODE_DATA, GEOCODE_LONGEST_PREFIX,
copied_numobj, lang, script, region)
else:
area_description = _prefix_description_for_number(GEOCODE_DATA, GEOCODE_LONGEST_PREFIX,
numobj, lang, script, region)
if area_description != "":
return area_description
else:
# Fall back to the description of the number's region
return country_name_for_number(numobj, lang, script, region)
else:
# Otherwise, we just show the region(country) name for now.
return _region_display_name(number_region, lang, script, region) |
def resume(self, mask, filename, port, pos):
    """Resume a DCC send transfer for *mask* at byte offset *pos*.

    Updates the offset of the matching send connection and notifies the
    peer with a ``DCC ACCEPT`` CTCP message.
    """
    send_masks = self.connections['send']['masks']
    send_masks[mask][port].offset = pos
    self.bot.ctcp(mask, 'DCC ACCEPT %s %d %d' % (filename, port, pos))
### Input:
Resume a DCC send
### Response:
def resume(self, mask, filename, port, pos):
"""Resume a DCC send"""
self.connections['send']['masks'][mask][port].offset = pos
message = 'DCC ACCEPT %s %d %d' % (filename, port, pos)
self.bot.ctcp(mask, message) |
def ReadAllClientActionRequests(self, client_id, cursor=None):
    """Reads all client messages available for a given client_id."""
    query = ("SELECT request, UNIX_TIMESTAMP(leased_until), leased_by, "
             "leased_count "
             "FROM client_action_requests "
             "WHERE client_id = %s")
    cursor.execute(query, [db_utils.ClientIDToInt(client_id)])
    requests = []
    for serialized, leased_until, leased_by, leased_count in cursor.fetchall():
        request = rdf_flows.ClientActionRequest.FromSerializedString(serialized)
        if leased_until is None:
            # Not currently leased: clear any lease metadata.
            request.leased_by = None
            request.leased_until = None
        else:
            request.leased_by = leased_by
            request.leased_until = mysql_utils.TimestampToRDFDatetime(leased_until)
        # Remaining time-to-live decreases with every lease taken so far.
        request.ttl = db.Database.CLIENT_MESSAGES_TTL - leased_count
        requests.append(request)
    requests.sort(key=lambda req: (req.flow_id, req.request_id))
    return requests
### Input:
Reads all client messages available for a given client_id.
### Response:
def ReadAllClientActionRequests(self, client_id, cursor=None):
"""Reads all client messages available for a given client_id."""
query = ("SELECT request, UNIX_TIMESTAMP(leased_until), leased_by, "
"leased_count "
"FROM client_action_requests "
"WHERE client_id = %s")
cursor.execute(query, [db_utils.ClientIDToInt(client_id)])
ret = []
for req, leased_until, leased_by, leased_count in cursor.fetchall():
request = rdf_flows.ClientActionRequest.FromSerializedString(req)
if leased_until is not None:
request.leased_by = leased_by
request.leased_until = mysql_utils.TimestampToRDFDatetime(leased_until)
else:
request.leased_by = None
request.leased_until = None
request.ttl = db.Database.CLIENT_MESSAGES_TTL - leased_count
ret.append(request)
return sorted(ret, key=lambda req: (req.flow_id, req.request_id)) |
def _deduce_settings_from_file(self, opened_raw_data_file):  # TODO: parse better
    '''Tries to get the scan parameters needed for analysis from the raw data file.

    Reads the chip flavor, trigger count, PlsrDAC calibration coefficients,
    injection capacitor values, capacitor masks and injection count from the
    ``configuration`` group of the opened pytables raw data file and stores
    them as attributes on ``self``.  If the file carries no configuration
    metadata (old files), previously set or default settings are kept.

    :param opened_raw_data_file: open pytables file with a ``configuration`` group
    '''
    try:  # take infos from raw data files (not available in old files)
        flavor = opened_raw_data_file.root.configuration.miscellaneous[:][np.where(opened_raw_data_file.root.configuration.miscellaneous[:]['name'] == 'Flavor')]['value'][0]
        self._settings_from_file_set = True
        # adding this for special cases e.g., stop-mode scan: prefer the
        # run configuration value over the global register value
        if "trig_count" in opened_raw_data_file.root.configuration.run_conf[:]['name']:
            trig_count = opened_raw_data_file.root.configuration.run_conf[:][np.where(opened_raw_data_file.root.configuration.run_conf[:]['name'] == 'trig_count')]['value'][0]
        else:
            trig_count = opened_raw_data_file.root.configuration.global_register[:][np.where(opened_raw_data_file.root.configuration.global_register[:]['name'] == 'Trig_Count')]['value'][0]
        # PlsrDAC-to-charge calibration coefficients and injection capacitances
        vcal_c0 = opened_raw_data_file.root.configuration.calibration_parameters[:][np.where(opened_raw_data_file.root.configuration.calibration_parameters[:]['name'] == 'Vcal_Coeff_0')]['value'][0]
        vcal_c1 = opened_raw_data_file.root.configuration.calibration_parameters[:][np.where(opened_raw_data_file.root.configuration.calibration_parameters[:]['name'] == 'Vcal_Coeff_1')]['value'][0]
        c_low = opened_raw_data_file.root.configuration.calibration_parameters[:][np.where(opened_raw_data_file.root.configuration.calibration_parameters[:]['name'] == 'C_Inj_Low')]['value'][0]
        c_mid = opened_raw_data_file.root.configuration.calibration_parameters[:][np.where(opened_raw_data_file.root.configuration.calibration_parameters[:]['name'] == 'C_Inj_Med')]['value'][0]
        c_high = opened_raw_data_file.root.configuration.calibration_parameters[:][np.where(opened_raw_data_file.root.configuration.calibration_parameters[:]['name'] == 'C_Inj_High')]['value'][0]
        # Per-pixel capacitor selection masks
        self.c_low_mask = opened_raw_data_file.root.configuration.C_Low[:]
        self.c_high_mask = opened_raw_data_file.root.configuration.C_High[:]
        # Only the flavor string distinguishes FE-I4A from FE-I4B here
        self.fei4b = False if str(flavor) == 'fei4a' else True
        self.trig_count = int(trig_count)
        self.vcal_c0 = float(vcal_c0)
        self.vcal_c1 = float(vcal_c1)
        self.c_low = float(c_low)
        self.c_mid = float(c_mid)
        self.c_high = float(c_high)
        self.n_injections = int(opened_raw_data_file.root.configuration.run_conf[:][np.where(opened_raw_data_file.root.configuration.run_conf[:]['name'] == 'n_injections')]['value'][0])
    except tb.exceptions.NoSuchNodeError:
        # Configuration group (or one of its tables) is missing entirely
        if not self._settings_from_file_set:
            logging.warning('No settings stored in raw data file %s, use standard settings', opened_raw_data_file.filename)
        else:
            logging.info('No settings provided in raw data file %s, use already set settings', opened_raw_data_file.filename)
    except IndexError:  # happens if setting is not available (e.g. repeat_command)
        pass | Tries to get the scan parameters needed for analysis from the raw data file | Below is the the instruction that describes the task:
### Input:
Tries to get the scan parameters needed for analysis from the raw data file
### Response:
def _deduce_settings_from_file(self, opened_raw_data_file): # TODO: parse better
'''Tries to get the scan parameters needed for analysis from the raw data file
'''
try: # take infos raw data files (not avalable in old files)
flavor = opened_raw_data_file.root.configuration.miscellaneous[:][np.where(opened_raw_data_file.root.configuration.miscellaneous[:]['name'] == 'Flavor')]['value'][0]
self._settings_from_file_set = True
# adding this for special cases e.g., stop-mode scan
if "trig_count" in opened_raw_data_file.root.configuration.run_conf[:]['name']:
trig_count = opened_raw_data_file.root.configuration.run_conf[:][np.where(opened_raw_data_file.root.configuration.run_conf[:]['name'] == 'trig_count')]['value'][0]
else:
trig_count = opened_raw_data_file.root.configuration.global_register[:][np.where(opened_raw_data_file.root.configuration.global_register[:]['name'] == 'Trig_Count')]['value'][0]
vcal_c0 = opened_raw_data_file.root.configuration.calibration_parameters[:][np.where(opened_raw_data_file.root.configuration.calibration_parameters[:]['name'] == 'Vcal_Coeff_0')]['value'][0]
vcal_c1 = opened_raw_data_file.root.configuration.calibration_parameters[:][np.where(opened_raw_data_file.root.configuration.calibration_parameters[:]['name'] == 'Vcal_Coeff_1')]['value'][0]
c_low = opened_raw_data_file.root.configuration.calibration_parameters[:][np.where(opened_raw_data_file.root.configuration.calibration_parameters[:]['name'] == 'C_Inj_Low')]['value'][0]
c_mid = opened_raw_data_file.root.configuration.calibration_parameters[:][np.where(opened_raw_data_file.root.configuration.calibration_parameters[:]['name'] == 'C_Inj_Med')]['value'][0]
c_high = opened_raw_data_file.root.configuration.calibration_parameters[:][np.where(opened_raw_data_file.root.configuration.calibration_parameters[:]['name'] == 'C_Inj_High')]['value'][0]
self.c_low_mask = opened_raw_data_file.root.configuration.C_Low[:]
self.c_high_mask = opened_raw_data_file.root.configuration.C_High[:]
self.fei4b = False if str(flavor) == 'fei4a' else True
self.trig_count = int(trig_count)
self.vcal_c0 = float(vcal_c0)
self.vcal_c1 = float(vcal_c1)
self.c_low = float(c_low)
self.c_mid = float(c_mid)
self.c_high = float(c_high)
self.n_injections = int(opened_raw_data_file.root.configuration.run_conf[:][np.where(opened_raw_data_file.root.configuration.run_conf[:]['name'] == 'n_injections')]['value'][0])
except tb.exceptions.NoSuchNodeError:
if not self._settings_from_file_set:
logging.warning('No settings stored in raw data file %s, use standard settings', opened_raw_data_file.filename)
else:
logging.info('No settings provided in raw data file %s, use already set settings', opened_raw_data_file.filename)
except IndexError: # happens if setting is not available (e.g. repeat_command)
pass |
def unload(self, source: Source) -> None:
    """
    Unload a previously registered source.

    All bugs, blueprints and tools that were provided by the source are
    removed from the installation as well.  Unloading a source that is not
    currently loaded is a no-op.
    """
    logger.info('unloading source: %s', source.name)
    try:
        contents = self.contents(source)
        del self.__contents[source.name]
        del self.__sources[source.name]
        # Drop everything the source contributed to the installation.
        for bug_name in contents.bugs:
            self.__installation.bugs.remove(self.__installation.bugs[bug_name])
        for blueprint_name in contents.blueprints:
            self.__installation.build.remove(self.__installation.build[blueprint_name])
        for tool_name in contents.tools:
            self.__installation.tools.remove(self.__installation.tools[tool_name])
    except KeyError:
        # Source was not loaded: nothing to do.
        pass
    logger.info('unloaded source: %s', source.name)
and blueprints to also be unloaded. If the given source is not loaded,
this function will do nothing. | Below is the the instruction that describes the task:
### Input:
Unloads a registered source, causing all of its associated bugs, tools,
and blueprints to also be unloaded. If the given source is not loaded,
this function will do nothing.
### Response:
def unload(self, source: Source) -> None:
"""
Unloads a registered source, causing all of its associated bugs, tools,
and blueprints to also be unloaded. If the given source is not loaded,
this function will do nothing.
"""
logger.info('unloading source: %s', source.name)
try:
contents = self.contents(source)
del self.__contents[source.name]
del self.__sources[source.name]
for name in contents.bugs:
bug = self.__installation.bugs[name]
self.__installation.bugs.remove(bug)
for name in contents.blueprints:
blueprint = self.__installation.build[name]
self.__installation.build.remove(blueprint)
for name in contents.tools:
tool = self.__installation.tools[name]
self.__installation.tools.remove(tool)
except KeyError:
pass
logger.info('unloaded source: %s', source.name) |
def _roll(a, shift):
"""
Roll 1D array elements. Improves the performance of numpy.roll() by reducing the overhead introduced from the
flexibility of the numpy.roll() method such as the support for rolling over multiple dimensions.
Elements that roll beyond the last position are re-introduced at the beginning. Similarly, elements that roll
back beyond the first position are re-introduced at the end (with negative shift).
Examples
--------
>>> x = np.array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
>>> _roll(x, shift=2)
>>> array([8, 9, 0, 1, 2, 3, 4, 5, 6, 7])
>>> x = np.array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
>>> _roll(x, shift=-2)
>>> array([2, 3, 4, 5, 6, 7, 8, 9, 0, 1])
>>> x = np.array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
>>> _roll(x, shift=12)
>>> array([8, 9, 0, 1, 2, 3, 4, 5, 6, 7])
Benchmark
---------
>>> x = np.array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
>>> %timeit _roll(x, shift=2)
>>> 1.89 µs ± 341 ns per loop (mean ± std. dev. of 7 runs, 100000 loops each)
>>> x = np.array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
>>> %timeit np.roll(x, shift=2)
>>> 11.4 µs ± 776 ns per loop (mean ± std. dev. of 7 runs, 100000 loops each)
:param a: the input array
:type a: array_like
:param shift: the number of places by which elements are shifted
:type shift: int
:return: shifted array with the same shape as a
:return type: ndarray
"""
if not isinstance(a, np.ndarray):
a = np.asarray(a)
idx = shift % len(a)
return np.concatenate([a[-idx:], a[:-idx]]) | Roll 1D array elements. Improves the performance of numpy.roll() by reducing the overhead introduced from the
flexibility of the numpy.roll() method such as the support for rolling over multiple dimensions.
Elements that roll beyond the last position are re-introduced at the beginning. Similarly, elements that roll
back beyond the first position are re-introduced at the end (with negative shift).
Examples
--------
>>> x = np.array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
>>> _roll(x, shift=2)
>>> array([8, 9, 0, 1, 2, 3, 4, 5, 6, 7])
>>> x = np.array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
>>> _roll(x, shift=-2)
>>> array([2, 3, 4, 5, 6, 7, 8, 9, 0, 1])
>>> x = np.array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
>>> _roll(x, shift=12)
>>> array([8, 9, 0, 1, 2, 3, 4, 5, 6, 7])
Benchmark
---------
>>> x = np.array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
>>> %timeit _roll(x, shift=2)
>>> 1.89 µs ± 341 ns per loop (mean ± std. dev. of 7 runs, 100000 loops each)
>>> x = np.array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
>>> %timeit np.roll(x, shift=2)
>>> 11.4 µs ± 776 ns per loop (mean ± std. dev. of 7 runs, 100000 loops each)
:param a: the input array
:type a: array_like
:param shift: the number of places by which elements are shifted
:type shift: int
:return: shifted array with the same shape as a
:return type: ndarray | Below is the instruction that describes the task:
### Input:
Roll 1D array elements. Improves the performance of numpy.roll() by reducing the overhead introduced from the
flexibility of the numpy.roll() method such as the support for rolling over multiple dimensions.
Elements that roll beyond the last position are re-introduced at the beginning. Similarly, elements that roll
back beyond the first position are re-introduced at the end (with negative shift).
Examples
--------
>>> x = np.array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
>>> _roll(x, shift=2)
>>> array([8, 9, 0, 1, 2, 3, 4, 5, 6, 7])
>>> x = np.array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
>>> _roll(x, shift=-2)
>>> array([2, 3, 4, 5, 6, 7, 8, 9, 0, 1])
>>> x = np.array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
>>> _roll(x, shift=12)
>>> array([8, 9, 0, 1, 2, 3, 4, 5, 6, 7])
Benchmark
---------
>>> x = np.array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
>>> %timeit _roll(x, shift=2)
>>> 1.89 µs ± 341 ns per loop (mean ± std. dev. of 7 runs, 100000 loops each)
>>> x = np.array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
>>> %timeit np.roll(x, shift=2)
>>> 11.4 µs ± 776 ns per loop (mean ± std. dev. of 7 runs, 100000 loops each)
:param a: the input array
:type a: array_like
:param shift: the number of places by which elements are shifted
:type shift: int
:return: shifted array with the same shape as a
:return type: ndarray
### Response:
def _roll(a, shift):
"""
Roll 1D array elements. Improves the performance of numpy.roll() by reducing the overhead introduced from the
flexibility of the numpy.roll() method such as the support for rolling over multiple dimensions.
Elements that roll beyond the last position are re-introduced at the beginning. Similarly, elements that roll
back beyond the first position are re-introduced at the end (with negative shift).
Examples
--------
>>> x = np.array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
>>> _roll(x, shift=2)
>>> array([8, 9, 0, 1, 2, 3, 4, 5, 6, 7])
>>> x = np.array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
>>> _roll(x, shift=-2)
>>> array([2, 3, 4, 5, 6, 7, 8, 9, 0, 1])
>>> x = np.array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
>>> _roll(x, shift=12)
>>> array([8, 9, 0, 1, 2, 3, 4, 5, 6, 7])
Benchmark
---------
>>> x = np.array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
>>> %timeit _roll(x, shift=2)
>>> 1.89 µs ± 341 ns per loop (mean ± std. dev. of 7 runs, 100000 loops each)
>>> x = np.array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
>>> %timeit np.roll(x, shift=2)
>>> 11.4 µs ± 776 ns per loop (mean ± std. dev. of 7 runs, 100000 loops each)
:param a: the input array
:type a: array_like
:param shift: the number of places by which elements are shifted
:type shift: int
:return: shifted array with the same shape as a
:return type: ndarray
"""
if not isinstance(a, np.ndarray):
a = np.asarray(a)
idx = shift % len(a)
return np.concatenate([a[-idx:], a[:-idx]]) |
def _in_tag(self, tagname, attributes=None):
"""
Determine if we are already in a certain tag.
If we give attributes, make sure they match.
"""
node = self.cur_node
while not node is None:
if node.tag == tagname:
if attributes and node.attrib == attributes:
return True
elif attributes:
return False
return True
node = node.getparent()
return False | Determine if we are already in a certain tag.
If we give attributes, make sure they match. | Below is the instruction that describes the task:
### Input:
Determine if we are already in a certain tag.
If we give attributes, make sure they match.
### Response:
def _in_tag(self, tagname, attributes=None):
"""
Determine if we are already in a certain tag.
If we give attributes, make sure they match.
"""
node = self.cur_node
while not node is None:
if node.tag == tagname:
if attributes and node.attrib == attributes:
return True
elif attributes:
return False
return True
node = node.getparent()
return False |
def populateViewTree(self, view):
    '''
    Populates the View tree.

    Inserts ``view`` into the Tkinter treeview widget, attached under its
    parent's entry when it has a parent or at the root otherwise, marks the
    'T' column with '*' for target views, and (re-)binds the click handler
    for items tagged 'ttk'.
    '''
    vuid = view.getUniqueId()
    text = view.__smallStr__()
    if view.getParent() is None:
        # Top-level view: insert directly at the root of the tree.
        self.viewTree.insert('', Tkinter.END, vuid, text=text)
    else:
        # Child view: insert under its parent's tree entry.
        self.viewTree.insert(view.getParent().getUniqueId(), Tkinter.END, vuid, text=text, tags=('ttk'))
    self.viewTree.set(vuid, 'T', '*' if view.isTarget() else ' ')
    self.viewTree.tag_bind('ttk', '<1>', self.viewTreeItemClicked) | Populates the View tree. | Below is the the instruction that describes the task:
### Input:
Populates the View tree.
### Response:
def populateViewTree(self, view):
'''
Populates the View tree.
'''
vuid = view.getUniqueId()
text = view.__smallStr__()
if view.getParent() is None:
self.viewTree.insert('', Tkinter.END, vuid, text=text)
else:
self.viewTree.insert(view.getParent().getUniqueId(), Tkinter.END, vuid, text=text, tags=('ttk'))
self.viewTree.set(vuid, 'T', '*' if view.isTarget() else ' ')
self.viewTree.tag_bind('ttk', '<1>', self.viewTreeItemClicked) |
def get_route(self, route_id):
    """
    Gets specified route.
    Will be detail-level if owned by authenticated user; otherwise summary-level.
    https://strava.github.io/api/v3/routes/#retreive
    :param route_id: The ID of route to fetch.
    :type route_id: int
    :rtype: :class:`stravalib.model.Route`
    """
    response = self.protocol.get('/routes/{id}', id=route_id)
    return model.Route.deserialize(response, bind_client=self)
Will be detail-level if owned by authenticated user; otherwise summary-level.
https://strava.github.io/api/v3/routes/#retreive
:param route_id: The ID of route to fetch.
:type route_id: int
:rtype: :class:`stravalib.model.Route` | Below is the the instruction that describes the task:
### Input:
Gets specified route.
Will be detail-level if owned by authenticated user; otherwise summary-level.
https://strava.github.io/api/v3/routes/#retreive
:param route_id: The ID of route to fetch.
:type route_id: int
:rtype: :class:`stravalib.model.Route`
### Response:
def get_route(self, route_id):
"""
Gets specified route.
Will be detail-level if owned by authenticated user; otherwise summary-level.
https://strava.github.io/api/v3/routes/#retreive
:param route_id: The ID of route to fetch.
:type route_id: int
:rtype: :class:`stravalib.model.Route`
"""
raw = self.protocol.get('/routes/{id}', id=route_id)
return model.Route.deserialize(raw, bind_client=self) |
def nlmsg_convert(hdr):
    """Convert a Netlink message received from a Netlink socket to an nl_msg.
    https://github.com/thom311/libnl/blob/libnl3_2_25/lib/msg.c#L382
    Allocates a new Netlink message and copies all of the data in `hdr` into the new message object.
    Positional arguments:
    hdr -- Netlink message received from netlink socket (nlmsghdr class instance).
    Returns:
    Newly allocated Netlink message (nl_msg class instance) or None.
    """
    msg = nlmsg_alloc(hdr.nlmsg_len)
    if msg:
        # Copy exactly nlmsg_len bytes of the original header into the new message.
        msg.nm_nlh.bytearray = hdr.bytearray.copy()[:hdr.nlmsg_len]
        return msg
    return None
https://github.com/thom311/libnl/blob/libnl3_2_25/lib/msg.c#L382
Allocates a new Netlink message and copies all of the data in `hdr` into the new message object.
Positional arguments:
hdr -- Netlink message received from netlink socket (nlmsghdr class instance).
Returns:
Newly allocated Netlink message (nl_msg class instance) or None. | Below is the the instruction that describes the task:
### Input:
Convert a Netlink message received from a Netlink socket to an nl_msg.
https://github.com/thom311/libnl/blob/libnl3_2_25/lib/msg.c#L382
Allocates a new Netlink message and copies all of the data in `hdr` into the new message object.
Positional arguments:
hdr -- Netlink message received from netlink socket (nlmsghdr class instance).
Returns:
Newly allocated Netlink message (nl_msg class instance) or None.
### Response:
def nlmsg_convert(hdr):
"""Convert a Netlink message received from a Netlink socket to an nl_msg.
https://github.com/thom311/libnl/blob/libnl3_2_25/lib/msg.c#L382
Allocates a new Netlink message and copies all of the data in `hdr` into the new message object.
Positional arguments:
hdr -- Netlink message received from netlink socket (nlmsghdr class instance).
Returns:
Newly allocated Netlink message (nl_msg class instance) or None.
"""
nm = nlmsg_alloc(hdr.nlmsg_len)
if not nm:
return None
nm.nm_nlh.bytearray = hdr.bytearray.copy()[:hdr.nlmsg_len]
return nm |
def _get_proposed_values(self):
"""
Method to perform time splitting using leapfrog
"""
# Take half step in time for updating momentum
momentum_bar = self.momentum + 0.5 * self.stepsize * self.grad_log_position
# Take full step in time for updating position position
position_bar = self.position + self.stepsize * momentum_bar
grad_log, _ = self.grad_log_pdf(position_bar, self.model).get_gradient_log_pdf()
# Take remaining half step in time for updating momentum
momentum_bar = momentum_bar + 0.5 * self.stepsize * grad_log
return position_bar, momentum_bar, grad_log | Method to perform time splitting using leapfrog | Below is the the instruction that describes the task:
### Input:
Method to perform time splitting using leapfrog
### Response:
def _get_proposed_values(self):
"""
Method to perform time splitting using leapfrog
"""
# Take half step in time for updating momentum
momentum_bar = self.momentum + 0.5 * self.stepsize * self.grad_log_position
# Take full step in time for updating position position
position_bar = self.position + self.stepsize * momentum_bar
grad_log, _ = self.grad_log_pdf(position_bar, self.model).get_gradient_log_pdf()
# Take remaining half step in time for updating momentum
momentum_bar = momentum_bar + 0.5 * self.stepsize * grad_log
return position_bar, momentum_bar, grad_log |
def vertices_per_edge(self):
    """Returns an Ex2 array of adjacencies between vertices, where
    each element in the array is a vertex index. Each edge is included
    only once. Edges that are not shared by 2 faces are not included."""
    import numpy as np
    shared = [vertices_in_common(pair[0], pair[1])
              for pair in self.f[self.faces_per_edge]]
    return np.asarray(shared)
each element in the array is a vertex index. Each edge is included
only once. Edges that are not shared by 2 faces are not included. | Below is the the instruction that describes the task:
### Input:
Returns an Ex2 array of adjacencies between vertices, where
each element in the array is a vertex index. Each edge is included
only once. Edges that are not shared by 2 faces are not included.
### Response:
def vertices_per_edge(self):
"""Returns an Ex2 array of adjacencies between vertices, where
each element in the array is a vertex index. Each edge is included
only once. Edges that are not shared by 2 faces are not included."""
import numpy as np
return np.asarray([vertices_in_common(e[0], e[1]) for e in self.f[self.faces_per_edge]]) |
def from_xdr_object(cls, tx_xdr_object):
    """Create a :class:`Transaction` object from a Transaction XDR
    object.
    """
    source = encode_check('account', tx_xdr_object.sourceAccount.ed25519)
    # The XDR sequence number is the post-transaction value; undo the bump.
    sequence = tx_xdr_object.seqNum - 1
    time_bounds = None
    bounds_xdr = tx_xdr_object.timeBounds  # TODO test
    if bounds_xdr:
        time_bounds = {
            'maxTime': bounds_xdr[0].maxTime,
            'minTime': bounds_xdr[0].minTime
        }
    memo = xdr_to_memo(tx_xdr_object.memo)
    operations = [Operation.from_xdr_object(op)
                  for op in tx_xdr_object.operations]
    return cls(
        source=source,
        sequence=sequence,
        time_bounds=time_bounds,
        memo=memo,
        fee=tx_xdr_object.fee,
        operations=operations)
object. | Below is the the instruction that describes the task:
### Input:
Create a :class:`Transaction` object from a Transaction XDR
object.
### Response:
def from_xdr_object(cls, tx_xdr_object):
"""Create a :class:`Transaction` object from a Transaction XDR
object.
"""
source = encode_check('account', tx_xdr_object.sourceAccount.ed25519)
sequence = tx_xdr_object.seqNum - 1
time_bounds_in_xdr = tx_xdr_object.timeBounds # TODO test
if time_bounds_in_xdr:
time_bounds = {
'maxTime': time_bounds_in_xdr[0].maxTime,
'minTime': time_bounds_in_xdr[0].minTime
}
else:
time_bounds = None
memo = xdr_to_memo(tx_xdr_object.memo)
operations = list(map(
Operation.from_xdr_object, tx_xdr_object.operations
))
return cls(
source=source,
sequence=sequence,
time_bounds=time_bounds,
memo=memo,
fee=tx_xdr_object.fee,
operations=operations) |
def E(self, *args, **kwargs):
    """
    NAME:
       E
    PURPOSE:
       calculate the energy
    INPUT:
       t - (optional) time at which to get the energy (can be Quantity)
       pot= Potential instance or list of such instances
       vo= (Object-wide default) physical scale for velocities to use to convert (can be Quantity)
       use_physical= use to override Object-wide default for using a physical scale for output
    OUTPUT:
       energy
    HISTORY:
       2010-09-15 - Written - Bovy (NYU)
    """
    pot = kwargs.get('pot', None)
    if pot is not None:
        # Normalize nested potential lists before delegating.
        kwargs['pot'] = flatten_potential(pot)
    _check_consistent_units(self, kwargs.get('pot', None))
    return self._orb.E(*args, **kwargs)
return self._orb.E(*args,**kwargs) | NAME:
E
PURPOSE:
calculate the energy
INPUT:
t - (optional) time at which to get the energy (can be Quantity)
pot= Potential instance or list of such instances
vo= (Object-wide default) physical scale for velocities to use to convert (can be Quantity)
use_physical= use to override Object-wide default for using a physical scale for output
OUTPUT:
energy
HISTORY:
2010-09-15 - Written - Bovy (NYU) | Below is the the instruction that describes the task:
### Input:
NAME:
E
PURPOSE:
calculate the energy
INPUT:
t - (optional) time at which to get the energy (can be Quantity)
pot= Potential instance or list of such instances
vo= (Object-wide default) physical scale for velocities to use to convert (can be Quantity)
use_physical= use to override Object-wide default for using a physical scale for output
OUTPUT:
energy
HISTORY:
2010-09-15 - Written - Bovy (NYU)
### Response:
def E(self,*args,**kwargs):
"""
NAME:
E
PURPOSE:
calculate the energy
INPUT:
t - (optional) time at which to get the energy (can be Quantity)
pot= Potential instance or list of such instances
vo= (Object-wide default) physical scale for velocities to use to convert (can be Quantity)
use_physical= use to override Object-wide default for using a physical scale for output
OUTPUT:
energy
HISTORY:
2010-09-15 - Written - Bovy (NYU)
"""
if not kwargs.get('pot',None) is None: kwargs['pot']= flatten_potential(kwargs.get('pot'))
_check_consistent_units(self,kwargs.get('pot',None))
return self._orb.E(*args,**kwargs) |
def get_item_metric_pair(item_lst, metric_lst, id_lst):
"""
align bleu and specific score in item_lst, reconstruct the data as (rank_score, bleu) pairs, query_dic.
Detail:
query dict is input parameter used by metrics: top-x-bleu, kendall-tau
query dict is reconstructed dict type data container,
query dict's key is qid and value is list type, whose elements are tuple eg: count of words, bleu score pairs
:param item_lst: the score value lst that used to rank candidates
:param metric_lst: the metric value aligned with item_lst
:return: query_dic
"""
query_dic = {} # key is qid, value is list, whose elements are tuple eg: count of words, bleu score pairs
for index in range(len(metric_lst)):
current_id = id_lst[index]
current_bleu = metric_lst[index]
current_rank_score = item_lst[index]
if current_id in query_dic:
query_dic[current_id].append((current_rank_score, current_bleu))
else:
query_dic[current_id] = []
query_dic[current_id].append((current_rank_score, current_bleu))
return query_dic | align bleu and specific score in item_lst, reconstruct the data as (rank_score, bleu) pairs, query_dic.
Detail:
query dict is input parameter used by metrics: top-x-bleu, kendall-tau
query dict is reconstructed dict type data container,
query dict's key is qid and value is list type, whose elements are tuple eg: count of words, bleu score pairs
:param item_lst: the score value lst that used to rank candidates
:param metric_lst: the metric value aligned with item_lst
:return: query_dic | Below is the the instruction that describes the task:
### Input:
align bleu and specific score in item_lst, reconstruct the data as (rank_score, bleu) pairs, query_dic.
Detail:
query dict is input parameter used by metrics: top-x-bleu, kendall-tau
query dict is reconstructed dict type data container,
query dict's key is qid and value is list type, whose elements are tuple eg: count of words, bleu score pairs
:param item_lst: the score value lst that used to rank candidates
:param metric_lst: the metric value aligned with item_lst
:return: query_dic
### Response:
def get_item_metric_pair(item_lst, metric_lst, id_lst):
"""
align bleu and specific score in item_lst, reconstruct the data as (rank_score, bleu) pairs, query_dic.
Detail:
query dict is input parameter used by metrics: top-x-bleu, kendall-tau
query dict is reconstructed dict type data container,
query dict's key is qid and value is list type, whose elements are tuple eg: count of words, bleu score pairs
:param item_lst: the score value lst that used to rank candidates
:param metric_lst: the metric value aligned with item_lst
:return: query_dic
"""
query_dic = {} # key is qid, value is list, whose elements are tuple eg: count of words, bleu score pairs
for index in range(len(metric_lst)):
current_id = id_lst[index]
current_bleu = metric_lst[index]
current_rank_score = item_lst[index]
if current_id in query_dic:
query_dic[current_id].append((current_rank_score, current_bleu))
else:
query_dic[current_id] = []
query_dic[current_id].append((current_rank_score, current_bleu))
return query_dic |
def _handle_parameter(self, default):
"""Handle a case where a parameter is at the head of the tokens.
*default* is the value to use if no parameter name is defined.
"""
key = None
showkey = False
self._push()
while self._tokens:
token = self._tokens.pop()
if isinstance(token, tokens.TemplateParamEquals):
key = self._pop()
showkey = True
self._push()
elif isinstance(token, (tokens.TemplateParamSeparator,
tokens.TemplateClose)):
self._tokens.append(token)
value = self._pop()
if key is None:
key = Wikicode(SmartList([Text(str(default))]))
return Parameter(key, value, showkey)
else:
self._write(self._handle_token(token))
raise ParserError("_handle_parameter() missed a close token") | Handle a case where a parameter is at the head of the tokens.
*default* is the value to use if no parameter name is defined. | Below is the the instruction that describes the task:
### Input:
Handle a case where a parameter is at the head of the tokens.
*default* is the value to use if no parameter name is defined.
### Response:
def _handle_parameter(self, default):
"""Handle a case where a parameter is at the head of the tokens.
*default* is the value to use if no parameter name is defined.
"""
key = None
showkey = False
self._push()
while self._tokens:
token = self._tokens.pop()
if isinstance(token, tokens.TemplateParamEquals):
key = self._pop()
showkey = True
self._push()
elif isinstance(token, (tokens.TemplateParamSeparator,
tokens.TemplateClose)):
self._tokens.append(token)
value = self._pop()
if key is None:
key = Wikicode(SmartList([Text(str(default))]))
return Parameter(key, value, showkey)
else:
self._write(self._handle_token(token))
raise ParserError("_handle_parameter() missed a close token") |
def _bnd(self, xloc, left, right, cache):
"""
Distribution bounds.
Example:
>>> print(chaospy.Uniform().range([-2, 0, 2, 4]))
[[0. 0. 0. 0.]
[1. 1. 1. 1.]]
>>> print(chaospy.Trunc(chaospy.Uniform(), 0.6).range([-2, 0, 2, 4]))
[[0. 0. 0. 0. ]
[0.6 0.6 0.6 0.6]]
>>> print(chaospy.Trunc(0.4, chaospy.Uniform()).range([-2, 0, 2, 4]))
[[0.4 0.4 0.4 0.4]
[1. 1. 1. 1. ]]
"""
if isinstance(left, Dist):
if left in cache:
left = cache[left]
else:
left = evaluation.evaluate_bound(left, xloc, cache=cache)
else:
left = (numpy.array(left).T * numpy.ones((2,)+xloc.shape).T).T
if isinstance(right, Dist):
if right in cache:
right = cache[right]
else:
right = evaluation.evaluate_bound(right, xloc, cache=cache)
else:
right = (numpy.array(right).T * numpy.ones((2,)+xloc.shape).T).T
return left[0], right[1] | Distribution bounds.
Example:
>>> print(chaospy.Uniform().range([-2, 0, 2, 4]))
[[0. 0. 0. 0.]
[1. 1. 1. 1.]]
>>> print(chaospy.Trunc(chaospy.Uniform(), 0.6).range([-2, 0, 2, 4]))
[[0. 0. 0. 0. ]
[0.6 0.6 0.6 0.6]]
>>> print(chaospy.Trunc(0.4, chaospy.Uniform()).range([-2, 0, 2, 4]))
[[0.4 0.4 0.4 0.4]
[1. 1. 1. 1. ]] | Below is the the instruction that describes the task:
### Input:
Distribution bounds.
Example:
>>> print(chaospy.Uniform().range([-2, 0, 2, 4]))
[[0. 0. 0. 0.]
[1. 1. 1. 1.]]
>>> print(chaospy.Trunc(chaospy.Uniform(), 0.6).range([-2, 0, 2, 4]))
[[0. 0. 0. 0. ]
[0.6 0.6 0.6 0.6]]
>>> print(chaospy.Trunc(0.4, chaospy.Uniform()).range([-2, 0, 2, 4]))
[[0.4 0.4 0.4 0.4]
[1. 1. 1. 1. ]]
### Response:
def _bnd(self, xloc, left, right, cache):
"""
Distribution bounds.
Example:
>>> print(chaospy.Uniform().range([-2, 0, 2, 4]))
[[0. 0. 0. 0.]
[1. 1. 1. 1.]]
>>> print(chaospy.Trunc(chaospy.Uniform(), 0.6).range([-2, 0, 2, 4]))
[[0. 0. 0. 0. ]
[0.6 0.6 0.6 0.6]]
>>> print(chaospy.Trunc(0.4, chaospy.Uniform()).range([-2, 0, 2, 4]))
[[0.4 0.4 0.4 0.4]
[1. 1. 1. 1. ]]
"""
if isinstance(left, Dist):
if left in cache:
left = cache[left]
else:
left = evaluation.evaluate_bound(left, xloc, cache=cache)
else:
left = (numpy.array(left).T * numpy.ones((2,)+xloc.shape).T).T
if isinstance(right, Dist):
if right in cache:
right = cache[right]
else:
right = evaluation.evaluate_bound(right, xloc, cache=cache)
else:
right = (numpy.array(right).T * numpy.ones((2,)+xloc.shape).T).T
return left[0], right[1] |
def allowMethodWithConditions(self, verb, resource, conditions):
"""Adds an API Gateway method (Http verb + Resource path) to the list of allowed
methods and includes a condition for the policy statement. More on AWS policy
conditions here: http://docs.aws.amazon.com/IAM/latest/UserGuide/reference_policies_elements.html#Condition"""
self._addMethod("Allow", verb, resource, conditions) | Adds an API Gateway method (Http verb + Resource path) to the list of allowed
methods and includes a condition for the policy statement. More on AWS policy
conditions here: http://docs.aws.amazon.com/IAM/latest/UserGuide/reference_policies_elements.html#Condition | Below is the the instruction that describes the task:
### Input:
Adds an API Gateway method (Http verb + Resource path) to the list of allowed
methods and includes a condition for the policy statement. More on AWS policy
conditions here: http://docs.aws.amazon.com/IAM/latest/UserGuide/reference_policies_elements.html#Condition
### Response:
def allowMethodWithConditions(self, verb, resource, conditions):
"""Adds an API Gateway method (Http verb + Resource path) to the list of allowed
methods and includes a condition for the policy statement. More on AWS policy
conditions here: http://docs.aws.amazon.com/IAM/latest/UserGuide/reference_policies_elements.html#Condition"""
self._addMethod("Allow", verb, resource, conditions) |
def _get_request_fields_from_parent(self):
"""Get request fields from the parent serializer."""
if not self.parent:
return None
if not getattr(self.parent, 'request_fields'):
return None
if not isinstance(self.parent.request_fields, dict):
return None
return self.parent.request_fields.get(self.field_name) | Get request fields from the parent serializer. | Below is the the instruction that describes the task:
### Input:
Get request fields from the parent serializer.
### Response:
def _get_request_fields_from_parent(self):
"""Get request fields from the parent serializer."""
if not self.parent:
return None
if not getattr(self.parent, 'request_fields'):
return None
if not isinstance(self.parent.request_fields, dict):
return None
return self.parent.request_fields.get(self.field_name) |
def send(self, sender: PytgbotApiBot):
"""
Send the message via pytgbot.
:param sender: The bot instance to send with.
:type sender: pytgbot.bot.Bot
:rtype: PytgbotApiMessage
"""
return sender.send_video(
# receiver, self.media, disable_notification=self.disable_notification, reply_to_message_id=reply_id
video=self.video, chat_id=self.receiver, reply_to_message_id=self.reply_id, duration=self.duration, width=self.width, height=self.height, thumb=self.thumb, caption=self.caption, parse_mode=self.parse_mode, supports_streaming=self.supports_streaming, disable_notification=self.disable_notification, reply_markup=self.reply_markup
) | Send the message via pytgbot.
:param sender: The bot instance to send with.
:type sender: pytgbot.bot.Bot
:rtype: PytgbotApiMessage | Below is the the instruction that describes the task:
### Input:
Send the message via pytgbot.
:param sender: The bot instance to send with.
:type sender: pytgbot.bot.Bot
:rtype: PytgbotApiMessage
### Response:
def send(self, sender: PytgbotApiBot):
"""
Send the message via pytgbot.
:param sender: The bot instance to send with.
:type sender: pytgbot.bot.Bot
:rtype: PytgbotApiMessage
"""
return sender.send_video(
# receiver, self.media, disable_notification=self.disable_notification, reply_to_message_id=reply_id
video=self.video, chat_id=self.receiver, reply_to_message_id=self.reply_id, duration=self.duration, width=self.width, height=self.height, thumb=self.thumb, caption=self.caption, parse_mode=self.parse_mode, supports_streaming=self.supports_streaming, disable_notification=self.disable_notification, reply_markup=self.reply_markup
) |
def _handle_api(self, handler, handler_args, handler_kwargs):
""" Handle call to subclasses and convert the output to an appropriate value """
try:
status_code, return_value = handler(*handler_args, **handler_kwargs)
except APIError as error:
return error.send()
web.ctx.status = _convert_http_status(status_code)
return _api_convert_output(return_value) | Handle call to subclasses and convert the output to an appropriate value | Below is the the instruction that describes the task:
### Input:
Handle call to subclasses and convert the output to an appropriate value
### Response:
def _handle_api(self, handler, handler_args, handler_kwargs):
""" Handle call to subclasses and convert the output to an appropriate value """
try:
status_code, return_value = handler(*handler_args, **handler_kwargs)
except APIError as error:
return error.send()
web.ctx.status = _convert_http_status(status_code)
return _api_convert_output(return_value) |
def _binop_handler(nodetype):
"""
Factory function for binary operator handlers.
"""
def _handler(toplevel, stack_builders):
right = make_expr(stack_builders)
left = make_expr(stack_builders)
return ast.BinOp(left=left, op=nodetype(), right=right)
return _handler | Factory function for binary operator handlers. | Below is the the instruction that describes the task:
### Input:
Factory function for binary operator handlers.
### Response:
def _binop_handler(nodetype):
"""
Factory function for binary operator handlers.
"""
def _handler(toplevel, stack_builders):
right = make_expr(stack_builders)
left = make_expr(stack_builders)
return ast.BinOp(left=left, op=nodetype(), right=right)
return _handler |
def _update_prx(self):
"""Update `prx` from `phi`, `pi_codon`, and `beta`."""
qx = scipy.ones(N_CODON, dtype='float')
for j in range(3):
for w in range(N_NT):
qx[CODON_NT[j][w]] *= self.phi[w]
frx = self.pi_codon**self.beta
self.prx = frx * qx
with scipy.errstate(divide='raise', under='raise', over='raise',
invalid='raise'):
for r in range(self.nsites):
self.prx[r] /= self.prx[r].sum() | Update `prx` from `phi`, `pi_codon`, and `beta`. | Below is the the instruction that describes the task:
### Input:
Update `prx` from `phi`, `pi_codon`, and `beta`.
### Response:
def _update_prx(self):
"""Update `prx` from `phi`, `pi_codon`, and `beta`."""
qx = scipy.ones(N_CODON, dtype='float')
for j in range(3):
for w in range(N_NT):
qx[CODON_NT[j][w]] *= self.phi[w]
frx = self.pi_codon**self.beta
self.prx = frx * qx
with scipy.errstate(divide='raise', under='raise', over='raise',
invalid='raise'):
for r in range(self.nsites):
self.prx[r] /= self.prx[r].sum() |
def initSchd_1_to_4(self):
""" Initialize first tariff schedule :class:`~ekmmeters.SerialBlock`. """
self.m_schd_1_to_4["reserved_40"] = [6, FieldType.Hex, ScaleType.No, "", 0, False, False]
self.m_schd_1_to_4["Schedule_1_Period_1_Hour"] = [2, FieldType.Int, ScaleType.No, "", 0, False, True]
self.m_schd_1_to_4["Schedule_1_Period_1_Min"] = [2, FieldType.Int, ScaleType.No, "", 0, False, True]
self.m_schd_1_to_4["Schedule_1_Period_1_Tariff"] = [2, FieldType.Int, ScaleType.No, "", 0, False, True]
self.m_schd_1_to_4["Schedule_1_Period_2_Hour"] = [2, FieldType.Int, ScaleType.No, "", 0, False, True]
self.m_schd_1_to_4["Schedule_1_Period_2_Min"] = [2, FieldType.Int, ScaleType.No, "", 0, False, True]
self.m_schd_1_to_4["Schedule_1_Period_2_Tariff"] = [2, FieldType.Int, ScaleType.No, "", 0, False, True]
self.m_schd_1_to_4["Schedule_1_Period_3_Hour"] = [2, FieldType.Int, ScaleType.No, "", 0, False, True]
self.m_schd_1_to_4["Schedule_1_Period_3_Min"] = [2, FieldType.Int, ScaleType.No, "", 0, False, True]
self.m_schd_1_to_4["Schedule_1_Period_3_Tariff"] = [2, FieldType.Int, ScaleType.No, "", 0, False, True]
self.m_schd_1_to_4["Schedule_1_Period_4_Hour"] = [2, FieldType.Int, ScaleType.No, "", 0, False, True]
self.m_schd_1_to_4["Schedule_1_Period_4_Min"] = [2, FieldType.Int, ScaleType.No, "", 0, False, True]
self.m_schd_1_to_4["Schedule_1_Period_4_Tariff"] = [2, FieldType.Int, ScaleType.No, "", 0, False, True]
self.m_schd_1_to_4["reserved_41"] = [24, FieldType.Hex, ScaleType.No, "", 0, False, True]
self.m_schd_1_to_4["Schedule_2_Period_1_Hour"] = [2, FieldType.Int, ScaleType.No, "", 0, False, True]
self.m_schd_1_to_4["Schedule_2_Period_1_Min"] = [2, FieldType.Int, ScaleType.No, "", 0, False, True]
self.m_schd_1_to_4["Schedule_2_Period_1_Tariff"] = [2, FieldType.Int, ScaleType.No, "", 0, False, True]
self.m_schd_1_to_4["Schedule_2_Period_2_Hour"] = [2, FieldType.Int, ScaleType.No, "", 0, False, True]
self.m_schd_1_to_4["Schedule_2_Period_2_Min"] = [2, FieldType.Int, ScaleType.No, "", 0, False, True]
self.m_schd_1_to_4["Schedule_2_Period_2_Tariff"] = [2, FieldType.Int, ScaleType.No, "", 0, False, True]
self.m_schd_1_to_4["Schedule_2_Period_3_Hour"] = [2, FieldType.Int, ScaleType.No, "", 0, False, True]
self.m_schd_1_to_4["Schedule_2_Period_3_Min"] = [2, FieldType.Int, ScaleType.No, "", 0, False, True]
self.m_schd_1_to_4["Schedule_2_Period_3_Tariff"] = [2, FieldType.Int, ScaleType.No, "", 0, False, True]
self.m_schd_1_to_4["Schedule_2_Period_4_Hour"] = [2, FieldType.Int, ScaleType.No, "", 0, False, True]
self.m_schd_1_to_4["Schedule_2_Period_4_Min"] = [2, FieldType.Int, ScaleType.No, "", 0, False, True]
self.m_schd_1_to_4["Schedule_2_Period_4_Tariff"] = [2, FieldType.Int, ScaleType.No, "", 0, False, True]
self.m_schd_1_to_4["reserved_42"] = [24, FieldType.Hex, ScaleType.No, "", 0, False, True]
self.m_schd_1_to_4["Schedule_3_Period_1_Hour"] = [2, FieldType.Int, ScaleType.No, "", 0, False, True]
self.m_schd_1_to_4["Schedule_3_Period_1_Min"] = [2, FieldType.Int, ScaleType.No, "", 0, False, True]
self.m_schd_1_to_4["Schedule_3_Period_1_Tariff"] = [2, FieldType.Int, ScaleType.No, "", 0, False, True]
self.m_schd_1_to_4["Schedule_3_Period_2_Hour"] = [2, FieldType.Int, ScaleType.No, "", 0, False, True]
self.m_schd_1_to_4["Schedule_3_Period_2_Min"] = [2, FieldType.Int, ScaleType.No, "", 0, False, True]
self.m_schd_1_to_4["Schedule_3_Period_2_Tariff"] = [2, FieldType.Int, ScaleType.No, "", 0, False, True]
self.m_schd_1_to_4["Schedule_3_Period_3_Hour"] = [2, FieldType.Int, ScaleType.No, "", 0, False, True]
self.m_schd_1_to_4["Schedule_3_Period_3_Min"] = [2, FieldType.Int, ScaleType.No, "", 0, False, True]
self.m_schd_1_to_4["Schedule_3_Period_3_Tariff"] = [2, FieldType.Int, ScaleType.No, "", 0, False, True]
self.m_schd_1_to_4["Schedule_3_Period_4_Hour"] = [2, FieldType.Int, ScaleType.No, "", 0, False, True]
self.m_schd_1_to_4["Schedule_3_Period_4_Min"] = [2, FieldType.Int, ScaleType.No, "", 0, False, True]
self.m_schd_1_to_4["Schedule_3_Period_4_Tariff"] = [2, FieldType.Int, ScaleType.No, "", 0, False, True]
self.m_schd_1_to_4["reserved_43"] = [24, FieldType.Hex, ScaleType.No, "", 0, False, True]
self.m_schd_1_to_4["Schedule_4_Period_1_Hour"] = [2, FieldType.Int, ScaleType.No, "", 0, False, True]
self.m_schd_1_to_4["Schedule_4_Period_1_Min"] = [2, FieldType.Int, ScaleType.No, "", 0, False, True]
self.m_schd_1_to_4["Schedule_4_Period_1_Tariff"] = [2, FieldType.Int, ScaleType.No, "", 0, False, True]
self.m_schd_1_to_4["Schedule_4_Period_2_Hour"] = [2, FieldType.Int, ScaleType.No, "", 0, False, True]
self.m_schd_1_to_4["Schedule_4_Period_2_Min"] = [2, FieldType.Int, ScaleType.No, "", 0, False, True]
self.m_schd_1_to_4["Schedule_4_Period_2_Tariff"] = [2, FieldType.Int, ScaleType.No, "", 0, False, True]
self.m_schd_1_to_4["Schedule_4_Period_3_Hour"] = [2, FieldType.Int, ScaleType.No, "", 0, False, True]
self.m_schd_1_to_4["Schedule_4_Period_3_Min"] = [2, FieldType.Int, ScaleType.No, "", 0, False, True]
self.m_schd_1_to_4["Schedule_4_Period_3_Tariff"] = [2, FieldType.Int, ScaleType.No, "", 0, False, True]
self.m_schd_1_to_4["Schedule_4_Period_4_Hour"] = [2, FieldType.Int, ScaleType.No, "", 0, False, True]
self.m_schd_1_to_4["Schedule_4_Period_4_Min"] = [2, FieldType.Int, ScaleType.No, "", 0, False, True]
self.m_schd_1_to_4["Schedule_4_Period_4_Tariff"] = [2, FieldType.Int, ScaleType.No, "", 0, False, True]
self.m_schd_1_to_4["reserved_44"] = [79, FieldType.Hex, ScaleType.No, "", 0, False, True]
self.m_schd_1_to_4["crc16"] = [2, FieldType.Hex, ScaleType.No, "", 0, False, False]
pass | Initialize first tariff schedule :class:`~ekmmeters.SerialBlock`. | Below is the the instruction that describes the task:
### Input:
Initialize first tariff schedule :class:`~ekmmeters.SerialBlock`.
### Response:
def initSchd_1_to_4(self):
""" Initialize first tariff schedule :class:`~ekmmeters.SerialBlock`. """
self.m_schd_1_to_4["reserved_40"] = [6, FieldType.Hex, ScaleType.No, "", 0, False, False]
self.m_schd_1_to_4["Schedule_1_Period_1_Hour"] = [2, FieldType.Int, ScaleType.No, "", 0, False, True]
self.m_schd_1_to_4["Schedule_1_Period_1_Min"] = [2, FieldType.Int, ScaleType.No, "", 0, False, True]
self.m_schd_1_to_4["Schedule_1_Period_1_Tariff"] = [2, FieldType.Int, ScaleType.No, "", 0, False, True]
self.m_schd_1_to_4["Schedule_1_Period_2_Hour"] = [2, FieldType.Int, ScaleType.No, "", 0, False, True]
self.m_schd_1_to_4["Schedule_1_Period_2_Min"] = [2, FieldType.Int, ScaleType.No, "", 0, False, True]
self.m_schd_1_to_4["Schedule_1_Period_2_Tariff"] = [2, FieldType.Int, ScaleType.No, "", 0, False, True]
self.m_schd_1_to_4["Schedule_1_Period_3_Hour"] = [2, FieldType.Int, ScaleType.No, "", 0, False, True]
self.m_schd_1_to_4["Schedule_1_Period_3_Min"] = [2, FieldType.Int, ScaleType.No, "", 0, False, True]
self.m_schd_1_to_4["Schedule_1_Period_3_Tariff"] = [2, FieldType.Int, ScaleType.No, "", 0, False, True]
self.m_schd_1_to_4["Schedule_1_Period_4_Hour"] = [2, FieldType.Int, ScaleType.No, "", 0, False, True]
self.m_schd_1_to_4["Schedule_1_Period_4_Min"] = [2, FieldType.Int, ScaleType.No, "", 0, False, True]
self.m_schd_1_to_4["Schedule_1_Period_4_Tariff"] = [2, FieldType.Int, ScaleType.No, "", 0, False, True]
self.m_schd_1_to_4["reserved_41"] = [24, FieldType.Hex, ScaleType.No, "", 0, False, True]
self.m_schd_1_to_4["Schedule_2_Period_1_Hour"] = [2, FieldType.Int, ScaleType.No, "", 0, False, True]
self.m_schd_1_to_4["Schedule_2_Period_1_Min"] = [2, FieldType.Int, ScaleType.No, "", 0, False, True]
self.m_schd_1_to_4["Schedule_2_Period_1_Tariff"] = [2, FieldType.Int, ScaleType.No, "", 0, False, True]
self.m_schd_1_to_4["Schedule_2_Period_2_Hour"] = [2, FieldType.Int, ScaleType.No, "", 0, False, True]
self.m_schd_1_to_4["Schedule_2_Period_2_Min"] = [2, FieldType.Int, ScaleType.No, "", 0, False, True]
self.m_schd_1_to_4["Schedule_2_Period_2_Tariff"] = [2, FieldType.Int, ScaleType.No, "", 0, False, True]
self.m_schd_1_to_4["Schedule_2_Period_3_Hour"] = [2, FieldType.Int, ScaleType.No, "", 0, False, True]
self.m_schd_1_to_4["Schedule_2_Period_3_Min"] = [2, FieldType.Int, ScaleType.No, "", 0, False, True]
self.m_schd_1_to_4["Schedule_2_Period_3_Tariff"] = [2, FieldType.Int, ScaleType.No, "", 0, False, True]
self.m_schd_1_to_4["Schedule_2_Period_4_Hour"] = [2, FieldType.Int, ScaleType.No, "", 0, False, True]
self.m_schd_1_to_4["Schedule_2_Period_4_Min"] = [2, FieldType.Int, ScaleType.No, "", 0, False, True]
self.m_schd_1_to_4["Schedule_2_Period_4_Tariff"] = [2, FieldType.Int, ScaleType.No, "", 0, False, True]
self.m_schd_1_to_4["reserved_42"] = [24, FieldType.Hex, ScaleType.No, "", 0, False, True]
self.m_schd_1_to_4["Schedule_3_Period_1_Hour"] = [2, FieldType.Int, ScaleType.No, "", 0, False, True]
self.m_schd_1_to_4["Schedule_3_Period_1_Min"] = [2, FieldType.Int, ScaleType.No, "", 0, False, True]
self.m_schd_1_to_4["Schedule_3_Period_1_Tariff"] = [2, FieldType.Int, ScaleType.No, "", 0, False, True]
self.m_schd_1_to_4["Schedule_3_Period_2_Hour"] = [2, FieldType.Int, ScaleType.No, "", 0, False, True]
self.m_schd_1_to_4["Schedule_3_Period_2_Min"] = [2, FieldType.Int, ScaleType.No, "", 0, False, True]
self.m_schd_1_to_4["Schedule_3_Period_2_Tariff"] = [2, FieldType.Int, ScaleType.No, "", 0, False, True]
self.m_schd_1_to_4["Schedule_3_Period_3_Hour"] = [2, FieldType.Int, ScaleType.No, "", 0, False, True]
self.m_schd_1_to_4["Schedule_3_Period_3_Min"] = [2, FieldType.Int, ScaleType.No, "", 0, False, True]
self.m_schd_1_to_4["Schedule_3_Period_3_Tariff"] = [2, FieldType.Int, ScaleType.No, "", 0, False, True]
self.m_schd_1_to_4["Schedule_3_Period_4_Hour"] = [2, FieldType.Int, ScaleType.No, "", 0, False, True]
self.m_schd_1_to_4["Schedule_3_Period_4_Min"] = [2, FieldType.Int, ScaleType.No, "", 0, False, True]
self.m_schd_1_to_4["Schedule_3_Period_4_Tariff"] = [2, FieldType.Int, ScaleType.No, "", 0, False, True]
self.m_schd_1_to_4["reserved_43"] = [24, FieldType.Hex, ScaleType.No, "", 0, False, True]
self.m_schd_1_to_4["Schedule_4_Period_1_Hour"] = [2, FieldType.Int, ScaleType.No, "", 0, False, True]
self.m_schd_1_to_4["Schedule_4_Period_1_Min"] = [2, FieldType.Int, ScaleType.No, "", 0, False, True]
self.m_schd_1_to_4["Schedule_4_Period_1_Tariff"] = [2, FieldType.Int, ScaleType.No, "", 0, False, True]
self.m_schd_1_to_4["Schedule_4_Period_2_Hour"] = [2, FieldType.Int, ScaleType.No, "", 0, False, True]
self.m_schd_1_to_4["Schedule_4_Period_2_Min"] = [2, FieldType.Int, ScaleType.No, "", 0, False, True]
self.m_schd_1_to_4["Schedule_4_Period_2_Tariff"] = [2, FieldType.Int, ScaleType.No, "", 0, False, True]
self.m_schd_1_to_4["Schedule_4_Period_3_Hour"] = [2, FieldType.Int, ScaleType.No, "", 0, False, True]
self.m_schd_1_to_4["Schedule_4_Period_3_Min"] = [2, FieldType.Int, ScaleType.No, "", 0, False, True]
self.m_schd_1_to_4["Schedule_4_Period_3_Tariff"] = [2, FieldType.Int, ScaleType.No, "", 0, False, True]
self.m_schd_1_to_4["Schedule_4_Period_4_Hour"] = [2, FieldType.Int, ScaleType.No, "", 0, False, True]
self.m_schd_1_to_4["Schedule_4_Period_4_Min"] = [2, FieldType.Int, ScaleType.No, "", 0, False, True]
self.m_schd_1_to_4["Schedule_4_Period_4_Tariff"] = [2, FieldType.Int, ScaleType.No, "", 0, False, True]
self.m_schd_1_to_4["reserved_44"] = [79, FieldType.Hex, ScaleType.No, "", 0, False, True]
self.m_schd_1_to_4["crc16"] = [2, FieldType.Hex, ScaleType.No, "", 0, False, False]
pass |
def embedded_tweet(self):
"""
Get the retweeted Tweet OR the quoted Tweet and return it as a Tweet object
Returns:
Tweet (or None, if the Tweet is neither a quote tweet or a Retweet):
a Tweet representing the quote Tweet or the Retweet
(see tweet_embeds.get_embedded_tweet, this is that value as a Tweet)
Raises:
NotATweetError: if embedded tweet is malformed
"""
embedded_tweet = tweet_embeds.get_embedded_tweet(self)
if embedded_tweet is not None:
try:
return Tweet(embedded_tweet)
except NotATweetError as nate:
raise(NotATweetError("The embedded tweet payload {} appears malformed." +
" Failed with '{}'".format(embedded_tweet, nate)))
else:
return None | Get the retweeted Tweet OR the quoted Tweet and return it as a Tweet object
Returns:
Tweet (or None, if the Tweet is neither a quote tweet or a Retweet):
a Tweet representing the quote Tweet or the Retweet
(see tweet_embeds.get_embedded_tweet, this is that value as a Tweet)
Raises:
NotATweetError: if embedded tweet is malformed | Below is the the instruction that describes the task:
### Input:
Get the retweeted Tweet OR the quoted Tweet and return it as a Tweet object
Returns:
Tweet (or None, if the Tweet is neither a quote tweet or a Retweet):
a Tweet representing the quote Tweet or the Retweet
(see tweet_embeds.get_embedded_tweet, this is that value as a Tweet)
Raises:
NotATweetError: if embedded tweet is malformed
### Response:
def embedded_tweet(self):
"""
Get the retweeted Tweet OR the quoted Tweet and return it as a Tweet object
Returns:
Tweet (or None, if the Tweet is neither a quote tweet or a Retweet):
a Tweet representing the quote Tweet or the Retweet
(see tweet_embeds.get_embedded_tweet, this is that value as a Tweet)
Raises:
NotATweetError: if embedded tweet is malformed
"""
embedded_tweet = tweet_embeds.get_embedded_tweet(self)
if embedded_tweet is not None:
try:
return Tweet(embedded_tweet)
except NotATweetError as nate:
raise(NotATweetError("The embedded tweet payload {} appears malformed." +
" Failed with '{}'".format(embedded_tweet, nate)))
else:
return None |
def reinitialize_all_clients(self):
"""
Send a new initialize message to each LSP server when the project
path has changed so they can update the respective server root paths.
"""
for language in self.clients:
language_client = self.clients[language]
if language_client['status'] == self.RUNNING:
folder = self.get_root_path(language)
instance = language_client['instance']
instance.folder = folder
instance.initialize() | Send a new initialize message to each LSP server when the project
path has changed so they can update the respective server root paths. | Below is the the instruction that describes the task:
### Input:
Send a new initialize message to each LSP server when the project
path has changed so they can update the respective server root paths.
### Response:
def reinitialize_all_clients(self):
"""
Send a new initialize message to each LSP server when the project
path has changed so they can update the respective server root paths.
"""
for language in self.clients:
language_client = self.clients[language]
if language_client['status'] == self.RUNNING:
folder = self.get_root_path(language)
instance = language_client['instance']
instance.folder = folder
instance.initialize() |
def parse(self, tokens):
"""Sequence(a) -> b
Applies the parser to a sequence of tokens producing a parsing result.
It provides a way to invoke a parser hiding details related to the
parser state. Also it makes error messages more readable by specifying
the position of the rightmost token that has been reached.
"""
try:
(tree, _) = self.run(tokens, State())
return tree
except NoParseError, e:
max = e.state.max
if len(tokens) > max:
tok = tokens[max]
else:
tok = u'<EOF>'
raise NoParseError(u'%s: %s' % (e.msg, tok), e.state) | Sequence(a) -> b
Applies the parser to a sequence of tokens producing a parsing result.
It provides a way to invoke a parser hiding details related to the
parser state. Also it makes error messages more readable by specifying
the position of the rightmost token that has been reached. | Below is the the instruction that describes the task:
### Input:
Sequence(a) -> b
Applies the parser to a sequence of tokens producing a parsing result.
It provides a way to invoke a parser hiding details related to the
parser state. Also it makes error messages more readable by specifying
the position of the rightmost token that has been reached.
### Response:
def parse(self, tokens):
"""Sequence(a) -> b
Applies the parser to a sequence of tokens producing a parsing result.
It provides a way to invoke a parser hiding details related to the
parser state. Also it makes error messages more readable by specifying
the position of the rightmost token that has been reached.
"""
try:
(tree, _) = self.run(tokens, State())
return tree
except NoParseError, e:
max = e.state.max
if len(tokens) > max:
tok = tokens[max]
else:
tok = u'<EOF>'
raise NoParseError(u'%s: %s' % (e.msg, tok), e.state) |
def initialize(self, gyro_rate, slices=None, skip_estimation=False):
"""Prepare calibrator for calibration
This method does three things:
1. Create slices from the video stream, if not already provided
2. Estimate time offset
3. Estimate rotation between camera and gyroscope
Parameters
------------------
gyro_rate : float
Estimated gyroscope sample rate
slices : list of Slice, optional
Slices to use for optimization
skip_estimation : bool
Do not estimate initial time offset and rotation.
Raises
--------------------
InitializationError
If the initialization fails
"""
self.params['user']['gyro_rate'] = gyro_rate
for p in ('gbias_x', 'gbias_y', 'gbias_z'):
self.params['initialized'][p] = 0.0
if slices is not None:
self.slices = slices
if self.slices is None:
self.slices = videoslice.Slice.from_stream_randomly(self.video)
logger.debug("Number of slices: {:d}".format(len(self.slices)))
if len(self.slices) < 2:
logger.error("Calibration requires at least 2 video slices to proceed, got %d", len(self.slices))
raise InitializationError("Calibration requires at least 2 video slices to proceed, got {:d}".format(len(self.slices)))
if not skip_estimation:
time_offset = self.find_initial_offset()
# TODO: Detect when time offset initialization fails, and raise InitializationError
R = self.find_initial_rotation()
if R is None:
raise InitializationError("Failed to calculate initial rotation") | Prepare calibrator for calibration
This method does three things:
1. Create slices from the video stream, if not already provided
2. Estimate time offset
3. Estimate rotation between camera and gyroscope
Parameters
------------------
gyro_rate : float
Estimated gyroscope sample rate
slices : list of Slice, optional
Slices to use for optimization
skip_estimation : bool
Do not estimate initial time offset and rotation.
Raises
--------------------
InitializationError
If the initialization fails | Below is the the instruction that describes the task:
### Input:
Prepare calibrator for calibration
This method does three things:
1. Create slices from the video stream, if not already provided
2. Estimate time offset
3. Estimate rotation between camera and gyroscope
Parameters
------------------
gyro_rate : float
Estimated gyroscope sample rate
slices : list of Slice, optional
Slices to use for optimization
skip_estimation : bool
Do not estimate initial time offset and rotation.
Raises
--------------------
InitializationError
If the initialization fails
### Response:
def initialize(self, gyro_rate, slices=None, skip_estimation=False):
"""Prepare calibrator for calibration
This method does three things:
1. Create slices from the video stream, if not already provided
2. Estimate time offset
3. Estimate rotation between camera and gyroscope
Parameters
------------------
gyro_rate : float
Estimated gyroscope sample rate
slices : list of Slice, optional
Slices to use for optimization
skip_estimation : bool
Do not estimate initial time offset and rotation.
Raises
--------------------
InitializationError
If the initialization fails
"""
self.params['user']['gyro_rate'] = gyro_rate
for p in ('gbias_x', 'gbias_y', 'gbias_z'):
self.params['initialized'][p] = 0.0
if slices is not None:
self.slices = slices
if self.slices is None:
self.slices = videoslice.Slice.from_stream_randomly(self.video)
logger.debug("Number of slices: {:d}".format(len(self.slices)))
if len(self.slices) < 2:
logger.error("Calibration requires at least 2 video slices to proceed, got %d", len(self.slices))
raise InitializationError("Calibration requires at least 2 video slices to proceed, got {:d}".format(len(self.slices)))
if not skip_estimation:
time_offset = self.find_initial_offset()
# TODO: Detect when time offset initialization fails, and raise InitializationError
R = self.find_initial_rotation()
if R is None:
raise InitializationError("Failed to calculate initial rotation") |
def highshelf(self, gain=-20.0, frequency=3000, slope=0.5):
"""highshelf takes 3 parameters: a signed number for gain or
attenuation in dB, filter frequency in Hz and slope (default=0.5).
Beware of clipping when using positive gain.
"""
self.command.append('treble')
self.command.append(gain)
self.command.append(frequency)
self.command.append(slope)
return self | highshelf takes 3 parameters: a signed number for gain or
attenuation in dB, filter frequency in Hz and slope (default=0.5).
Beware of clipping when using positive gain. | Below is the the instruction that describes the task:
### Input:
highshelf takes 3 parameters: a signed number for gain or
attenuation in dB, filter frequency in Hz and slope (default=0.5).
Beware of clipping when using positive gain.
### Response:
def highshelf(self, gain=-20.0, frequency=3000, slope=0.5):
"""highshelf takes 3 parameters: a signed number for gain or
attenuation in dB, filter frequency in Hz and slope (default=0.5).
Beware of clipping when using positive gain.
"""
self.command.append('treble')
self.command.append(gain)
self.command.append(frequency)
self.command.append(slope)
return self |
def wr_txt_gos(self, fout_txt, **kws_usr):
"""Write an Excel spreadsheet with user GO ids, grouped under broader GO terms."""
# Keyword arguments: control content: hdrgo_prt section_prt top_n use_sections
desc2nts = self.sortobj.get_desc2nts(**kws_usr)
# Keyword arguments: control txt format
self.wr_txt_nts(fout_txt, desc2nts)
return desc2nts | Write an Excel spreadsheet with user GO ids, grouped under broader GO terms. | Below is the the instruction that describes the task:
### Input:
Write an Excel spreadsheet with user GO ids, grouped under broader GO terms.
### Response:
def wr_txt_gos(self, fout_txt, **kws_usr):
"""Write an Excel spreadsheet with user GO ids, grouped under broader GO terms."""
# Keyword arguments: control content: hdrgo_prt section_prt top_n use_sections
desc2nts = self.sortobj.get_desc2nts(**kws_usr)
# Keyword arguments: control txt format
self.wr_txt_nts(fout_txt, desc2nts)
return desc2nts |
def deploy_image(self, vcenter_data_model, image_params, logger):
"""
Receives ovf image parameters and deploy it on the designated vcenter
:param VMwarevCenterResourceModel vcenter_data_model:
:type image_params: vCenterShell.vm.ovf_image_params.OvfImageParams
:param logger:
"""
ovf_tool_exe_path = vcenter_data_model.ovf_tool_path
self._validate_url_exists(ovf_tool_exe_path, 'OVF Tool', logger)
args = self._get_args(ovf_tool_exe_path, image_params, logger)
logger.debug('opening ovf tool process with the params: {0}'.format(','.join(args)))
process = subprocess.Popen(args, shell=True, stdin=subprocess.PIPE, stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
logger.debug('communicating with ovf tool')
result = process.communicate()
process.stdin.close()
if result:
res = '\n\r'.join(result)
else:
if image_params.user_arguments.find('--quiet') == -1:
raise Exception('no result has return from the ovftool')
res = COMPLETED_SUCCESSFULLY
logger.info('communication with ovf tool results: {0}'.format(res))
if res.find(COMPLETED_SUCCESSFULLY) > -1:
return True
image_params.connectivity.password = '******'
args_for_error = ' '.join(self._get_args(ovf_tool_exe_path, image_params, logger))
logger.error('error deploying image with the args: {0}, error: {1}'.format(args_for_error, res))
raise Exception('error deploying image with the args: {0}, error: {1}'.format(args_for_error, res)) | Receives ovf image parameters and deploy it on the designated vcenter
:param VMwarevCenterResourceModel vcenter_data_model:
:type image_params: vCenterShell.vm.ovf_image_params.OvfImageParams
:param logger: | Below is the the instruction that describes the task:
### Input:
Receives ovf image parameters and deploy it on the designated vcenter
:param VMwarevCenterResourceModel vcenter_data_model:
:type image_params: vCenterShell.vm.ovf_image_params.OvfImageParams
:param logger:
### Response:
def deploy_image(self, vcenter_data_model, image_params, logger):
"""
Receives ovf image parameters and deploy it on the designated vcenter
:param VMwarevCenterResourceModel vcenter_data_model:
:type image_params: vCenterShell.vm.ovf_image_params.OvfImageParams
:param logger:
"""
ovf_tool_exe_path = vcenter_data_model.ovf_tool_path
self._validate_url_exists(ovf_tool_exe_path, 'OVF Tool', logger)
args = self._get_args(ovf_tool_exe_path, image_params, logger)
logger.debug('opening ovf tool process with the params: {0}'.format(','.join(args)))
process = subprocess.Popen(args, shell=True, stdin=subprocess.PIPE, stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
logger.debug('communicating with ovf tool')
result = process.communicate()
process.stdin.close()
if result:
res = '\n\r'.join(result)
else:
if image_params.user_arguments.find('--quiet') == -1:
raise Exception('no result has return from the ovftool')
res = COMPLETED_SUCCESSFULLY
logger.info('communication with ovf tool results: {0}'.format(res))
if res.find(COMPLETED_SUCCESSFULLY) > -1:
return True
image_params.connectivity.password = '******'
args_for_error = ' '.join(self._get_args(ovf_tool_exe_path, image_params, logger))
logger.error('error deploying image with the args: {0}, error: {1}'.format(args_for_error, res))
raise Exception('error deploying image with the args: {0}, error: {1}'.format(args_for_error, res)) |
def _init_code(self, code: int) -> None:
""" Initialize from an int terminal code. """
if -1 < code < 256:
self.code = '{:02}'.format(code)
self.hexval = term2hex(code)
self.rgb = hex2rgb(self.hexval)
else:
raise ValueError(' '.join((
'Code must be in the range 0-255, inclusive.',
'Got: {} ({})'
)).format(code, getattr(code, '__name__', type(code).__name__))) | Initialize from an int terminal code. | Below is the the instruction that describes the task:
### Input:
Initialize from an int terminal code.
### Response:
def _init_code(self, code: int) -> None:
""" Initialize from an int terminal code. """
if -1 < code < 256:
self.code = '{:02}'.format(code)
self.hexval = term2hex(code)
self.rgb = hex2rgb(self.hexval)
else:
raise ValueError(' '.join((
'Code must be in the range 0-255, inclusive.',
'Got: {} ({})'
)).format(code, getattr(code, '__name__', type(code).__name__))) |
def src_process(state, input_data, output_data, ratio, end_of_input=0):
"""Standard processing function.
Returns non zero on error.
"""
input_frames, _ = _check_data(input_data)
output_frames, _ = _check_data(output_data)
data = ffi.new('SRC_DATA*')
data.input_frames = input_frames
data.output_frames = output_frames
data.src_ratio = ratio
data.data_in = ffi.cast('float*', ffi.from_buffer(input_data))
data.data_out = ffi.cast('float*', ffi.from_buffer(output_data))
data.end_of_input = end_of_input
error = _lib.src_process(state, data)
return error, data.input_frames_used, data.output_frames_gen | Standard processing function.
Returns non zero on error. | Below is the the instruction that describes the task:
### Input:
Standard processing function.
Returns non zero on error.
### Response:
def src_process(state, input_data, output_data, ratio, end_of_input=0):
"""Standard processing function.
Returns non zero on error.
"""
input_frames, _ = _check_data(input_data)
output_frames, _ = _check_data(output_data)
data = ffi.new('SRC_DATA*')
data.input_frames = input_frames
data.output_frames = output_frames
data.src_ratio = ratio
data.data_in = ffi.cast('float*', ffi.from_buffer(input_data))
data.data_out = ffi.cast('float*', ffi.from_buffer(output_data))
data.end_of_input = end_of_input
error = _lib.src_process(state, data)
return error, data.input_frames_used, data.output_frames_gen |
def func_from_string(callable_str):
"""Return a live function from a full dotted path. Must be either a plain function
directly in a module, a class function, or a static function. (No modules, classes,
or instance methods, since those can't be called as tasks.)"""
components = callable_str.split('.')
func = None
if len(components) < 2:
raise ValueError("Need full dotted path to task function")
elif len(components) == 2:
mod_name = components[0]
func_name = components[1]
try:
mod = import_module(mod_name)
except ModuleNotFoundError:
raise ValueError(f"Module {mod_name} not found")
func = get_member(mod, func_name)
if func is None:
raise ValueError(f"{func_name} is not a member of {mod_name}")
else:
mod_name = '.'.join(components[:-1])
func_name = components[-1]
try:
mod = import_module(mod_name)
except ModuleNotFoundError:
mod_name = '.'.join(components[:-2])
class_name = components[-2]
try:
mod = import_module(mod_name)
except ModuleNotFoundError:
raise ValueError(f"Module {mod_name} not found")
klass = get_member(mod, class_name)
if klass is None:
raise ValueError(f"Class {class_name} is not a member of {mod_name}")
func = get_member(klass, func_name)
if func is None:
raise ValueError(f"Function {func_name} is not a member of {mod_name}.{class_name}")
if func is None:
func = get_member(mod, func_name)
if func is None:
raise ValueError(f"Function {func_name} is not a member of {mod_name}")
if inspect.ismodule(func):
raise ValueError("Cannot call module directly")
if inspect.isclass(func):
raise ValueError("Cannot call class directly")
try:
sig = [x for x in inspect.signature(func).parameters]
except TypeError:
raise ValueError(f"{callable_str} ({str(type(func))[1:-1]}) is not a callable object")
if len(sig) == 1:
if sig[0] == 'message':
return func
else:
raise ValueError("Task function must have one parameter, named 'message'")
elif len(sig)==2 and sig[0]=='self' and sig[1]=='message':
# We only check for the conventional 'self', but if you're using something else,
# you deserve the pain you'll have trying to debug this.
raise ValueError("Can't call instance method without an instance! (Try sisy.models.task_with_callable)")
else:
raise ValueError("Improper signature for task function (needs only 'message')") | Return a live function from a full dotted path. Must be either a plain function
directly in a module, a class function, or a static function. (No modules, classes,
or instance methods, since those can't be called as tasks.) | Below is the the instruction that describes the task:
### Input:
Return a live function from a full dotted path. Must be either a plain function
directly in a module, a class function, or a static function. (No modules, classes,
or instance methods, since those can't be called as tasks.)
### Response:
def func_from_string(callable_str):
"""Return a live function from a full dotted path. Must be either a plain function
directly in a module, a class function, or a static function. (No modules, classes,
or instance methods, since those can't be called as tasks.)"""
components = callable_str.split('.')
func = None
if len(components) < 2:
raise ValueError("Need full dotted path to task function")
elif len(components) == 2:
mod_name = components[0]
func_name = components[1]
try:
mod = import_module(mod_name)
except ModuleNotFoundError:
raise ValueError(f"Module {mod_name} not found")
func = get_member(mod, func_name)
if func is None:
raise ValueError(f"{func_name} is not a member of {mod_name}")
else:
mod_name = '.'.join(components[:-1])
func_name = components[-1]
try:
mod = import_module(mod_name)
except ModuleNotFoundError:
mod_name = '.'.join(components[:-2])
class_name = components[-2]
try:
mod = import_module(mod_name)
except ModuleNotFoundError:
raise ValueError(f"Module {mod_name} not found")
klass = get_member(mod, class_name)
if klass is None:
raise ValueError(f"Class {class_name} is not a member of {mod_name}")
func = get_member(klass, func_name)
if func is None:
raise ValueError(f"Function {func_name} is not a member of {mod_name}.{class_name}")
if func is None:
func = get_member(mod, func_name)
if func is None:
raise ValueError(f"Function {func_name} is not a member of {mod_name}")
if inspect.ismodule(func):
raise ValueError("Cannot call module directly")
if inspect.isclass(func):
raise ValueError("Cannot call class directly")
try:
sig = [x for x in inspect.signature(func).parameters]
except TypeError:
raise ValueError(f"{callable_str} ({str(type(func))[1:-1]}) is not a callable object")
if len(sig) == 1:
if sig[0] == 'message':
return func
else:
raise ValueError("Task function must have one parameter, named 'message'")
elif len(sig)==2 and sig[0]=='self' and sig[1]=='message':
# We only check for the conventional 'self', but if you're using something else,
# you deserve the pain you'll have trying to debug this.
raise ValueError("Can't call instance method without an instance! (Try sisy.models.task_with_callable)")
else:
raise ValueError("Improper signature for task function (needs only 'message')") |
def _bgzip_from_fastq(data):
"""Prepare a bgzipped file from a fastq input, potentially gzipped (or bgzipped already).
"""
in_file = data["in_file"]
if isinstance(in_file, (list, tuple)):
in_file = in_file[0]
needs_convert = dd.get_quality_format(data).lower() == "illumina"
# special case, empty files that have been cleaned
if not objectstore.is_remote(in_file) and os.path.getsize(in_file) == 0:
needs_bgzip, needs_gunzip = False, False
elif in_file.endswith(".gz") and not objectstore.is_remote(in_file):
if needs_convert or dd.get_trim_ends(data):
needs_bgzip, needs_gunzip = True, True
else:
needs_bgzip, needs_gunzip = _check_gzipped_input(in_file, data)
elif in_file.endswith(".bz2"):
needs_bgzip, needs_gunzip = True, True
elif objectstore.is_remote(in_file) and not tz.get_in(["config", "algorithm", "align_split_size"], data):
needs_bgzip, needs_gunzip = False, False
else:
needs_bgzip, needs_gunzip = True, False
work_dir = utils.safe_makedir(os.path.join(data["dirs"]["work"], "align_prep"))
if (needs_bgzip or needs_gunzip or needs_convert or dd.get_trim_ends(data) or
objectstore.is_remote(in_file) or
(isinstance(data["in_file"], (tuple, list)) and len(data["in_file"]) > 1)):
out_file = _bgzip_file(data["in_file"], data["config"], work_dir,
needs_bgzip, needs_gunzip, needs_convert, data)
else:
out_file = os.path.join(work_dir, "%s_%s" % (dd.get_sample_name(data), os.path.basename(in_file)))
out_file = _symlink_or_copy_grabix(in_file, out_file, data)
return out_file | Prepare a bgzipped file from a fastq input, potentially gzipped (or bgzipped already). | Below is the the instruction that describes the task:
### Input:
Prepare a bgzipped file from a fastq input, potentially gzipped (or bgzipped already).
### Response:
def _bgzip_from_fastq(data):
"""Prepare a bgzipped file from a fastq input, potentially gzipped (or bgzipped already).
"""
in_file = data["in_file"]
if isinstance(in_file, (list, tuple)):
in_file = in_file[0]
needs_convert = dd.get_quality_format(data).lower() == "illumina"
# special case, empty files that have been cleaned
if not objectstore.is_remote(in_file) and os.path.getsize(in_file) == 0:
needs_bgzip, needs_gunzip = False, False
elif in_file.endswith(".gz") and not objectstore.is_remote(in_file):
if needs_convert or dd.get_trim_ends(data):
needs_bgzip, needs_gunzip = True, True
else:
needs_bgzip, needs_gunzip = _check_gzipped_input(in_file, data)
elif in_file.endswith(".bz2"):
needs_bgzip, needs_gunzip = True, True
elif objectstore.is_remote(in_file) and not tz.get_in(["config", "algorithm", "align_split_size"], data):
needs_bgzip, needs_gunzip = False, False
else:
needs_bgzip, needs_gunzip = True, False
work_dir = utils.safe_makedir(os.path.join(data["dirs"]["work"], "align_prep"))
if (needs_bgzip or needs_gunzip or needs_convert or dd.get_trim_ends(data) or
objectstore.is_remote(in_file) or
(isinstance(data["in_file"], (tuple, list)) and len(data["in_file"]) > 1)):
out_file = _bgzip_file(data["in_file"], data["config"], work_dir,
needs_bgzip, needs_gunzip, needs_convert, data)
else:
out_file = os.path.join(work_dir, "%s_%s" % (dd.get_sample_name(data), os.path.basename(in_file)))
out_file = _symlink_or_copy_grabix(in_file, out_file, data)
return out_file |
def new(self):
# type: () -> None
'''
A method to create a new UDF Logical Volume Descriptor.
Parameters:
None.
Returns:
Nothing.
'''
if self._initialized:
raise pycdlibexception.PyCdlibInternalError('UDF Logical Volume Descriptor already initialized')
self.desc_tag = UDFTag()
self.desc_tag.new(6) # FIXME: we should let the user set serial_number
self.vol_desc_seqnum = 3
self.desc_char_set = _unicodecharset()
self.logical_vol_ident = _ostaunicode_zero_pad('CDROM', 128)
self.domain_ident = UDFEntityID()
self.domain_ident.new(0, b'*OSTA UDF Compliant', b'\x02\x01\x03')
self.logical_volume_contents_use = UDFLongAD()
self.logical_volume_contents_use.new(4096, 0)
self.impl_ident = UDFEntityID()
self.impl_ident.new(0, b'*pycdlib')
self.implementation_use = b'\x00' * 128 # FIXME: let the user set this
self.integrity_sequence_length = 4096
self.integrity_sequence_extent = 0 # This will get set later
self.partition_map = UDFPartitionMap()
self.partition_map.new()
self._initialized = True | A method to create a new UDF Logical Volume Descriptor.
Parameters:
None.
Returns:
Nothing. | Below is the the instruction that describes the task:
### Input:
A method to create a new UDF Logical Volume Descriptor.
Parameters:
None.
Returns:
Nothing.
### Response:
def new(self):
# type: () -> None
'''
A method to create a new UDF Logical Volume Descriptor.
Parameters:
None.
Returns:
Nothing.
'''
if self._initialized:
raise pycdlibexception.PyCdlibInternalError('UDF Logical Volume Descriptor already initialized')
self.desc_tag = UDFTag()
self.desc_tag.new(6) # FIXME: we should let the user set serial_number
self.vol_desc_seqnum = 3
self.desc_char_set = _unicodecharset()
self.logical_vol_ident = _ostaunicode_zero_pad('CDROM', 128)
self.domain_ident = UDFEntityID()
self.domain_ident.new(0, b'*OSTA UDF Compliant', b'\x02\x01\x03')
self.logical_volume_contents_use = UDFLongAD()
self.logical_volume_contents_use.new(4096, 0)
self.impl_ident = UDFEntityID()
self.impl_ident.new(0, b'*pycdlib')
self.implementation_use = b'\x00' * 128 # FIXME: let the user set this
self.integrity_sequence_length = 4096
self.integrity_sequence_extent = 0 # This will get set later
self.partition_map = UDFPartitionMap()
self.partition_map.new()
self._initialized = True |
def handler(self, event, context):
"""
An AWS Lambda function which parses specific API Gateway input into a
WSGI request, feeds it to our WSGI app, procceses the response, and returns
that back to the API Gateway.
"""
settings = self.settings
# If in DEBUG mode, log all raw incoming events.
if settings.DEBUG:
logger.debug('Zappa Event: {}'.format(event))
# Set any API Gateway defined Stage Variables
# as env vars
if event.get('stageVariables'):
for key in event['stageVariables'].keys():
os.environ[str(key)] = event['stageVariables'][key]
# This is the result of a keep alive, recertify
# or scheduled event.
if event.get('detail-type') == u'Scheduled Event':
whole_function = event['resources'][0].split('/')[-1].split('-')[-1]
# This is a scheduled function.
if '.' in whole_function:
app_function = self.import_module_and_get_function(whole_function)
# Execute the function!
return self.run_function(app_function, event, context)
# Else, let this execute as it were.
# This is a direct command invocation.
elif event.get('command', None):
whole_function = event['command']
app_function = self.import_module_and_get_function(whole_function)
result = self.run_function(app_function, event, context)
print("Result of %s:" % whole_function)
print(result)
return result
# This is a direct, raw python invocation.
# It's _extremely_ important we don't allow this event source
# to be overridden by unsanitized, non-admin user input.
elif event.get('raw_command', None):
raw_command = event['raw_command']
exec(raw_command)
return
# This is a Django management command invocation.
elif event.get('manage', None):
from django.core import management
try: # Support both for tests
from zappa.ext.django_zappa import get_django_wsgi
except ImportError as e: # pragma: no cover
from django_zappa_app import get_django_wsgi
# Get the Django WSGI app from our extension
# We don't actually need the function,
# but we do need to do all of the required setup for it.
app_function = get_django_wsgi(self.settings.DJANGO_SETTINGS)
# Couldn't figure out how to get the value into stdout with StringIO..
# Read the log for now. :[]
management.call_command(*event['manage'].split(' '))
return {}
# This is an AWS-event triggered invocation.
elif event.get('Records', None):
records = event.get('Records')
result = None
whole_function = self.get_function_for_aws_event(records[0])
if whole_function:
app_function = self.import_module_and_get_function(whole_function)
result = self.run_function(app_function, event, context)
logger.debug(result)
else:
logger.error("Cannot find a function to process the triggered event.")
return result
# this is an AWS-event triggered from Lex bot's intent
elif event.get('bot'):
result = None
whole_function = self.get_function_from_bot_intent_trigger(event)
if whole_function:
app_function = self.import_module_and_get_function(whole_function)
result = self.run_function(app_function, event, context)
logger.debug(result)
else:
logger.error("Cannot find a function to process the triggered event.")
return result
# This is an API Gateway authorizer event
elif event.get('type') == u'TOKEN':
whole_function = self.settings.AUTHORIZER_FUNCTION
if whole_function:
app_function = self.import_module_and_get_function(whole_function)
policy = self.run_function(app_function, event, context)
return policy
else:
logger.error("Cannot find a function to process the authorization request.")
raise Exception('Unauthorized')
# This is an AWS Cognito Trigger Event
elif event.get('triggerSource', None):
triggerSource = event.get('triggerSource')
whole_function = self.get_function_for_cognito_trigger(triggerSource)
result = event
if whole_function:
app_function = self.import_module_and_get_function(whole_function)
result = self.run_function(app_function, event, context)
logger.debug(result)
else:
logger.error("Cannot find a function to handle cognito trigger {}".format(triggerSource))
return result
# Normal web app flow
try:
# Timing
time_start = datetime.datetime.now()
# This is a normal HTTP request
if event.get('httpMethod', None):
script_name = ''
is_elb_context = False
headers = merge_headers(event)
if event.get('requestContext', None) and event['requestContext'].get('elb', None):
# Related: https://github.com/Miserlou/Zappa/issues/1715
# inputs/outputs for lambda loadbalancer
# https://docs.aws.amazon.com/elasticloadbalancing/latest/application/lambda-functions.html
is_elb_context = True
# host is lower-case when forwarded from ELB
host = headers.get('host')
# TODO: pathParameters is a first-class citizen in apigateway but not available without
# some parsing work for ELB (is this parameter used for anything?)
event['pathParameters'] = ''
else:
if headers:
host = headers.get('Host')
else:
host = None
logger.debug('host found: [{}]'.format(host))
if host:
if 'amazonaws.com' in host:
logger.debug('amazonaws found in host')
# The path provided in th event doesn't include the
# stage, so we must tell Flask to include the API
# stage in the url it calculates. See https://github.com/Miserlou/Zappa/issues/1014
script_name = '/' + settings.API_STAGE
else:
# This is a test request sent from the AWS console
if settings.DOMAIN:
# Assume the requests received will be on the specified
# domain. No special handling is required
pass
else:
# Assume the requests received will be to the
# amazonaws.com endpoint, so tell Flask to include the
# API stage
script_name = '/' + settings.API_STAGE
base_path = getattr(settings, 'BASE_PATH', None)
# Create the environment for WSGI and handle the request
environ = create_wsgi_request(
event,
script_name=script_name,
base_path=base_path,
trailing_slash=self.trailing_slash,
binary_support=settings.BINARY_SUPPORT,
context_header_mappings=settings.CONTEXT_HEADER_MAPPINGS
)
# We are always on https on Lambda, so tell our wsgi app that.
environ['HTTPS'] = 'on'
environ['wsgi.url_scheme'] = 'https'
environ['lambda.context'] = context
environ['lambda.event'] = event
# Execute the application
with Response.from_app(self.wsgi_app, environ) as response:
# This is the object we're going to return.
# Pack the WSGI response into our special dictionary.
zappa_returndict = dict()
# Issue #1715: ALB support. ALB responses must always include
# base64 encoding and status description
if is_elb_context:
zappa_returndict.setdefault('isBase64Encoded', False)
zappa_returndict.setdefault('statusDescription', response.status)
if response.data:
if settings.BINARY_SUPPORT:
if not response.mimetype.startswith("text/") \
or response.mimetype != "application/json":
zappa_returndict['body'] = base64.b64encode(response.data).decode('utf-8')
zappa_returndict["isBase64Encoded"] = True
else:
zappa_returndict['body'] = response.data
else:
zappa_returndict['body'] = response.get_data(as_text=True)
zappa_returndict['statusCode'] = response.status_code
if 'headers' in event:
zappa_returndict['headers'] = {}
for key, value in response.headers:
zappa_returndict['headers'][key] = value
if 'multiValueHeaders' in event:
zappa_returndict['multiValueHeaders'] = {}
for key, value in response.headers:
zappa_returndict['multiValueHeaders'][key] = response.headers.getlist(key)
# Calculate the total response time,
# and log it in the Common Log format.
time_end = datetime.datetime.now()
delta = time_end - time_start
response_time_ms = delta.total_seconds() * 1000
response.content = response.data
common_log(environ, response, response_time=response_time_ms)
return zappa_returndict
except Exception as e: # pragma: no cover
# Print statements are visible in the logs either way
print(e)
exc_info = sys.exc_info()
message = ('An uncaught exception happened while servicing this request. '
'You can investigate this with the `zappa tail` command.')
# If we didn't even build an app_module, just raise.
if not settings.DJANGO_SETTINGS:
try:
self.app_module
except NameError as ne:
message = 'Failed to import module: {}'.format(ne.message)
# Call exception handler for unhandled exceptions
exception_handler = self.settings.EXCEPTION_HANDLER
self._process_exception(exception_handler=exception_handler,
event=event, context=context, exception=e)
# Return this unspecified exception as a 500, using template that API Gateway expects.
content = collections.OrderedDict()
content['statusCode'] = 500
body = {'message': message}
if settings.DEBUG: # only include traceback if debug is on.
body['traceback'] = traceback.format_exception(*exc_info) # traceback as a list for readability.
content['body'] = json.dumps(str(body), sort_keys=True, indent=4)
return content | An AWS Lambda function which parses specific API Gateway input into a
WSGI request, feeds it to our WSGI app, procceses the response, and returns
that back to the API Gateway. | Below is the the instruction that describes the task:
### Input:
An AWS Lambda function which parses specific API Gateway input into a
WSGI request, feeds it to our WSGI app, procceses the response, and returns
that back to the API Gateway.
### Response:
def handler(self, event, context):
"""
An AWS Lambda function which parses specific API Gateway input into a
WSGI request, feeds it to our WSGI app, procceses the response, and returns
that back to the API Gateway.
"""
settings = self.settings
# If in DEBUG mode, log all raw incoming events.
if settings.DEBUG:
logger.debug('Zappa Event: {}'.format(event))
# Set any API Gateway defined Stage Variables
# as env vars
if event.get('stageVariables'):
for key in event['stageVariables'].keys():
os.environ[str(key)] = event['stageVariables'][key]
# This is the result of a keep alive, recertify
# or scheduled event.
if event.get('detail-type') == u'Scheduled Event':
whole_function = event['resources'][0].split('/')[-1].split('-')[-1]
# This is a scheduled function.
if '.' in whole_function:
app_function = self.import_module_and_get_function(whole_function)
# Execute the function!
return self.run_function(app_function, event, context)
# Else, let this execute as it were.
# This is a direct command invocation.
elif event.get('command', None):
whole_function = event['command']
app_function = self.import_module_and_get_function(whole_function)
result = self.run_function(app_function, event, context)
print("Result of %s:" % whole_function)
print(result)
return result
# This is a direct, raw python invocation.
# It's _extremely_ important we don't allow this event source
# to be overridden by unsanitized, non-admin user input.
elif event.get('raw_command', None):
raw_command = event['raw_command']
exec(raw_command)
return
# This is a Django management command invocation.
elif event.get('manage', None):
from django.core import management
try: # Support both for tests
from zappa.ext.django_zappa import get_django_wsgi
except ImportError as e: # pragma: no cover
from django_zappa_app import get_django_wsgi
# Get the Django WSGI app from our extension
# We don't actually need the function,
# but we do need to do all of the required setup for it.
app_function = get_django_wsgi(self.settings.DJANGO_SETTINGS)
# Couldn't figure out how to get the value into stdout with StringIO..
# Read the log for now. :[]
management.call_command(*event['manage'].split(' '))
return {}
# This is an AWS-event triggered invocation.
elif event.get('Records', None):
records = event.get('Records')
result = None
whole_function = self.get_function_for_aws_event(records[0])
if whole_function:
app_function = self.import_module_and_get_function(whole_function)
result = self.run_function(app_function, event, context)
logger.debug(result)
else:
logger.error("Cannot find a function to process the triggered event.")
return result
# this is an AWS-event triggered from Lex bot's intent
elif event.get('bot'):
result = None
whole_function = self.get_function_from_bot_intent_trigger(event)
if whole_function:
app_function = self.import_module_and_get_function(whole_function)
result = self.run_function(app_function, event, context)
logger.debug(result)
else:
logger.error("Cannot find a function to process the triggered event.")
return result
# This is an API Gateway authorizer event
elif event.get('type') == u'TOKEN':
whole_function = self.settings.AUTHORIZER_FUNCTION
if whole_function:
app_function = self.import_module_and_get_function(whole_function)
policy = self.run_function(app_function, event, context)
return policy
else:
logger.error("Cannot find a function to process the authorization request.")
raise Exception('Unauthorized')
# This is an AWS Cognito Trigger Event
elif event.get('triggerSource', None):
triggerSource = event.get('triggerSource')
whole_function = self.get_function_for_cognito_trigger(triggerSource)
result = event
if whole_function:
app_function = self.import_module_and_get_function(whole_function)
result = self.run_function(app_function, event, context)
logger.debug(result)
else:
logger.error("Cannot find a function to handle cognito trigger {}".format(triggerSource))
return result
# Normal web app flow
try:
# Timing
time_start = datetime.datetime.now()
# This is a normal HTTP request
if event.get('httpMethod', None):
script_name = ''
is_elb_context = False
headers = merge_headers(event)
if event.get('requestContext', None) and event['requestContext'].get('elb', None):
# Related: https://github.com/Miserlou/Zappa/issues/1715
# inputs/outputs for lambda loadbalancer
# https://docs.aws.amazon.com/elasticloadbalancing/latest/application/lambda-functions.html
is_elb_context = True
# host is lower-case when forwarded from ELB
host = headers.get('host')
# TODO: pathParameters is a first-class citizen in apigateway but not available without
# some parsing work for ELB (is this parameter used for anything?)
event['pathParameters'] = ''
else:
if headers:
host = headers.get('Host')
else:
host = None
logger.debug('host found: [{}]'.format(host))
if host:
if 'amazonaws.com' in host:
logger.debug('amazonaws found in host')
# The path provided in th event doesn't include the
# stage, so we must tell Flask to include the API
# stage in the url it calculates. See https://github.com/Miserlou/Zappa/issues/1014
script_name = '/' + settings.API_STAGE
else:
# This is a test request sent from the AWS console
if settings.DOMAIN:
# Assume the requests received will be on the specified
# domain. No special handling is required
pass
else:
# Assume the requests received will be to the
# amazonaws.com endpoint, so tell Flask to include the
# API stage
script_name = '/' + settings.API_STAGE
base_path = getattr(settings, 'BASE_PATH', None)
# Create the environment for WSGI and handle the request
environ = create_wsgi_request(
event,
script_name=script_name,
base_path=base_path,
trailing_slash=self.trailing_slash,
binary_support=settings.BINARY_SUPPORT,
context_header_mappings=settings.CONTEXT_HEADER_MAPPINGS
)
# We are always on https on Lambda, so tell our wsgi app that.
environ['HTTPS'] = 'on'
environ['wsgi.url_scheme'] = 'https'
environ['lambda.context'] = context
environ['lambda.event'] = event
# Execute the application
with Response.from_app(self.wsgi_app, environ) as response:
# This is the object we're going to return.
# Pack the WSGI response into our special dictionary.
zappa_returndict = dict()
# Issue #1715: ALB support. ALB responses must always include
# base64 encoding and status description
if is_elb_context:
zappa_returndict.setdefault('isBase64Encoded', False)
zappa_returndict.setdefault('statusDescription', response.status)
if response.data:
if settings.BINARY_SUPPORT:
if not response.mimetype.startswith("text/") \
or response.mimetype != "application/json":
zappa_returndict['body'] = base64.b64encode(response.data).decode('utf-8')
zappa_returndict["isBase64Encoded"] = True
else:
zappa_returndict['body'] = response.data
else:
zappa_returndict['body'] = response.get_data(as_text=True)
zappa_returndict['statusCode'] = response.status_code
if 'headers' in event:
zappa_returndict['headers'] = {}
for key, value in response.headers:
zappa_returndict['headers'][key] = value
if 'multiValueHeaders' in event:
zappa_returndict['multiValueHeaders'] = {}
for key, value in response.headers:
zappa_returndict['multiValueHeaders'][key] = response.headers.getlist(key)
# Calculate the total response time,
# and log it in the Common Log format.
time_end = datetime.datetime.now()
delta = time_end - time_start
response_time_ms = delta.total_seconds() * 1000
response.content = response.data
common_log(environ, response, response_time=response_time_ms)
return zappa_returndict
except Exception as e: # pragma: no cover
# Print statements are visible in the logs either way
print(e)
exc_info = sys.exc_info()
message = ('An uncaught exception happened while servicing this request. '
'You can investigate this with the `zappa tail` command.')
# If we didn't even build an app_module, just raise.
if not settings.DJANGO_SETTINGS:
try:
self.app_module
except NameError as ne:
message = 'Failed to import module: {}'.format(ne.message)
# Call exception handler for unhandled exceptions
exception_handler = self.settings.EXCEPTION_HANDLER
self._process_exception(exception_handler=exception_handler,
event=event, context=context, exception=e)
# Return this unspecified exception as a 500, using template that API Gateway expects.
content = collections.OrderedDict()
content['statusCode'] = 500
body = {'message': message}
if settings.DEBUG: # only include traceback if debug is on.
body['traceback'] = traceback.format_exception(*exc_info) # traceback as a list for readability.
content['body'] = json.dumps(str(body), sort_keys=True, indent=4)
return content |
def _parse_header(self, data):
"""Parse header (xheader or yheader)
:param data: data to be parsed
:type data: str
:return: list with header's data
:rtype: list
"""
return_list = []
headers = data.split(':')
for header in headers:
header = re.split(' IN ', header, flags=re.I) # ignore case
xheader = {'name': header[0].strip()}
if len(header) > 1:
xheader['units'] = header[1].strip()
return_list.append(xheader)
return return_list | Parse header (xheader or yheader)
:param data: data to be parsed
:type data: str
:return: list with header's data
:rtype: list | Below is the instruction that describes the task:
### Input:
Parse header (xheader or yheader)
:param data: data to be parsed
:type data: str
:return: list with header's data
:rtype: list
### Response:
def _parse_header(self, data):
"""Parse header (xheader or yheader)
:param data: data to be parsed
:type data: str
:return: list with header's data
:rtype: list
"""
return_list = []
headers = data.split(':')
for header in headers:
header = re.split(' IN ', header, flags=re.I) # ignore case
xheader = {'name': header[0].strip()}
if len(header) > 1:
xheader['units'] = header[1].strip()
return_list.append(xheader)
return return_list |
def guess_locktime(redeem_script):
'''
str -> int
If OP_CLTV is used, guess an appropriate lock_time
Otherwise return 0 (no lock time)
Fails if there's not a constant before OP_CLTV
'''
try:
script_array = redeem_script.split()
loc = script_array.index('OP_CHECKLOCKTIMEVERIFY')
return int(script_array[loc - 1], 16)
except ValueError:
return 0 | str -> int
If OP_CLTV is used, guess an appropriate lock_time
Otherwise return 0 (no lock time)
Fails if there's not a constant before OP_CLTV | Below is the instruction that describes the task:
### Input:
str -> int
If OP_CLTV is used, guess an appropriate lock_time
Otherwise return 0 (no lock time)
Fails if there's not a constant before OP_CLTV
### Response:
def guess_locktime(redeem_script):
'''
str -> int
If OP_CLTV is used, guess an appropriate lock_time
Otherwise return 0 (no lock time)
Fails if there's not a constant before OP_CLTV
'''
try:
script_array = redeem_script.split()
loc = script_array.index('OP_CHECKLOCKTIMEVERIFY')
return int(script_array[loc - 1], 16)
except ValueError:
return 0 |
def process_event(self, event, hover_focus):
"""
Process any input event.
:param event: The event that was triggered.
:param hover_focus: Whether to trigger focus change on mouse moves.
:returns: None if the Effect processed the event, else the original event.
"""
# Check whether this Layout is read-only - i.e. has no active focus.
if self._live_col < 0 or self._live_widget < 0:
# Might just be that we've unset the focus - so check we can't find a focus.
self._find_next_widget(1)
if self._live_col < 0 or self._live_widget < 0:
return event
# Give the active widget the first refusal for this event.
event = self._columns[
self._live_col][self._live_widget].process_event(event)
# Check for any movement keys if the widget refused them.
if event is not None:
if isinstance(event, KeyboardEvent):
if event.key_code == Screen.KEY_TAB:
# Move on to next widget, unless it is the last in the
# Layout.
self._columns[self._live_col][self._live_widget].blur()
self._find_next_widget(1)
if self._live_col >= len(self._columns):
self._live_col = 0
self._live_widget = -1
self._find_next_widget(1)
return event
# If we got here, we still should have the focus.
self._columns[self._live_col][self._live_widget].focus()
event = None
elif event.key_code == Screen.KEY_BACK_TAB:
# Move on to previous widget, unless it is the first in the
# Layout.
self._columns[self._live_col][self._live_widget].blur()
self._find_next_widget(-1)
if self._live_col < 0:
self._live_col = len(self._columns) - 1
self._live_widget = len(self._columns[self._live_col])
self._find_next_widget(-1)
return event
# If we got here, we still should have the focus.
self._columns[self._live_col][self._live_widget].focus()
event = None
elif event.key_code == Screen.KEY_DOWN:
# Move on to next widget in this column
wid = self._live_widget
self._columns[self._live_col][self._live_widget].blur()
self._find_next_widget(1, stay_in_col=True)
self._columns[self._live_col][self._live_widget].focus()
# Don't swallow the event if it had no effect.
event = event if wid == self._live_widget else None
elif event.key_code == Screen.KEY_UP:
# Move on to previous widget, unless it is the first in the
# Layout.
wid = self._live_widget
self._columns[self._live_col][self._live_widget].blur()
self._find_next_widget(-1, stay_in_col=True)
self._columns[self._live_col][self._live_widget].focus()
# Don't swallow the event if it had no effect.
event = event if wid == self._live_widget else None
elif event.key_code == Screen.KEY_LEFT:
# Move on to last widget in the previous column
self._columns[self._live_col][self._live_widget].blur()
self._find_next_widget(-1, start_at=0, wrap=True)
self._columns[self._live_col][self._live_widget].focus()
event = None
elif event.key_code == Screen.KEY_RIGHT:
# Move on to first widget in the next column.
self._columns[self._live_col][self._live_widget].blur()
self._find_next_widget(
1,
start_at=len(self._columns[self._live_col]),
wrap=True)
self._columns[self._live_col][self._live_widget].focus()
event = None
elif isinstance(event, MouseEvent):
logger.debug("Check layout: %d, %d", event.x, event.y)
if ((hover_focus and event.buttons >= 0) or
event.buttons > 0):
# Mouse click - look to move focus.
for i, column in enumerate(self._columns):
for j, widget in enumerate(column):
if widget.is_mouse_over(event):
self._frame.switch_focus(self, i, j)
widget.process_event(event)
return None
return event | Process any input event.
:param event: The event that was triggered.
:param hover_focus: Whether to trigger focus change on mouse moves.
:returns: None if the Effect processed the event, else the original event. | Below is the instruction that describes the task:
### Input:
Process any input event.
:param event: The event that was triggered.
:param hover_focus: Whether to trigger focus change on mouse moves.
:returns: None if the Effect processed the event, else the original event.
### Response:
def process_event(self, event, hover_focus):
"""
Process any input event.
:param event: The event that was triggered.
:param hover_focus: Whether to trigger focus change on mouse moves.
:returns: None if the Effect processed the event, else the original event.
"""
# Check whether this Layout is read-only - i.e. has no active focus.
if self._live_col < 0 or self._live_widget < 0:
# Might just be that we've unset the focus - so check we can't find a focus.
self._find_next_widget(1)
if self._live_col < 0 or self._live_widget < 0:
return event
# Give the active widget the first refusal for this event.
event = self._columns[
self._live_col][self._live_widget].process_event(event)
# Check for any movement keys if the widget refused them.
if event is not None:
if isinstance(event, KeyboardEvent):
if event.key_code == Screen.KEY_TAB:
# Move on to next widget, unless it is the last in the
# Layout.
self._columns[self._live_col][self._live_widget].blur()
self._find_next_widget(1)
if self._live_col >= len(self._columns):
self._live_col = 0
self._live_widget = -1
self._find_next_widget(1)
return event
# If we got here, we still should have the focus.
self._columns[self._live_col][self._live_widget].focus()
event = None
elif event.key_code == Screen.KEY_BACK_TAB:
# Move on to previous widget, unless it is the first in the
# Layout.
self._columns[self._live_col][self._live_widget].blur()
self._find_next_widget(-1)
if self._live_col < 0:
self._live_col = len(self._columns) - 1
self._live_widget = len(self._columns[self._live_col])
self._find_next_widget(-1)
return event
# If we got here, we still should have the focus.
self._columns[self._live_col][self._live_widget].focus()
event = None
elif event.key_code == Screen.KEY_DOWN:
# Move on to next widget in this column
wid = self._live_widget
self._columns[self._live_col][self._live_widget].blur()
self._find_next_widget(1, stay_in_col=True)
self._columns[self._live_col][self._live_widget].focus()
# Don't swallow the event if it had no effect.
event = event if wid == self._live_widget else None
elif event.key_code == Screen.KEY_UP:
# Move on to previous widget, unless it is the first in the
# Layout.
wid = self._live_widget
self._columns[self._live_col][self._live_widget].blur()
self._find_next_widget(-1, stay_in_col=True)
self._columns[self._live_col][self._live_widget].focus()
# Don't swallow the event if it had no effect.
event = event if wid == self._live_widget else None
elif event.key_code == Screen.KEY_LEFT:
# Move on to last widget in the previous column
self._columns[self._live_col][self._live_widget].blur()
self._find_next_widget(-1, start_at=0, wrap=True)
self._columns[self._live_col][self._live_widget].focus()
event = None
elif event.key_code == Screen.KEY_RIGHT:
# Move on to first widget in the next column.
self._columns[self._live_col][self._live_widget].blur()
self._find_next_widget(
1,
start_at=len(self._columns[self._live_col]),
wrap=True)
self._columns[self._live_col][self._live_widget].focus()
event = None
elif isinstance(event, MouseEvent):
logger.debug("Check layout: %d, %d", event.x, event.y)
if ((hover_focus and event.buttons >= 0) or
event.buttons > 0):
# Mouse click - look to move focus.
for i, column in enumerate(self._columns):
for j, widget in enumerate(column):
if widget.is_mouse_over(event):
self._frame.switch_focus(self, i, j)
widget.process_event(event)
return None
return event |
def scalars_for_mapping_ion_drifts(glats, glons, alts, dates, step_size=None,
max_steps=None, e_field_scaling_only=False):
"""
Calculates scalars for translating ion motions at position
glat, glon, and alt, for date, to the footpoints of the field line
as well as at the magnetic equator.
All inputs are assumed to be 1D arrays.
Note
----
Directions refer to the ion motion direction e.g. the zonal
scalar applies to zonal ion motions (meridional E field assuming ExB ion motion)
Parameters
----------
glats : list-like of floats (degrees)
Geodetic (WGS84) latitude
glons : list-like of floats (degrees)
Geodetic (WGS84) longitude
alts : list-like of floats (km)
Geodetic (WGS84) altitude, height above surface
dates : list-like of datetimes
Date and time for determination of scalars
e_field_scaling_only : boolean (False)
If True, method only calculates the electric field scalar, ignoring
changes in magnitude of B. Note ion velocity related to E/B.
Returns
-------
dict
array-like of scalars for translating ion drifts. Keys are,
'north_zonal_drifts_scalar', 'north_mer_drifts_scalar', and similarly
for southern locations. 'equator_mer_drifts_scalar' and
'equator_zonal_drifts_scalar' cover the mappings to the equator.
"""
if step_size is None:
step_size = 100.
if max_steps is None:
max_steps = 1000
steps = np.arange(max_steps)
# use spacecraft location to get ECEF
ecef_xs, ecef_ys, ecef_zs = geodetic_to_ecef(glats, glons, alts)
# prepare output
eq_zon_drifts_scalar = []
eq_mer_drifts_scalar = []
# magnetic field info
north_mag_scalar = []
south_mag_scalar = []
eq_mag_scalar = []
out = {}
# meridional e-field scalar map, can also be
# zonal ion drift scalar map
# print ('Starting Northern')
north_zon_drifts_scalar, mind_plus, mind_minus = closed_loop_edge_lengths_via_footpoint(glats,
glons, alts, dates, 'north',
'meridional',
step_size=step_size,
max_steps=max_steps,
edge_length=25.,
edge_steps=5)
north_mer_drifts_scalar, mind_plus, mind_minus = closed_loop_edge_lengths_via_footpoint(glats,
glons, alts, dates, 'north',
'zonal',
step_size=step_size,
max_steps=max_steps,
edge_length=25.,
edge_steps=5)
# print ('Starting Southern')
south_zon_drifts_scalar, mind_plus, mind_minus = closed_loop_edge_lengths_via_footpoint(glats,
glons, alts, dates, 'south',
'meridional',
step_size=step_size,
max_steps=max_steps,
edge_length=25.,
edge_steps=5)
south_mer_drifts_scalar, mind_plus, mind_minus = closed_loop_edge_lengths_via_footpoint(glats,
glons, alts, dates, 'south',
'zonal',
step_size=step_size,
max_steps=max_steps,
edge_length=25.,
edge_steps=5)
# print ('Starting Equatorial')
# , step_zon_apex2, mind_plus, mind_minus
eq_zon_drifts_scalar = closed_loop_edge_lengths_via_equator(glats, glons, alts, dates,
'meridional',
edge_length=25.,
edge_steps=5)
# , step_mer_apex2, mind_plus, mind_minus
eq_mer_drifts_scalar = closed_loop_edge_lengths_via_equator(glats, glons, alts, dates,
'zonal',
edge_length=25.,
edge_steps=5)
# print ('Done with core')
north_zon_drifts_scalar = north_zon_drifts_scalar/50.
south_zon_drifts_scalar = south_zon_drifts_scalar/50.
north_mer_drifts_scalar = north_mer_drifts_scalar/50.
south_mer_drifts_scalar = south_mer_drifts_scalar/50.
# equatorial
eq_zon_drifts_scalar = 50./eq_zon_drifts_scalar
eq_mer_drifts_scalar = 50./eq_mer_drifts_scalar
if e_field_scaling_only:
# prepare output
out['north_mer_fields_scalar'] = north_zon_drifts_scalar
out['south_mer_fields_scalar'] = south_zon_drifts_scalar
out['north_zon_fields_scalar'] = north_mer_drifts_scalar
out['south_zon_fields_scalar'] = south_mer_drifts_scalar
out['equator_mer_fields_scalar'] = eq_zon_drifts_scalar
out['equator_zon_fields_scalar'] = eq_mer_drifts_scalar
else:
# figure out scaling for drifts based upon change in magnetic field
# strength
for ecef_x, ecef_y, ecef_z, glat, glon, alt, date in zip(ecef_xs, ecef_ys, ecef_zs,
glats, glons, alts,
dates):
yr, doy = pysat.utils.getyrdoy(date)
double_date = float(yr) + float(doy) / 366.
# get location of apex for s/c field line
apex_x, apex_y, apex_z, apex_lat, apex_lon, apex_alt = apex_location_info(
[glat], [glon],
[alt], [date])
# trace to northern footpoint
sc_root = np.array([ecef_x, ecef_y, ecef_z])
trace_north = field_line_trace(sc_root, double_date, 1., 120.,
steps=steps,
step_size=step_size,
max_steps=max_steps)
# southern tracing
trace_south = field_line_trace(sc_root, double_date, -1., 120.,
steps=steps,
step_size=step_size,
max_steps=max_steps)
# footpoint location
north_ftpnt = trace_north[-1, :]
nft_glat, nft_glon, nft_alt = ecef_to_geodetic(*north_ftpnt)
south_ftpnt = trace_south[-1, :]
sft_glat, sft_glon, sft_alt = ecef_to_geodetic(*south_ftpnt)
# scalar for the northern footpoint electric field based on distances
# for drift also need to include the magnetic field, drift = E/B
tbn, tbe, tbd, b_sc = igrf.igrf12syn(0, double_date, 1, alt,
np.deg2rad(90.-glat),
np.deg2rad(glon))
# get mag field and scalar for northern footpoint
tbn, tbe, tbd, b_nft = igrf.igrf12syn(0, double_date, 1, nft_alt,
np.deg2rad(90.-nft_glat),
np.deg2rad(nft_glon))
north_mag_scalar.append(b_sc/b_nft)
# equatorial values
tbn, tbe, tbd, b_eq = igrf.igrf12syn(0, double_date, 1, apex_alt,
np.deg2rad(90.-apex_lat),
np.deg2rad(apex_lon))
eq_mag_scalar.append(b_sc/b_eq)
# scalar for the southern footpoint
tbn, tbe, tbd, b_sft = igrf.igrf12syn(0, double_date, 1, sft_alt,
np.deg2rad(90.-sft_glat),
np.deg2rad(sft_glon))
south_mag_scalar.append(b_sc/b_sft)
# make E-Field scalars to drifts
# lists to arrays
north_mag_scalar = np.array(north_mag_scalar)
south_mag_scalar = np.array(south_mag_scalar)
eq_mag_scalar = np.array(eq_mag_scalar)
# apply to electric field scaling to get ion drift values
north_zon_drifts_scalar = north_zon_drifts_scalar*north_mag_scalar
south_zon_drifts_scalar = south_zon_drifts_scalar*south_mag_scalar
north_mer_drifts_scalar = north_mer_drifts_scalar*north_mag_scalar
south_mer_drifts_scalar = south_mer_drifts_scalar*south_mag_scalar
# equatorial
eq_zon_drifts_scalar = eq_zon_drifts_scalar*eq_mag_scalar
eq_mer_drifts_scalar = eq_mer_drifts_scalar*eq_mag_scalar
# output
out['north_zonal_drifts_scalar'] = north_zon_drifts_scalar
out['south_zonal_drifts_scalar'] = south_zon_drifts_scalar
out['north_mer_drifts_scalar'] = north_mer_drifts_scalar
out['south_mer_drifts_scalar'] = south_mer_drifts_scalar
out['equator_zonal_drifts_scalar'] = eq_zon_drifts_scalar
out['equator_mer_drifts_scalar'] = eq_mer_drifts_scalar
return out | Calculates scalars for translating ion motions at position
glat, glon, and alt, for date, to the footpoints of the field line
as well as at the magnetic equator.
All inputs are assumed to be 1D arrays.
Note
----
Directions refer to the ion motion direction e.g. the zonal
scalar applies to zonal ion motions (meridional E field assuming ExB ion motion)
Parameters
----------
glats : list-like of floats (degrees)
Geodetic (WGS84) latitude
glons : list-like of floats (degrees)
Geodetic (WGS84) longitude
alts : list-like of floats (km)
Geodetic (WGS84) altitude, height above surface
dates : list-like of datetimes
Date and time for determination of scalars
e_field_scaling_only : boolean (False)
If True, method only calculates the electric field scalar, ignoring
changes in magnitude of B. Note ion velocity related to E/B.
Returns
-------
dict
array-like of scalars for translating ion drifts. Keys are,
'north_zonal_drifts_scalar', 'north_mer_drifts_scalar', and similarly
for southern locations. 'equator_mer_drifts_scalar' and
'equator_zonal_drifts_scalar' cover the mappings to the equator. | Below is the instruction that describes the task:
### Input:
Calculates scalars for translating ion motions at position
glat, glon, and alt, for date, to the footpoints of the field line
as well as at the magnetic equator.
All inputs are assumed to be 1D arrays.
Note
----
Directions refer to the ion motion direction e.g. the zonal
scalar applies to zonal ion motions (meridional E field assuming ExB ion motion)
Parameters
----------
glats : list-like of floats (degrees)
Geodetic (WGS84) latitude
glons : list-like of floats (degrees)
Geodetic (WGS84) longitude
alts : list-like of floats (km)
Geodetic (WGS84) altitude, height above surface
dates : list-like of datetimes
Date and time for determination of scalars
e_field_scaling_only : boolean (False)
If True, method only calculates the electric field scalar, ignoring
changes in magnitude of B. Note ion velocity related to E/B.
Returns
-------
dict
array-like of scalars for translating ion drifts. Keys are,
'north_zonal_drifts_scalar', 'north_mer_drifts_scalar', and similarly
for southern locations. 'equator_mer_drifts_scalar' and
'equator_zonal_drifts_scalar' cover the mappings to the equator.
### Response:
def scalars_for_mapping_ion_drifts(glats, glons, alts, dates, step_size=None,
max_steps=None, e_field_scaling_only=False):
"""
Calculates scalars for translating ion motions at position
glat, glon, and alt, for date, to the footpoints of the field line
as well as at the magnetic equator.
All inputs are assumed to be 1D arrays.
Note
----
Directions refer to the ion motion direction e.g. the zonal
scalar applies to zonal ion motions (meridional E field assuming ExB ion motion)
Parameters
----------
glats : list-like of floats (degrees)
Geodetic (WGS84) latitude
glons : list-like of floats (degrees)
Geodetic (WGS84) longitude
alts : list-like of floats (km)
Geodetic (WGS84) altitude, height above surface
dates : list-like of datetimes
Date and time for determination of scalars
e_field_scaling_only : boolean (False)
If True, method only calculates the electric field scalar, ignoring
changes in magnitude of B. Note ion velocity related to E/B.
Returns
-------
dict
array-like of scalars for translating ion drifts. Keys are,
'north_zonal_drifts_scalar', 'north_mer_drifts_scalar', and similarly
for southern locations. 'equator_mer_drifts_scalar' and
'equator_zonal_drifts_scalar' cover the mappings to the equator.
"""
if step_size is None:
step_size = 100.
if max_steps is None:
max_steps = 1000
steps = np.arange(max_steps)
# use spacecraft location to get ECEF
ecef_xs, ecef_ys, ecef_zs = geodetic_to_ecef(glats, glons, alts)
# prepare output
eq_zon_drifts_scalar = []
eq_mer_drifts_scalar = []
# magnetic field info
north_mag_scalar = []
south_mag_scalar = []
eq_mag_scalar = []
out = {}
# meridional e-field scalar map, can also be
# zonal ion drift scalar map
# print ('Starting Northern')
north_zon_drifts_scalar, mind_plus, mind_minus = closed_loop_edge_lengths_via_footpoint(glats,
glons, alts, dates, 'north',
'meridional',
step_size=step_size,
max_steps=max_steps,
edge_length=25.,
edge_steps=5)
north_mer_drifts_scalar, mind_plus, mind_minus = closed_loop_edge_lengths_via_footpoint(glats,
glons, alts, dates, 'north',
'zonal',
step_size=step_size,
max_steps=max_steps,
edge_length=25.,
edge_steps=5)
# print ('Starting Southern')
south_zon_drifts_scalar, mind_plus, mind_minus = closed_loop_edge_lengths_via_footpoint(glats,
glons, alts, dates, 'south',
'meridional',
step_size=step_size,
max_steps=max_steps,
edge_length=25.,
edge_steps=5)
south_mer_drifts_scalar, mind_plus, mind_minus = closed_loop_edge_lengths_via_footpoint(glats,
glons, alts, dates, 'south',
'zonal',
step_size=step_size,
max_steps=max_steps,
edge_length=25.,
edge_steps=5)
# print ('Starting Equatorial')
# , step_zon_apex2, mind_plus, mind_minus
eq_zon_drifts_scalar = closed_loop_edge_lengths_via_equator(glats, glons, alts, dates,
'meridional',
edge_length=25.,
edge_steps=5)
# , step_mer_apex2, mind_plus, mind_minus
eq_mer_drifts_scalar = closed_loop_edge_lengths_via_equator(glats, glons, alts, dates,
'zonal',
edge_length=25.,
edge_steps=5)
# print ('Done with core')
north_zon_drifts_scalar = north_zon_drifts_scalar/50.
south_zon_drifts_scalar = south_zon_drifts_scalar/50.
north_mer_drifts_scalar = north_mer_drifts_scalar/50.
south_mer_drifts_scalar = south_mer_drifts_scalar/50.
# equatorial
eq_zon_drifts_scalar = 50./eq_zon_drifts_scalar
eq_mer_drifts_scalar = 50./eq_mer_drifts_scalar
if e_field_scaling_only:
# prepare output
out['north_mer_fields_scalar'] = north_zon_drifts_scalar
out['south_mer_fields_scalar'] = south_zon_drifts_scalar
out['north_zon_fields_scalar'] = north_mer_drifts_scalar
out['south_zon_fields_scalar'] = south_mer_drifts_scalar
out['equator_mer_fields_scalar'] = eq_zon_drifts_scalar
out['equator_zon_fields_scalar'] = eq_mer_drifts_scalar
else:
# figure out scaling for drifts based upon change in magnetic field
# strength
for ecef_x, ecef_y, ecef_z, glat, glon, alt, date in zip(ecef_xs, ecef_ys, ecef_zs,
glats, glons, alts,
dates):
yr, doy = pysat.utils.getyrdoy(date)
double_date = float(yr) + float(doy) / 366.
# get location of apex for s/c field line
apex_x, apex_y, apex_z, apex_lat, apex_lon, apex_alt = apex_location_info(
[glat], [glon],
[alt], [date])
# trace to northern footpoint
sc_root = np.array([ecef_x, ecef_y, ecef_z])
trace_north = field_line_trace(sc_root, double_date, 1., 120.,
steps=steps,
step_size=step_size,
max_steps=max_steps)
# southern tracing
trace_south = field_line_trace(sc_root, double_date, -1., 120.,
steps=steps,
step_size=step_size,
max_steps=max_steps)
# footpoint location
north_ftpnt = trace_north[-1, :]
nft_glat, nft_glon, nft_alt = ecef_to_geodetic(*north_ftpnt)
south_ftpnt = trace_south[-1, :]
sft_glat, sft_glon, sft_alt = ecef_to_geodetic(*south_ftpnt)
# scalar for the northern footpoint electric field based on distances
# for drift also need to include the magnetic field, drift = E/B
tbn, tbe, tbd, b_sc = igrf.igrf12syn(0, double_date, 1, alt,
np.deg2rad(90.-glat),
np.deg2rad(glon))
# get mag field and scalar for northern footpoint
tbn, tbe, tbd, b_nft = igrf.igrf12syn(0, double_date, 1, nft_alt,
np.deg2rad(90.-nft_glat),
np.deg2rad(nft_glon))
north_mag_scalar.append(b_sc/b_nft)
# equatorial values
tbn, tbe, tbd, b_eq = igrf.igrf12syn(0, double_date, 1, apex_alt,
np.deg2rad(90.-apex_lat),
np.deg2rad(apex_lon))
eq_mag_scalar.append(b_sc/b_eq)
# scalar for the southern footpoint
tbn, tbe, tbd, b_sft = igrf.igrf12syn(0, double_date, 1, sft_alt,
np.deg2rad(90.-sft_glat),
np.deg2rad(sft_glon))
south_mag_scalar.append(b_sc/b_sft)
# make E-Field scalars to drifts
# lists to arrays
north_mag_scalar = np.array(north_mag_scalar)
south_mag_scalar = np.array(south_mag_scalar)
eq_mag_scalar = np.array(eq_mag_scalar)
# apply to electric field scaling to get ion drift values
north_zon_drifts_scalar = north_zon_drifts_scalar*north_mag_scalar
south_zon_drifts_scalar = south_zon_drifts_scalar*south_mag_scalar
north_mer_drifts_scalar = north_mer_drifts_scalar*north_mag_scalar
south_mer_drifts_scalar = south_mer_drifts_scalar*south_mag_scalar
# equatorial
eq_zon_drifts_scalar = eq_zon_drifts_scalar*eq_mag_scalar
eq_mer_drifts_scalar = eq_mer_drifts_scalar*eq_mag_scalar
# output
out['north_zonal_drifts_scalar'] = north_zon_drifts_scalar
out['south_zonal_drifts_scalar'] = south_zon_drifts_scalar
out['north_mer_drifts_scalar'] = north_mer_drifts_scalar
out['south_mer_drifts_scalar'] = south_mer_drifts_scalar
out['equator_zonal_drifts_scalar'] = eq_zon_drifts_scalar
out['equator_mer_drifts_scalar'] = eq_mer_drifts_scalar
return out |
def _implementation():
    """Identify the running Python implementation.

    Returns a dict with two keys: ``name`` (e.g. ``'CPython'``, ``'PyPy'``)
    and ``version`` (e.g. ``'2.7.5'``).  Accurate for CPython and PyPy;
    the Jython and IronPython versions are best-effort guesses.
    """
    name = platform.python_implementation()
    if name == 'PyPy':
        # platform.python_version() reports the *emulated* CPython version
        # on PyPy, so build PyPy's own version from sys.pypy_version_info.
        info = sys.pypy_version_info
        version = '%s.%s.%s' % (info.major, info.minor, info.micro)
        if info.releaselevel != 'final':
            version += info.releaselevel
    elif name in ('CPython', 'Jython', 'IronPython'):
        # Exact for CPython; a complete guess for Jython/IronPython.
        version = platform.python_version()
    else:
        version = 'Unknown'
    return {'name': name, 'version': version}
Provide both the name and the version of the Python implementation
currently running. For example, on CPython 2.7.5 it will return
{'name': 'CPython', 'version': '2.7.5'}.
This function works best on CPython and PyPy: in particular, it probably
doesn't work for Jython or IronPython. Future investigation should be done
to work out the correct shape of the code for those platforms. | Below is the instruction that describes the task:
### Input:
Return a dict with the Python implementation and version.
Provide both the name and the version of the Python implementation
currently running. For example, on CPython 2.7.5 it will return
{'name': 'CPython', 'version': '2.7.5'}.
This function works best on CPython and PyPy: in particular, it probably
doesn't work for Jython or IronPython. Future investigation should be done
to work out the correct shape of the code for those platforms.
### Response:
def _implementation():
"""Return a dict with the Python implementation and version.
Provide both the name and the version of the Python implementation
currently running. For example, on CPython 2.7.5 it will return
{'name': 'CPython', 'version': '2.7.5'}.
This function works best on CPython and PyPy: in particular, it probably
doesn't work for Jython or IronPython. Future investigation should be done
to work out the correct shape of the code for those platforms.
"""
implementation = platform.python_implementation()
if implementation == 'CPython':
implementation_version = platform.python_version()
elif implementation == 'PyPy':
implementation_version = '%s.%s.%s' % (sys.pypy_version_info.major,
sys.pypy_version_info.minor,
sys.pypy_version_info.micro)
if sys.pypy_version_info.releaselevel != 'final':
implementation_version = ''.join([
implementation_version, sys.pypy_version_info.releaselevel
])
elif implementation == 'Jython':
implementation_version = platform.python_version() # Complete Guess
elif implementation == 'IronPython':
implementation_version = platform.python_version() # Complete Guess
else:
implementation_version = 'Unknown'
return {'name': implementation, 'version': implementation_version} |
def _set_dacl_inheritance(path, objectType, inheritance=True, copy=True, clear=False):
    '''
    helper function to set the inheritance on a securable object's DACL
    Args:
        path (str): The path to the object
        objectType (str): The type of object
        inheritance (bool): True enables inheritance, False disables
        copy (bool): Copy inherited ACEs to the DACL before disabling
            inheritance
        clear (bool): Remove non-inherited ACEs from the DACL
    Returns:
        dict: ``{'result': bool, 'comment': str, 'changes': dict}`` --
            ``comment`` carries the error text when ``result`` is False
    '''
    ret = {'result': False,
           'comment': '',
           'changes': {}}
    if path:
        try:
            # Read the object's security descriptor and extract its DACL.
            sd = win32security.GetNamedSecurityInfo(path, objectType, win32security.DACL_SECURITY_INFORMATION)
            tdacl = sd.GetSecurityDescriptorDacl()
            if inheritance:
                if clear:
                    # Delete every explicit (non-inherited) ACE.  The index
                    # only advances when nothing was deleted, because
                    # DeleteAce shifts the following entries down by one.
                    counter = 0
                    removedAces = []
                    while counter < tdacl.GetAceCount():
                        tAce = tdacl.GetAce(counter)
                        if (tAce[0][1] & win32security.INHERITED_ACE) != win32security.INHERITED_ACE:
                            tdacl.DeleteAce(counter)
                            removedAces.append(_ace_to_text(tAce, objectType))
                        else:
                            counter = counter + 1
                    if removedAces:
                        ret['changes']['Removed ACEs'] = removedAces
                else:
                    ret['changes']['Non-Inherited ACEs'] = 'Left in the DACL'
                # UNPROTECTED flag re-enables inheritance from the parent.
                win32security.SetNamedSecurityInfo(
                    path, objectType,
                    win32security.DACL_SECURITY_INFORMATION | win32security.UNPROTECTED_DACL_SECURITY_INFORMATION,
                    None, None, tdacl, None)
                ret['changes']['Inheritance'] = 'Enabled'
            else:
                if not copy:
                    # Drop the ACEs that were inherited from the parent so
                    # only explicit entries remain once inheritance is off.
                    counter = 0
                    inheritedAcesRemoved = []
                    while counter < tdacl.GetAceCount():
                        tAce = tdacl.GetAce(counter)
                        if (tAce[0][1] & win32security.INHERITED_ACE) == win32security.INHERITED_ACE:
                            tdacl.DeleteAce(counter)
                            inheritedAcesRemoved.append(_ace_to_text(tAce, objectType))
                        else:
                            counter = counter + 1
                    if inheritedAcesRemoved:
                        ret['changes']['Removed ACEs'] = inheritedAcesRemoved
                else:
                    ret['changes']['Previously Inherited ACEs'] = 'Copied to the DACL'
                # PROTECTED flag blocks inheritance from the parent; with
                # ``copy`` True the inherited ACEs stay as explicit copies.
                win32security.SetNamedSecurityInfo(
                    path, objectType,
                    win32security.DACL_SECURITY_INFORMATION | win32security.PROTECTED_DACL_SECURITY_INFORMATION,
                    None, None, tdacl, None)
                ret['changes']['Inheritance'] = 'Disabled'
            ret['result'] = True
        except Exception as e:
            # Surface any pywin32 failure through the returned comment.
            ret['result'] = False
            ret['comment'] = 'Error attempting to set the inheritance. The error was {0}.'.format(e)
    return ret | helper function to set the inheritance
Args:
path (str): The path to the object
objectType (str): The type of object
inheritance (bool): True enables inheritance, False disables
copy (bool): Copy inherited ACEs to the DACL before disabling
inheritance
clear (bool): Remove non-inherited ACEs from the DACL | Below is the instruction that describes the task:
### Input:
helper function to set the inheritance
Args:
path (str): The path to the object
objectType (str): The type of object
inheritance (bool): True enables inheritance, False disables
copy (bool): Copy inherited ACEs to the DACL before disabling
inheritance
clear (bool): Remove non-inherited ACEs from the DACL
### Response:
def _set_dacl_inheritance(path, objectType, inheritance=True, copy=True, clear=False):
'''
helper function to set the inheritance
Args:
path (str): The path to the object
objectType (str): The type of object
inheritance (bool): True enables inheritance, False disables
copy (bool): Copy inherited ACEs to the DACL before disabling
inheritance
clear (bool): Remove non-inherited ACEs from the DACL
'''
ret = {'result': False,
'comment': '',
'changes': {}}
if path:
try:
sd = win32security.GetNamedSecurityInfo(path, objectType, win32security.DACL_SECURITY_INFORMATION)
tdacl = sd.GetSecurityDescriptorDacl()
if inheritance:
if clear:
counter = 0
removedAces = []
while counter < tdacl.GetAceCount():
tAce = tdacl.GetAce(counter)
if (tAce[0][1] & win32security.INHERITED_ACE) != win32security.INHERITED_ACE:
tdacl.DeleteAce(counter)
removedAces.append(_ace_to_text(tAce, objectType))
else:
counter = counter + 1
if removedAces:
ret['changes']['Removed ACEs'] = removedAces
else:
ret['changes']['Non-Inherited ACEs'] = 'Left in the DACL'
win32security.SetNamedSecurityInfo(
path, objectType,
win32security.DACL_SECURITY_INFORMATION | win32security.UNPROTECTED_DACL_SECURITY_INFORMATION,
None, None, tdacl, None)
ret['changes']['Inheritance'] = 'Enabled'
else:
if not copy:
counter = 0
inheritedAcesRemoved = []
while counter < tdacl.GetAceCount():
tAce = tdacl.GetAce(counter)
if (tAce[0][1] & win32security.INHERITED_ACE) == win32security.INHERITED_ACE:
tdacl.DeleteAce(counter)
inheritedAcesRemoved.append(_ace_to_text(tAce, objectType))
else:
counter = counter + 1
if inheritedAcesRemoved:
ret['changes']['Removed ACEs'] = inheritedAcesRemoved
else:
ret['changes']['Previously Inherited ACEs'] = 'Copied to the DACL'
win32security.SetNamedSecurityInfo(
path, objectType,
win32security.DACL_SECURITY_INFORMATION | win32security.PROTECTED_DACL_SECURITY_INFORMATION,
None, None, tdacl, None)
ret['changes']['Inheritance'] = 'Disabled'
ret['result'] = True
except Exception as e:
ret['result'] = False
ret['comment'] = 'Error attempting to set the inheritance. The error was {0}.'.format(e)
return ret |
def normalise_angle(th):
    """Wrap the angle ``th`` (radians) into the interval [-pi, pi].

    Works element-wise on NumPy arrays as well as on scalars.
    """
    two_pi = 2.0 * np.pi
    return th - two_pi * np.floor((th + np.pi) / two_pi)
### Input:
Normalise an angle to be in the range [-pi, pi].
### Response:
def normalise_angle(th):
"""Normalise an angle to be in the range [-pi, pi]."""
return th - (2.0 * np.pi) * np.floor((th + np.pi) / (2.0 * np.pi)) |
def report(self):
    """Render everything this plugin gathered as an HTML fragment.

    The fragment starts with an anchor for the plugin, followed by
    optional sections for copied files, executed commands, alerts and
    free-form custom text.  Returns a ``str`` on Python 3 and UTF-8
    encoded bytes on Python 2 (the historical ``six.PY2`` behaviour).
    """
    # Collect fragments in a list and join once at the end; repeated
    # ``str = str + ...`` concatenation in loops is quadratic.
    parts = [u'<hr/><a name="%s"></a>\n' % self.name()]
    # Intro
    parts.append("<h2> Plugin <em>" + self.name() + "</em></h2>\n")
    # Files
    if self.copied_files:
        parts.append("<p>Files copied:<br><ul>\n")
        for afile in self.copied_files:
            parts.append('<li><a href="%s">%s</a>' %
                         (u'..' + _to_u(afile['dstpath']),
                          _to_u(afile['srcpath'])))
            if afile['symlink'] == "yes":
                parts.append(" (symlink to %s)" % _to_u(afile['pointsto']))
            parts.append('</li>\n')
        parts.append("</ul></p>\n")
    # Command Output
    if self.executed_commands:
        parts.append("<p>Commands Executed:<br><ul>\n")
        # convert file name to relative path from our root
        # don't use relpath - these are HTML paths not OS paths.
        for cmd in self.executed_commands:
            if cmd["file"] and len(cmd["file"]):
                cmd_rel_path = u"../" + _to_u(self.commons['cmddir']) \
                    + "/" + _to_u(cmd['file'])
                parts.append('<li><a href="%s">%s</a></li>\n' %
                             (cmd_rel_path, _to_u(cmd['exe'])))
            else:
                parts.append('<li>%s</li>\n' % (_to_u(cmd['exe'])))
        parts.append("</ul></p>\n")
    # Alerts
    if self.alerts:
        parts.append("<p>Alerts:<br><ul>\n")
        for alert in self.alerts:
            parts.append('<li>%s</li>\n' % _to_u(alert))
        parts.append("</ul></p>\n")
    # Custom Text
    if self.custom_text != "":
        parts.append("<p>Additional Information:<br>\n")
        parts.append(_to_u(self.custom_text) + "</p>\n")
    html = u"".join(parts)
    if six.PY2:
        return html.encode('utf8')
    return html
allows browsing the results. | Below is the instruction that describes the task:
### Input:
Present all information that was gathered in an html file that
allows browsing the results.
### Response:
def report(self):
""" Present all information that was gathered in an html file that
allows browsing the results.
"""
# make this prettier
html = u'<hr/><a name="%s"></a>\n' % self.name()
# Intro
html = html + "<h2> Plugin <em>" + self.name() + "</em></h2>\n"
# Files
if len(self.copied_files):
html = html + "<p>Files copied:<br><ul>\n"
for afile in self.copied_files:
html = html + '<li><a href="%s">%s</a>' % \
(u'..' + _to_u(afile['dstpath']), _to_u(afile['srcpath']))
if afile['symlink'] == "yes":
html = html + " (symlink to %s)" % _to_u(afile['pointsto'])
html = html + '</li>\n'
html = html + "</ul></p>\n"
# Command Output
if len(self.executed_commands):
html = html + "<p>Commands Executed:<br><ul>\n"
# convert file name to relative path from our root
# don't use relpath - these are HTML paths not OS paths.
for cmd in self.executed_commands:
if cmd["file"] and len(cmd["file"]):
cmd_rel_path = u"../" + _to_u(self.commons['cmddir']) \
+ "/" + _to_u(cmd['file'])
html = html + '<li><a href="%s">%s</a></li>\n' % \
(cmd_rel_path, _to_u(cmd['exe']))
else:
html = html + '<li>%s</li>\n' % (_to_u(cmd['exe']))
html = html + "</ul></p>\n"
# Alerts
if len(self.alerts):
html = html + "<p>Alerts:<br><ul>\n"
for alert in self.alerts:
html = html + '<li>%s</li>\n' % _to_u(alert)
html = html + "</ul></p>\n"
# Custom Text
if self.custom_text != "":
html = html + "<p>Additional Information:<br>\n"
html = html + _to_u(self.custom_text) + "</p>\n"
if six.PY2:
return html.encode('utf8')
else:
return html |
def TableDrivenVacuumAgent():
    """A table-driven agent for the two-location vacuum world. [Fig. 2.3]"""
    # Maps each percept *history* -- a tuple of (location, status)
    # percepts -- to an action.  Only a small sample of histories is
    # enumerated here; the full table would grow without bound.
    table = {((loc_A, 'Clean'),): 'Right',
             ((loc_A, 'Dirty'),): 'Suck',
             ((loc_B, 'Clean'),): 'Left',
             ((loc_B, 'Dirty'),): 'Suck',
             ((loc_A, 'Clean'), (loc_A, 'Clean')): 'Right',
             ((loc_A, 'Clean'), (loc_A, 'Dirty')): 'Suck',
             # ...
             ((loc_A, 'Clean'), (loc_A, 'Clean'), (loc_A, 'Clean')): 'Right',
             ((loc_A, 'Clean'), (loc_A, 'Clean'), (loc_A, 'Dirty')): 'Suck',
             # ...
             }
    return Agent(TableDrivenAgentProgram(table)) | [Fig. 2.3] | Below is the the instruction that describes the task:
### Input:
[Fig. 2.3]
### Response:
def TableDrivenVacuumAgent():
"[Fig. 2.3]"
table = {((loc_A, 'Clean'),): 'Right',
((loc_A, 'Dirty'),): 'Suck',
((loc_B, 'Clean'),): 'Left',
((loc_B, 'Dirty'),): 'Suck',
((loc_A, 'Clean'), (loc_A, 'Clean')): 'Right',
((loc_A, 'Clean'), (loc_A, 'Dirty')): 'Suck',
# ...
((loc_A, 'Clean'), (loc_A, 'Clean'), (loc_A, 'Clean')): 'Right',
((loc_A, 'Clean'), (loc_A, 'Clean'), (loc_A, 'Dirty')): 'Suck',
# ...
}
return Agent(TableDrivenAgentProgram(table)) |
def symget(self, name):
    """Read back the value of a SAS macro variable.

    :param name: name of the macro variable to read (a string)
    :return: the variable's value converted to ``int`` or ``float``
        when possible, otherwise the raw string
    """
    # %put echoes "name=value" into the SAS log; parse it back out by
    # taking everything between the last "name=" and the next newline.
    ll = self.submit("%put " + name + "=&" + name + ";\n")
    l2 = ll['LOG'].rpartition(name + "=")
    l2 = l2[2].partition("\n")
    # Prefer the narrowest numeric type; fall back to the raw string.
    # Catch only ValueError -- the original bare ``except:`` also
    # swallowed KeyboardInterrupt/SystemExit.
    try:
        var = int(l2[0])
    except ValueError:
        try:
            var = float(l2[0])
        except ValueError:
            var = l2[0]
    return var
- name is a character | Below is the instruction that describes the task:
### Input:
:param name: name of the macro variable to set:
- name is a character
### Response:
def symget(self, name):
"""
:param name: name of the macro varable to set:
- name is a character
"""
ll = self.submit("%put " + name + "=&" + name + ";\n")
l2 = ll['LOG'].rpartition(name + "=")
l2 = l2[2].partition("\n")
try:
var = int(l2[0])
except:
try:
var = float(l2[0])
except:
var = l2[0]
return var |
def handle_state_change(self, state_change: StateChange) -> List[Greenlet]:
    """ Dispatch the state change and return the processing threads.
    Use this for error reporting, failures in the returned greenlets,
    should be re-raised using `gevent.joinall` with `raise_error=True`.
    """
    # The WAL must have been restored before any dispatch can happen.
    assert self.wal, f'WAL not restored. node:{self!r}'
    log.debug(
        'State change',
        node=pex(self.address),
        state_change=_redact_secret(serialize.JSONSerializer.serialize(state_change)),
    )
    # Capture the chain state before and after dispatch so any balance
    # proof that changed can be detected and forwarded to the services.
    old_state = views.state_from_raiden(self)
    raiden_event_list = self.wal.log_and_dispatch(state_change)
    current_state = views.state_from_raiden(self)
    for changed_balance_proof in views.detect_balance_proof_change(old_state, current_state):
        update_services_from_balance_proof(self, current_state, changed_balance_proof)
    log.debug(
        'Raiden events',
        node=pex(self.address),
        raiden_events=[
            _redact_secret(serialize.JSONSerializer.serialize(event))
            for event in raiden_event_list
        ],
    )
    greenlets: List[Greenlet] = list()
    # NOTE(review): events are only dispatched once the node is ready to
    # process them -- presumably they are replayed from the WAL otherwise;
    # confirm against the restore path.
    if self.ready_to_process_events:
        for raiden_event in raiden_event_list:
            greenlets.append(
                self.handle_event(raiden_event=raiden_event),
            )
    # Take a snapshot once per SNAPSHOT_STATE_CHANGES_COUNT state changes
    # (integer division groups the counter into snapshot "generations").
    state_changes_count = self.wal.storage.count_state_changes()
    new_snapshot_group = (
        state_changes_count // SNAPSHOT_STATE_CHANGES_COUNT
    )
    if new_snapshot_group > self.snapshot_group:
        log.debug('Storing snapshot', snapshot_id=new_snapshot_group)
        self.wal.snapshot()
        self.snapshot_group = new_snapshot_group
    return greenlets | Dispatch the state change and return the processing threads.
Use this for error reporting, failures in the returned greenlets,
should be re-raised using `gevent.joinall` with `raise_error=True`. | Below is the instruction that describes the task:
### Input:
Dispatch the state change and return the processing threads.
Use this for error reporting, failures in the returned greenlets,
should be re-raised using `gevent.joinall` with `raise_error=True`.
### Response:
def handle_state_change(self, state_change: StateChange) -> List[Greenlet]:
""" Dispatch the state change and return the processing threads.
Use this for error reporting, failures in the returned greenlets,
should be re-raised using `gevent.joinall` with `raise_error=True`.
"""
assert self.wal, f'WAL not restored. node:{self!r}'
log.debug(
'State change',
node=pex(self.address),
state_change=_redact_secret(serialize.JSONSerializer.serialize(state_change)),
)
old_state = views.state_from_raiden(self)
raiden_event_list = self.wal.log_and_dispatch(state_change)
current_state = views.state_from_raiden(self)
for changed_balance_proof in views.detect_balance_proof_change(old_state, current_state):
update_services_from_balance_proof(self, current_state, changed_balance_proof)
log.debug(
'Raiden events',
node=pex(self.address),
raiden_events=[
_redact_secret(serialize.JSONSerializer.serialize(event))
for event in raiden_event_list
],
)
greenlets: List[Greenlet] = list()
if self.ready_to_process_events:
for raiden_event in raiden_event_list:
greenlets.append(
self.handle_event(raiden_event=raiden_event),
)
state_changes_count = self.wal.storage.count_state_changes()
new_snapshot_group = (
state_changes_count // SNAPSHOT_STATE_CHANGES_COUNT
)
if new_snapshot_group > self.snapshot_group:
log.debug('Storing snapshot', snapshot_id=new_snapshot_group)
self.wal.snapshot()
self.snapshot_group = new_snapshot_group
return greenlets |
def list(context, resource, **kwargs):
    """List all resources.

    When the kwargs contain ``id`` and ``subresource``, the nested
    collection ``<api>/<resource>/<id>/<subresource>`` is queried
    instead of the top-level collection; every remaining kwarg is
    forwarded as a query parameter.
    """
    params = utils.sanitize_kwargs(**kwargs)
    resource_id = params.pop('id', None)
    subresource = params.pop('subresource', None)
    if subresource:
        uri = '%s/%s/%s/%s' % (context.dci_cs_api, resource,
                               resource_id, subresource)
    else:
        uri = '%s/%s' % (context.dci_cs_api, resource)
    return context.session.get(uri, timeout=HTTP_TIMEOUT, params=params)
### Input:
List all resources
### Response:
def list(context, resource, **kwargs):
"""List all resources"""
data = utils.sanitize_kwargs(**kwargs)
id = data.pop('id', None)
subresource = data.pop('subresource', None)
if subresource:
uri = '%s/%s/%s/%s' % (context.dci_cs_api, resource, id, subresource)
else:
uri = '%s/%s' % (context.dci_cs_api, resource)
return context.session.get(uri, timeout=HTTP_TIMEOUT, params=data) |
def onchange_dates(self):
    '''
    Compute the stay duration (in days) between check-in and check-out.
    A stay of only a few hours still counts as a whole day; if the
    leftover hours reach the company's configured additional-hours
    threshold, one extra day is charged.
    --------------------------------------------------------------------
    @param self: object pointer
    @return: Duration and checkout_date
    '''
    configured_addition_hours = 0
    wid = self.warehouse_id
    # NOTE(review): ``wid or wid.company_id`` yields ``wid`` whenever it
    # is truthy -- ``wid and wid.company_id`` may have been intended;
    # confirm before changing.
    whouse_com_id = wid or wid.company_id
    if whouse_com_id:
        configured_addition_hours = wid.company_id.additional_hours
    myduration = 0
    chckin = self.checkin_date
    chckout = self.checkout_date
    if chckin and chckout:
        # Dates arrive as strings in the server datetime format.
        server_dt = DEFAULT_SERVER_DATETIME_FORMAT
        chkin_dt = datetime.datetime.strptime(chckin, server_dt)
        chkout_dt = datetime.datetime.strptime(chckout, server_dt)
        dur = chkout_dt - chkin_dt
        # timedelta.seconds is only the sub-day remainder (0..86399).
        sec_dur = dur.seconds
        # Whole-day difference when there is no leftover time; otherwise
        # any partial day is rounded up to a full day.
        if (not dur.days and not sec_dur) or (dur.days and not sec_dur):
            myduration = dur.days
        else:
            myduration = dur.days + 1
        # To calculate additional hours in hotel room as per minutes
        if configured_addition_hours > 0:
            additional_hours = abs((dur.seconds / 60) / 60)
            if additional_hours >= configured_addition_hours:
                myduration += 1
    self.duration = myduration
    self.duration_dummy = self.duration | This method gives the duration between check in and checkout
if customer will leave only for some hour it would be considers
as a whole day.If customer will check in checkout for more or equal
hours, which configured in company as additional hours than it would
be consider as full days
--------------------------------------------------------------------
@param self: object pointer
@return: Duration and checkout_date | Below is the instruction that describes the task:
### Input:
This method gives the duration between check in and checkout
if customer will leave only for some hour it would be considers
as a whole day.If customer will check in checkout for more or equal
hours, which configured in company as additional hours than it would
be consider as full days
--------------------------------------------------------------------
@param self: object pointer
@return: Duration and checkout_date
### Response:
def onchange_dates(self):
'''
This method gives the duration between check in and checkout
if customer will leave only for some hour it would be considers
as a whole day.If customer will check in checkout for more or equal
hours, which configured in company as additional hours than it would
be consider as full days
--------------------------------------------------------------------
@param self: object pointer
@return: Duration and checkout_date
'''
configured_addition_hours = 0
wid = self.warehouse_id
whouse_com_id = wid or wid.company_id
if whouse_com_id:
configured_addition_hours = wid.company_id.additional_hours
myduration = 0
chckin = self.checkin_date
chckout = self.checkout_date
if chckin and chckout:
server_dt = DEFAULT_SERVER_DATETIME_FORMAT
chkin_dt = datetime.datetime.strptime(chckin, server_dt)
chkout_dt = datetime.datetime.strptime(chckout, server_dt)
dur = chkout_dt - chkin_dt
sec_dur = dur.seconds
if (not dur.days and not sec_dur) or (dur.days and not sec_dur):
myduration = dur.days
else:
myduration = dur.days + 1
# To calculate additional hours in hotel room as per minutes
if configured_addition_hours > 0:
additional_hours = abs((dur.seconds / 60) / 60)
if additional_hours >= configured_addition_hours:
myduration += 1
self.duration = myduration
self.duration_dummy = self.duration |
def url_to_fn(url):
    """
    Turn `url` into the filename used for the downloaded datasets,
    e.g. ``http://kitakitsune.org/xe`` -> ``kitakitsune.org_xe``.
    Args:
        url (str): URL of the resource.
    Returns:
        str: Normalized URL.
    """
    for prefix in ("http://", "https://"):
        url = url.replace(prefix, "")
    base = url.partition("?")[0]
    return base.replace("%", "_").replace("/", "_")
``http://kitakitsune.org/xe`` -> ``kitakitsune.org_xe``.
Args:
url (str): URL of the resource.
Returns:
str: Normalized URL. | Below is the instruction that describes the task:
### Input:
Convert `url` to filename used to download the datasets.
``http://kitakitsune.org/xe`` -> ``kitakitsune.org_xe``.
Args:
url (str): URL of the resource.
Returns:
str: Normalized URL.
### Response:
def url_to_fn(url):
"""
Convert `url` to filename used to download the datasets.
``http://kitakitsune.org/xe`` -> ``kitakitsune.org_xe``.
Args:
url (str): URL of the resource.
Returns:
str: Normalized URL.
"""
url = url.replace("http://", "").replace("https://", "")
url = url.split("?")[0]
return url.replace("%", "_").replace("/", "_") |
def rebuild(self, image, wait=True):
    """
    Rebuild this droplet with given image id
    Parameters
    ----------
    image: int or str
        int for image id and str for image slug
    wait: bool, default True
        Whether to block until the pending action is completed
    Returns
    -------
    Whatever the generic ``_action`` helper returns for the
    'rebuild' action.
    """
    # Thin delegation to the generic droplet-action helper.
    return self._action('rebuild', image=image, wait=wait) | Rebuild this droplet with given image id
Parameters
----------
image: int or str
int for image id and str for image slug
wait: bool, default True
Whether to block until the pending action is completed | Below is the instruction that describes the task:
### Input:
Rebuild this droplet with given image id
Parameters
----------
image: int or str
int for image id and str for image slug
wait: bool, default True
Whether to block until the pending action is completed
### Response:
def rebuild(self, image, wait=True):
"""
Rebuild this droplet with given image id
Parameters
----------
image: int or str
int for image id and str for image slug
wait: bool, default True
Whether to block until the pending action is completed
"""
return self._action('rebuild', image=image, wait=wait) |
def _download_contact(cls, mm_contact, file):
    """
    Specialized version of .download_media() for contacts.
    Will make use of the vCard 4.0 format.

    ``file`` may be the ``bytes`` type itself (return the raw vCard
    bytes), a path string (write the vCard there, deriving a proper
    filename), or an already-open binary file object.
    """
    first_name = mm_contact.first_name
    last_name = mm_contact.last_name
    phone_number = mm_contact.phone_number
    # Remove these pesky characters (';' is the vCard field separator)
    first_name = first_name.replace(';', '')
    last_name = (last_name or '').replace(';', '')
    result = (
        'BEGIN:VCARD\n'
        'VERSION:4.0\n'
        'N:{f};{l};;;\n'
        'FN:{f} {l}\n'
        'TEL;TYPE=cell;VALUE=uri:tel:+{p}\n'
        'END:VCARD\n'
    ).format(f=first_name, l=last_name, p=phone_number).encode('utf-8')
    if file is bytes:
        return result
    elif isinstance(file, str):
        file = cls._get_proper_filename(
            file, 'contact', '.vcard',
            possible_names=[first_name, phone_number, last_name]
        )
        # BUGFIX: open() rejects an encoding argument in binary mode
        # (ValueError); the payload is already UTF-8 encoded bytes.
        f = open(file, 'wb')
    else:
        f = file
    try:
        f.write(result)
    finally:
        # Only close the stream if we opened it
        if isinstance(file, str):
            f.close()
    return file
Will make use of the vCard 4.0 format. | Below is the instruction that describes the task:
### Input:
Specialized version of .download_media() for contacts.
Will make use of the vCard 4.0 format.
### Response:
def _download_contact(cls, mm_contact, file):
"""
Specialized version of .download_media() for contacts.
Will make use of the vCard 4.0 format.
"""
first_name = mm_contact.first_name
last_name = mm_contact.last_name
phone_number = mm_contact.phone_number
# Remove these pesky characters
first_name = first_name.replace(';', '')
last_name = (last_name or '').replace(';', '')
result = (
'BEGIN:VCARD\n'
'VERSION:4.0\n'
'N:{f};{l};;;\n'
'FN:{f} {l}\n'
'TEL;TYPE=cell;VALUE=uri:tel:+{p}\n'
'END:VCARD\n'
).format(f=first_name, l=last_name, p=phone_number).encode('utf-8')
if file is bytes:
return result
elif isinstance(file, str):
file = cls._get_proper_filename(
file, 'contact', '.vcard',
possible_names=[first_name, phone_number, last_name]
)
f = open(file, 'wb', encoding='utf-8')
else:
f = file
try:
f.write(result)
finally:
# Only close the stream if we opened it
if isinstance(file, str):
f.close()
return file |
def play_introjs_tour(
        driver, tour_steps, browser, msg_dur, name=None, interval=0):
    """ Plays an IntroJS tour on the current website.
    driver:     the WebDriver instance the tour runs in
    tour_steps: dict mapping tour names to lists of JS step snippets
    browser:    browser name; "firefox" uses a DOM-based progress probe
    msg_dur:    how long error messenger notifications stay visible
    name:       which tour in ``tour_steps`` to play
    interval:   seconds between steps; > 0 enables autoplay (min 0.5s)
    """
    # Concatenate the per-step JS snippets, then close the step list and
    # configure/start the tour; ``$tour`` exposes it to later polling.
    instructions = ""
    for tour_step in tour_steps[name]:
        instructions += tour_step
    instructions += (
        """]
        });
        intro.setOption("disableInteraction", true);
        intro.setOption("overlayOpacity", .29);
        intro.setOption("scrollToElement", true);
        intro.setOption("keyboardNavigation", true);
        intro.setOption("exitOnEsc", false);
        intro.setOption("exitOnOverlayClick", false);
        intro.setOption("showStepNumbers", false);
        intro.setOption("showProgress", false);
        intro.start();
        $tour = intro;
        };
        // Start the tour
        startIntro();
        """)
    # Autoplay advances steps automatically; clamp the interval to >= 0.5s.
    autoplay = False
    if interval and interval > 0:
        autoplay = True
        interval = float(interval)
        if interval < 0.5:
            interval = 0.5
    if not is_introjs_activated(driver):
        activate_introjs(driver)
    # Before starting, verify the first real step's target element exists;
    # fail loudly (messenger + exception) if it does not.
    if len(tour_steps[name]) > 1:
        try:
            if "element: " in tour_steps[name][1]:
                selector = re.search(
                    r"[\S\s]+element: '([\S\s]+)',[\S\s]+intro: '",
                    tour_steps[name][1]).group(1)
                selector = selector.replace('\\', '')
                page_actions.wait_for_element_present(
                    driver, selector, by=By.CSS_SELECTOR,
                    timeout=settings.SMALL_TIMEOUT)
            else:
                selector = "html"
        except Exception:
            js_utils.post_messenger_error_message(
                driver, "Tour Error: {'%s'} was not found!" % selector,
                msg_dur)
            raise Exception(
                "Tour Error: {'%s'} was not found! "
                "Exiting due to failure on first tour step!"
                "" % selector)
    driver.execute_script(instructions)
    tour_on = True
    if autoplay:
        # Timestamps are tracked in milliseconds.
        start_ms = time.time() * 1000.0
        stop_ms = start_ms + (interval * 1000.0)
        latest_step = 0
    # Poll until the tour is dismissed ($tour._currentStep goes away).
    while tour_on:
        try:
            time.sleep(0.01)
            if browser != "firefox":
                result = driver.execute_script(
                    "return $tour._currentStep")
            else:
                # Firefox: probe for the tooltip element instead of JS state.
                page_actions.wait_for_element_present(
                    driver, ".introjs-tooltip",
                    by=By.CSS_SELECTOR, timeout=0.4)
                result = True
        except Exception:
            tour_on = False
            result = None
        if result is not None:
            tour_on = True
            if autoplay:
                try:
                    current_step = driver.execute_script(
                        "return $tour._currentStep")
                except Exception:
                    continue
                if current_step != latest_step:
                    # Manual navigation detected: restart the autoplay timer.
                    latest_step = current_step
                    start_ms = time.time() * 1000.0
                    stop_ms = start_ms + (interval * 1000.0)
                now_ms = time.time() * 1000.0
                if now_ms >= stop_ms:
                    # Interval elapsed with no manual advance: step forward.
                    if current_step == latest_step:
                        driver.execute_script("return $tour.nextStep()")
                    try:
                        latest_step = driver.execute_script(
                            "return $tour._currentStep")
                        start_ms = time.time() * 1000.0
                        stop_ms = start_ms + (interval * 1000.0)
                    except Exception:
                        pass
                    continue
        else:
            # Tour looked gone; double-check once before giving up, since
            # the first probe can fail transiently between steps.
            try:
                time.sleep(0.01)
                if browser != "firefox":
                    result = driver.execute_script(
                        "return $tour._currentStep")
                else:
                    page_actions.wait_for_element_present(
                        driver, ".introjs-tooltip",
                        by=By.CSS_SELECTOR, timeout=0.4)
                    result = True
                if result is not None:
                    time.sleep(0.1)
                    continue
                else:
                    return
            except Exception:
                tour_on = False
                time.sleep(0.1) | Plays an IntroJS tour on the current website. | Below is the the instruction that describes the task:
### Input:
Plays an IntroJS tour on the current website.
### Response:
def play_introjs_tour(
driver, tour_steps, browser, msg_dur, name=None, interval=0):
""" Plays an IntroJS tour on the current website. """
instructions = ""
for tour_step in tour_steps[name]:
instructions += tour_step
instructions += (
"""]
});
intro.setOption("disableInteraction", true);
intro.setOption("overlayOpacity", .29);
intro.setOption("scrollToElement", true);
intro.setOption("keyboardNavigation", true);
intro.setOption("exitOnEsc", false);
intro.setOption("exitOnOverlayClick", false);
intro.setOption("showStepNumbers", false);
intro.setOption("showProgress", false);
intro.start();
$tour = intro;
};
// Start the tour
startIntro();
""")
autoplay = False
if interval and interval > 0:
autoplay = True
interval = float(interval)
if interval < 0.5:
interval = 0.5
if not is_introjs_activated(driver):
activate_introjs(driver)
if len(tour_steps[name]) > 1:
try:
if "element: " in tour_steps[name][1]:
selector = re.search(
r"[\S\s]+element: '([\S\s]+)',[\S\s]+intro: '",
tour_steps[name][1]).group(1)
selector = selector.replace('\\', '')
page_actions.wait_for_element_present(
driver, selector, by=By.CSS_SELECTOR,
timeout=settings.SMALL_TIMEOUT)
else:
selector = "html"
except Exception:
js_utils.post_messenger_error_message(
driver, "Tour Error: {'%s'} was not found!" % selector,
msg_dur)
raise Exception(
"Tour Error: {'%s'} was not found! "
"Exiting due to failure on first tour step!"
"" % selector)
driver.execute_script(instructions)
tour_on = True
if autoplay:
start_ms = time.time() * 1000.0
stop_ms = start_ms + (interval * 1000.0)
latest_step = 0
while tour_on:
try:
time.sleep(0.01)
if browser != "firefox":
result = driver.execute_script(
"return $tour._currentStep")
else:
page_actions.wait_for_element_present(
driver, ".introjs-tooltip",
by=By.CSS_SELECTOR, timeout=0.4)
result = True
except Exception:
tour_on = False
result = None
if result is not None:
tour_on = True
if autoplay:
try:
current_step = driver.execute_script(
"return $tour._currentStep")
except Exception:
continue
if current_step != latest_step:
latest_step = current_step
start_ms = time.time() * 1000.0
stop_ms = start_ms + (interval * 1000.0)
now_ms = time.time() * 1000.0
if now_ms >= stop_ms:
if current_step == latest_step:
driver.execute_script("return $tour.nextStep()")
try:
latest_step = driver.execute_script(
"return $tour._currentStep")
start_ms = time.time() * 1000.0
stop_ms = start_ms + (interval * 1000.0)
except Exception:
pass
continue
else:
try:
time.sleep(0.01)
if browser != "firefox":
result = driver.execute_script(
"return $tour._currentStep")
else:
page_actions.wait_for_element_present(
driver, ".introjs-tooltip",
by=By.CSS_SELECTOR, timeout=0.4)
result = True
if result is not None:
time.sleep(0.1)
continue
else:
return
except Exception:
tour_on = False
time.sleep(0.1) |
def branch_stats(self):
    """Summarise branch coverage per line.

    Returns a dict keyed by line number; each value is the tuple
    (total_exits, taken_exits) for that branching line.
    """
    exit_counts = self.parser.exit_counts()
    missing_arcs = self.missing_branch_arcs()
    # A line with no entry in missing_arcs had every exit taken.
    return {
        lnum: (exit_counts[lnum],
               exit_counts[lnum] - len(missing_arcs.get(lnum, ())))
        for lnum in self.branch_lines()
    }
Returns a dict mapping line numbers to a tuple:
(total_exits, taken_exits). | Below is the instruction that describes the task:
### Input:
Get stats about branches.
Returns a dict mapping line numbers to a tuple:
(total_exits, taken_exits).
### Response:
def branch_stats(self):
"""Get stats about branches.
Returns a dict mapping line numbers to a tuple:
(total_exits, taken_exits).
"""
exit_counts = self.parser.exit_counts()
missing_arcs = self.missing_branch_arcs()
stats = {}
for lnum in self.branch_lines():
exits = exit_counts[lnum]
try:
missing = len(missing_arcs[lnum])
except KeyError:
missing = 0
stats[lnum] = (exits, exits - missing)
return stats |
def Draw(self, grid, attr, dc, rect, row, col, is_selected):
    """Render the cell: draw its text plus a combobox drop-down icon."""
    renderer = wx.RendererNative.Get()
    # Fill the cell background first (blue when selected, white otherwise).
    dc.SetBackgroundMode(wx.SOLID)
    fill_colour = wx.BLUE if is_selected else wx.WHITE
    dc.SetBrush(wx.Brush(fill_colour, wx.SOLID))
    dc.SetPen(wx.Pen(fill_colour, 1, wx.SOLID))
    dc.DrawRectangleRect(rect)
    # Cell text, offset slightly from the top-left corner.
    label = grid.GetCellValue(row, col)
    dc.DrawText(label, rect.x + 2, rect.y + 2)
    # The drop-down button occupies the right-hand edge of the cell.
    button_rect = (rect.x + rect.width - self.iconwidth, rect.y,
                   self.iconwidth, rect.height)
    renderer.DrawComboBoxDropButton(grid, dc, button_rect,
                                    wx.CONTROL_CURRENT)
### Input:
Draws the text and the combobox icon
### Response:
def Draw(self, grid, attr, dc, rect, row, col, is_selected):
"""Draws the text and the combobox icon"""
render = wx.RendererNative.Get()
# clear the background
dc.SetBackgroundMode(wx.SOLID)
if is_selected:
dc.SetBrush(wx.Brush(wx.BLUE, wx.SOLID))
dc.SetPen(wx.Pen(wx.BLUE, 1, wx.SOLID))
else:
dc.SetBrush(wx.Brush(wx.WHITE, wx.SOLID))
dc.SetPen(wx.Pen(wx.WHITE, 1, wx.SOLID))
dc.DrawRectangleRect(rect)
cb_lbl = grid.GetCellValue(row, col)
string_x = rect.x + 2
string_y = rect.y + 2
dc.DrawText(cb_lbl, string_x, string_y)
button_x = rect.x + rect.width - self.iconwidth
button_y = rect.y
button_width = self.iconwidth
button_height = rect.height
button_size = button_x, button_y, button_width, button_height
render.DrawComboBoxDropButton(grid, dc, button_size,
wx.CONTROL_CURRENT) |
def peeklist(self, fmt, **kwargs):
    """Interpret next bits according to format string(s) and return list.

    fmt -- One or more strings with comma separated tokens describing
           how to interpret the next bits in the bitstring.
    kwargs -- A dictionary or keyword-value pairs - the keywords used in the
              format string will be replaced with their given value.

    The position in the bitstring is not changed. If not enough bits are
    available then all bits to the end of the bitstring will be used.

    Raises ReadError if not enough bits are available.
    Raises ValueError if the format is not understood.

    See the docstring for 'read' for token examples.
    """
    pos = self._pos
    try:
        return self.readlist(fmt, **kwargs)
    finally:
        # Restore the read position even when readlist raises (ReadError,
        # ValueError), so the documented "position is not changed"
        # guarantee holds on every path, not only on success.
        self._pos = pos
fmt -- One or more strings with comma separated tokens describing
how to interpret the next bits in the bitstring.
kwargs -- A dictionary or keyword-value pairs - the keywords used in the
format string will be replaced with their given value.
The position in the bitstring is not changed. If not enough bits are
available then all bits to the end of the bitstring will be used.
Raises ReadError if not enough bits are available.
Raises ValueError if the format is not understood.
See the docstring for 'read' for token examples. | Below is the instruction that describes the task:
### Input:
Interpret next bits according to format string(s) and return list.
fmt -- One or more strings with comma separated tokens describing
how to interpret the next bits in the bitstring.
kwargs -- A dictionary or keyword-value pairs - the keywords used in the
format string will be replaced with their given value.
The position in the bitstring is not changed. If not enough bits are
available then all bits to the end of the bitstring will be used.
Raises ReadError if not enough bits are available.
Raises ValueError if the format is not understood.
See the docstring for 'read' for token examples.
### Response:
def peeklist(self, fmt, **kwargs):
"""Interpret next bits according to format string(s) and return list.
fmt -- One or more strings with comma separated tokens describing
how to interpret the next bits in the bitstring.
kwargs -- A dictionary or keyword-value pairs - the keywords used in the
format string will be replaced with their given value.
The position in the bitstring is not changed. If not enough bits are
available then all bits to the end of the bitstring will be used.
Raises ReadError if not enough bits are available.
Raises ValueError if the format is not understood.
See the docstring for 'read' for token examples.
"""
pos = self._pos
return_values = self.readlist(fmt, **kwargs)
self._pos = pos
return return_values |
def write_metadata_to_filestream(filedir, filestream,
                                 max_bytes=MAX_FILE_DEFAULT):
    """
    Write a metadata CSV describing all files in a directory (helper).

    :param filedir: path of the directory to describe.
    :param filestream: writable stream that receives the CSV output.
    :param max_bytes: largest file size to consider; defaults to 128m.
    """
    writer = csv.writer(filestream)
    subdirs = [os.path.join(filedir, entry) for entry in os.listdir(filedir)
               if os.path.isdir(os.path.join(filedir, entry))]
    if subdirs:
        # Per-member layout: every subdir must be an 8-digit member ID.
        logging.info('Making metadata for subdirs of {}'.format(filedir))
        if not all(re.match('^[0-9]{8}$', os.path.basename(d))
                   for d in subdirs):
            raise ValueError("Subdirs not all project member ID format!")
        writer.writerow(['project_member_id', 'filename', 'tags',
                         'description', 'md5', 'creation_date'])
        for subdir in subdirs:
            file_info = characterize_local_files(
                filedir=subdir, max_bytes=max_bytes)
            member_id = os.path.basename(subdir)
            if not file_info:
                # Keep empty members visible with a placeholder row.
                writer.writerow([member_id, 'None',
                                 'NA', 'NA', 'NA', 'NA'])
                continue
            for fname, meta in file_info.items():
                writer.writerow([member_id,
                                 fname,
                                 ', '.join(meta['tags']),
                                 meta['description'],
                                 meta['md5'],
                                 meta['creation_date'],
                                 ])
    else:
        # Flat layout: one row per file, no member-ID column.
        writer.writerow(['filename', 'tags',
                         'description', 'md5', 'creation_date'])
        file_info = characterize_local_files(
            filedir=filedir, max_bytes=max_bytes)
        for fname, meta in file_info.items():
            writer.writerow([fname,
                             ', '.join(meta['tags']),
                             meta['description'],
                             meta['md5'],
                             meta['creation_date'],
                             ])
:param filedir: This field is the filepath of the directory whose csv
has to be made.
:param filestream: This field is a stream for writing to the csv.
:param max_bytes: This field is the maximum file size to consider. Its
default value is 128m. | Below is the instruction that describes the task:
### Input:
Make metadata file for all files in a directory(helper function)
:param filedir: This field is the filepath of the directory whose csv
has to be made.
:param filestream: This field is a stream for writing to the csv.
:param max_bytes: This field is the maximum file size to consider. Its
default value is 128m.
### Response:
def write_metadata_to_filestream(filedir, filestream,
max_bytes=MAX_FILE_DEFAULT):
"""
Make metadata file for all files in a directory(helper function)
:param filedir: This field is the filepath of the directory whose csv
has to be made.
:param filestream: This field is a stream for writing to the csv.
:param max_bytes: This field is the maximum file size to consider. Its
default value is 128m.
"""
csv_out = csv.writer(filestream)
subdirs = [os.path.join(filedir, i) for i in os.listdir(filedir) if
os.path.isdir(os.path.join(filedir, i))]
if subdirs:
logging.info('Making metadata for subdirs of {}'.format(filedir))
if not all([re.match('^[0-9]{8}$', os.path.basename(d))
for d in subdirs]):
raise ValueError("Subdirs not all project member ID format!")
csv_out.writerow(['project_member_id', 'filename', 'tags',
'description', 'md5', 'creation_date'])
for subdir in subdirs:
file_info = characterize_local_files(
filedir=subdir, max_bytes=max_bytes)
proj_member_id = os.path.basename(subdir)
if not file_info:
csv_out.writerow([proj_member_id, 'None',
'NA', 'NA', 'NA', 'NA'])
continue
for filename in file_info:
csv_out.writerow([proj_member_id,
filename,
', '.join(file_info[filename]['tags']),
file_info[filename]['description'],
file_info[filename]['md5'],
file_info[filename]['creation_date'],
])
else:
csv_out.writerow(['filename', 'tags',
'description', 'md5', 'creation_date'])
file_info = characterize_local_files(
filedir=filedir, max_bytes=max_bytes)
for filename in file_info:
csv_out.writerow([filename,
', '.join(file_info[filename]['tags']),
file_info[filename]['description'],
file_info[filename]['md5'],
file_info[filename]['creation_date'],
]) |
def total_timer(msg):
    """A context that adds the time spent inside to TotalTimer."""
    begin = timer()
    yield
    # Feed the elapsed wall time into the accumulator keyed by msg.
    _TOTAL_TIMER_DATA[msg].feed(timer() - begin)
### Input:
A context which add the time spent inside to TotalTimer.
### Response:
def total_timer(msg):
""" A context which add the time spent inside to TotalTimer. """
start = timer()
yield
t = timer() - start
_TOTAL_TIMER_DATA[msg].feed(t) |
def submit(self, job):
    """
    Submits a given job.

    :param job: The job to submit
    :type job: pyqueue.job.JobInterface
    """
    # Render the job into a batch script and pipe it to sbatch over SSH.
    script = self._printer.generate(job)
    stdin, stdout, _stderr = self._ssh.exec_command('sbatch')
    stdin.write(script)
    stdin.flush()
    # Signal EOF so sbatch stops reading and processes the script.
    stdin.channel.shutdown_write()
    return stdout.read()
:param job: The job to submit
:type job: pyqueue.job.JobInterface | Below is the instruction that describes the task:
### Input:
Submits a given job
:param job: The job to submit
:type job: pyqueue.job.JobInterface
### Response:
def submit(self, job):
"""
Submits a given job
:param job: The job to submit
:type job: pyqueue.job.JobInterface
"""
script = self._printer.generate(job)
stdin, stdout, stderr = self._ssh.exec_command('sbatch')
stdin.write(script)
stdin.flush()
stdin.channel.shutdown_write()
return stdout.read() |
def build(self, builder):
    """Build XML by appending to builder."""
    params = {
        'Value': self.value,
        'Status': self.status.value,
        'ProtocolDeviationRepeatKey': self.repeat_key,
    }
    # Optional attributes are emitted only when set (truthy).
    for key, value in (('Code', self.code),
                       ('Class', self.pdclass),
                       ('TransactionType', self.transaction_type)):
        if value:
            params[key] = value
    builder.start('mdsol:ProtocolDeviation', params)
    builder.end('mdsol:ProtocolDeviation')
builder.end('mdsol:ProtocolDeviation') | Build XML by appending to builder | Below is the the instruction that describes the task:
### Input:
Build XML by appending to builder
### Response:
def build(self, builder):
"""Build XML by appending to builder"""
params = dict(Value=self.value,
Status=self.status.value,
ProtocolDeviationRepeatKey=self.repeat_key
)
if self.code:
params['Code'] = self.code
if self.pdclass:
params['Class'] = self.pdclass
if self.transaction_type:
params['TransactionType'] = self.transaction_type
builder.start('mdsol:ProtocolDeviation', params)
builder.end('mdsol:ProtocolDeviation') |
def read_string(self, where, max_length=None, force=False):
    """
    Read a NUL-terminated concrete buffer from memory. Stops reading at
    the first symbolic byte.

    :param int where: Address to read string from
    :param int max_length: size in bytes to cap the string at, or None
        [default] for no limit.
    :param force: whether to ignore memory permissions
    :return: string read
    :rtype: str
    """
    buf = io.BytesIO()
    addr = where
    remaining = max_length
    while True:
        byte = self.read_int(addr, 8, force)
        # Stop at a NUL terminator or the first symbolic byte.
        if issymbolic(byte) or byte == 0:
            break
        if remaining is not None:
            if remaining == 0:
                break
            remaining -= 1
        buf.write(Operators.CHR(byte))
        addr += 1
    return buf.getvalue().decode()
:param int where: Address to read string from
:param int max_length:
The size in bytes to cap the string at, or None [default] for no
limit.
:param force: whether to ignore memory permissions
:return: string read
:rtype: str | Below is the instruction that describes the task:
### Input:
Read a NUL-terminated concrete buffer from memory. Stops reading at first symbolic byte.
:param int where: Address to read string from
:param int max_length:
The size in bytes to cap the string at, or None [default] for no
limit.
:param force: whether to ignore memory permissions
:return: string read
:rtype: str
### Response:
def read_string(self, where, max_length=None, force=False):
"""
Read a NUL-terminated concrete buffer from memory. Stops reading at first symbolic byte.
:param int where: Address to read string from
:param int max_length:
The size in bytes to cap the string at, or None [default] for no
limit.
:param force: whether to ignore memory permissions
:return: string read
:rtype: str
"""
s = io.BytesIO()
while True:
c = self.read_int(where, 8, force)
if issymbolic(c) or c == 0:
break
if max_length is not None:
if max_length == 0:
break
max_length = max_length - 1
s.write(Operators.CHR(c))
where += 1
return s.getvalue().decode() |
def set_layer_visible(layer, visible):
    """Show or hide a layer in the current QGIS project.

    :param layer: layer to change
    :type layer: QgsMapLayer
    :param visible: True to show layer, False to hide layer
    :type visible: bool
    """
    if layer is None:
        return
    # Locate the layer's node in the project's layer tree and toggle it.
    tree_root = QgsProject.instance().layerTreeRoot()
    node = tree_root.findLayer(layer.id())
    node.setItemVisibilityChecked(visible)
:param layer: layer to change
:type layer: QgsMapLayer
:param visible: True to show layer, False to hide layer
:type visible: bool | Below is the instruction that describes the task:
### Input:
Sets a layer in the project visible or not
:param layer: layer to change
:type layer: QgsMapLayer
:param visible: True to show layer, False to hide layer
:type visible: bool
### Response:
def set_layer_visible(layer, visible):
"""Sets a layer in the project visible or not
:param layer: layer to change
:type layer: QgsMapLayer
:param visible: True to show layer, False to hide layer
:type visible: bool
"""
if layer is None:
return
QgsProject.instance().layerTreeRoot().findLayer(
layer.id()).setItemVisibilityChecked(visible) |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.