code stringlengths 75 104k | docstring stringlengths 1 46.9k | text stringlengths 164 112k |
|---|---|---|
def _handleBackspace(self):
""" Handles backspace characters """
if self.cursorPos > 0:
#print( 'cp:',self.cursorPos,'was:', self.inputBuffer)
self.inputBuffer = self.inputBuffer[0:self.cursorPos-1] + self.inputBuffer[self.cursorPos:]
self.cursorPos -= 1
#print ('cp:', self.cursorPos,'is:', self.inputBuffer)
        self._refreshInputPrompt(len(self.inputBuffer)+1) | Handles backspace characters | Below is the instruction that describes the task:
### Input:
Handles backspace characters
### Response:
def _handleBackspace(self):
    """Delete the character immediately to the left of the cursor.

    Does nothing when the cursor is already at the start of the input
    buffer. After deleting, the prompt is redrawn with one extra column
    so the removed character is visually blanked out.
    """
    if self.cursorPos <= 0:
        return
    pos = self.cursorPos
    # Splice out the character just left of the cursor.
    self.inputBuffer = self.inputBuffer[:pos - 1] + self.inputBuffer[pos:]
    self.cursorPos = pos - 1
    self._refreshInputPrompt(len(self.inputBuffer) + 1)
def concatenate_matrices(*matrices):
"""Return concatenation of series of transformation matrices.
>>> M = numpy.random.rand(16).reshape((4, 4)) - 0.5
>>> numpy.allclose(M, concatenate_matrices(M))
True
>>> numpy.allclose(numpy.dot(M, M.T), concatenate_matrices(M, M.T))
True
"""
M = numpy.identity(4)
for i in matrices:
M = numpy.dot(M, i)
return M | Return concatenation of series of transformation matrices.
>>> M = numpy.random.rand(16).reshape((4, 4)) - 0.5
>>> numpy.allclose(M, concatenate_matrices(M))
True
>>> numpy.allclose(numpy.dot(M, M.T), concatenate_matrices(M, M.T))
True | Below is the instruction that describes the task:
### Input:
Return concatenation of series of transformation matrices.
>>> M = numpy.random.rand(16).reshape((4, 4)) - 0.5
>>> numpy.allclose(M, concatenate_matrices(M))
True
>>> numpy.allclose(numpy.dot(M, M.T), concatenate_matrices(M, M.T))
True
### Response:
def concatenate_matrices(*matrices):
    """Return the product of a series of transformation matrices.

    With no arguments the 4x4 identity is returned.

    >>> M = numpy.random.rand(16).reshape((4, 4)) - 0.5
    >>> numpy.allclose(M, concatenate_matrices(M))
    True
    >>> numpy.allclose(numpy.dot(M, M.T), concatenate_matrices(M, M.T))
    True
    """
    # Accumulate left-to-right, starting from the identity transform.
    combined = numpy.identity(4)
    for transform in matrices:
        combined = numpy.dot(combined, transform)
    return combined
def _getse(self):
"""Return data as signed exponential-Golomb code.
Raises InterpretError if bitstring is not a single exponential-Golomb code.
"""
try:
value, newpos = self._readse(0)
if value is None or newpos != self.len:
raise ReadError
except ReadError:
raise InterpretError("Bitstring is not a single exponential-Golomb code.")
return value | Return data as signed exponential-Golomb code.
Raises InterpretError if bitstring is not a single exponential-Golomb code. | Below is the the instruction that describes the task:
### Input:
Return data as signed exponential-Golomb code.
Raises InterpretError if bitstring is not a single exponential-Golomb code.
### Response:
def _getse(self):
    """Interpret the whole bitstring as one signed exponential-Golomb code.

    Raises InterpretError if the bitstring is not exactly a single
    exponential-Golomb code.
    """
    try:
        value, end = self._readse(0)
        # A valid lone code must both decode AND consume every bit.
        if value is None or end != self.len:
            raise ReadError
    except ReadError:
        raise InterpretError("Bitstring is not a single exponential-Golomb code.")
    return value
def build_hypo_depth_dist(hdd):
"""
Returns the hypocentral depth distribution as a Node instance
:param hdd:
Hypocentral depth distribution as an instance of :class:
`openquake.hzardlib.pmf.PMF`
:returns:
Instance of :class:`openquake.baselib.node.Node`
"""
hdds = []
for (prob, depth) in hdd.data:
hdds.append(
Node("hypoDepth", {"depth": depth, "probability": prob}))
return Node("hypoDepthDist", nodes=hdds) | Returns the hypocentral depth distribution as a Node instance
:param hdd:
Hypocentral depth distribution as an instance of :class:
`openquake.hzardlib.pmf.PMF`
:returns:
Instance of :class:`openquake.baselib.node.Node` | Below is the the instruction that describes the task:
### Input:
Returns the hypocentral depth distribution as a Node instance
:param hdd:
Hypocentral depth distribution as an instance of :class:
`openquake.hzardlib.pmf.PMF`
:returns:
Instance of :class:`openquake.baselib.node.Node`
### Response:
def build_hypo_depth_dist(hdd):
    """
    Convert a hypocentral depth PMF into a "hypoDepthDist" Node.

    :param hdd:
        Hypocentral depth distribution as an instance of :class:
        `openquake.hazardlib.pmf.PMF`
    :returns:
        Instance of :class:`openquake.baselib.node.Node`
    """
    children = [
        Node("hypoDepth", {"depth": depth, "probability": prob})
        for (prob, depth) in hdd.data
    ]
    return Node("hypoDepthDist", nodes=children)
def require_ajax_logged_in(func):
"""Check if ajax API is logged in and login if not
"""
@functools.wraps(func)
def inner_func(self, *pargs, **kwargs):
if not self._ajax_api.logged_in:
logger.info('Logging into AJAX API for required meta method')
if not self.has_credentials:
raise ApiLoginFailure(
'Login is required but no credentials were provided')
self._ajax_api.User_Login(name=self._state['username'],
password=self._state['password'])
return func(self, *pargs, **kwargs)
return inner_func | Check if ajax API is logged in and login if not | Below is the the instruction that describes the task:
### Input:
Check if ajax API is logged in and login if not
### Response:
def require_ajax_logged_in(func):
    """Decorator: ensure the AJAX API session is live before calling *func*.

    Logs in on demand using the stored credentials; raises
    ApiLoginFailure when a login is needed but no credentials exist.
    """
    @functools.wraps(func)
    def inner_func(self, *pargs, **kwargs):
        # Fast path: session already authenticated.
        if self._ajax_api.logged_in:
            return func(self, *pargs, **kwargs)
        logger.info('Logging into AJAX API for required meta method')
        if not self.has_credentials:
            raise ApiLoginFailure(
                'Login is required but no credentials were provided')
        self._ajax_api.User_Login(name=self._state['username'],
                                  password=self._state['password'])
        return func(self, *pargs, **kwargs)
    return inner_func
def _getActions(self):
"""Retrieve a list of actions supported by the object."""
actions = _a11y.AXUIElement._getActions(self)
# strip leading AX from actions - help distinguish them from attributes
return [action[2:] for action in actions] | Retrieve a list of actions supported by the object. | Below is the the instruction that describes the task:
### Input:
Retrieve a list of actions supported by the object.
### Response:
def _getActions(self):
    """Retrieve a list of actions supported by the object."""
    prefixed = _a11y.AXUIElement._getActions(self)
    stripped = []
    for name in prefixed:
        # Drop the leading "AX" so action names are easy to tell apart
        # from attribute names.
        stripped.append(name[2:])
    return stripped
def item_id(response):
"""
Parse the item ids, will be available as ``item_0_name``, ``item_1_name``,
``item_2_name`` and so on
"""
dict_keys = ['item_0', 'item_1', 'item_2',
'item_3', 'item_4', 'item_5']
new_keys = ['item_0_name', 'item_1_name', 'item_2_name',
'item_3_name', 'item_4_name', 'item_5_name']
for player in response['players']:
for key, new_key in zip(dict_keys, new_keys):
for item in items['items']:
if item['id'] == player[key]:
player[new_key] = item['localized_name']
return response | Parse the item ids, will be available as ``item_0_name``, ``item_1_name``,
``item_2_name`` and so on | Below is the the instruction that describes the task:
### Input:
Parse the item ids, will be available as ``item_0_name``, ``item_1_name``,
``item_2_name`` and so on
### Response:
def item_id(response):
    """
    Resolve item ids for every player; the localized names will be
    available as ``item_0_name``, ``item_1_name``, ``item_2_name``
    and so on.

    :param response: match-details dict containing a ``players`` list,
        each player carrying ``item_0`` .. ``item_5`` ids
    :returns: the same ``response`` dict, modified in place
    """
    # Build the id -> name lookup once, instead of scanning the whole
    # item table for every slot of every player (was O(players * items)).
    names_by_id = {item['id']: item['localized_name']
                   for item in items['items']}
    for player in response['players']:
        for slot in range(6):
            item_id_value = player['item_{}'.format(slot)]
            # Ids without a table entry are left without a *_name key,
            # matching the previous behaviour.
            if item_id_value in names_by_id:
                player['item_{}_name'.format(slot)] = names_by_id[item_id_value]
    return response
def Call(method,url,payload,silent=False,hide_errors=[],session=None,recursion_cnt=0,debug=False):
"""Execute v1 API call.
:param url: URL paths associated with the API call
:param payload: dict containing all parameters to submit with POST call
:param hide_errors: list of API error codes to ignore. These are not http error codes but returned from the API itself
:param recursion_cnt: recursion counter. This call is recursed if we experience a transient error
:returns: decoded API json result
"""
if not clc._LOGIN_COOKIE_V1: API._Login()
if session is None:
session = clc._REQUESTS_SESSION
session.headers.update({'content-type': 'application/json'})
r = session.request(method,"%s%s/JSON" % (clc.defaults.ENDPOINT_URL_V1,url),
params=payload,
cookies=clc._LOGIN_COOKIE_V1,
verify=API._ResourcePath('clc/cacert.pem'))
if debug:
API._DebugRequest(request=requests.Request(method,"%s%s/JSON" % (clc.defaults.ENDPOINT_URL_V1,url),
data=payload,headers=session.headers).prepare(),response=r)
try:
if int(r.json()['StatusCode']) == 0:
if clc.args and not silent: clc.v1.output.Status('SUCCESS',2,'%s' % (r.json()['Message']))
return(r.json())
elif int(r.json()['StatusCode']) in hide_errors:
return(r.json())
elif int(r.json()['StatusCode']) == 2:
# Account is deleted
#raise clc.v1.Account.eletedException(r.json()['Message'])
if clc.args and not silent: clc.v1.output.Status('ERROR',3,'%s' % (r.json()['Message']))
raise Exception(r.json()['Message'])
elif int(r.json()['StatusCode']) == 5:
# Account or datacenter does not exist
raise clc.v1.AccountDoesNotExistException(r.json()['Message'])
elif int(r.json()['StatusCode']) == 100 and recursion_cnt<2:
# Not logged in - this is a transient failure
clc._LOGIN_COOKIE_V1 = False
return(clc.v1.API.Call(method,url,payload,silent,hide_errors,recursion_cnt+1))
elif int(r.json()['StatusCode']) == 100:
# Not logged in - this keeps recurring - bail
raise clc.v1.AccountLoginException(r.json()['Message'])
else:
if clc.args and (not hide_errors or not silent): clc.v1.output.Status('ERROR',3,'Error calling %s. Status code %s. %s' % (url,r.json()['StatusCode'],r.json()['Message']))
raise Exception('Error calling %s. Status code %s. %s' % (url,r.json()['StatusCode'],r.json()['Message']))
#except clc.v1.Account.eletedException, clc.v1.Account.oginException:
except clc.CLCException:
raise
except:
if clc.args and (not hide_errors or not silent): clc.v1.output.Status('ERROR',3,'Error calling %s. Server response %s' % (url,r.status_code))
#print "Request: %s %s params=%s" % (method,"%s%s/JSON" % (clc.defaults.ENDPOINT_URL_V1,url),payload)
#print "Response: %s" % (r.text)
#print r.url
#print url
#print payload
#print r.text
raise Exception('Error calling %s. Server response %s' % (url,r.status_code)) | Execute v1 API call.
:param url: URL paths associated with the API call
:param payload: dict containing all parameters to submit with POST call
:param hide_errors: list of API error codes to ignore. These are not http error codes but returned from the API itself
:param recursion_cnt: recursion counter. This call is recursed if we experience a transient error
:returns: decoded API json result | Below is the the instruction that describes the task:
### Input:
Execute v1 API call.
:param url: URL paths associated with the API call
:param payload: dict containing all parameters to submit with POST call
:param hide_errors: list of API error codes to ignore. These are not http error codes but returned from the API itself
:param recursion_cnt: recursion counter. This call is recursed if we experience a transient error
:returns: decoded API json result
### Response:
def Call(method,url,payload,silent=False,hide_errors=[],session=None,recursion_cnt=0,debug=False):
    """Execute v1 API call.

    :param method: HTTP method to use (e.g. 'GET', 'POST')
    :param url: URL paths associated with the API call
    :param payload: dict containing all parameters to submit with POST call
    :param silent: when True, suppress status output
    :param hide_errors: list of API error codes to ignore. These are not http error codes but returned from the API itself
    :param session: requests session to use; defaults to the shared module session
    :param recursion_cnt: recursion counter. This call is recursed if we experience a transient error
    :param debug: when True, dump the prepared request and the response
    :returns: decoded API json result
    """
    # NOTE(review): 'hide_errors=[]' is a mutable default argument; safe only
    # because it is never mutated here.
    # Authenticate lazily on first use.
    if not clc._LOGIN_COOKIE_V1: API._Login()
    if session is None:
        session = clc._REQUESTS_SESSION
    session.headers.update({'content-type': 'application/json'})
    r = session.request(method,"%s%s/JSON" % (clc.defaults.ENDPOINT_URL_V1,url),
                        params=payload,
                        cookies=clc._LOGIN_COOKIE_V1,
                        verify=API._ResourcePath('clc/cacert.pem'))
    if debug:
        API._DebugRequest(request=requests.Request(method,"%s%s/JSON" % (clc.defaults.ENDPOINT_URL_V1,url),
                                                   data=payload,headers=session.headers).prepare(),response=r)
    try:
        if int(r.json()['StatusCode']) == 0:
            if clc.args and not silent: clc.v1.output.Status('SUCCESS',2,'%s' % (r.json()['Message']))
            return(r.json())
        elif int(r.json()['StatusCode']) in hide_errors:
            # Caller explicitly asked for this API error code to be ignored.
            return(r.json())
        elif int(r.json()['StatusCode']) == 2:
            # Account is deleted
            if clc.args and not silent: clc.v1.output.Status('ERROR',3,'%s' % (r.json()['Message']))
            raise Exception(r.json()['Message'])
        elif int(r.json()['StatusCode']) == 5:
            # Account or datacenter does not exist
            raise clc.v1.AccountDoesNotExistException(r.json()['Message'])
        elif int(r.json()['StatusCode']) == 100 and recursion_cnt<2:
            # Not logged in - this is a transient failure; force a fresh
            # login and retry once.
            clc._LOGIN_COOKIE_V1 = False
            # BUGFIX: previously 'recursion_cnt+1' was passed positionally
            # into the 'session' parameter slot, so the retry always blew
            # up inside the recursive call. Pass everything by keyword.
            return(clc.v1.API.Call(method,url,payload,silent=silent,
                                   hide_errors=hide_errors,session=session,
                                   recursion_cnt=recursion_cnt+1,debug=debug))
        elif int(r.json()['StatusCode']) == 100:
            # Not logged in - this keeps recurring - bail
            raise clc.v1.AccountLoginException(r.json()['Message'])
        else:
            if clc.args and (not hide_errors or not silent): clc.v1.output.Status('ERROR',3,'Error calling %s. Status code %s. %s' % (url,r.json()['StatusCode'],r.json()['Message']))
            raise Exception('Error calling %s. Status code %s. %s' % (url,r.json()['StatusCode'],r.json()['Message']))
    except clc.CLCException:
        # Known API exceptions propagate unchanged.
        raise
    except:
        # Anything else (including non-JSON responses) is reported as a
        # generic server error. NOTE(review): bare 'except' also swallows
        # KeyboardInterrupt; kept for backward compatibility.
        if clc.args and (not hide_errors or not silent): clc.v1.output.Status('ERROR',3,'Error calling %s. Server response %s' % (url,r.status_code))
        raise Exception('Error calling %s. Server response %s' % (url,r.status_code))
def delete_for_obj(self, entity_model_obj):
"""
Delete the entities associated with a model object.
"""
return self.filter(
entity_type=ContentType.objects.get_for_model(
entity_model_obj, for_concrete_model=False), entity_id=entity_model_obj.id).delete(
force=True) | Delete the entities associated with a model object. | Below is the the instruction that describes the task:
### Input:
Delete the entities associated with a model object.
### Response:
def delete_for_obj(self, entity_model_obj):
    """
    Delete the entities associated with a model object.
    """
    content_type = ContentType.objects.get_for_model(
        entity_model_obj, for_concrete_model=False)
    matching = self.filter(
        entity_type=content_type, entity_id=entity_model_obj.id)
    return matching.delete(force=True)
def concat(ctx, *strings):
'''
Yields one string, concatenation of argument strings
'''
strings = flatten([ (s.compute(ctx) if callable(s) else s) for s in strings ])
strings = (next(string_arg(ctx, s), '') for s in strings)
#assert(all(map(lambda x: isinstance(x, str), strings)))
#FIXME: Check arg types
yield ''.join(strings) | Yields one string, concatenation of argument strings | Below is the the instruction that describes the task:
### Input:
Yields one string, concatenation of argument strings
### Response:
def concat(ctx, *strings):
    '''
    Yields one string, the concatenation of the argument strings
    '''
    # Evaluate any callable arguments against the context first.
    computed = flatten([s.compute(ctx) if callable(s) else s for s in computed_args(strings)]) if False else flatten(
        [(s.compute(ctx) if callable(s) else s) for s in strings])
    # Coerce each result to a string, defaulting to '' when empty.
    pieces = (next(string_arg(ctx, s), '') for s in computed)
    #FIXME: Check arg types
    yield ''.join(pieces)
def unique_element(ll):
""" returns unique elements from a list preserving the original order """
seen = {}
result = []
for item in ll:
if item in seen:
continue
seen[item] = 1
result.append(item)
return result | returns unique elements from a list preserving the original order | Below is the the instruction that describes the task:
### Input:
returns unique elements from a list preserving the original order
### Response:
def unique_element(ll):
    """Return the unique elements of *ll*, preserving first-seen order.

    :param ll: an iterable of hashable items
    :returns: a new list with duplicates removed
    """
    # Use a real set for membership tracking instead of a dict with
    # dummy values.
    seen = set()
    result = []
    for item in ll:
        if item not in seen:
            seen.add(item)
            result.append(item)
    return result
def _find_addresses(self, seed, index, count, security_level, checksum):
# type: (Seed, int, Optional[int], int, bool) -> List[Address]
"""
Find addresses matching the command parameters.
"""
generator = AddressGenerator(seed, security_level, checksum)
if count is None:
# Connect to Tangle and find the first address without any
# transactions.
for addy in generator.create_iterator(start=index):
# We use addy.address here because FindTransactions does
# not work on an address with a checksum
response = FindTransactionsCommand(self.adapter)(
addresses=[addy.address],
)
if not response.get('hashes'):
return [addy]
return generator.get_addresses(start=index, count=count) | Find addresses matching the command parameters. | Below is the the instruction that describes the task:
### Input:
Find addresses matching the command parameters.
### Response:
def _find_addresses(self, seed, index, count, security_level, checksum):
    # type: (Seed, int, Optional[int], int, bool) -> List[Address]
    """
    Find addresses matching the command parameters.
    """
    generator = AddressGenerator(seed, security_level, checksum)
    if count is None:
        # No explicit count: query the Tangle for the first address
        # that has no transactions attached to it.
        for candidate in generator.create_iterator(start=index):
            # FindTransactions does not accept an address carrying a
            # checksum, so query with the bare address.
            found = FindTransactionsCommand(self.adapter)(
                addresses=[candidate.address],
            )
            if not found.get('hashes'):
                return [candidate]
    return generator.get_addresses(start=index, count=count)
def finished_or_stopped(self):
""" Condition check on finished or stopped status
The method returns a value which is equivalent with not 'active' status of the current state machine.
:return: outcome of condition check stopped or finished
:rtype: bool
"""
return (self._status.execution_mode is StateMachineExecutionStatus.STOPPED) or \
(self._status.execution_mode is StateMachineExecutionStatus.FINISHED) | Condition check on finished or stopped status
The method returns a value which is equivalent with not 'active' status of the current state machine.
:return: outcome of condition check stopped or finished
:rtype: bool | Below is the the instruction that describes the task:
### Input:
Condition check on finished or stopped status
The method returns a value which is equivalent with not 'active' status of the current state machine.
:return: outcome of condition check stopped or finished
:rtype: bool
### Response:
def finished_or_stopped(self):
    """Check whether the state machine has stopped or finished.

    The result is equivalent to "not active" for the current state
    machine.

    :return: outcome of condition check stopped or finished
    :rtype: bool
    """
    mode = self._status.execution_mode
    return (mode is StateMachineExecutionStatus.STOPPED or
            mode is StateMachineExecutionStatus.FINISHED)
def create_task(function, *args, **kwargs):
"""
Create a task object
name: The name of the task.
function: The actual task function. It should take no arguments,
and return a False-y value if it fails.
dependencies: (optional, ()) Any dependencies that this task relies
on.
"""
name = "{}".format(uuid4())
handler = None
deps = set()
if 'name' in kwargs:
name = kwargs['name']
del kwargs['name']
elif function is not None:
name = "{}-{}".format(function.__name__, name)
if 'handler' in kwargs:
handler = kwargs['handler']
del kwargs['handler']
if 'dependencies' in kwargs:
for dep in kwargs['dependencies']:
deps.add(dep)
del kwargs['dependencies']
for arg in args:
if isinstance(arg, Task):
deps.add(arg.name)
for key in kwargs:
if isinstance(kwargs[key], Task):
deps.add(kwargs[key].name)
return Task(name, function, handler, frozenset(deps), args, kwargs) | Create a task object
name: The name of the task.
function: The actual task function. It should take no arguments,
and return a False-y value if it fails.
dependencies: (optional, ()) Any dependencies that this task relies
on. | Below is the the instruction that describes the task:
### Input:
Create a task object
name: The name of the task.
function: The actual task function. It should take no arguments,
and return a False-y value if it fails.
dependencies: (optional, ()) Any dependencies that this task relies
on.
### Response:
def create_task(function, *args, **kwargs):
    """
    Create a task object.

    function: The actual task function. It should take no arguments,
        and return a False-y value if it fails.
    name: (keyword, optional) The name of the task. Defaults to a UUID,
        prefixed with the function name when one is available.
    handler: (keyword, optional) Error handler for the task.
    dependencies: (keyword, optional, ()) Any dependencies that this
        task relies on. Task instances found among *args*/*kwargs* are
        also recorded as dependencies automatically.
    """
    # A fresh UUID guarantees a unique default name.
    name = "{}".format(uuid4())
    if 'name' in kwargs:
        name = kwargs.pop('name')
    elif function is not None:
        name = "{}-{}".format(function.__name__, name)
    # pop() replaces the old "check membership, index, then del" dance.
    handler = kwargs.pop('handler', None)
    deps = set(kwargs.pop('dependencies', ()))
    # Any Task passed as a positional or keyword argument is an
    # implicit dependency.
    for arg in args:
        if isinstance(arg, Task):
            deps.add(arg.name)
    for value in kwargs.values():
        if isinstance(value, Task):
            deps.add(value.name)
    return Task(name, function, handler, frozenset(deps), args, kwargs)
def build_parser():
"""
Returns an argparse.ArgumentParser instance to parse the command line
arguments for lk
"""
import argparse
description = "A programmer's search tool, parallel and fast"
parser = argparse.ArgumentParser(description=description)
parser.add_argument('pattern', metavar='PATTERN', action='store',
help='a python re regular expression')
parser.add_argument('--ignore-case', '-i', dest='ignorecase', action='store_true',
default=False, help='ignore case when searching')
parser.add_argument('--no-unicode', '-u', dest='unicode', action='store_false',
default=True, help='unicode-unfriendly searching')
parser.add_argument('--no-multiline', '-l', dest='multiline',
action='store_false', default=True,
help='don\'t search over multiple lines')
parser.add_argument('--dot-all', '-a', dest='dot_all',
action='store_true', default=False,
help='dot in PATTERN matches newline')
parser.add_argument('--escape', '-e', dest='escape',
action='store_true', default=False,
help='treat PATTERN as a string instead of a regex')
if sys.version_info >= (2, 6):
parser.add_argument('--follow-links', '-s', dest='follow_links',
action='store_true', default=False,
help='follow symlinks (Python >= 2.6 only)')
parser.add_argument('--hidden', '-n', dest='search_hidden',
action='store_true', default=False,
help='search hidden files and directories')
parser.add_argument('--binary', '-b', dest='search_binary',
action='store_true', default=False,
help='search binary files')
parser.add_argument('--no-colors', '-c', dest='use_ansi_colors',
action='store_false', default=True,
help="don't print ANSI colors")
parser.add_argument('--stats', '-t', dest='print_stats',
action='store_true', default=False,
help='print statistics')
parser.add_argument('--num-processes', '-p', dest='number_processes',
action='store', default=10, type=int,
help='number of child processes to concurrently search with')
parser.add_argument('--exclude', '-x', metavar='PATH_PATTERN', dest='exclude_path_patterns',
action='append', default=[], type=str,
help='exclude paths matching PATH_PATTERN')
parser.add_argument('--open-with', '-o', metavar='COMMAND',
dest='command_strings', action='append', default=[],
type=str,
help='run each COMMAND where COMMAND is a string with a placeholder, %%s, for the absolute path of the matched file')
parser.add_argument('directory', metavar='DIRECTORY', nargs='?',
default=getcwd(), help='a directory to search in (default cwd)')
return parser | Returns an argparse.ArgumentParser instance to parse the command line
arguments for lk | Below is the the instruction that describes the task:
### Input:
Returns an argparse.ArgumentParser instance to parse the command line
arguments for lk
### Response:
def build_parser():
    """
    Returns an argparse.ArgumentParser instance to parse the command line
    arguments for lk
    """
    # Imported locally so importing this module stays cheap when the
    # parser is never built.
    import argparse
    description = "A programmer's search tool, parallel and fast"
    parser = argparse.ArgumentParser(description=description)
    # PATTERN is a standard Python 're' regex unless --escape is given,
    # in which case it is matched literally.
    parser.add_argument('pattern', metavar='PATTERN', action='store',
            help='a python re regular expression')
    parser.add_argument('--ignore-case', '-i', dest='ignorecase', action='store_true',
            default=False, help='ignore case when searching')
    parser.add_argument('--no-unicode', '-u', dest='unicode', action='store_false',
            default=True, help='unicode-unfriendly searching')
    parser.add_argument('--no-multiline', '-l', dest='multiline',
            action='store_false', default=True,
            help='don\'t search over multiple lines')
    parser.add_argument('--dot-all', '-a', dest='dot_all',
            action='store_true', default=False,
            help='dot in PATTERN matches newline')
    parser.add_argument('--escape', '-e', dest='escape',
            action='store_true', default=False,
            help='treat PATTERN as a string instead of a regex')
    # Symlink following is gated on the interpreter version (the
    # underlying support first appeared in Python 2.6).
    if sys.version_info >= (2, 6):
        parser.add_argument('--follow-links', '-s', dest='follow_links',
                action='store_true', default=False,
                help='follow symlinks (Python >= 2.6 only)')
    parser.add_argument('--hidden', '-n', dest='search_hidden',
            action='store_true', default=False,
            help='search hidden files and directories')
    parser.add_argument('--binary', '-b', dest='search_binary',
            action='store_true', default=False,
            help='search binary files')
    parser.add_argument('--no-colors', '-c', dest='use_ansi_colors',
            action='store_false', default=True,
            help="don't print ANSI colors")
    parser.add_argument('--stats', '-t', dest='print_stats',
            action='store_true', default=False,
            help='print statistics')
    parser.add_argument('--num-processes', '-p', dest='number_processes',
            action='store', default=10, type=int,
            help='number of child processes to concurrently search with')
    parser.add_argument('--exclude', '-x', metavar='PATH_PATTERN', dest='exclude_path_patterns',
            action='append', default=[], type=str,
            help='exclude paths matching PATH_PATTERN')
    # '%%s' is doubled because argparse applies %-formatting to help text.
    parser.add_argument('--open-with', '-o', metavar='COMMAND',
            dest='command_strings', action='append', default=[],
            type=str,
            help='run each COMMAND where COMMAND is a string with a placeholder, %%s, for the absolute path of the matched file')
    # The default search root is evaluated once, when the parser is built.
    parser.add_argument('directory', metavar='DIRECTORY', nargs='?',
            default=getcwd(), help='a directory to search in (default cwd)')
    return parser
def process_pattern(fn):
"""Return a list of paths matching a pattern (or None on error).
"""
directory, pattern = validate_pattern(fn)
if directory is not None:
filenames = fnmatch.filter(auto(listdir, directory), pattern)
if filenames:
return [directory + '/' + sfn for sfn in filenames]
else:
print_err("cannot access '{}': No such file or directory".format(fn)) | Return a list of paths matching a pattern (or None on error). | Below is the the instruction that describes the task:
### Input:
Return a list of paths matching a pattern (or None on error).
### Response:
def process_pattern(fn):
    """Return a list of paths matching a pattern (or None on error)."""
    directory, pattern = validate_pattern(fn)
    if directory is None:
        print_err("cannot access '{}': No such file or directory".format(fn))
        return
    matches = fnmatch.filter(auto(listdir, directory), pattern)
    if matches:
        return [directory + '/' + name for name in matches]
def get(self, url=None, delimiter="/"):
"""Path is an s3 url. Ommiting the path or providing "s3://" as the
path will return a list of all buckets. Otherwise, all subdirectories
and their contents will be shown.
"""
params = {'Delimiter': delimiter}
bucket, obj_key = _parse_url(url)
if bucket:
params['Bucket'] = bucket
else:
return self.call("ListBuckets", response_data_key="Buckets")
if obj_key:
params['Prefix'] = obj_key
objects = self.call("ListObjects", response_data_key="Contents",
**params)
if objects:
for obj in objects:
obj['url'] = "s3://{0}/{1}".format(bucket, obj['Key'])
return objects | Path is an s3 url. Ommiting the path or providing "s3://" as the
path will return a list of all buckets. Otherwise, all subdirectories
and their contents will be shown. | Below is the the instruction that describes the task:
### Input:
Path is an s3 url. Ommiting the path or providing "s3://" as the
path will return a list of all buckets. Otherwise, all subdirectories
and their contents will be shown.
### Response:
def get(self, url=None, delimiter="/"):
    """Path is an s3 url. Omitting the path or providing "s3://" as the
    path will return a list of all buckets. Otherwise, all subdirectories
    and their contents will be shown.
    """
    bucket, obj_key = _parse_url(url)
    if not bucket:
        # No bucket in the url: list every bucket in the account.
        return self.call("ListBuckets", response_data_key="Buckets")
    params = {'Delimiter': delimiter, 'Bucket': bucket}
    if obj_key:
        params['Prefix'] = obj_key
    objects = self.call("ListObjects", response_data_key="Contents",
                        **params)
    if objects:
        # Attach a fully-qualified s3 url to each returned object.
        for obj in objects:
            obj['url'] = "s3://{0}/{1}".format(bucket, obj['Key'])
    return objects
def cmd_output(self, args):
'''handle output commands'''
if len(args) < 1 or args[0] == "list":
self.cmd_output_list()
elif args[0] == "add":
if len(args) != 2:
print("Usage: output add OUTPUT")
return
self.cmd_output_add(args[1:])
elif args[0] == "remove":
if len(args) != 2:
print("Usage: output remove OUTPUT")
return
self.cmd_output_remove(args[1:])
elif args[0] == "sysid":
if len(args) != 3:
print("Usage: output sysid SYSID OUTPUT")
return
self.cmd_output_sysid(args[1:])
else:
print("usage: output <list|add|remove|sysid>") | handle output commands | Below is the the instruction that describes the task:
### Input:
handle output commands
### Response:
def cmd_output(self, args):
    '''handle output commands'''
    # With no arguments (or "list"), just show the current outputs.
    if not args or args[0] == "list":
        self.cmd_output_list()
        return
    subcommand = args[0]
    if subcommand == "add":
        if len(args) != 2:
            print("Usage: output add OUTPUT")
            return
        self.cmd_output_add(args[1:])
    elif subcommand == "remove":
        if len(args) != 2:
            print("Usage: output remove OUTPUT")
            return
        self.cmd_output_remove(args[1:])
    elif subcommand == "sysid":
        if len(args) != 3:
            print("Usage: output sysid SYSID OUTPUT")
            return
        self.cmd_output_sysid(args[1:])
    else:
        print("usage: output <list|add|remove|sysid>")
print("usage: output <list|add|remove|sysid>") |
def edit_line(self, line):
"""Edit a single line using the code expression."""
for code, code_obj in self.code_objs.items():
line = self.__edit_line(line, code, code_obj)
return line | Edit a single line using the code expression. | Below is the the instruction that describes the task:
### Input:
Edit a single line using the code expression.
### Response:
def edit_line(self, line):
    """Apply every registered code expression to *line* and return it."""
    for expression, compiled in self.code_objs.items():
        line = self.__edit_line(line, expression, compiled)
    return line
def transaction_manager(fn):
"""
Decorator which wraps whole function into ``with transaction.manager:``.
"""
@wraps(fn)
def transaction_manager_decorator(*args, **kwargs):
with transaction.manager:
return fn(*args, **kwargs)
return transaction_manager_decorator | Decorator which wraps whole function into ``with transaction.manager:``. | Below is the the instruction that describes the task:
### Input:
Decorator which wraps whole function into ``with transaction.manager:``.
### Response:
def transaction_manager(fn):
    """
    Decorator which wraps whole function into ``with transaction.manager:``.
    """
    @wraps(fn)
    def _managed(*args, **kwargs):
        # Every call runs inside its own transaction scope.
        with transaction.manager:
            return fn(*args, **kwargs)
    return _managed
def url_replace(context, field, value):
"""
To avoid GET params losing
:param context: context_obj
:param field: str
:param value: str
:return: dict-like object
"""
query_string = context['request'].GET.copy()
query_string[field] = value
return query_string.urlencode() | To avoid GET params losing
:param context: context_obj
:param field: str
:param value: str
:return: dict-like object | Below is the the instruction that describes the task:
### Input:
To avoid GET params losing
:param context: context_obj
:param field: str
:param value: str
:return: dict-like object
### Response:
def url_replace(context, field, value):
"""
To avoid GET params losing
:param context: context_obj
:param field: str
:param value: str
:return: dict-like object
"""
query_string = context['request'].GET.copy()
query_string[field] = value
return query_string.urlencode() |
def _create_capture_exceptions(capture_decider: Callable[[BaseException], bool]) -> Callable:
"""
Creates exception capturer using given capture decider.
:param capture_decider: given the exception as the first arguments, decides whether it should be captured or
not (and hence raised)
:return:
"""
def wrapped(*args, **kwargs):
result = CaptureWrapBuilder._capture_exceptions(*args, **kwargs)
if result.exception is not None:
if not capture_decider(result.exception):
raise result.exception
return result
return wrapped | Creates exception capturer using given capture decider.
:param capture_decider: given the exception as the first arguments, decides whether it should be captured or
not (and hence raised)
:return: | Below is the the instruction that describes the task:
### Input:
Creates exception capturer using given capture decider.
:param capture_decider: given the exception as the first arguments, decides whether it should be captured or
not (and hence raised)
:return:
### Response:
def _create_capture_exceptions(capture_decider: Callable[[BaseException], bool]) -> Callable:
"""
Creates exception capturer using given capture decider.
:param capture_decider: given the exception as the first arguments, decides whether it should be captured or
not (and hence raised)
:return:
"""
def wrapped(*args, **kwargs):
result = CaptureWrapBuilder._capture_exceptions(*args, **kwargs)
if result.exception is not None:
if not capture_decider(result.exception):
raise result.exception
return result
return wrapped |
def _vector_size(v):
"""
Returns the size of the vector.
>>> _vector_size([1., 2., 3.])
3
>>> _vector_size((1., 2., 3.))
3
>>> _vector_size(array.array('d', [1., 2., 3.]))
3
>>> _vector_size(np.zeros(3))
3
>>> _vector_size(np.zeros((3, 1)))
3
>>> _vector_size(np.zeros((1, 3)))
Traceback (most recent call last):
...
ValueError: Cannot treat an ndarray of shape (1, 3) as a vector
"""
if isinstance(v, Vector):
return len(v)
elif type(v) in (array.array, list, tuple, xrange):
return len(v)
elif type(v) == np.ndarray:
if v.ndim == 1 or (v.ndim == 2 and v.shape[1] == 1):
return len(v)
else:
raise ValueError("Cannot treat an ndarray of shape %s as a vector" % str(v.shape))
elif _have_scipy and scipy.sparse.issparse(v):
assert v.shape[1] == 1, "Expected column vector"
return v.shape[0]
else:
raise TypeError("Cannot treat type %s as a vector" % type(v)) | Returns the size of the vector.
>>> _vector_size([1., 2., 3.])
3
>>> _vector_size((1., 2., 3.))
3
>>> _vector_size(array.array('d', [1., 2., 3.]))
3
>>> _vector_size(np.zeros(3))
3
>>> _vector_size(np.zeros((3, 1)))
3
>>> _vector_size(np.zeros((1, 3)))
Traceback (most recent call last):
...
ValueError: Cannot treat an ndarray of shape (1, 3) as a vector | Below is the the instruction that describes the task:
### Input:
Returns the size of the vector.
>>> _vector_size([1., 2., 3.])
3
>>> _vector_size((1., 2., 3.))
3
>>> _vector_size(array.array('d', [1., 2., 3.]))
3
>>> _vector_size(np.zeros(3))
3
>>> _vector_size(np.zeros((3, 1)))
3
>>> _vector_size(np.zeros((1, 3)))
Traceback (most recent call last):
...
ValueError: Cannot treat an ndarray of shape (1, 3) as a vector
### Response:
def _vector_size(v):
"""
Returns the size of the vector.
>>> _vector_size([1., 2., 3.])
3
>>> _vector_size((1., 2., 3.))
3
>>> _vector_size(array.array('d', [1., 2., 3.]))
3
>>> _vector_size(np.zeros(3))
3
>>> _vector_size(np.zeros((3, 1)))
3
>>> _vector_size(np.zeros((1, 3)))
Traceback (most recent call last):
...
ValueError: Cannot treat an ndarray of shape (1, 3) as a vector
"""
if isinstance(v, Vector):
return len(v)
elif type(v) in (array.array, list, tuple, xrange):
return len(v)
elif type(v) == np.ndarray:
if v.ndim == 1 or (v.ndim == 2 and v.shape[1] == 1):
return len(v)
else:
raise ValueError("Cannot treat an ndarray of shape %s as a vector" % str(v.shape))
elif _have_scipy and scipy.sparse.issparse(v):
assert v.shape[1] == 1, "Expected column vector"
return v.shape[0]
else:
raise TypeError("Cannot treat type %s as a vector" % type(v)) |
def remove(self, path, recursive=False, use_sudo=False):
"""
Remove a file or directory
"""
func = use_sudo and run_as_root or self.run
options = '-r ' if recursive else ''
func('/bin/rm {0}{1}'.format(options, quote(path))) | Remove a file or directory | Below is the the instruction that describes the task:
### Input:
Remove a file or directory
### Response:
def remove(self, path, recursive=False, use_sudo=False):
"""
Remove a file or directory
"""
func = use_sudo and run_as_root or self.run
options = '-r ' if recursive else ''
func('/bin/rm {0}{1}'.format(options, quote(path))) |
def cache():
"""Returns a 304 if an If-Modified-Since header or If-None-Match is present. Returns the same as a GET otherwise.
---
tags:
- Response inspection
parameters:
- in: header
name: If-Modified-Since
- in: header
name: If-None-Match
produces:
- application/json
responses:
200:
description: Cached response
304:
description: Modified
"""
is_conditional = request.headers.get("If-Modified-Since") or request.headers.get(
"If-None-Match"
)
if is_conditional is None:
response = view_get()
response.headers["Last-Modified"] = http_date()
response.headers["ETag"] = uuid.uuid4().hex
return response
else:
return status_code(304) | Returns a 304 if an If-Modified-Since header or If-None-Match is present. Returns the same as a GET otherwise.
---
tags:
- Response inspection
parameters:
- in: header
name: If-Modified-Since
- in: header
name: If-None-Match
produces:
- application/json
responses:
200:
description: Cached response
304:
description: Modified | Below is the the instruction that describes the task:
### Input:
Returns a 304 if an If-Modified-Since header or If-None-Match is present. Returns the same as a GET otherwise.
---
tags:
- Response inspection
parameters:
- in: header
name: If-Modified-Since
- in: header
name: If-None-Match
produces:
- application/json
responses:
200:
description: Cached response
304:
description: Modified
### Response:
def cache():
"""Returns a 304 if an If-Modified-Since header or If-None-Match is present. Returns the same as a GET otherwise.
---
tags:
- Response inspection
parameters:
- in: header
name: If-Modified-Since
- in: header
name: If-None-Match
produces:
- application/json
responses:
200:
description: Cached response
304:
description: Modified
"""
is_conditional = request.headers.get("If-Modified-Since") or request.headers.get(
"If-None-Match"
)
if is_conditional is None:
response = view_get()
response.headers["Last-Modified"] = http_date()
response.headers["ETag"] = uuid.uuid4().hex
return response
else:
return status_code(304) |
def function(name, range=False, sync=False, allow_nested=False, eval=None):
"""Tag a function or plugin method as a Nvim function handler."""
def dec(f):
f._nvim_rpc_method_name = 'function:{}'.format(name)
f._nvim_rpc_sync = sync
f._nvim_bind = True
f._nvim_prefix_plugin_path = True
opts = {}
if range:
opts['range'] = '' if range is True else str(range)
if eval:
opts['eval'] = eval
if not sync and allow_nested:
rpc_sync = "urgent"
else:
rpc_sync = sync
f._nvim_rpc_spec = {
'type': 'function',
'name': name,
'sync': rpc_sync,
'opts': opts
}
return f
return dec | Tag a function or plugin method as a Nvim function handler. | Below is the the instruction that describes the task:
### Input:
Tag a function or plugin method as a Nvim function handler.
### Response:
def function(name, range=False, sync=False, allow_nested=False, eval=None):
"""Tag a function or plugin method as a Nvim function handler."""
def dec(f):
f._nvim_rpc_method_name = 'function:{}'.format(name)
f._nvim_rpc_sync = sync
f._nvim_bind = True
f._nvim_prefix_plugin_path = True
opts = {}
if range:
opts['range'] = '' if range is True else str(range)
if eval:
opts['eval'] = eval
if not sync and allow_nested:
rpc_sync = "urgent"
else:
rpc_sync = sync
f._nvim_rpc_spec = {
'type': 'function',
'name': name,
'sync': rpc_sync,
'opts': opts
}
return f
return dec |
def polfit_residuals_with_cook_rejection(
x, y, deg, times_sigma_cook,
color='b', size=75,
xlim=None, ylim=None,
xlabel=None, ylabel=None, title=None,
use_r=None,
geometry=(0,0,640,480),
debugplot=0):
"""Polynomial fit with iterative rejection of points.
This function makes use of function polfit_residuals for display
purposes.
Parameters
----------
x : 1d numpy array, float
X coordinates of the data being fitted.
y : 1d numpy array, float
Y coordinates of the data being fitted.
deg : int
Degree of the fitting polynomial.
times_sigma_cook : float or None
Number of times the standard deviation of Cook's distances
above the median value to reject points iteratively.
color : single character or 1d numpy array of characters
Color for all the symbols (single character) or for each
individual symbol (array of color names with the same length as
'x' or 'y'). If 'color' is a single character, the rejected
points are displayed in red color, whereas when 'color' is an
array of color names, rejected points are displayed with the
color provided in this array.
size : int
Marker size for all the symbols (single character) or for each
individual symbol (array of integers with the same length as
'x' or 'y').
xlim : tuple (floats)
Plot limits in the X axis.
ylim : tuple (floats)
Plot limits in the Y axis.
xlabel : string
Character string for label in X axis.
ylabel : string
Character string for label in y axis.
title : string
Character string for graph title.
use_r : bool
If True, the function computes several fits, using R, to
polynomials of degree deg, deg+1 and deg+2 (when possible).
geometry : tuple (4 integers) or None
x, y, dx, dy values employed to set the window geometry.
debugplot : int
Determines whether intermediate computations and/or plots
are displayed. The valid codes are defined in
numina.array.display.pause_debugplot.
Return
------
poly : instance of Polynomial (numpy)
Result from the polynomial fit using numpy Polynomial. Only
points not flagged as rejected are employed in the fit.
yres : 1d numpy array, float
Residuals from polynomial fit. Note that the residuals are
computed for all the points, including the rejected ones. In
this way the dimension of this array is the same as the
dimensions of the input 'x' and 'y' arrays.
reject : 1d numpy array, bool
Boolean array indicating rejected points.
"""
# protections
if type(x) is not np.ndarray:
raise ValueError("x=" + str(x) + " must be a numpy.ndarray")
elif x.ndim != 1:
raise ValueError("x.ndim=" + str(x.ndim) + " must be 1")
if type(y) is not np.ndarray:
raise ValueError("y=" + str(y) + " must be a numpy.ndarray")
elif y.ndim != 1:
raise ValueError("y.ndim=" + str(y.ndim) + " must be 1")
npoints = x.size
if npoints != y.size:
raise ValueError("x.size != y.size")
if type(deg) not in [np.int, np.int64]:
raise ValueError("deg=" + str(deg) +
" is not a valid integer")
if deg >= npoints:
raise ValueError("Polynomial degree=" + str(deg) +
" can't be fitted with npoints=" + str(npoints))
# initialize boolean rejection array
reject = np.zeros(npoints, dtype=np.bool)
# if there is no room to remove two points, compute a fit without
# rejection
if deg == npoints - 1 or deg == npoints - 2:
poly, yres = polfit_residuals(x=x, y=y, deg=deg, reject=None,
color=color, size=size,
xlim=xlim, ylim=ylim,
xlabel=xlabel, ylabel=ylabel,
title=title,
use_r=use_r,
geometry=geometry,
debugplot=debugplot)
return poly, yres, reject
# main loop to reject points iteratively
loop_to_reject_points = True
poly = None
yres = None
any_point_removed = False
while loop_to_reject_points:
# fit to compute residual variance (neglecting already
# rejected points)
poly, yres = polfit_residuals(x=x, y=y, deg=deg, reject=reject,
color=color, size=size,
xlim=xlim, ylim=ylim,
xlabel=xlabel, ylabel=ylabel,
title=title,
use_r=use_r,
geometry=geometry,
debugplot=debugplot)
npoints_effective = npoints - np.sum(reject)
residual_variance = np.sum(yres*yres)/float(npoints_effective-deg-1)
# check that there is room to remove two points with the
# current polynomial degree
if deg <= npoints_effective - 2:
cook_distance = np.zeros(npoints)
for i in range(npoints):
if not reject[i]:
reject_cook = np.copy(reject)
reject_cook[i] = True
poly_cook, yres_cook = polfit_residuals(
x=x, y=y, deg=deg, reject=reject_cook,
color=color, size=size,
xlim=xlim, ylim=ylim,
xlabel=xlabel, ylabel=ylabel,
title="Computing Cook's distance for point " +
str(i+1),
use_r=False,
debugplot=0)
yres_cook_fitted = yres_cook[np.logical_not(reject)]
cook_distance[i] = \
np.sum(yres_cook_fitted*yres_cook_fitted) / \
(2*residual_variance)
else:
cook_distance[i] = np.inf
if abs(debugplot) >= 10:
print('i, cook_distance[i]:', i, cook_distance[i])
# determine median absolute cook distance, excluding points
# already rejected
dist_cook_fitted = np.abs(cook_distance[np.logical_not(reject)])
q50 = np.median(dist_cook_fitted)
# rms computed from the previous data after removing the
# point with the largest deviation
rms = np.std(np.sort(dist_cook_fitted)[:-2])
if abs(debugplot) >= 10:
print("--> median.......:", q50)
print("--> rms -2 points:", rms)
# reject fitted point exceeding the threshold with the
# largest Cook distance (note: with this method only one
# point is removed in each iteration of the loop). If the
# polynomial degree is larger than 1, only intermediate
# points can be discarded (i.e., the first and last point
# are never rejected because the curvature of the
# extrapolated polynomials leads to false outliers)
index_to_remove = []
if deg > 1:
n1 = 1
n2 = npoints - 1
else:
n1 = 0
n2 = npoints
for i in range(n1, n2):
if not reject[i]:
if np.abs(cook_distance[i]-q50) > times_sigma_cook * rms:
index_to_remove.append(i)
if abs(debugplot) >= 10:
print('--> suspicious point #', i + 1)
if len(index_to_remove) == 0:
if abs(debugplot) >= 10:
if any_point_removed:
print('==> no need to remove any additional point')
else:
print('==> no need to remove any point')
loop_to_reject_points = False
else:
imax = np.argmax(np.abs(cook_distance[index_to_remove]))
reject[index_to_remove[imax]] = True
any_point_removed = True
if abs(debugplot) >= 10:
print('==> removing point #', index_to_remove[imax] + 1)
else:
loop_to_reject_points = False
# return result
return poly, yres, reject | Polynomial fit with iterative rejection of points.
This function makes use of function polfit_residuals for display
purposes.
Parameters
----------
x : 1d numpy array, float
X coordinates of the data being fitted.
y : 1d numpy array, float
Y coordinates of the data being fitted.
deg : int
Degree of the fitting polynomial.
times_sigma_cook : float or None
Number of times the standard deviation of Cook's distances
above the median value to reject points iteratively.
color : single character or 1d numpy array of characters
Color for all the symbols (single character) or for each
individual symbol (array of color names with the same length as
'x' or 'y'). If 'color' is a single character, the rejected
points are displayed in red color, whereas when 'color' is an
array of color names, rejected points are displayed with the
color provided in this array.
size : int
Marker size for all the symbols (single character) or for each
individual symbol (array of integers with the same length as
'x' or 'y').
xlim : tuple (floats)
Plot limits in the X axis.
ylim : tuple (floats)
Plot limits in the Y axis.
xlabel : string
Character string for label in X axis.
ylabel : string
Character string for label in y axis.
title : string
Character string for graph title.
use_r : bool
If True, the function computes several fits, using R, to
polynomials of degree deg, deg+1 and deg+2 (when possible).
geometry : tuple (4 integers) or None
x, y, dx, dy values employed to set the window geometry.
debugplot : int
Determines whether intermediate computations and/or plots
are displayed. The valid codes are defined in
numina.array.display.pause_debugplot.
Return
------
poly : instance of Polynomial (numpy)
Result from the polynomial fit using numpy Polynomial. Only
points not flagged as rejected are employed in the fit.
yres : 1d numpy array, float
Residuals from polynomial fit. Note that the residuals are
computed for all the points, including the rejected ones. In
this way the dimension of this array is the same as the
dimensions of the input 'x' and 'y' arrays.
reject : 1d numpy array, bool
Boolean array indicating rejected points. | Below is the the instruction that describes the task:
### Input:
Polynomial fit with iterative rejection of points.
This function makes use of function polfit_residuals for display
purposes.
Parameters
----------
x : 1d numpy array, float
X coordinates of the data being fitted.
y : 1d numpy array, float
Y coordinates of the data being fitted.
deg : int
Degree of the fitting polynomial.
times_sigma_cook : float or None
Number of times the standard deviation of Cook's distances
above the median value to reject points iteratively.
color : single character or 1d numpy array of characters
Color for all the symbols (single character) or for each
individual symbol (array of color names with the same length as
'x' or 'y'). If 'color' is a single character, the rejected
points are displayed in red color, whereas when 'color' is an
array of color names, rejected points are displayed with the
color provided in this array.
size : int
Marker size for all the symbols (single character) or for each
individual symbol (array of integers with the same length as
'x' or 'y').
xlim : tuple (floats)
Plot limits in the X axis.
ylim : tuple (floats)
Plot limits in the Y axis.
xlabel : string
Character string for label in X axis.
ylabel : string
Character string for label in y axis.
title : string
Character string for graph title.
use_r : bool
If True, the function computes several fits, using R, to
polynomials of degree deg, deg+1 and deg+2 (when possible).
geometry : tuple (4 integers) or None
x, y, dx, dy values employed to set the window geometry.
debugplot : int
Determines whether intermediate computations and/or plots
are displayed. The valid codes are defined in
numina.array.display.pause_debugplot.
Return
------
poly : instance of Polynomial (numpy)
Result from the polynomial fit using numpy Polynomial. Only
points not flagged as rejected are employed in the fit.
yres : 1d numpy array, float
Residuals from polynomial fit. Note that the residuals are
computed for all the points, including the rejected ones. In
this way the dimension of this array is the same as the
dimensions of the input 'x' and 'y' arrays.
reject : 1d numpy array, bool
Boolean array indicating rejected points.
### Response:
def polfit_residuals_with_cook_rejection(
x, y, deg, times_sigma_cook,
color='b', size=75,
xlim=None, ylim=None,
xlabel=None, ylabel=None, title=None,
use_r=None,
geometry=(0,0,640,480),
debugplot=0):
"""Polynomial fit with iterative rejection of points.
This function makes use of function polfit_residuals for display
purposes.
Parameters
----------
x : 1d numpy array, float
X coordinates of the data being fitted.
y : 1d numpy array, float
Y coordinates of the data being fitted.
deg : int
Degree of the fitting polynomial.
times_sigma_cook : float or None
Number of times the standard deviation of Cook's distances
above the median value to reject points iteratively.
color : single character or 1d numpy array of characters
Color for all the symbols (single character) or for each
individual symbol (array of color names with the same length as
'x' or 'y'). If 'color' is a single character, the rejected
points are displayed in red color, whereas when 'color' is an
array of color names, rejected points are displayed with the
color provided in this array.
size : int
Marker size for all the symbols (single character) or for each
individual symbol (array of integers with the same length as
'x' or 'y').
xlim : tuple (floats)
Plot limits in the X axis.
ylim : tuple (floats)
Plot limits in the Y axis.
xlabel : string
Character string for label in X axis.
ylabel : string
Character string for label in y axis.
title : string
Character string for graph title.
use_r : bool
If True, the function computes several fits, using R, to
polynomials of degree deg, deg+1 and deg+2 (when possible).
geometry : tuple (4 integers) or None
x, y, dx, dy values employed to set the window geometry.
debugplot : int
Determines whether intermediate computations and/or plots
are displayed. The valid codes are defined in
numina.array.display.pause_debugplot.
Return
------
poly : instance of Polynomial (numpy)
Result from the polynomial fit using numpy Polynomial. Only
points not flagged as rejected are employed in the fit.
yres : 1d numpy array, float
Residuals from polynomial fit. Note that the residuals are
computed for all the points, including the rejected ones. In
this way the dimension of this array is the same as the
dimensions of the input 'x' and 'y' arrays.
reject : 1d numpy array, bool
Boolean array indicating rejected points.
"""
# protections
if type(x) is not np.ndarray:
raise ValueError("x=" + str(x) + " must be a numpy.ndarray")
elif x.ndim != 1:
raise ValueError("x.ndim=" + str(x.ndim) + " must be 1")
if type(y) is not np.ndarray:
raise ValueError("y=" + str(y) + " must be a numpy.ndarray")
elif y.ndim != 1:
raise ValueError("y.ndim=" + str(y.ndim) + " must be 1")
npoints = x.size
if npoints != y.size:
raise ValueError("x.size != y.size")
if type(deg) not in [np.int, np.int64]:
raise ValueError("deg=" + str(deg) +
" is not a valid integer")
if deg >= npoints:
raise ValueError("Polynomial degree=" + str(deg) +
" can't be fitted with npoints=" + str(npoints))
# initialize boolean rejection array
reject = np.zeros(npoints, dtype=np.bool)
# if there is no room to remove two points, compute a fit without
# rejection
if deg == npoints - 1 or deg == npoints - 2:
poly, yres = polfit_residuals(x=x, y=y, deg=deg, reject=None,
color=color, size=size,
xlim=xlim, ylim=ylim,
xlabel=xlabel, ylabel=ylabel,
title=title,
use_r=use_r,
geometry=geometry,
debugplot=debugplot)
return poly, yres, reject
# main loop to reject points iteratively
loop_to_reject_points = True
poly = None
yres = None
any_point_removed = False
while loop_to_reject_points:
# fit to compute residual variance (neglecting already
# rejected points)
poly, yres = polfit_residuals(x=x, y=y, deg=deg, reject=reject,
color=color, size=size,
xlim=xlim, ylim=ylim,
xlabel=xlabel, ylabel=ylabel,
title=title,
use_r=use_r,
geometry=geometry,
debugplot=debugplot)
npoints_effective = npoints - np.sum(reject)
residual_variance = np.sum(yres*yres)/float(npoints_effective-deg-1)
# check that there is room to remove two points with the
# current polynomial degree
if deg <= npoints_effective - 2:
cook_distance = np.zeros(npoints)
for i in range(npoints):
if not reject[i]:
reject_cook = np.copy(reject)
reject_cook[i] = True
poly_cook, yres_cook = polfit_residuals(
x=x, y=y, deg=deg, reject=reject_cook,
color=color, size=size,
xlim=xlim, ylim=ylim,
xlabel=xlabel, ylabel=ylabel,
title="Computing Cook's distance for point " +
str(i+1),
use_r=False,
debugplot=0)
yres_cook_fitted = yres_cook[np.logical_not(reject)]
cook_distance[i] = \
np.sum(yres_cook_fitted*yres_cook_fitted) / \
(2*residual_variance)
else:
cook_distance[i] = np.inf
if abs(debugplot) >= 10:
print('i, cook_distance[i]:', i, cook_distance[i])
# determine median absolute cook distance, excluding points
# already rejected
dist_cook_fitted = np.abs(cook_distance[np.logical_not(reject)])
q50 = np.median(dist_cook_fitted)
# rms computed from the previous data after removing the
# point with the largest deviation
rms = np.std(np.sort(dist_cook_fitted)[:-2])
if abs(debugplot) >= 10:
print("--> median.......:", q50)
print("--> rms -2 points:", rms)
# reject fitted point exceeding the threshold with the
# largest Cook distance (note: with this method only one
# point is removed in each iteration of the loop). If the
# polynomial degree is larger than 1, only intermediate
# points can be discarded (i.e., the first and last point
# are never rejected because the curvature of the
# extrapolated polynomials leads to false outliers)
index_to_remove = []
if deg > 1:
n1 = 1
n2 = npoints - 1
else:
n1 = 0
n2 = npoints
for i in range(n1, n2):
if not reject[i]:
if np.abs(cook_distance[i]-q50) > times_sigma_cook * rms:
index_to_remove.append(i)
if abs(debugplot) >= 10:
print('--> suspicious point #', i + 1)
if len(index_to_remove) == 0:
if abs(debugplot) >= 10:
if any_point_removed:
print('==> no need to remove any additional point')
else:
print('==> no need to remove any point')
loop_to_reject_points = False
else:
imax = np.argmax(np.abs(cook_distance[index_to_remove]))
reject[index_to_remove[imax]] = True
any_point_removed = True
if abs(debugplot) >= 10:
print('==> removing point #', index_to_remove[imax] + 1)
else:
loop_to_reject_points = False
# return result
return poly, yres, reject |
def Skip(self: dict, n):
"""
[
{
'self': [1, 2, 3, 4, 5],
'n': 3,
'assert': lambda ret: list(ret) == [4, 5]
}
]
"""
con = self.items()
for i, _ in enumerate(con):
if i == n:
break
return con | [
{
'self': [1, 2, 3, 4, 5],
'n': 3,
'assert': lambda ret: list(ret) == [4, 5]
}
] | Below is the the instruction that describes the task:
### Input:
[
{
'self': [1, 2, 3, 4, 5],
'n': 3,
'assert': lambda ret: list(ret) == [4, 5]
}
]
### Response:
def Skip(self: dict, n):
"""
[
{
'self': [1, 2, 3, 4, 5],
'n': 3,
'assert': lambda ret: list(ret) == [4, 5]
}
]
"""
con = self.items()
for i, _ in enumerate(con):
if i == n:
break
return con |
def set_bytes_transferred(self, bytes_transferred):
''' set the number of bytes transferred - if it has changed return True '''
_changed = False
if bytes_transferred:
_changed = (self._bytes_transferred != int(bytes_transferred))
if _changed:
self._bytes_transferred = int(bytes_transferred)
logger.debug("(%s) BytesTransferred: %d" % (
self.session_id, self._bytes_transferred))
if AsperaSession.PROGRESS_MSGS_SEND_ALL:
return True
return _changed | set the number of bytes transferred - if it has changed return True | Below is the the instruction that describes the task:
### Input:
set the number of bytes transferred - if it has changed return True
### Response:
def set_bytes_transferred(self, bytes_transferred):
''' set the number of bytes transferred - if it has changed return True '''
_changed = False
if bytes_transferred:
_changed = (self._bytes_transferred != int(bytes_transferred))
if _changed:
self._bytes_transferred = int(bytes_transferred)
logger.debug("(%s) BytesTransferred: %d" % (
self.session_id, self._bytes_transferred))
if AsperaSession.PROGRESS_MSGS_SEND_ALL:
return True
return _changed |
def sign_input_at(self, start_index, private_key):
# type: (int, PrivateKey) -> None
"""
Signs the input at the specified index.
:param start_index:
The index of the first input transaction.
If necessary, the resulting signature will be split across
multiple transactions automatically (i.e., if an input has
``security_level=2``, you still only need to call
:py:meth:`sign_input_at` once).
:param private_key:
The private key that will be used to generate the signature.
.. important::
Be sure that the private key was generated using the
correct seed, or the resulting signature will be
invalid!
"""
if not self.hash:
raise RuntimeError('Cannot sign inputs until bundle is finalized.')
private_key.sign_input_transactions(self, start_index) | Signs the input at the specified index.
:param start_index:
The index of the first input transaction.
If necessary, the resulting signature will be split across
multiple transactions automatically (i.e., if an input has
``security_level=2``, you still only need to call
:py:meth:`sign_input_at` once).
:param private_key:
The private key that will be used to generate the signature.
.. important::
Be sure that the private key was generated using the
correct seed, or the resulting signature will be
invalid! | Below is the the instruction that describes the task:
### Input:
Signs the input at the specified index.
:param start_index:
The index of the first input transaction.
If necessary, the resulting signature will be split across
multiple transactions automatically (i.e., if an input has
``security_level=2``, you still only need to call
:py:meth:`sign_input_at` once).
:param private_key:
The private key that will be used to generate the signature.
.. important::
Be sure that the private key was generated using the
correct seed, or the resulting signature will be
invalid!
### Response:
def sign_input_at(self, start_index, private_key):
# type: (int, PrivateKey) -> None
"""
Signs the input at the specified index.
:param start_index:
The index of the first input transaction.
If necessary, the resulting signature will be split across
multiple transactions automatically (i.e., if an input has
``security_level=2``, you still only need to call
:py:meth:`sign_input_at` once).
:param private_key:
The private key that will be used to generate the signature.
.. important::
Be sure that the private key was generated using the
correct seed, or the resulting signature will be
invalid!
"""
if not self.hash:
raise RuntimeError('Cannot sign inputs until bundle is finalized.')
private_key.sign_input_transactions(self, start_index) |
async def receive_deferred_messages(self, sequence_numbers, mode=ReceiveSettleMode.PeekLock):
"""Receive messages that have previously been deferred.
This operation can only receive deferred messages from the current session.
When receiving deferred messages from a partitioned entity, all of the supplied
sequence numbers must be messages from the same partition.
:param sequence_numbers: A list of the sequence numbers of messages that have been
deferred.
:type sequence_numbers: list[int]
:param mode: The receive mode, default value is PeekLock.
:type mode: ~azure.servicebus.common.constants.ReceiveSettleMode
:rtype: list[~azure.servicebus.aio.async_message.DeferredMessage]
Example:
.. literalinclude:: ../examples/async_examples/test_examples_async.py
:start-after: [START receiver_defer_session_messages]
:end-before: [END receiver_defer_session_messages]
:language: python
:dedent: 8
:caption: Defer messages, then retrieve them by sequence number.
"""
if not sequence_numbers:
raise ValueError("At least one sequence number must be specified.")
await self._can_run()
try:
receive_mode = mode.value.value
except AttributeError:
receive_mode = int(mode)
message = {
'sequence-numbers': types.AMQPArray([types.AMQPLong(s) for s in sequence_numbers]),
'receiver-settle-mode': types.AMQPuInt(receive_mode),
'session-id': self.session_id
}
handler = functools.partial(mgmt_handlers.deferred_message_op, mode=receive_mode, message_type=DeferredMessage)
messages = await self._mgmt_request_response(
REQUEST_RESPONSE_RECEIVE_BY_SEQUENCE_NUMBER,
message,
handler)
for m in messages:
m._receiver = self # pylint: disable=protected-access
return messages | Receive messages that have previously been deferred.
This operation can only receive deferred messages from the current session.
When receiving deferred messages from a partitioned entity, all of the supplied
sequence numbers must be messages from the same partition.
:param sequence_numbers: A list of the sequence numbers of messages that have been
deferred.
:type sequence_numbers: list[int]
:param mode: The receive mode, default value is PeekLock.
:type mode: ~azure.servicebus.common.constants.ReceiveSettleMode
:rtype: list[~azure.servicebus.aio.async_message.DeferredMessage]
Example:
.. literalinclude:: ../examples/async_examples/test_examples_async.py
:start-after: [START receiver_defer_session_messages]
:end-before: [END receiver_defer_session_messages]
:language: python
:dedent: 8
:caption: Defer messages, then retrieve them by sequence number. | Below is the the instruction that describes the task:
### Input:
Receive messages that have previously been deferred.
This operation can only receive deferred messages from the current session.
When receiving deferred messages from a partitioned entity, all of the supplied
sequence numbers must be messages from the same partition.
:param sequence_numbers: A list of the sequence numbers of messages that have been
deferred.
:type sequence_numbers: list[int]
:param mode: The receive mode, default value is PeekLock.
:type mode: ~azure.servicebus.common.constants.ReceiveSettleMode
:rtype: list[~azure.servicebus.aio.async_message.DeferredMessage]
Example:
.. literalinclude:: ../examples/async_examples/test_examples_async.py
:start-after: [START receiver_defer_session_messages]
:end-before: [END receiver_defer_session_messages]
:language: python
:dedent: 8
:caption: Defer messages, then retrieve them by sequence number.
### Response:
async def receive_deferred_messages(self, sequence_numbers, mode=ReceiveSettleMode.PeekLock):
"""Receive messages that have previously been deferred.
This operation can only receive deferred messages from the current session.
When receiving deferred messages from a partitioned entity, all of the supplied
sequence numbers must be messages from the same partition.
:param sequence_numbers: A list of the sequence numbers of messages that have been
deferred.
:type sequence_numbers: list[int]
:param mode: The receive mode, default value is PeekLock.
:type mode: ~azure.servicebus.common.constants.ReceiveSettleMode
:rtype: list[~azure.servicebus.aio.async_message.DeferredMessage]
Example:
.. literalinclude:: ../examples/async_examples/test_examples_async.py
:start-after: [START receiver_defer_session_messages]
:end-before: [END receiver_defer_session_messages]
:language: python
:dedent: 8
:caption: Defer messages, then retrieve them by sequence number.
"""
if not sequence_numbers:
raise ValueError("At least one sequence number must be specified.")
await self._can_run()
try:
receive_mode = mode.value.value
except AttributeError:
receive_mode = int(mode)
message = {
'sequence-numbers': types.AMQPArray([types.AMQPLong(s) for s in sequence_numbers]),
'receiver-settle-mode': types.AMQPuInt(receive_mode),
'session-id': self.session_id
}
handler = functools.partial(mgmt_handlers.deferred_message_op, mode=receive_mode, message_type=DeferredMessage)
messages = await self._mgmt_request_response(
REQUEST_RESPONSE_RECEIVE_BY_SEQUENCE_NUMBER,
message,
handler)
for m in messages:
m._receiver = self # pylint: disable=protected-access
return messages |
def _combine_nd(combined_ids, concat_dims, data_vars='all',
coords='different', compat='no_conflicts'):
"""
Concatenates and merges an N-dimensional structure of datasets.
No checks are performed on the consistency of the datasets, concat_dims or
tile_IDs, because it is assumed that this has already been done.
Parameters
----------
combined_ids : Dict[Tuple[int, ...]], xarray.Dataset]
Structure containing all datasets to be concatenated with "tile_IDs" as
keys, which specify position within the desired final combined result.
concat_dims : sequence of str
The dimensions along which the datasets should be concatenated. Must be
in order, and the length must match
Returns
-------
combined_ds : xarray.Dataset
"""
# Perform N-D dimensional concatenation
# Each iteration of this loop reduces the length of the tile_ids tuples
# by one. It always combines along the first dimension, removing the first
# element of the tuple
for concat_dim in concat_dims:
combined_ids = _auto_combine_all_along_first_dim(combined_ids,
dim=concat_dim,
data_vars=data_vars,
coords=coords,
compat=compat)
combined_ds = list(combined_ids.values())[0]
return combined_ds | Concatenates and merges an N-dimensional structure of datasets.
No checks are performed on the consistency of the datasets, concat_dims or
tile_IDs, because it is assumed that this has already been done.
Parameters
----------
combined_ids : Dict[Tuple[int, ...]], xarray.Dataset]
Structure containing all datasets to be concatenated with "tile_IDs" as
keys, which specify position within the desired final combined result.
concat_dims : sequence of str
The dimensions along which the datasets should be concatenated. Must be
in order, and the length must match
Returns
-------
combined_ds : xarray.Dataset | Below is the the instruction that describes the task:
### Input:
Concatenates and merges an N-dimensional structure of datasets.
No checks are performed on the consistency of the datasets, concat_dims or
tile_IDs, because it is assumed that this has already been done.
Parameters
----------
combined_ids : Dict[Tuple[int, ...]], xarray.Dataset]
Structure containing all datasets to be concatenated with "tile_IDs" as
keys, which specify position within the desired final combined result.
concat_dims : sequence of str
The dimensions along which the datasets should be concatenated. Must be
in order, and the length must match
Returns
-------
combined_ds : xarray.Dataset
### Response:
def _combine_nd(combined_ids, concat_dims, data_vars='all',
coords='different', compat='no_conflicts'):
"""
Concatenates and merges an N-dimensional structure of datasets.
No checks are performed on the consistency of the datasets, concat_dims or
tile_IDs, because it is assumed that this has already been done.
Parameters
----------
combined_ids : Dict[Tuple[int, ...]], xarray.Dataset]
Structure containing all datasets to be concatenated with "tile_IDs" as
keys, which specify position within the desired final combined result.
concat_dims : sequence of str
The dimensions along which the datasets should be concatenated. Must be
in order, and the length must match
Returns
-------
combined_ds : xarray.Dataset
"""
# Perform N-D dimensional concatenation
# Each iteration of this loop reduces the length of the tile_ids tuples
# by one. It always combines along the first dimension, removing the first
# element of the tuple
for concat_dim in concat_dims:
combined_ids = _auto_combine_all_along_first_dim(combined_ids,
dim=concat_dim,
data_vars=data_vars,
coords=coords,
compat=compat)
combined_ds = list(combined_ids.values())[0]
return combined_ds |
def iter_chunksize(num_samples, chunksize):
"""Iterator used to iterate in chunks over an array of size `num_samples`.
At each iteration returns `chunksize` except for the last iteration.
"""
last_chunksize = int(np.mod(num_samples, chunksize))
chunksize = int(chunksize)
for _ in range(int(num_samples) // chunksize):
yield chunksize
if last_chunksize > 0:
yield last_chunksize | Iterator used to iterate in chunks over an array of size `num_samples`.
At each iteration returns `chunksize` except for the last iteration. | Below is the the instruction that describes the task:
### Input:
Iterator used to iterate in chunks over an array of size `num_samples`.
At each iteration returns `chunksize` except for the last iteration.
### Response:
def iter_chunksize(num_samples, chunksize):
"""Iterator used to iterate in chunks over an array of size `num_samples`.
At each iteration returns `chunksize` except for the last iteration.
"""
last_chunksize = int(np.mod(num_samples, chunksize))
chunksize = int(chunksize)
for _ in range(int(num_samples) // chunksize):
yield chunksize
if last_chunksize > 0:
yield last_chunksize |
def run(self, data, store, signal, context, **kwargs):
""" The main run method of the Python task.
Args:
data (:class:`.MultiTaskData`): The data object that has been passed from the
predecessor task.
store (:class:`.DataStoreDocument`): The persistent data store object that allows the
task to store data for access across the current workflow run.
signal (TaskSignal): The signal object for tasks. It wraps the construction
and sending of signals into easy to use methods.
context (TaskContext): The context in which the tasks runs.
Returns:
Action (Action): An Action object containing the data that should be passed on
to the next task and optionally a list of successor tasks that
should be executed.
"""
params = self.params.eval(data, store, exclude=['command'])
capture_stdout = self._callback_stdout is not None or params.capture_stdout
capture_stderr = self._callback_stderr is not None or params.capture_stderr
stdout_file = TemporaryFile() if params.capture_stdout else None
stderr_file = TemporaryFile() if params.capture_stderr else None
stdout = PIPE if capture_stdout else None
stderr = PIPE if capture_stderr else None
# change the user or group under which the process should run
if params.user is not None or params.group is not None:
pre_exec = self._run_as(params.user, params.group)
else:
pre_exec = None
# call the command
proc = Popen(self.params.eval_single('command', data, store),
cwd=params.cwd, shell=True, env=params.env,
preexec_fn=pre_exec, stdout=stdout, stderr=stderr,
stdin=PIPE if params.stdin is not None else None)
# if input is available, send it to the process
if params.stdin is not None:
proc.stdin.write(params.stdin.encode(sys.getfilesystemencoding()))
# send a notification that the process has been started
try:
if self._callback_process is not None:
self._callback_process(proc.pid, data, store, signal, context)
except (StopTask, AbortWorkflow):
proc.terminate()
raise
# send the output handling to a thread
if capture_stdout or capture_stderr:
output_reader = BashTaskOutputReader(proc, stdout_file, stderr_file,
self._callback_stdout,
self._callback_stderr,
params.refresh_time,
data, store, signal, context)
output_reader.start()
else:
output_reader = None
# wait for the process to complete and watch for a stop signal
while proc.poll() is None or\
(output_reader is not None and output_reader.is_alive()):
sleep(params.refresh_time)
if signal.is_stopped:
proc.terminate()
if output_reader is not None:
output_reader.join()
data = output_reader.data
# if a stop or abort exception was raised, stop the bash process and re-raise
if output_reader.exc_obj is not None:
if proc.poll() is None:
proc.terminate()
raise output_reader.exc_obj
# send a notification that the process has completed
if self._callback_end is not None:
if stdout_file is not None:
stdout_file.seek(0)
if stderr_file is not None:
stderr_file.seek(0)
self._callback_end(proc.returncode, stdout_file, stderr_file,
data, store, signal, context)
if stdout_file is not None:
stdout_file.close()
if stderr_file is not None:
stderr_file.close()
return Action(data) | The main run method of the Python task.
Args:
data (:class:`.MultiTaskData`): The data object that has been passed from the
predecessor task.
store (:class:`.DataStoreDocument`): The persistent data store object that allows the
task to store data for access across the current workflow run.
signal (TaskSignal): The signal object for tasks. It wraps the construction
and sending of signals into easy to use methods.
context (TaskContext): The context in which the tasks runs.
Returns:
Action (Action): An Action object containing the data that should be passed on
to the next task and optionally a list of successor tasks that
should be executed. | Below is the the instruction that describes the task:
### Input:
The main run method of the Python task.
Args:
data (:class:`.MultiTaskData`): The data object that has been passed from the
predecessor task.
store (:class:`.DataStoreDocument`): The persistent data store object that allows the
task to store data for access across the current workflow run.
signal (TaskSignal): The signal object for tasks. It wraps the construction
and sending of signals into easy to use methods.
context (TaskContext): The context in which the tasks runs.
Returns:
Action (Action): An Action object containing the data that should be passed on
to the next task and optionally a list of successor tasks that
should be executed.
### Response:
def run(self, data, store, signal, context, **kwargs):
""" The main run method of the Python task.
Args:
data (:class:`.MultiTaskData`): The data object that has been passed from the
predecessor task.
store (:class:`.DataStoreDocument`): The persistent data store object that allows the
task to store data for access across the current workflow run.
signal (TaskSignal): The signal object for tasks. It wraps the construction
and sending of signals into easy to use methods.
context (TaskContext): The context in which the tasks runs.
Returns:
Action (Action): An Action object containing the data that should be passed on
to the next task and optionally a list of successor tasks that
should be executed.
"""
params = self.params.eval(data, store, exclude=['command'])
capture_stdout = self._callback_stdout is not None or params.capture_stdout
capture_stderr = self._callback_stderr is not None or params.capture_stderr
stdout_file = TemporaryFile() if params.capture_stdout else None
stderr_file = TemporaryFile() if params.capture_stderr else None
stdout = PIPE if capture_stdout else None
stderr = PIPE if capture_stderr else None
# change the user or group under which the process should run
if params.user is not None or params.group is not None:
pre_exec = self._run_as(params.user, params.group)
else:
pre_exec = None
# call the command
proc = Popen(self.params.eval_single('command', data, store),
cwd=params.cwd, shell=True, env=params.env,
preexec_fn=pre_exec, stdout=stdout, stderr=stderr,
stdin=PIPE if params.stdin is not None else None)
# if input is available, send it to the process
if params.stdin is not None:
proc.stdin.write(params.stdin.encode(sys.getfilesystemencoding()))
# send a notification that the process has been started
try:
if self._callback_process is not None:
self._callback_process(proc.pid, data, store, signal, context)
except (StopTask, AbortWorkflow):
proc.terminate()
raise
# send the output handling to a thread
if capture_stdout or capture_stderr:
output_reader = BashTaskOutputReader(proc, stdout_file, stderr_file,
self._callback_stdout,
self._callback_stderr,
params.refresh_time,
data, store, signal, context)
output_reader.start()
else:
output_reader = None
# wait for the process to complete and watch for a stop signal
while proc.poll() is None or\
(output_reader is not None and output_reader.is_alive()):
sleep(params.refresh_time)
if signal.is_stopped:
proc.terminate()
if output_reader is not None:
output_reader.join()
data = output_reader.data
# if a stop or abort exception was raised, stop the bash process and re-raise
if output_reader.exc_obj is not None:
if proc.poll() is None:
proc.terminate()
raise output_reader.exc_obj
# send a notification that the process has completed
if self._callback_end is not None:
if stdout_file is not None:
stdout_file.seek(0)
if stderr_file is not None:
stderr_file.seek(0)
self._callback_end(proc.returncode, stdout_file, stderr_file,
data, store, signal, context)
if stdout_file is not None:
stdout_file.close()
if stderr_file is not None:
stderr_file.close()
return Action(data) |
def print_trip_table(document):
""" Print trip table """
headers = [
'Alt.',
'Name',
'Time',
'Track',
'Direction',
'Dest.',
'Track',
'Arrival']
table = []
altnr = 0
for alternative in document:
altnr += 1
first_trip_in_alt = True
if not isinstance(alternative['Leg'], list):
alternative['Leg'] = [alternative['Leg']]
for part in alternative['Leg']:
orig = part['Origin']
dest = part['Destination']
row = [
altnr if first_trip_in_alt else None,
part['name'],
orig['rtTime'] if 'rtTime' in orig else orig['time'],
orig['track'],
part['direction'] if 'direction' in part else None,
dest['name'],
dest['track'],
dest['rtTime'] if 'rtTime' in dest else dest['time'],
]
table.append(row)
first_trip_in_alt = False
print(tabulate.tabulate(table, headers)) | Print trip table | Below is the the instruction that describes the task:
### Input:
Print trip table
### Response:
def print_trip_table(document):
""" Print trip table """
headers = [
'Alt.',
'Name',
'Time',
'Track',
'Direction',
'Dest.',
'Track',
'Arrival']
table = []
altnr = 0
for alternative in document:
altnr += 1
first_trip_in_alt = True
if not isinstance(alternative['Leg'], list):
alternative['Leg'] = [alternative['Leg']]
for part in alternative['Leg']:
orig = part['Origin']
dest = part['Destination']
row = [
altnr if first_trip_in_alt else None,
part['name'],
orig['rtTime'] if 'rtTime' in orig else orig['time'],
orig['track'],
part['direction'] if 'direction' in part else None,
dest['name'],
dest['track'],
dest['rtTime'] if 'rtTime' in dest else dest['time'],
]
table.append(row)
first_trip_in_alt = False
print(tabulate.tabulate(table, headers)) |
def tabulate(data, header=True, headers=None, accessors=None,
**table_options):
""" Shortcut function to produce tabular output of data without the
need to create and configure a Table instance directly. The function
does however return a table instance when it's done for any further use
by the user. """
if header and not headers:
data = iter(data)
try:
headers = next(data)
except StopIteration:
pass
if headers and hasattr(headers, 'items') and accessors is None:
# Dict mode; Build accessors and headers from keys of data.
data = itertools.chain([headers], data)
accessors = list(headers)
headers = [' '.join(map(str.capitalize, x.replace('_', ' ').split()))
for x in accessors]
t = Table(headers=headers, accessors=accessors, **table_options)
try:
t.print(data)
except RowsNotFound:
pass
return t | Shortcut function to produce tabular output of data without the
need to create and configure a Table instance directly. The function
does however return a table instance when it's done for any further use
by the user. | Below is the the instruction that describes the task:
### Input:
Shortcut function to produce tabular output of data without the
need to create and configure a Table instance directly. The function
does however return a table instance when it's done for any further use
by the user.
### Response:
def tabulate(data, header=True, headers=None, accessors=None,
**table_options):
""" Shortcut function to produce tabular output of data without the
need to create and configure a Table instance directly. The function
does however return a table instance when it's done for any further use
by the user. """
if header and not headers:
data = iter(data)
try:
headers = next(data)
except StopIteration:
pass
if headers and hasattr(headers, 'items') and accessors is None:
# Dict mode; Build accessors and headers from keys of data.
data = itertools.chain([headers], data)
accessors = list(headers)
headers = [' '.join(map(str.capitalize, x.replace('_', ' ').split()))
for x in accessors]
t = Table(headers=headers, accessors=accessors, **table_options)
try:
t.print(data)
except RowsNotFound:
pass
return t |
def highway_core_with_recurrent_dropout(
hidden_size,
num_layers,
keep_prob=0.5,
**kwargs):
"""Highway core with recurrent dropout.
Args:
hidden_size: (int) Hidden size dimensionality.
num_layers: (int) Number of highway layers.
keep_prob: the probability to keep an entry when applying dropout.
**kwargs: Extra keyword arguments to pass to the highway core.
Returns:
A tuple (train_core, test_core) where train_core is a higway core with
recurrent dropout enabled to be used for training and test_core is the
same highway core without recurrent dropout.
"""
core = HighwayCore(hidden_size, num_layers, **kwargs)
return RecurrentDropoutWrapper(core, keep_prob), core | Highway core with recurrent dropout.
Args:
hidden_size: (int) Hidden size dimensionality.
num_layers: (int) Number of highway layers.
keep_prob: the probability to keep an entry when applying dropout.
**kwargs: Extra keyword arguments to pass to the highway core.
Returns:
A tuple (train_core, test_core) where train_core is a higway core with
recurrent dropout enabled to be used for training and test_core is the
same highway core without recurrent dropout. | Below is the the instruction that describes the task:
### Input:
Highway core with recurrent dropout.
Args:
hidden_size: (int) Hidden size dimensionality.
num_layers: (int) Number of highway layers.
keep_prob: the probability to keep an entry when applying dropout.
**kwargs: Extra keyword arguments to pass to the highway core.
Returns:
A tuple (train_core, test_core) where train_core is a higway core with
recurrent dropout enabled to be used for training and test_core is the
same highway core without recurrent dropout.
### Response:
def highway_core_with_recurrent_dropout(
hidden_size,
num_layers,
keep_prob=0.5,
**kwargs):
"""Highway core with recurrent dropout.
Args:
hidden_size: (int) Hidden size dimensionality.
num_layers: (int) Number of highway layers.
keep_prob: the probability to keep an entry when applying dropout.
**kwargs: Extra keyword arguments to pass to the highway core.
Returns:
A tuple (train_core, test_core) where train_core is a higway core with
recurrent dropout enabled to be used for training and test_core is the
same highway core without recurrent dropout.
"""
core = HighwayCore(hidden_size, num_layers, **kwargs)
return RecurrentDropoutWrapper(core, keep_prob), core |
def filter_gradient_threshold(self, analyte, win, threshold, recalc=True):
"""
Apply gradient threshold filter.
Generates threshold filters for the given analytes above and below
the specified threshold.
Two filters are created with prefixes '_above' and '_below'.
'_above' keeps all the data above the threshold.
'_below' keeps all the data below the threshold.
i.e. to select data below the threshold value, you should turn the
'_above' filter off.
Parameters
----------
analyte : str
Description of `analyte`.
threshold : float
Description of `threshold`.
win : int
Window used to calculate gradients (n points)
recalc : bool
Whether or not to re-calculate the gradients.
Returns
-------
None
"""
params = locals()
del(params['self'])
# calculate absolute gradient
if recalc or not self.grads_calced:
self.grads = calc_grads(self.Time, self.focus,
[analyte], win)
self.grads_calced = True
below, above = filters.threshold(abs(self.grads[analyte]), threshold)
setn = self.filt.maxset + 1
self.filt.add(analyte + '_gthresh_below',
below,
'Keep gradient below {:.3e} '.format(threshold) + analyte,
params, setn=setn)
self.filt.add(analyte + '_gthresh_above',
above,
'Keep gradient above {:.3e} '.format(threshold) + analyte,
params, setn=setn) | Apply gradient threshold filter.
Generates threshold filters for the given analytes above and below
the specified threshold.
Two filters are created with prefixes '_above' and '_below'.
'_above' keeps all the data above the threshold.
'_below' keeps all the data below the threshold.
i.e. to select data below the threshold value, you should turn the
'_above' filter off.
Parameters
----------
analyte : str
Description of `analyte`.
threshold : float
Description of `threshold`.
win : int
Window used to calculate gradients (n points)
recalc : bool
Whether or not to re-calculate the gradients.
Returns
-------
None | Below is the the instruction that describes the task:
### Input:
Apply gradient threshold filter.
Generates threshold filters for the given analytes above and below
the specified threshold.
Two filters are created with prefixes '_above' and '_below'.
'_above' keeps all the data above the threshold.
'_below' keeps all the data below the threshold.
i.e. to select data below the threshold value, you should turn the
'_above' filter off.
Parameters
----------
analyte : str
Description of `analyte`.
threshold : float
Description of `threshold`.
win : int
Window used to calculate gradients (n points)
recalc : bool
Whether or not to re-calculate the gradients.
Returns
-------
None
### Response:
def filter_gradient_threshold(self, analyte, win, threshold, recalc=True):
"""
Apply gradient threshold filter.
Generates threshold filters for the given analytes above and below
the specified threshold.
Two filters are created with prefixes '_above' and '_below'.
'_above' keeps all the data above the threshold.
'_below' keeps all the data below the threshold.
i.e. to select data below the threshold value, you should turn the
'_above' filter off.
Parameters
----------
analyte : str
Description of `analyte`.
threshold : float
Description of `threshold`.
win : int
Window used to calculate gradients (n points)
recalc : bool
Whether or not to re-calculate the gradients.
Returns
-------
None
"""
params = locals()
del(params['self'])
# calculate absolute gradient
if recalc or not self.grads_calced:
self.grads = calc_grads(self.Time, self.focus,
[analyte], win)
self.grads_calced = True
below, above = filters.threshold(abs(self.grads[analyte]), threshold)
setn = self.filt.maxset + 1
self.filt.add(analyte + '_gthresh_below',
below,
'Keep gradient below {:.3e} '.format(threshold) + analyte,
params, setn=setn)
self.filt.add(analyte + '_gthresh_above',
above,
'Keep gradient above {:.3e} '.format(threshold) + analyte,
params, setn=setn) |
def __safe_validation_callback(self, event):
# type: (str) -> Any
"""
Calls the ``@ValidateComponent`` or ``@InvalidateComponent`` callback,
ignoring raised exceptions
:param event: The kind of life-cycle callback (in/validation)
:return: The callback result, or None
"""
if self.state == StoredInstance.KILLED:
# Invalid state
return None
try:
return self.__validation_callback(event)
except FrameworkException as ex:
# Important error
self._logger.exception(
"Critical error calling back %s: %s", self.name, ex
)
# Kill the component
self._ipopo_service.kill(self.name)
# Store the exception as it is a validation error
self.error_trace = traceback.format_exc()
if ex.needs_stop:
# Framework must be stopped...
self._logger.error(
"%s said that the Framework must be stopped.", self.name
)
self.bundle_context.get_framework().stop()
return False
except:
self._logger.exception(
"Component '%s': error calling @ValidateComponent callback",
self.name,
)
# Store the exception as it is a validation error
self.error_trace = traceback.format_exc()
return False | Calls the ``@ValidateComponent`` or ``@InvalidateComponent`` callback,
ignoring raised exceptions
:param event: The kind of life-cycle callback (in/validation)
:return: The callback result, or None | Below is the the instruction that describes the task:
### Input:
Calls the ``@ValidateComponent`` or ``@InvalidateComponent`` callback,
ignoring raised exceptions
:param event: The kind of life-cycle callback (in/validation)
:return: The callback result, or None
### Response:
def __safe_validation_callback(self, event):
# type: (str) -> Any
"""
Calls the ``@ValidateComponent`` or ``@InvalidateComponent`` callback,
ignoring raised exceptions
:param event: The kind of life-cycle callback (in/validation)
:return: The callback result, or None
"""
if self.state == StoredInstance.KILLED:
# Invalid state
return None
try:
return self.__validation_callback(event)
except FrameworkException as ex:
# Important error
self._logger.exception(
"Critical error calling back %s: %s", self.name, ex
)
# Kill the component
self._ipopo_service.kill(self.name)
# Store the exception as it is a validation error
self.error_trace = traceback.format_exc()
if ex.needs_stop:
# Framework must be stopped...
self._logger.error(
"%s said that the Framework must be stopped.", self.name
)
self.bundle_context.get_framework().stop()
return False
except:
self._logger.exception(
"Component '%s': error calling @ValidateComponent callback",
self.name,
)
# Store the exception as it is a validation error
self.error_trace = traceback.format_exc()
return False |
def in_repo(self, filepath):
"""
This excludes repository directories because they cause some exceptions
occationally.
"""
filepath = set(filepath.replace('\\', '/').split('/'))
for p in ('.git', '.hg', '.svn', '.cvs', '.bzr'):
if p in filepath:
return True
return False | This excludes repository directories because they cause some exceptions
occationally. | Below is the the instruction that describes the task:
### Input:
This excludes repository directories because they cause some exceptions
occationally.
### Response:
def in_repo(self, filepath):
"""
This excludes repository directories because they cause some exceptions
occationally.
"""
filepath = set(filepath.replace('\\', '/').split('/'))
for p in ('.git', '.hg', '.svn', '.cvs', '.bzr'):
if p in filepath:
return True
return False |
def refresh(self):
'''Refetch instance data from the API.
'''
response = requests.get('%s/guides/%s' % (API_BASE_URL, self.id))
attributes = response.json()
self.category = Category(attributes['category'])
self.url = attributes['url']
self.title = attributes['title']
if attributes['image']:
self.image = Image(attributes['image']['id'])
else:
self.image = None
self.locale = attributes['locale']
self.introduction = WikiText(attributes['introduction_raw'],
attributes['introduction_rendered'])
self.conclusion = WikiText(attributes['conclusion_raw'],
attributes['conclusion_rendered'])
#self.tools = attributes['tools']
#self.parts = attributes['parts']
self.subject = attributes['subject']
self.modifiedDate = datetime.utcfromtimestamp(attributes['modified_date'])
self.createdDate = datetime.utcfromtimestamp(attributes['created_date'])
self.publishedDate = datetime.utcfromtimestamp(attributes['published_date'])
#self.documents = attributes['documents']
author = attributes['author']
#self.author = User(author['userid'], name=author['text'])
#self.timeRequired = attributes['timeRequired']
self.steps = [Step(step['guideid'], step['stepid'], data=step) for step in attributes['steps']]
self.type = attributes['type']
self.public = attributes['public']
self.revision = attributes['revisionid']
self.difficulty = attributes['difficulty']
self.prerequisites = [Guide(guide['guideid']) for guide in attributes['prerequisites']]
# attributes['prereq_modified_date']
#self.summary = attributes['summary']
self.flags = [Flag.from_id(flag['flagid']) for flag in attributes['flags']] | Refetch instance data from the API. | Below is the the instruction that describes the task:
### Input:
Refetch instance data from the API.
### Response:
def refresh(self):
'''Refetch instance data from the API.
'''
response = requests.get('%s/guides/%s' % (API_BASE_URL, self.id))
attributes = response.json()
self.category = Category(attributes['category'])
self.url = attributes['url']
self.title = attributes['title']
if attributes['image']:
self.image = Image(attributes['image']['id'])
else:
self.image = None
self.locale = attributes['locale']
self.introduction = WikiText(attributes['introduction_raw'],
attributes['introduction_rendered'])
self.conclusion = WikiText(attributes['conclusion_raw'],
attributes['conclusion_rendered'])
#self.tools = attributes['tools']
#self.parts = attributes['parts']
self.subject = attributes['subject']
self.modifiedDate = datetime.utcfromtimestamp(attributes['modified_date'])
self.createdDate = datetime.utcfromtimestamp(attributes['created_date'])
self.publishedDate = datetime.utcfromtimestamp(attributes['published_date'])
#self.documents = attributes['documents']
author = attributes['author']
#self.author = User(author['userid'], name=author['text'])
#self.timeRequired = attributes['timeRequired']
self.steps = [Step(step['guideid'], step['stepid'], data=step) for step in attributes['steps']]
self.type = attributes['type']
self.public = attributes['public']
self.revision = attributes['revisionid']
self.difficulty = attributes['difficulty']
self.prerequisites = [Guide(guide['guideid']) for guide in attributes['prerequisites']]
# attributes['prereq_modified_date']
#self.summary = attributes['summary']
self.flags = [Flag.from_id(flag['flagid']) for flag in attributes['flags']] |
def overall(goback = 0, case = 1):
""" To run all over the stock and to find who match the 'case'
'goback' is back to what days ago.
0 is the last day.
"""
from twseno import twseno
for i in twseno().allstock:
#timetest(i)
try:
if case == 1:
try:
a = goristock(i)
if goback:
a.goback(goback)
if a.MAO(3,6)[1] == '↑'.decode('utf-8') and (a.MAO(3,6)[0][1][-1] < 0 or ( a.MAO(3,6)[0][1][-1] < 1 and a.MAO(3,6)[0][1][-1] > 0 and a.MAO(3,6)[0][1][-2] < 0 and a.MAO(3,6)[0][0] == 3)) and a.VOLMAX3 and a.stock_vol[-1] > 1000*1000 and a.raw_data[-1] > 10:
#print a.Cmd_display
print 'buy-: ' + oop(a)
elif a.MAO(3,6)[1] == '↓'.decode('utf-8') and a.MAO(3,6)[0][1][-1] > 0 and a.MAO(3,6)[0][0] <= 3:
print 'sell: ' + oop(a)
except KeyboardInterrupt:
print '::KeyboardInterrupt'
break
except IndexError:
print i
elif case == 2:
try:
a = goristock(i)
if goback:
a.goback(goback)
if a.MAO(3,6)[1] == '↑'.decode('utf-8') and (a.MAO(3,6)[0][1][-1] < 0 or ( a.MAO(3,6)[0][1][-1] < 1 and a.MAO(3,6)[0][1][-1] > 0 and a.MAO(3,6)[0][1][-2] < 0 and a.MAO(3,6)[0][0] == 3)) and a.stock_vol[-1] >= 1000*1000 and a.raw_data[-1] > 10 and (sum(a.stock_vol[-45:])/45) <= 1000*1000:
#print a.Cmd_display
print 'buy-: ' + oop(a)
except KeyboardInterrupt:
print '::KeyboardInterrupt'
break
except IndexError:
print i
elif case == 3:
try:
a = goristock(i)
if goback:
a.goback(goback)
if a.MA(3) > a.raw_data[-1] and a.MA(6) <= a.raw_data[-1] and a.MA(6) > a.MA(18):
#print a.Cmd_display
print 'buy-: ' + oop(a)
except KeyboardInterrupt:
print '::KeyboardInterrupt'
break
except IndexError:
print i
except KeyboardInterrupt:
print 'KeyboardInterrupt'
break | To run all over the stock and to find who match the 'case'
'goback' is back to what days ago.
0 is the last day. | Below is the instruction that describes the task:
### Input:
To run all over the stock and to find who match the 'case'
'goback' is back to what days ago.
0 is the last day.
### Response:
def overall(goback = 0, case = 1):
""" To run all over the stock and to find who match the 'case'
'goback' is back to what days ago.
0 is the last day.
"""
from twseno import twseno
for i in twseno().allstock:
#timetest(i)
try:
if case == 1:
try:
a = goristock(i)
if goback:
a.goback(goback)
if a.MAO(3,6)[1] == '↑'.decode('utf-8') and (a.MAO(3,6)[0][1][-1] < 0 or ( a.MAO(3,6)[0][1][-1] < 1 and a.MAO(3,6)[0][1][-1] > 0 and a.MAO(3,6)[0][1][-2] < 0 and a.MAO(3,6)[0][0] == 3)) and a.VOLMAX3 and a.stock_vol[-1] > 1000*1000 and a.raw_data[-1] > 10:
#print a.Cmd_display
print 'buy-: ' + oop(a)
elif a.MAO(3,6)[1] == '↓'.decode('utf-8') and a.MAO(3,6)[0][1][-1] > 0 and a.MAO(3,6)[0][0] <= 3:
print 'sell: ' + oop(a)
except KeyboardInterrupt:
print '::KeyboardInterrupt'
break
except IndexError:
print i
elif case == 2:
try:
a = goristock(i)
if goback:
a.goback(goback)
if a.MAO(3,6)[1] == '↑'.decode('utf-8') and (a.MAO(3,6)[0][1][-1] < 0 or ( a.MAO(3,6)[0][1][-1] < 1 and a.MAO(3,6)[0][1][-1] > 0 and a.MAO(3,6)[0][1][-2] < 0 and a.MAO(3,6)[0][0] == 3)) and a.stock_vol[-1] >= 1000*1000 and a.raw_data[-1] > 10 and (sum(a.stock_vol[-45:])/45) <= 1000*1000:
#print a.Cmd_display
print 'buy-: ' + oop(a)
except KeyboardInterrupt:
print '::KeyboardInterrupt'
break
except IndexError:
print i
elif case == 3:
try:
a = goristock(i)
if goback:
a.goback(goback)
if a.MA(3) > a.raw_data[-1] and a.MA(6) <= a.raw_data[-1] and a.MA(6) > a.MA(18):
#print a.Cmd_display
print 'buy-: ' + oop(a)
except KeyboardInterrupt:
print '::KeyboardInterrupt'
break
except IndexError:
print i
except KeyboardInterrupt:
print 'KeyboardInterrupt'
break |
def norm_name(build_module: str, target_name: str):
"""Return a normalized canonical target name for the `target_name`
observed in build module `build_module`.
A normalized canonical target name is of the form "<build module>:<name>",
where <build module> is the relative normalized path from the project root
to the target build module (POSIX), and <name> is a valid target name
(see `validate_name()`).
"""
if ':' not in target_name:
raise ValueError(
"Must provide fully-qualified target name (with `:') to avoid "
"possible ambiguity - `{}' not valid".format(target_name))
mod, name = split(target_name)
return '{}:{}'.format(
PurePath(norm_proj_path(mod, build_module)).as_posix().strip('.'),
validate_name(name)) | Return a normalized canonical target name for the `target_name`
observed in build module `build_module`.
A normalized canonical target name is of the form "<build module>:<name>",
where <build module> is the relative normalized path from the project root
to the target build module (POSIX), and <name> is a valid target name
(see `validate_name()`). | Below is the instruction that describes the task:
### Input:
Return a normalized canonical target name for the `target_name`
observed in build module `build_module`.
A normalized canonical target name is of the form "<build module>:<name>",
where <build module> is the relative normalized path from the project root
to the target build module (POSIX), and <name> is a valid target name
(see `validate_name()`).
### Response:
def norm_name(build_module: str, target_name: str):
"""Return a normalized canonical target name for the `target_name`
observed in build module `build_module`.
A normalized canonical target name is of the form "<build module>:<name>",
where <build module> is the relative normalized path from the project root
to the target build module (POSIX), and <name> is a valid target name
(see `validate_name()`).
"""
if ':' not in target_name:
raise ValueError(
"Must provide fully-qualified target name (with `:') to avoid "
"possible ambiguity - `{}' not valid".format(target_name))
mod, name = split(target_name)
return '{}:{}'.format(
PurePath(norm_proj_path(mod, build_module)).as_posix().strip('.'),
validate_name(name)) |
def resolve_path(target, start=os.path.curdir):
r"""
Find a path from start to target where target is relative to start.
>>> tmp = str(getfixture('tmpdir_as_cwd'))
>>> findpath('d:\\')
'd:\\'
>>> findpath('d:\\', tmp)
'd:\\'
>>> findpath('\\bar', 'd:\\')
'd:\\bar'
>>> findpath('\\bar', 'd:\\foo') # fails with '\\bar'
'd:\\bar'
>>> findpath('bar', 'd:\\foo')
'd:\\foo\\bar'
>>> findpath('\\baz', 'd:\\foo\\bar') # fails with '\\baz'
'd:\\baz'
>>> os.path.abspath(findpath('\\bar')).lower()
'c:\\bar'
>>> os.path.abspath(findpath('bar'))
'...\\bar'
>>> findpath('..', 'd:\\foo\\bar')
'd:\\foo'
The parent of the root directory is the root directory.
>>> findpath('..', 'd:\\')
'd:\\'
"""
return os.path.normpath(join(start, target)) | r"""
Find a path from start to target where target is relative to start.
>>> tmp = str(getfixture('tmpdir_as_cwd'))
>>> findpath('d:\\')
'd:\\'
>>> findpath('d:\\', tmp)
'd:\\'
>>> findpath('\\bar', 'd:\\')
'd:\\bar'
>>> findpath('\\bar', 'd:\\foo') # fails with '\\bar'
'd:\\bar'
>>> findpath('bar', 'd:\\foo')
'd:\\foo\\bar'
>>> findpath('\\baz', 'd:\\foo\\bar') # fails with '\\baz'
'd:\\baz'
>>> os.path.abspath(findpath('\\bar')).lower()
'c:\\bar'
>>> os.path.abspath(findpath('bar'))
'...\\bar'
>>> findpath('..', 'd:\\foo\\bar')
'd:\\foo'
The parent of the root directory is the root directory.
>>> findpath('..', 'd:\\')
'd:\\' | Below is the instruction that describes the task:
### Input:
r"""
Find a path from start to target where target is relative to start.
>>> tmp = str(getfixture('tmpdir_as_cwd'))
>>> findpath('d:\\')
'd:\\'
>>> findpath('d:\\', tmp)
'd:\\'
>>> findpath('\\bar', 'd:\\')
'd:\\bar'
>>> findpath('\\bar', 'd:\\foo') # fails with '\\bar'
'd:\\bar'
>>> findpath('bar', 'd:\\foo')
'd:\\foo\\bar'
>>> findpath('\\baz', 'd:\\foo\\bar') # fails with '\\baz'
'd:\\baz'
>>> os.path.abspath(findpath('\\bar')).lower()
'c:\\bar'
>>> os.path.abspath(findpath('bar'))
'...\\bar'
>>> findpath('..', 'd:\\foo\\bar')
'd:\\foo'
The parent of the root directory is the root directory.
>>> findpath('..', 'd:\\')
'd:\\'
### Response:
def resolve_path(target, start=os.path.curdir):
r"""
Find a path from start to target where target is relative to start.
>>> tmp = str(getfixture('tmpdir_as_cwd'))
>>> findpath('d:\\')
'd:\\'
>>> findpath('d:\\', tmp)
'd:\\'
>>> findpath('\\bar', 'd:\\')
'd:\\bar'
>>> findpath('\\bar', 'd:\\foo') # fails with '\\bar'
'd:\\bar'
>>> findpath('bar', 'd:\\foo')
'd:\\foo\\bar'
>>> findpath('\\baz', 'd:\\foo\\bar') # fails with '\\baz'
'd:\\baz'
>>> os.path.abspath(findpath('\\bar')).lower()
'c:\\bar'
>>> os.path.abspath(findpath('bar'))
'...\\bar'
>>> findpath('..', 'd:\\foo\\bar')
'd:\\foo'
The parent of the root directory is the root directory.
>>> findpath('..', 'd:\\')
'd:\\'
"""
return os.path.normpath(join(start, target)) |
def remove_metadata_key(self, key, prefix=None):
"""
Removes the specified key from the storage object's metadata. If the
key does not exist in the metadata, nothing is done.
"""
self.manager.remove_metadata_key(self, key, prefix=prefix) | Removes the specified key from the storage object's metadata. If the
key does not exist in the metadata, nothing is done. | Below is the instruction that describes the task:
### Input:
Removes the specified key from the storage object's metadata. If the
key does not exist in the metadata, nothing is done.
### Response:
def remove_metadata_key(self, key, prefix=None):
"""
Removes the specified key from the storage object's metadata. If the
key does not exist in the metadata, nothing is done.
"""
self.manager.remove_metadata_key(self, key, prefix=prefix) |
def resname_in_proximity(resname, model, chains, resnums, threshold=5):
"""Search within the proximity of a defined list of residue numbers and their chains for any specifed residue name.
Args:
resname (str): Residue name to search for in proximity of specified chains + resnums
model: Biopython Model object
chains (str, list): Chain ID or IDs to check
resnums (int, list): Residue numbers within the chain to check
threshold (float): Cutoff in Angstroms for returning True if a RESNAME is near
Returns:
bool: True if a RESNAME is within the threshold cutoff
"""
residues = [r for r in model.get_residues() if r.get_resname() == resname]
chains = ssbio.utils.force_list(chains)
resnums = ssbio.utils.force_list(resnums)
for chain in chains:
for resnum in resnums:
my_residue_last_atom = model[chain][resnum].child_list[-1]
for rz in residues:
distance = rz.child_list[-1] - my_residue_last_atom
if distance < threshold:
# print(resnum, rz, distance)
return True
return False | Search within the proximity of a defined list of residue numbers and their chains for any specifed residue name.
Args:
resname (str): Residue name to search for in proximity of specified chains + resnums
model: Biopython Model object
chains (str, list): Chain ID or IDs to check
resnums (int, list): Residue numbers within the chain to check
threshold (float): Cutoff in Angstroms for returning True if a RESNAME is near
Returns:
bool: True if a RESNAME is within the threshold cutoff | Below is the instruction that describes the task:
### Input:
Search within the proximity of a defined list of residue numbers and their chains for any specifed residue name.
Args:
resname (str): Residue name to search for in proximity of specified chains + resnums
model: Biopython Model object
chains (str, list): Chain ID or IDs to check
resnums (int, list): Residue numbers within the chain to check
threshold (float): Cutoff in Angstroms for returning True if a RESNAME is near
Returns:
bool: True if a RESNAME is within the threshold cutoff
### Response:
def resname_in_proximity(resname, model, chains, resnums, threshold=5):
"""Search within the proximity of a defined list of residue numbers and their chains for any specifed residue name.
Args:
resname (str): Residue name to search for in proximity of specified chains + resnums
model: Biopython Model object
chains (str, list): Chain ID or IDs to check
resnums (int, list): Residue numbers within the chain to check
threshold (float): Cutoff in Angstroms for returning True if a RESNAME is near
Returns:
bool: True if a RESNAME is within the threshold cutoff
"""
residues = [r for r in model.get_residues() if r.get_resname() == resname]
chains = ssbio.utils.force_list(chains)
resnums = ssbio.utils.force_list(resnums)
for chain in chains:
for resnum in resnums:
my_residue_last_atom = model[chain][resnum].child_list[-1]
for rz in residues:
distance = rz.child_list[-1] - my_residue_last_atom
if distance < threshold:
# print(resnum, rz, distance)
return True
return False |
def scm_find_files(path, scm_files, scm_dirs):
""" setuptools compatible file finder that follows symlinks
- path: the root directory from which to search
- scm_files: set of scm controlled files and symlinks
(including symlinks to directories)
- scm_dirs: set of scm controlled directories
(including directories containing no scm controlled files)
scm_files and scm_dirs must be absolute with symlinks resolved (realpath),
with normalized case (normcase)
Spec here: http://setuptools.readthedocs.io/en/latest/setuptools.html#\
adding-support-for-revision-control-systems
"""
realpath = os.path.normcase(os.path.realpath(path))
seen = set()
res = []
for dirpath, dirnames, filenames in os.walk(realpath, followlinks=True):
# dirpath with symlinks resolved
realdirpath = os.path.normcase(os.path.realpath(dirpath))
def _link_not_in_scm(n):
fn = os.path.join(realdirpath, os.path.normcase(n))
return os.path.islink(fn) and fn not in scm_files
if realdirpath not in scm_dirs:
# directory not in scm, don't walk it's content
dirnames[:] = []
continue
if (
os.path.islink(dirpath)
and not os.path.relpath(realdirpath, realpath).startswith(os.pardir)
):
# a symlink to a directory not outside path:
# we keep it in the result and don't walk its content
res.append(os.path.join(path, os.path.relpath(dirpath, path)))
dirnames[:] = []
continue
if realdirpath in seen:
# symlink loop protection
dirnames[:] = []
continue
dirnames[:] = [dn for dn in dirnames if not _link_not_in_scm(dn)]
for filename in filenames:
if _link_not_in_scm(filename):
continue
# dirpath + filename with symlinks preserved
fullfilename = os.path.join(dirpath, filename)
if os.path.normcase(os.path.realpath(fullfilename)) in scm_files:
res.append(os.path.join(path, os.path.relpath(fullfilename, path)))
seen.add(realdirpath)
return res | setuptools compatible file finder that follows symlinks
- path: the root directory from which to search
- scm_files: set of scm controlled files and symlinks
(including symlinks to directories)
- scm_dirs: set of scm controlled directories
(including directories containing no scm controlled files)
scm_files and scm_dirs must be absolute with symlinks resolved (realpath),
with normalized case (normcase)
Spec here: http://setuptools.readthedocs.io/en/latest/setuptools.html#\
adding-support-for-revision-control-systems | Below is the instruction that describes the task:
### Input:
setuptools compatible file finder that follows symlinks
- path: the root directory from which to search
- scm_files: set of scm controlled files and symlinks
(including symlinks to directories)
- scm_dirs: set of scm controlled directories
(including directories containing no scm controlled files)
scm_files and scm_dirs must be absolute with symlinks resolved (realpath),
with normalized case (normcase)
Spec here: http://setuptools.readthedocs.io/en/latest/setuptools.html#\
adding-support-for-revision-control-systems
### Response:
def scm_find_files(path, scm_files, scm_dirs):
""" setuptools compatible file finder that follows symlinks
- path: the root directory from which to search
- scm_files: set of scm controlled files and symlinks
(including symlinks to directories)
- scm_dirs: set of scm controlled directories
(including directories containing no scm controlled files)
scm_files and scm_dirs must be absolute with symlinks resolved (realpath),
with normalized case (normcase)
Spec here: http://setuptools.readthedocs.io/en/latest/setuptools.html#\
adding-support-for-revision-control-systems
"""
realpath = os.path.normcase(os.path.realpath(path))
seen = set()
res = []
for dirpath, dirnames, filenames in os.walk(realpath, followlinks=True):
# dirpath with symlinks resolved
realdirpath = os.path.normcase(os.path.realpath(dirpath))
def _link_not_in_scm(n):
fn = os.path.join(realdirpath, os.path.normcase(n))
return os.path.islink(fn) and fn not in scm_files
if realdirpath not in scm_dirs:
# directory not in scm, don't walk it's content
dirnames[:] = []
continue
if (
os.path.islink(dirpath)
and not os.path.relpath(realdirpath, realpath).startswith(os.pardir)
):
# a symlink to a directory not outside path:
# we keep it in the result and don't walk its content
res.append(os.path.join(path, os.path.relpath(dirpath, path)))
dirnames[:] = []
continue
if realdirpath in seen:
# symlink loop protection
dirnames[:] = []
continue
dirnames[:] = [dn for dn in dirnames if not _link_not_in_scm(dn)]
for filename in filenames:
if _link_not_in_scm(filename):
continue
# dirpath + filename with symlinks preserved
fullfilename = os.path.join(dirpath, filename)
if os.path.normcase(os.path.realpath(fullfilename)) in scm_files:
res.append(os.path.join(path, os.path.relpath(fullfilename, path)))
seen.add(realdirpath)
return res |
def start_rpc_listeners(self):
"""Configure all listeners here"""
self._setup_rpc()
if not self.endpoints:
return []
self.conn = n_rpc.create_connection()
self.conn.create_consumer(self.topic, self.endpoints,
fanout=False)
return self.conn.consume_in_threads() | Configure all listeners here | Below is the instruction that describes the task:
### Input:
Configure all listeners here
### Response:
def start_rpc_listeners(self):
"""Configure all listeners here"""
self._setup_rpc()
if not self.endpoints:
return []
self.conn = n_rpc.create_connection()
self.conn.create_consumer(self.topic, self.endpoints,
fanout=False)
return self.conn.consume_in_threads() |
def _evaluate_usecols(usecols, names):
"""
Check whether or not the 'usecols' parameter
is a callable. If so, enumerates the 'names'
parameter and returns a set of indices for
each entry in 'names' that evaluates to True.
If not a callable, returns 'usecols'.
"""
if callable(usecols):
return {i for i, name in enumerate(names) if usecols(name)}
return usecols | Check whether or not the 'usecols' parameter
is a callable. If so, enumerates the 'names'
parameter and returns a set of indices for
each entry in 'names' that evaluates to True.
If not a callable, returns 'usecols'. | Below is the instruction that describes the task:
### Input:
Check whether or not the 'usecols' parameter
is a callable. If so, enumerates the 'names'
parameter and returns a set of indices for
each entry in 'names' that evaluates to True.
If not a callable, returns 'usecols'.
### Response:
def _evaluate_usecols(usecols, names):
"""
Check whether or not the 'usecols' parameter
is a callable. If so, enumerates the 'names'
parameter and returns a set of indices for
each entry in 'names' that evaluates to True.
If not a callable, returns 'usecols'.
"""
if callable(usecols):
return {i for i, name in enumerate(names) if usecols(name)}
return usecols |
def Collect(self, knowledge_base):
"""Collects values from the knowledge base.
Args:
knowledge_base (KnowledgeBase): to fill with preprocessing information.
Raises:
PreProcessFail: if the preprocessing fails.
"""
environment_variable = knowledge_base.GetEnvironmentVariable(
'programdata')
allusersprofile = getattr(environment_variable, 'value', None)
if not allusersprofile:
environment_variable = knowledge_base.GetEnvironmentVariable(
'allusersprofile')
allusersprofile = getattr(environment_variable, 'value', None)
if allusersprofile:
environment_variable = artifacts.EnvironmentVariableArtifact(
case_sensitive=False, name='programdata', value=allusersprofile)
try:
logger.debug('setting environment variable: {0:s} to: "{1:s}"'.format(
'programdata', allusersprofile))
knowledge_base.AddEnvironmentVariable(environment_variable)
except KeyError:
# TODO: add and store preprocessing errors.
pass | Collects values from the knowledge base.
Args:
knowledge_base (KnowledgeBase): to fill with preprocessing information.
Raises:
PreProcessFail: if the preprocessing fails. | Below is the instruction that describes the task:
### Input:
Collects values from the knowledge base.
Args:
knowledge_base (KnowledgeBase): to fill with preprocessing information.
Raises:
PreProcessFail: if the preprocessing fails.
### Response:
def Collect(self, knowledge_base):
"""Collects values from the knowledge base.
Args:
knowledge_base (KnowledgeBase): to fill with preprocessing information.
Raises:
PreProcessFail: if the preprocessing fails.
"""
environment_variable = knowledge_base.GetEnvironmentVariable(
'programdata')
allusersprofile = getattr(environment_variable, 'value', None)
if not allusersprofile:
environment_variable = knowledge_base.GetEnvironmentVariable(
'allusersprofile')
allusersprofile = getattr(environment_variable, 'value', None)
if allusersprofile:
environment_variable = artifacts.EnvironmentVariableArtifact(
case_sensitive=False, name='programdata', value=allusersprofile)
try:
logger.debug('setting environment variable: {0:s} to: "{1:s}"'.format(
'programdata', allusersprofile))
knowledge_base.AddEnvironmentVariable(environment_variable)
except KeyError:
# TODO: add and store preprocessing errors.
pass |
def draw_png(self, image, h_zoom, v_zoom, current_y):
"""
Draw this set of labels to PNG.
:param image: the image to draw onto
:param int h_zoom: the horizontal zoom
:param int v_zoom: the vertical zoom
:param int current_y: the current y offset, in modules
:type image: :class:`PIL.Image`
"""
# PIL object
draw = ImageDraw.Draw(image)
mws = self.rconf.mws
pixels_per_second = int(h_zoom / mws)
# font for begin/end times
time_font_height_pt = 12
time_font = ImageFont.truetype(self.FONT_PATH, time_font_height_pt)
# font for labels
label_font_height_pt = 18
label_font = ImageFont.truetype(self.FONT_PATH, label_font_height_pt)
current_y_px = current_y * v_zoom + 0.25 * v_zoom
for (begin, end, label) in self.labelset:
# base x position
begin_px = int(begin * pixels_per_second)
end_px = int(end * pixels_per_second)
# select color for the horizontal bar
if label == "speech":
color = PlotterColors.RED
elif label == "nonspeech":
color = PlotterColors.GREEN
else:
color = self.parameters["color"]
# horizontal bar
bar_top_px = current_y_px + v_zoom * 0.5 - self.TICK_WIDTH
bar_bottom_px = bar_top_px + 2 * self.TICK_WIDTH
bar_left_px = begin_px
bar_right_px = end_px
draw.rectangle((bar_left_px, bar_top_px, bar_right_px, bar_bottom_px), fill=color)
# left guide
if self.parameters["begin_guide"]:
top_px = 0
bottom_px = current_y_px + v_zoom
left_px = begin_px
draw.rectangle((left_px, top_px, left_px, bottom_px), fill=color)
# left tick
top_px = current_y_px
bottom_px = current_y_px + v_zoom
left_px = begin_px
right_px = begin_px + self.TICK_WIDTH
draw.rectangle((left_px, top_px, right_px, bottom_px), fill=PlotterColors.BLACK)
# right guide
if self.parameters["end_guide"]:
top_px = 0
bottom_px = current_y_px + v_zoom
left_px = end_px
draw.rectangle((left_px, top_px, left_px, bottom_px), fill=color)
# right tick
top_px = current_y_px
bottom_px = current_y_px + v_zoom
left_px = end_px - self.TICK_WIDTH
right_px = end_px
draw.rectangle((left_px, top_px, right_px, bottom_px), fill=PlotterColors.BLACK)
# begin time
if self.parameters["begin_time"]:
sb = ("%.03f" % (begin - int(begin)))[2:]
left_px = begin_px + self.TICK_WIDTH + self.TEXT_MARGIN
top_px = current_y_px - self.TEXT_MARGIN
draw.text((left_px, top_px), sb, PlotterColors.BLACK, font=time_font)
# end time
if self.parameters["end_time"]:
se = ("%.03f" % (end - int(end)))[2:]
left_px = end_px - self.TEXT_MARGIN - self.TICK_WIDTH - self.text_bounding_box(time_font_height_pt, se)[0]
top_px = current_y_px + v_zoom - self.text_bounding_box(time_font_height_pt, sb)[1]
draw.text((left_px, top_px), se, PlotterColors.BLACK, font=time_font)
# interval label
if self.parameters["labels"]:
left_px = begin_px + (end_px - begin_px - self.text_bounding_box(label_font_height_pt, label)[0]) // 2
top_px = current_y_px + v_zoom
draw.text((left_px, top_px), label, PlotterColors.BLACK, font=label_font)
# label
left_px = 0
top_px = current_y_px + v_zoom
if self.label is not None:
draw.text((left_px, top_px), self.label, PlotterColors.BLACK, font=label_font) | Draw this set of labels to PNG.
:param image: the image to draw onto
:param int h_zoom: the horizontal zoom
:param int v_zoom: the vertical zoom
:param int current_y: the current y offset, in modules
:type image: :class:`PIL.Image` | Below is the instruction that describes the task:
### Input:
Draw this set of labels to PNG.
:param image: the image to draw onto
:param int h_zoom: the horizontal zoom
:param int v_zoom: the vertical zoom
:param int current_y: the current y offset, in modules
:type image: :class:`PIL.Image`
### Response:
def draw_png(self, image, h_zoom, v_zoom, current_y):
"""
Draw this set of labels to PNG.
:param image: the image to draw onto
:param int h_zoom: the horizontal zoom
:param int v_zoom: the vertical zoom
:param int current_y: the current y offset, in modules
:type image: :class:`PIL.Image`
"""
# PIL object
draw = ImageDraw.Draw(image)
mws = self.rconf.mws
pixels_per_second = int(h_zoom / mws)
# font for begin/end times
time_font_height_pt = 12
time_font = ImageFont.truetype(self.FONT_PATH, time_font_height_pt)
# font for labels
label_font_height_pt = 18
label_font = ImageFont.truetype(self.FONT_PATH, label_font_height_pt)
current_y_px = current_y * v_zoom + 0.25 * v_zoom
for (begin, end, label) in self.labelset:
# base x position
begin_px = int(begin * pixels_per_second)
end_px = int(end * pixels_per_second)
# select color for the horizontal bar
if label == "speech":
color = PlotterColors.RED
elif label == "nonspeech":
color = PlotterColors.GREEN
else:
color = self.parameters["color"]
# horizontal bar
bar_top_px = current_y_px + v_zoom * 0.5 - self.TICK_WIDTH
bar_bottom_px = bar_top_px + 2 * self.TICK_WIDTH
bar_left_px = begin_px
bar_right_px = end_px
draw.rectangle((bar_left_px, bar_top_px, bar_right_px, bar_bottom_px), fill=color)
# left guide
if self.parameters["begin_guide"]:
top_px = 0
bottom_px = current_y_px + v_zoom
left_px = begin_px
draw.rectangle((left_px, top_px, left_px, bottom_px), fill=color)
# left tick
top_px = current_y_px
bottom_px = current_y_px + v_zoom
left_px = begin_px
right_px = begin_px + self.TICK_WIDTH
draw.rectangle((left_px, top_px, right_px, bottom_px), fill=PlotterColors.BLACK)
# right guide
if self.parameters["end_guide"]:
top_px = 0
bottom_px = current_y_px + v_zoom
left_px = end_px
draw.rectangle((left_px, top_px, left_px, bottom_px), fill=color)
# right tick
top_px = current_y_px
bottom_px = current_y_px + v_zoom
left_px = end_px - self.TICK_WIDTH
right_px = end_px
draw.rectangle((left_px, top_px, right_px, bottom_px), fill=PlotterColors.BLACK)
# begin time
if self.parameters["begin_time"]:
sb = ("%.03f" % (begin - int(begin)))[2:]
left_px = begin_px + self.TICK_WIDTH + self.TEXT_MARGIN
top_px = current_y_px - self.TEXT_MARGIN
draw.text((left_px, top_px), sb, PlotterColors.BLACK, font=time_font)
# end time
if self.parameters["end_time"]:
se = ("%.03f" % (end - int(end)))[2:]
left_px = end_px - self.TEXT_MARGIN - self.TICK_WIDTH - self.text_bounding_box(time_font_height_pt, se)[0]
top_px = current_y_px + v_zoom - self.text_bounding_box(time_font_height_pt, sb)[1]
draw.text((left_px, top_px), se, PlotterColors.BLACK, font=time_font)
# interval label
if self.parameters["labels"]:
left_px = begin_px + (end_px - begin_px - self.text_bounding_box(label_font_height_pt, label)[0]) // 2
top_px = current_y_px + v_zoom
draw.text((left_px, top_px), label, PlotterColors.BLACK, font=label_font)
# label
left_px = 0
top_px = current_y_px + v_zoom
if self.label is not None:
draw.text((left_px, top_px), self.label, PlotterColors.BLACK, font=label_font) |
def validate_protoquil(program: Program) -> None:
"""
Ensure that a program is valid ProtoQuil, otherwise raise a ValueError.
Protoquil is a subset of Quil which excludes control flow and classical instructions.
:param program: The Quil program to validate.
"""
valid_instruction_types = tuple([Pragma, Declare, Halt, Gate, Reset, ResetQubit, Measurement])
for instr in program.instructions:
if not isinstance(instr, valid_instruction_types):
# Instructions like MOVE, NOT, JUMP, JUMP-UNLESS will fail here
raise ValueError(f"ProtoQuil validation failed: {instr} is not allowed.") | Ensure that a program is valid ProtoQuil, otherwise raise a ValueError.
Protoquil is a subset of Quil which excludes control flow and classical instructions.
:param program: The Quil program to validate. | Below is the instruction that describes the task:
### Input:
Ensure that a program is valid ProtoQuil, otherwise raise a ValueError.
Protoquil is a subset of Quil which excludes control flow and classical instructions.
:param program: The Quil program to validate.
### Response:
def validate_protoquil(program: Program) -> None:
"""
Ensure that a program is valid ProtoQuil, otherwise raise a ValueError.
Protoquil is a subset of Quil which excludes control flow and classical instructions.
:param program: The Quil program to validate.
"""
valid_instruction_types = tuple([Pragma, Declare, Halt, Gate, Reset, ResetQubit, Measurement])
for instr in program.instructions:
if not isinstance(instr, valid_instruction_types):
# Instructions like MOVE, NOT, JUMP, JUMP-UNLESS will fail here
raise ValueError(f"ProtoQuil validation failed: {instr} is not allowed.") |
def build_model(self, n_features, n_classes):
"""Create the computational graph.
:param n_features: number of features
:param n_classes: number of classes
:return: self
"""
self._create_placeholders(n_features, n_classes)
self._create_variables(n_features, n_classes)
self.mod_y = tf.nn.softmax(
tf.add(tf.matmul(self.input_data, self.W_), self.b_))
self.cost = self.loss.compile(self.mod_y, self.input_labels)
self.train_step = tf.train.GradientDescentOptimizer(
self.learning_rate).minimize(self.cost)
self.accuracy = Evaluation.accuracy(self.mod_y, self.input_labels) | Create the computational graph.
:param n_features: number of features
:param n_classes: number of classes
:return: self | Below is the instruction that describes the task:
### Input:
Create the computational graph.
:param n_features: number of features
:param n_classes: number of classes
:return: self
### Response:
def build_model(self, n_features, n_classes):
"""Create the computational graph.
:param n_features: number of features
:param n_classes: number of classes
:return: self
"""
self._create_placeholders(n_features, n_classes)
self._create_variables(n_features, n_classes)
self.mod_y = tf.nn.softmax(
tf.add(tf.matmul(self.input_data, self.W_), self.b_))
self.cost = self.loss.compile(self.mod_y, self.input_labels)
self.train_step = tf.train.GradientDescentOptimizer(
self.learning_rate).minimize(self.cost)
self.accuracy = Evaluation.accuracy(self.mod_y, self.input_labels) |
def readLongString(self):
"""
Read UTF8 string.
"""
l = self.stream.read_ulong()
bytes = self.stream.read(l)
return self.context.getStringForBytes(bytes) | Read UTF8 string. | Below is the instruction that describes the task:
### Input:
Read UTF8 string.
### Response:
def readLongString(self):
"""
Read UTF8 string.
"""
l = self.stream.read_ulong()
bytes = self.stream.read(l)
return self.context.getStringForBytes(bytes) |
def artist(self):
"""
:class:`Artist` object of album's artist
"""
if not self._artist:
self._artist = Artist(self._artist_id, self._artist_name, self._connection)
return self._artist | :class:`Artist` object of album's artist | Below is the instruction that describes the task:
### Input:
:class:`Artist` object of album's artist
### Response:
def artist(self):
"""
:class:`Artist` object of album's artist
"""
if not self._artist:
self._artist = Artist(self._artist_id, self._artist_name, self._connection)
return self._artist |
def set_alternative_view(self, request):
"""
Allows the admin user to change their assigned alternative
"""
if not request.user.has_perm('experiments.change_experiment'):
return HttpResponseForbidden()
experiment_name = request.POST.get("experiment")
alternative_name = request.POST.get("alternative")
if not (experiment_name and alternative_name):
return HttpResponseBadRequest()
participant(request).set_alternative(experiment_name, alternative_name)
return JsonResponse({
'success': True,
'alternative': participant(request).get_alternative(experiment_name)
}) | Allows the admin user to change their assigned alternative | Below is the instruction that describes the task:
### Input:
Allows the admin user to change their assigned alternative
### Response:
def set_alternative_view(self, request):
"""
Allows the admin user to change their assigned alternative
"""
if not request.user.has_perm('experiments.change_experiment'):
return HttpResponseForbidden()
experiment_name = request.POST.get("experiment")
alternative_name = request.POST.get("alternative")
if not (experiment_name and alternative_name):
return HttpResponseBadRequest()
participant(request).set_alternative(experiment_name, alternative_name)
return JsonResponse({
'success': True,
'alternative': participant(request).get_alternative(experiment_name)
}) |
def add_record(self, msg_id, rec):
"""Add a new Task Record, by msg_id."""
# print rec
rec = self._binary_buffers(rec)
self._records.insert(rec) | Add a new Task Record, by msg_id. | Below is the instruction that describes the task:
### Input:
Add a new Task Record, by msg_id.
### Response:
def add_record(self, msg_id, rec):
    """Add a new Task Record, by msg_id.

    NOTE(review): msg_id is not used in this body; presumably the record
    itself carries its msg_id field -- confirm against callers.
    """
    # print rec
    # Convert any buffer fields into a storable binary form before insert.
    rec = self._binary_buffers(rec)
    self._records.insert(rec)
def open(filename, frame='unspecified'):
"""Creates a ColorImage from a file.
Parameters
----------
filename : :obj:`str`
The file to load the data from. Must be one of .png, .jpg,
.npy, or .npz.
frame : :obj:`str`
A string representing the frame of reference in which the new image
lies.
Returns
-------
:obj:`ColorImage`
The new color image.
"""
data = Image.load_data(filename).astype(np.uint8)
return ColorImage(data, frame) | Creates a ColorImage from a file.
Parameters
----------
filename : :obj:`str`
The file to load the data from. Must be one of .png, .jpg,
.npy, or .npz.
frame : :obj:`str`
A string representing the frame of reference in which the new image
lies.
Returns
-------
:obj:`ColorImage`
The new color image. | Below is the instruction that describes the task:
### Input:
Creates a ColorImage from a file.
Parameters
----------
filename : :obj:`str`
The file to load the data from. Must be one of .png, .jpg,
.npy, or .npz.
frame : :obj:`str`
A string representing the frame of reference in which the new image
lies.
Returns
-------
:obj:`ColorImage`
The new color image.
### Response:
def open(filename, frame='unspecified'):
    """Creates a ColorImage from a file.
    Parameters
    ----------
    filename : :obj:`str`
        The file to load the data from. Must be one of .png, .jpg,
        .npy, or .npz.
    frame : :obj:`str`
        A string representing the frame of reference in which the new image
        lies.
    Returns
    -------
    :obj:`ColorImage`
        The new color image.
    """
    # Color image data is normalized to 8-bit unsigned channels.
    data = Image.load_data(filename).astype(np.uint8)
    return ColorImage(data, frame)
def _isCompatible(self, other, reporter):
"""
This is the environment implementation of
:meth:`BaseComponent.isCompatible`.
Subclasses may override this method.
"""
component1 = self
component2 = other
# base glyphs
if component1.baseName != component2.baseName:
reporter.baseDifference = True
reporter.warning = True | This is the environment implementation of
:meth:`BaseComponent.isCompatible`.
Subclasses may override this method. | Below is the the instruction that describes the task:
### Input:
This is the environment implementation of
:meth:`BaseComponent.isCompatible`.
Subclasses may override this method.
### Response:
def _isCompatible(self, other, reporter):
    """
    This is the environment implementation of
    :meth:`BaseComponent.isCompatible`.
    Subclasses may override this method.
    """
    component1 = self
    component2 = other
    # base glyphs
    # A mismatch in referenced base glyphs is recorded as a (non-fatal)
    # warning on the reporter rather than a hard incompatibility.
    if component1.baseName != component2.baseName:
        reporter.baseDifference = True
        reporter.warning = True
def add_interface_router(router, subnet, profile=None):
'''
Adds an internal network interface to the specified router
CLI Example:
.. code-block:: bash
salt '*' neutron.add_interface_router router-name subnet-name
:param router: ID or name of the router
:param subnet: ID or name of the subnet
:param profile: Profile to build on (Optional)
:return: Added interface information
'''
conn = _auth(profile)
return conn.add_interface_router(router, subnet) | Adds an internal network interface to the specified router
CLI Example:
.. code-block:: bash
salt '*' neutron.add_interface_router router-name subnet-name
:param router: ID or name of the router
:param subnet: ID or name of the subnet
:param profile: Profile to build on (Optional)
:return: Added interface information | Below is the the instruction that describes the task:
### Input:
Adds an internal network interface to the specified router
CLI Example:
.. code-block:: bash
salt '*' neutron.add_interface_router router-name subnet-name
:param router: ID or name of the router
:param subnet: ID or name of the subnet
:param profile: Profile to build on (Optional)
:return: Added interface information
### Response:
def add_interface_router(router, subnet, profile=None):
    '''
    Adds an internal network interface to the specified router
    CLI Example:
    .. code-block:: bash
        salt '*' neutron.add_interface_router router-name subnet-name
    :param router: ID or name of the router
    :param subnet: ID or name of the subnet
    :param profile: Profile to build on (Optional)
    :return: Added interface information
    '''
    # Authenticate against the configured profile and delegate directly.
    return _auth(profile).add_interface_router(router, subnet)
def _compute_mean_on_rock(self, C, mag, rrup, rvol, hypo_depth, CN, CR,
f4HW):
"""
Compute mean value on site class A/B (equation 2 on page 22)
"""
# Define subduction flag (page 23)
# SI=1 for subduction interface, 0 otherwise
# DS=1 for subduction intraslab, 0 otherwise
SI = 0
DS = 1
lnSA_AB = (
# line 1 and 2 of equation 2
C['c11'] + (C['c12y'] + (C['c15'] - C['c17']) * C['c19y']) *
(mag - 6) +
# line 3
C['c13y'] * (10 - mag) ** 3 +
# line 4
C['c17'] * np.log(rrup + C['c18y'] * np.exp(C['c19y'] * mag)) +
# line 5
C['c20'] * hypo_depth + C['c24'] * SI +
# line 6
C['c46'] * rvol * (1 - DS)
)
return lnSA_AB | Compute mean value on site class A/B (equation 2 on page 22) | Below is the the instruction that describes the task:
### Input:
Compute mean value on site class A/B (equation 2 on page 22)
### Response:
def _compute_mean_on_rock(self, C, mag, rrup, rvol, hypo_depth, CN, CR,
                          f4HW):
    """
    Compute mean value on site class A/B (equation 2 on page 22)
    """
    # Subduction flags (page 23): SI=1 for subduction interface,
    # DS=1 for subduction intraslab.  This path models intraslab,
    # hence SI=0 and DS=1.
    SI = 0
    DS = 1
    # Assemble equation 2 term by term (same evaluation order as the
    # single-expression form, so the result is bit-identical).
    magnitude_term = (C['c12y'] + (C['c15'] - C['c17']) * C['c19y']) * (mag - 6)
    near_source_term = C['c13y'] * (10 - mag) ** 3
    distance_term = C['c17'] * np.log(
        rrup + C['c18y'] * np.exp(C['c19y'] * mag))
    depth_term = C['c20'] * hypo_depth + C['c24'] * SI
    volcanic_term = C['c46'] * rvol * (1 - DS)
    return (C['c11'] + magnitude_term + near_source_term + distance_term +
            depth_term + volcanic_term)
def __set_authoring_nodes(self, source, target):
"""
Sets given editor authoring nodes.
:param source: Source file.
:type source: unicode
:param target: Target file.
:type target: unicode
"""
editor = self.__script_editor.get_editor(source)
editor.set_file(target)
self.__script_editor.model.update_authoring_nodes(editor) | Sets given editor authoring nodes.
:param source: Source file.
:type source: unicode
:param target: Target file.
:type target: unicode | Below is the the instruction that describes the task:
### Input:
Sets given editor authoring nodes.
:param source: Source file.
:type source: unicode
:param target: Target file.
:type target: unicode
### Response:
def __set_authoring_nodes(self, source, target):
    """
    Sets given editor authoring nodes.
    :param source: Source file.
    :type source: unicode
    :param target: Target file.
    :type target: unicode
    """
    # Retarget the editor currently holding `source` to `target`, then
    # refresh the model so its authoring nodes reflect the new file.
    editor = self.__script_editor.get_editor(source)
    editor.set_file(target)
    self.__script_editor.model.update_authoring_nodes(editor)
def _setSmsMemory(self, readDelete=None, write=None):
""" Set the current SMS memory to use for read/delete/write operations """
# Switch to the correct memory type if required
if write != None and write != self._smsMemWrite:
self.write()
readDel = readDelete or self._smsMemReadDelete
self.write('AT+CPMS="{0}","{1}"'.format(readDel, write))
self._smsMemReadDelete = readDel
self._smsMemWrite = write
elif readDelete != None and readDelete != self._smsMemReadDelete:
self.write('AT+CPMS="{0}"'.format(readDelete))
self._smsMemReadDelete = readDelete | Set the current SMS memory to use for read/delete/write operations | Below is the the instruction that describes the task:
### Input:
Set the current SMS memory to use for read/delete/write operations
### Response:
def _setSmsMemory(self, readDelete=None, write=None):
    """ Set the current SMS memory to use for read/delete/write operations

    :param readDelete: memory to use for reading/deleting messages
        (e.g. "SM", "ME"), or None to leave it unchanged
    :param write: memory to use for writing/storing messages,
        or None to leave it unchanged
    """
    # Switch to the correct memory type if required.
    # PEP 8: compare against None with `is not`, not `!=`.
    if write is not None and write != self._smsMemWrite:
        # NOTE(review): this bare write() issues an empty command before the
        # CPMS command -- looks suspicious, but preserved as-is; confirm
        # against the modem protocol before removing.
        self.write()
        readDel = readDelete or self._smsMemReadDelete
        self.write('AT+CPMS="{0}","{1}"'.format(readDel, write))
        self._smsMemReadDelete = readDel
        self._smsMemWrite = write
    elif readDelete is not None and readDelete != self._smsMemReadDelete:
        self.write('AT+CPMS="{0}"'.format(readDelete))
        self._smsMemReadDelete = readDelete
def dispatch(self, *args, **kwargs):
"""This decorator sets this view to have restricted permissions."""
return super(AnimalDelete, self).dispatch(*args, **kwargs) | This decorator sets this view to have restricted permissions. | Below is the the instruction that describes the task:
### Input:
This decorator sets this view to have restricted permissions.
### Response:
def dispatch(self, *args, **kwargs):
    """This decorator sets this view to have restricted permissions."""
    # Delegates straight to the parent dispatch; the permission restriction
    # itself comes from decorators applied to this method (not visible here).
    return super(AnimalDelete, self).dispatch(*args, **kwargs)
def find_counterpart_in(self, tree_b):
""" Finds a TreeNode counterpart for this node in tree_b
:param tree_b: target tree that hosts counterpart to this node
:return: TreeNode from tree_b that has the same timeperiod as self.timeperiod,
or None if no counterpart ware found
"""
tree_b_hierarchy_entry = tree_b.process_hierarchy.get_by_qualifier(self.time_qualifier)
if not tree_b_hierarchy_entry:
# special case when tree with more levels depends on the tree with smaller amount of levels
# for example ThreeLevel Financial tree depends on TwoLevel Google Channel
# in this case - we just verify time-periods that matches in both trees;
# for levels that have no match, we assume that dependency does not exists
# for example Financial Monthly has no counterpart in Google Daily Report -
# so we assume that its not blocked
node_b = None
else:
node_b = tree_b.get_node(tree_b_hierarchy_entry.process_entry.process_name, self.timeperiod)
return node_b | Finds a TreeNode counterpart for this node in tree_b
:param tree_b: target tree that hosts counterpart to this node
:return: TreeNode from tree_b that has the same timeperiod as self.timeperiod,
or None if no counterpart ware found | Below is the the instruction that describes the task:
### Input:
Finds a TreeNode counterpart for this node in tree_b
:param tree_b: target tree that hosts counterpart to this node
:return: TreeNode from tree_b that has the same timeperiod as self.timeperiod,
or None if no counterpart ware found
### Response:
def find_counterpart_in(self, tree_b):
    """ Finds a TreeNode counterpart for this node in tree_b
    :param tree_b: target tree that hosts counterpart to this node
    :return: TreeNode from tree_b that has the same timeperiod as self.timeperiod,
        or None if no counterpart was found
    """
    # Locate the hierarchy level in tree_b matching this node's time qualifier.
    tree_b_hierarchy_entry = tree_b.process_hierarchy.get_by_qualifier(self.time_qualifier)
    if not tree_b_hierarchy_entry:
        # special case when tree with more levels depends on the tree with smaller amount of levels
        # for example ThreeLevel Financial tree depends on TwoLevel Google Channel
        # in this case - we just verify time-periods that matches in both trees;
        # for levels that have no match, we assume that dependency does not exists
        # for example Financial Monthly has no counterpart in Google Daily Report -
        # so we assume that its not blocked
        node_b = None
    else:
        node_b = tree_b.get_node(tree_b_hierarchy_entry.process_entry.process_name, self.timeperiod)
    return node_b
def hide_routemap_holder_route_map_content_set_local_preference_local_preference_value(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
hide_routemap_holder = ET.SubElement(config, "hide-routemap-holder", xmlns="urn:brocade.com:mgmt:brocade-ip-policy")
route_map = ET.SubElement(hide_routemap_holder, "route-map")
name_key = ET.SubElement(route_map, "name")
name_key.text = kwargs.pop('name')
action_rm_key = ET.SubElement(route_map, "action-rm")
action_rm_key.text = kwargs.pop('action_rm')
instance_key = ET.SubElement(route_map, "instance")
instance_key.text = kwargs.pop('instance')
content = ET.SubElement(route_map, "content")
set = ET.SubElement(content, "set")
local_preference = ET.SubElement(set, "local-preference")
local_preference_value = ET.SubElement(local_preference, "local-preference-value")
local_preference_value.text = kwargs.pop('local_preference_value')
callback = kwargs.pop('callback', self._callback)
return callback(config) | Auto Generated Code | Below is the the instruction that describes the task:
### Input:
Auto Generated Code
### Response:
def hide_routemap_holder_route_map_content_set_local_preference_local_preference_value(self, **kwargs):
    """Auto Generated Code

    Builds the NETCONF <config> XML payload that sets a route-map's
    BGP local-preference value, then hands it to the callback.
    Required kwargs: name, action_rm, instance, local_preference_value;
    optional: callback (defaults to self._callback).
    """
    config = ET.Element("config")
    hide_routemap_holder = ET.SubElement(config, "hide-routemap-holder", xmlns="urn:brocade.com:mgmt:brocade-ip-policy")
    route_map = ET.SubElement(hide_routemap_holder, "route-map")
    # Key elements identify which route-map instance is being modified.
    name_key = ET.SubElement(route_map, "name")
    name_key.text = kwargs.pop('name')
    action_rm_key = ET.SubElement(route_map, "action-rm")
    action_rm_key.text = kwargs.pop('action_rm')
    instance_key = ET.SubElement(route_map, "instance")
    instance_key.text = kwargs.pop('instance')
    content = ET.SubElement(route_map, "content")
    set = ET.SubElement(content, "set")
    local_preference = ET.SubElement(set, "local-preference")
    local_preference_value = ET.SubElement(local_preference, "local-preference-value")
    local_preference_value.text = kwargs.pop('local_preference_value')
    callback = kwargs.pop('callback', self._callback)
    return callback(config)
def get_diffs(ptrms_vectors, ptrm_checks_vectors, ptrms_orig, checks_orig):
"""
input: ptrms_vectors, ptrm_checks_vectors, ptrms_orig, checks_orig
output: vector diffs between original and ptrm check, C
"""
ptrm_temps = numpy.array(ptrms_orig)[:,0]
check_temps = numpy.array(checks_orig)[:,0]
index = numpy.zeros(len(ptrm_temps))
for num, temp in enumerate(ptrm_temps):
if len(numpy.where(check_temps == temp)[0]):
index[num] = numpy.where(check_temps == temp)[0][0]
else:
index[num] = float('nan')
diffs = numpy.zeros((len(ptrms_vectors), 3))
for num, ptrm in enumerate(ptrms_vectors):
if numpy.isnan(index[num]):
diffs[num] = numpy.array([0,0,0])
else:
diffs[num] = ptrm_checks_vectors[int(index[num])] - ptrm
C = numpy.cumsum(diffs, 0)
#print "diffs (should be same as to_sum"
#print diffs
#print "C (should be same as dpal_sum)"
#print C
return diffs, C | input: ptrms_vectors, ptrm_checks_vectors, ptrms_orig, checks_orig
output: vector diffs between original and ptrm check, C | Below is the the instruction that describes the task:
### Input:
input: ptrms_vectors, ptrm_checks_vectors, ptrms_orig, checks_orig
output: vector diffs between original and ptrm check, C
### Response:
def get_diffs(ptrms_vectors, ptrm_checks_vectors, ptrms_orig, checks_orig):
    """
    input: ptrms_vectors, ptrm_checks_vectors, ptrms_orig, checks_orig
    output: vector diffs between original and ptrm check, C

    For each pTRM step, find the pTRM check performed at the same
    temperature (if any) and compute the vector difference
    (check - ptrm).  Steps with no matching check contribute a zero
    vector.  C is the running (cumulative) sum of those differences.
    """
    ptrm_temps = numpy.array(ptrms_orig)[:, 0]
    check_temps = numpy.array(checks_orig)[:, 0]
    # index[i] is the row of the check taken at ptrm_temps[i], or NaN if none.
    index = numpy.zeros(len(ptrm_temps))
    for num, temp in enumerate(ptrm_temps):
        # Compute the temperature match once (was computed twice before).
        matches = numpy.where(check_temps == temp)[0]
        index[num] = matches[0] if len(matches) else float('nan')
    diffs = numpy.zeros((len(ptrms_vectors), 3))
    for num, ptrm in enumerate(ptrms_vectors):
        if numpy.isnan(index[num]):
            diffs[num] = numpy.array([0, 0, 0])
        else:
            diffs[num] = ptrm_checks_vectors[int(index[num])] - ptrm
    C = numpy.cumsum(diffs, 0)
    return diffs, C
def inc_from_lat(lat):
"""
Calculate inclination predicted from latitude using the dipole equation
Parameter
----------
lat : latitude in degrees
Returns
-------
inc : inclination calculated using the dipole equation
"""
rad = old_div(np.pi, 180.)
inc = old_div(np.arctan(2 * np.tan(lat * rad)), rad)
return inc | Calculate inclination predicted from latitude using the dipole equation
Parameter
----------
lat : latitude in degrees
Returns
-------
inc : inclination calculated using the dipole equation | Below is the instruction that describes the task:
### Input:
Calculate inclination predicted from latitude using the dipole equation
Parameter
----------
lat : latitude in degrees
Returns
-------
inc : inclination calculated using the dipole equation
### Response:
def inc_from_lat(lat):
    """
    Calculate inclination predicted from latitude using the dipole equation
    Parameter
    ----------
    lat : latitude in degrees
    Returns
    -------
    inc : inclination in degrees calculated using the dipole equation
    """
    # Dipole equation: tan(I) = 2 * tan(lat).  Work in radians and convert
    # back to degrees.  (Replaces the Py2-compat old_div wrapper: both
    # operands are floats, so true division is identical.)
    return np.degrees(np.arctan(2 * np.tan(np.radians(lat))))
async def connect(self, hostname=None, port=None, tls=False, **kwargs):
""" Connect to a server, optionally over TLS. See pydle.features.RFC1459Support.connect for misc parameters. """
if not port:
if tls:
port = DEFAULT_TLS_PORT
else:
port = rfc1459.protocol.DEFAULT_PORT
return await super().connect(hostname, port, tls=tls, **kwargs) | Connect to a server, optionally over TLS. See pydle.features.RFC1459Support.connect for misc parameters. | Below is the the instruction that describes the task:
### Input:
Connect to a server, optionally over TLS. See pydle.features.RFC1459Support.connect for misc parameters.
### Response:
async def connect(self, hostname=None, port=None, tls=False, **kwargs):
    """ Connect to a server, optionally over TLS. See pydle.features.RFC1459Support.connect for misc parameters. """
    # Pick the protocol default when no port was given: the TLS port for
    # TLS connections, the plain RFC1459 port otherwise.
    if not port:
        if tls:
            port = DEFAULT_TLS_PORT
        else:
            port = rfc1459.protocol.DEFAULT_PORT
    return await super().connect(hostname, port, tls=tls, **kwargs)
def perf_total(self, value):
"""The perf_total property.
Args:
value (string). the property value.
"""
if value == self._defaults['perfTotal'] and 'perfTotal' in self._values:
del self._values['perfTotal']
else:
self._values['perfTotal'] = value | The perf_total property.
Args:
value (string). the property value. | Below is the the instruction that describes the task:
### Input:
The perf_total property.
Args:
value (string). the property value.
### Response:
def perf_total(self, value):
    """The perf_total property.
    Args:
        value (string). the property value.
    """
    key = 'perfTotal'
    # Assigning the default value clears any stored override; anything
    # else (including a default with no stored override) is stored.
    matches_default = value == self._defaults[key]
    if matches_default and key in self._values:
        del self._values[key]
    else:
        self._values[key] = value
def report_stats(self):
"""Create the dict of stats data for the MCP stats queue"""
if not self.previous:
self.previous = dict()
for key in self.counters:
self.previous[key] = 0
values = {
'name': self.name,
'consumer_name': self.consumer_name,
'counts': dict(self.counters),
'previous': dict(self.previous)
}
self.previous = dict(self.counters)
return values | Create the dict of stats data for the MCP stats queue | Below is the the instruction that describes the task:
### Input:
Create the dict of stats data for the MCP stats queue
### Response:
def report_stats(self):
    """Create the dict of stats data for the MCP stats queue"""
    # First report ever: seed the previous snapshot with a zero for
    # every known counter so deltas start from a clean baseline.
    if not self.previous:
        self.previous = {key: 0 for key in self.counters}
    stats = {
        'name': self.name,
        'consumer_name': self.consumer_name,
        'counts': dict(self.counters),
        'previous': dict(self.previous),
    }
    # Snapshot current counts; they become "previous" on the next call.
    self.previous = dict(self.counters)
    return stats
def merge_results(x, y):
"""
Given two dicts, x and y, merge them into a new dict as a shallow copy.
The result only differs from `x.update(y)` in the way that it handles list
values when both x and y have list values for the same key. In which case
the returned dictionary, z, has a value according to:
z[key] = x[key] + z[key]
:param x: The first dictionary
:type x: :py:class:`dict`
:param y: The second dictionary
:type y: :py:class:`dict`
:returns: The merged dictionary
:rtype: :py:class:`dict`
"""
z = x.copy()
for key, value in y.items():
if isinstance(value, list) and isinstance(z.get(key), list):
z[key] += value
else:
z[key] = value
return z | Given two dicts, x and y, merge them into a new dict as a shallow copy.
The result only differs from `x.update(y)` in the way that it handles list
values when both x and y have list values for the same key. In which case
the returned dictionary, z, has a value according to:
z[key] = x[key] + z[key]
:param x: The first dictionary
:type x: :py:class:`dict`
:param y: The second dictionary
:type y: :py:class:`dict`
:returns: The merged dictionary
:rtype: :py:class:`dict` | Below is the the instruction that describes the task:
### Input:
Given two dicts, x and y, merge them into a new dict as a shallow copy.
The result only differs from `x.update(y)` in the way that it handles list
values when both x and y have list values for the same key. In which case
the returned dictionary, z, has a value according to:
z[key] = x[key] + z[key]
:param x: The first dictionary
:type x: :py:class:`dict`
:param y: The second dictionary
:type y: :py:class:`dict`
:returns: The merged dictionary
:rtype: :py:class:`dict`
### Response:
def merge_results(x, y):
    """
    Given two dicts, x and y, merge them into a new dict as a shallow copy.
    The result only differs from `x.update(y)` in the way that it handles list
    values when both x and y have list values for the same key. In which case
    the returned dictionary, z, has a value according to:
    z[key] = x[key] + y[key]
    Neither input dict (nor the lists inside them) is mutated.
    :param x: The first dictionary
    :type x: :py:class:`dict`
    :param y: The second dictionary
    :type y: :py:class:`dict`
    :returns: The merged dictionary
    :rtype: :py:class:`dict`
    """
    z = x.copy()
    for key, value in y.items():
        if isinstance(value, list) and isinstance(z.get(key), list):
            # Bug fix: `z[key] += value` extended the list *in place*, which
            # mutated the caller's list inside x (z is a shallow copy).
            # Build a new list instead so inputs stay untouched.
            z[key] = z[key] + value
        else:
            z[key] = value
    return z
def load_boston_multitask():
"""Boston House Prices Dataset with a synthetic multitask output.
The multitask output is obtained by applying a linear transformation
to the original y and adding it as a second output column.
"""
dataset = datasets.load_boston()
y = dataset.target
target = np.column_stack([y, 2 * y + 5])
return Dataset(load_boston.__doc__, dataset.data, target, r2_score) | Boston House Prices Dataset with a synthetic multitask output.
The multitask output is obtained by applying a linear transformation
to the original y and adding it as a second output column. | Below is the the instruction that describes the task:
### Input:
Boston House Prices Dataset with a synthetic multitask output.
The multitask output is obtained by applying a linear transformation
to the original y and adding it as a second output column.
### Response:
def load_boston_multitask():
    """Boston House Prices Dataset with a synthetic multitask output.
    The multitask output is obtained by applying a linear transformation
    to the original y and adding it as a second output column.

    NOTE(review): sklearn's load_boston was deprecated and removed in
    scikit-learn 1.2 -- confirm the pinned sklearn version still ships it.
    """
    dataset = datasets.load_boston()
    y = dataset.target
    # Second task is a fixed linear transform of the first: y2 = 2*y + 5.
    target = np.column_stack([y, 2 * y + 5])
    # NOTE(review): the description passed is load_boston.__doc__, not this
    # function's docstring -- presumably intentional; verify.
    return Dataset(load_boston.__doc__, dataset.data, target, r2_score)
def use_technique(self, tech):
"""
Use an exploration technique with this SimulationManager.
Techniques can be found in :mod:`angr.exploration_techniques`.
:param tech: An ExplorationTechnique object that contains code to modify
this SimulationManager's behavior.
:type tech: ExplorationTechnique
:return: The technique that was added, for convenience
"""
if not isinstance(tech, ExplorationTechnique):
raise SimulationManagerError
# XXX: as promised
tech.project = self._project
tech.setup(self)
HookSet.install_hooks(self, **tech._get_hooks())
self._techniques.append(tech)
return tech | Use an exploration technique with this SimulationManager.
Techniques can be found in :mod:`angr.exploration_techniques`.
:param tech: An ExplorationTechnique object that contains code to modify
this SimulationManager's behavior.
:type tech: ExplorationTechnique
:return: The technique that was added, for convenience | Below is the the instruction that describes the task:
### Input:
Use an exploration technique with this SimulationManager.
Techniques can be found in :mod:`angr.exploration_techniques`.
:param tech: An ExplorationTechnique object that contains code to modify
this SimulationManager's behavior.
:type tech: ExplorationTechnique
:return: The technique that was added, for convenience
### Response:
def use_technique(self, tech):
    """
    Use an exploration technique with this SimulationManager.
    Techniques can be found in :mod:`angr.exploration_techniques`.
    :param tech: An ExplorationTechnique object that contains code to modify
                 this SimulationManager's behavior.
    :type tech: ExplorationTechnique
    :return: The technique that was added, for convenience
    :raises SimulationManagerError: if tech is not an ExplorationTechnique
    """
    if not isinstance(tech, ExplorationTechnique):
        # Previously raised with no message, giving users no hint about
        # what was wrong; include the offending type in the error.
        raise SimulationManagerError(
            "use_technique() expects an ExplorationTechnique instance, "
            "got %r" % type(tech).__name__)
    # XXX: as promised
    tech.project = self._project
    tech.setup(self)
    HookSet.install_hooks(self, **tech._get_hooks())
    self._techniques.append(tech)
    return tech
def project_home_breadcrumb_bs3(label):
"""A template tag to return the project's home URL and label
formatted as a Bootstrap 3 breadcrumb.
PROJECT_HOME_NAMESPACE must be defined in settings, for example:
PROJECT_HOME_NAMESPACE = 'project_name:index_view'
Usage Example:
{% load project_home_tags %}
<ol class="breadcrumb">
{% project_home_breadcrumb_bs3 %} {# <--- #}
<li><a href="{% url 'app:namespace' %}">List of Objects</a></li>
<li class="active">Object Detail</li>
</ol>
This gets converted into:
<ol class="breadcrumb">
<li><a href="{% url 'project_name:index_view' %}">Home</a></li> {# <--- #}
<li><a href="{% url 'app:namespace' %}">List of Objects</a></li>
<li class="active">Object Detail</li>
</ol>
By default, the link's text is 'Home'. A project-wide label can be
defined with PROJECT_HOME_LABEL in settings. Both the default and
the project-wide label can be overridden by passing a string to
the template tag.
For example:
{% project_home_breadcrumb_bs3 'Custom Label' %}
"""
url = home_url()
if url:
return format_html(
'<li><a href="{}">{}</a></li>', url, label)
else:
return format_html('<li>{}</li>', label) | A template tag to return the project's home URL and label
formatted as a Bootstrap 3 breadcrumb.
PROJECT_HOME_NAMESPACE must be defined in settings, for example:
PROJECT_HOME_NAMESPACE = 'project_name:index_view'
Usage Example:
{% load project_home_tags %}
<ol class="breadcrumb">
{% project_home_breadcrumb_bs3 %} {# <--- #}
<li><a href="{% url 'app:namespace' %}">List of Objects</a></li>
<li class="active">Object Detail</li>
</ol>
This gets converted into:
<ol class="breadcrumb">
<li><a href="{% url 'project_name:index_view' %}">Home</a></li> {# <--- #}
<li><a href="{% url 'app:namespace' %}">List of Objects</a></li>
<li class="active">Object Detail</li>
</ol>
By default, the link's text is 'Home'. A project-wide label can be
defined with PROJECT_HOME_LABEL in settings. Both the default and
the project-wide label can be overridden by passing a string to
the template tag.
For example:
{% project_home_breadcrumb_bs3 'Custom Label' %} | Below is the the instruction that describes the task:
### Input:
A template tag to return the project's home URL and label
formatted as a Bootstrap 3 breadcrumb.
PROJECT_HOME_NAMESPACE must be defined in settings, for example:
PROJECT_HOME_NAMESPACE = 'project_name:index_view'
Usage Example:
{% load project_home_tags %}
<ol class="breadcrumb">
{% project_home_breadcrumb_bs3 %} {# <--- #}
<li><a href="{% url 'app:namespace' %}">List of Objects</a></li>
<li class="active">Object Detail</li>
</ol>
This gets converted into:
<ol class="breadcrumb">
<li><a href="{% url 'project_name:index_view' %}">Home</a></li> {# <--- #}
<li><a href="{% url 'app:namespace' %}">List of Objects</a></li>
<li class="active">Object Detail</li>
</ol>
By default, the link's text is 'Home'. A project-wide label can be
defined with PROJECT_HOME_LABEL in settings. Both the default and
the project-wide label can be overridden by passing a string to
the template tag.
For example:
{% project_home_breadcrumb_bs3 'Custom Label' %}
### Response:
def project_home_breadcrumb_bs3(label):
    """Render the project's home link as a Bootstrap 3 breadcrumb item.

    Requires PROJECT_HOME_NAMESPACE in settings, e.g.::

        PROJECT_HOME_NAMESPACE = 'project_name:index_view'

    Usage::

        {% load project_home_tags %}
        <ol class="breadcrumb">
          {% project_home_breadcrumb_bs3 %}
          ...
        </ol>

    The link text defaults to 'Home', can be set project-wide with
    PROJECT_HOME_LABEL, and can be overridden per-use by passing a
    string to the tag: {% project_home_breadcrumb_bs3 'Custom Label' %}.
    When no home URL resolves, a plain (unlinked) list item is emitted.
    """
    url = home_url()
    # No resolvable home URL: fall back to an unlinked breadcrumb item.
    if not url:
        return format_html('<li>{}</li>', label)
    return format_html('<li><a href="{}">{}</a></li>', url, label)
def move(self, category_id, parent_id, after_id=None):
"""
Move category in tree
:param category_id: ID of category to move
:param parent_id: New parent of the category
:param after_id: Category ID after what position it will be moved
:return: Boolean
"""
return bool(self.call(
'catalog_category.move', [category_id, parent_id, after_id])
) | Move category in tree
:param category_id: ID of category to move
:param parent_id: New parent of the category
:param after_id: Category ID after what position it will be moved
:return: Boolean | Below is the the instruction that describes the task:
### Input:
Move category in tree
:param category_id: ID of category to move
:param parent_id: New parent of the category
:param after_id: Category ID after what position it will be moved
:return: Boolean
### Response:
def move(self, category_id, parent_id, after_id=None):
    """
    Move category in tree
    :param category_id: ID of category to move
    :param parent_id: New parent of the category
    :param after_id: Category ID after what position it will be moved
    :return: Boolean
    """
    arguments = [category_id, parent_id, after_id]
    # The Magento API returns a truthy value on success; coerce to bool.
    response = self.call('catalog_category.move', arguments)
    return bool(response)
def chown(path, user=None, group=None, recursive=False):
"""Change file owner and group.
>>> if chown('/tmp/one', user='root', group='wheel'):
... print('OK')
OK
"""
successful = True
uid = -1
gid = -1
if user is not None:
if isinstance(user, basestring_type):
user = _ops_user(name=user)
elif isinstance(user, numbers.Number):
user = _ops_user(id=user)
if isinstance(user, _ops_user):
if user:
uid = user.id
else:
log.error('chown: unable to get uid')
successful = False
else:
successful = False
if group is not None:
if isinstance(group, basestring_type):
group = _ops_group(name=group)
elif isinstance(group, numbers.Number):
group = _ops_group(id=group)
if isinstance(group, _ops_group):
if group:
gid = group.id
else:
log.error('chown: unable to get gid')
successful = False
else:
successful = False
if not (uid == -1 and gid == -1):
if recursive:
for p in find(path, no_peek=True):
successful = _chown(p, uid=uid, gid=gid) and successful
else:
successful = _chown(path, uid=uid, gid=gid)
else:
successful = False
return successful | Change file owner and group.
>>> if chown('/tmp/one', user='root', group='wheel'):
... print('OK')
OK | Below is the the instruction that describes the task:
### Input:
Change file owner and group.
>>> if chown('/tmp/one', user='root', group='wheel'):
... print('OK')
OK
### Response:
def chown(path, user=None, group=None, recursive=False):
    """Change file owner and group.
    >>> if chown('/tmp/one', user='root', group='wheel'):
    ...     print('OK')
    OK

    ``user``/``group`` may be a name (string), a numeric id, or a
    pre-resolved ``_ops_user``/``_ops_group`` object; ``None`` leaves that
    half unchanged.  Returns True only if every lookup resolved and every
    chown call succeeded; passing neither user nor group returns False.
    """
    successful = True
    # -1 is the conventional "leave unchanged" id for chown(2).
    uid = -1
    gid = -1
    if user is not None:
        # Normalize user to an _ops_user, resolving by name or id.
        if isinstance(user, basestring_type):
            user = _ops_user(name=user)
        elif isinstance(user, numbers.Number):
            user = _ops_user(id=user)
        if isinstance(user, _ops_user):
            if user:
                uid = user.id
            else:
                log.error('chown: unable to get uid')
                successful = False
        else:
            successful = False
    if group is not None:
        # Normalize group to an _ops_group, resolving by name or id.
        if isinstance(group, basestring_type):
            group = _ops_group(name=group)
        elif isinstance(group, numbers.Number):
            group = _ops_group(id=group)
        if isinstance(group, _ops_group):
            if group:
                gid = group.id
            else:
                log.error('chown: unable to get gid')
                successful = False
        else:
            successful = False
    # Only call out to _chown if at least one id actually resolved.
    if not (uid == -1 and gid == -1):
        if recursive:
            # Apply to every path under `path`; a single failure makes the
            # whole operation report False, but remaining paths still run.
            for p in find(path, no_peek=True):
                successful = _chown(p, uid=uid, gid=gid) and successful
        else:
            successful = _chown(path, uid=uid, gid=gid)
    else:
        successful = False
    return successful
def data(self, buffer_index):
"""Return the data vector for a given ring buffer"""
# Check for expired elements and discard if they exist
expired = self.time - self.max_time
exp = self.buffer_expire[buffer_index]
j = 0
while j < len(exp):
# Everything before this j must be expired
if exp[j] >= expired:
self.buffer_expire[buffer_index] = exp[j:].copy()
self.buffer[buffer_index] = self.buffer[buffer_index][j:].copy()
break
j += 1
        return self.buffer[buffer_index] | Return the data vector for a given ring buffer | Below is the instruction that describes the task:
### Input:
Return the data vector for a given ring buffer
### Response:
def data(self, buffer_index):
"""Return the data vector for a given ring buffer"""
# Check for expired elements and discard if they exist
expired = self.time - self.max_time
exp = self.buffer_expire[buffer_index]
j = 0
while j < len(exp):
# Everything before this j must be expired
if exp[j] >= expired:
self.buffer_expire[buffer_index] = exp[j:].copy()
self.buffer[buffer_index] = self.buffer[buffer_index][j:].copy()
break
j += 1
return self.buffer[buffer_index] |
def kill_speech_dispatcher(self):
'''kill speech dispatcher processs'''
if not 'HOME' in os.environ:
return
pidpath = os.path.join(os.environ['HOME'], '.speech-dispatcher',
'pid', 'speech-dispatcher.pid')
if os.path.exists(pidpath):
try:
import signal
pid = int(open(pidpath).read())
if pid > 1 and os.kill(pid, 0) is None:
print("Killing speech dispatcher pid %u" % pid)
os.kill(pid, signal.SIGINT)
time.sleep(1)
except Exception as e:
            pass | kill speech dispatcher processs | Below is the instruction that describes the task:
### Input:
kill speech dispatcher processs
### Response:
def kill_speech_dispatcher(self):
'''kill speech dispatcher processs'''
if not 'HOME' in os.environ:
return
pidpath = os.path.join(os.environ['HOME'], '.speech-dispatcher',
'pid', 'speech-dispatcher.pid')
if os.path.exists(pidpath):
try:
import signal
pid = int(open(pidpath).read())
if pid > 1 and os.kill(pid, 0) is None:
print("Killing speech dispatcher pid %u" % pid)
os.kill(pid, signal.SIGINT)
time.sleep(1)
except Exception as e:
pass |
def menu(items, heading):
'''Takes list of dictionaries and prints a menu.
items parameter should be in the form of a list, containing
dictionaries with the keys: {"key", "text", "function"}.
Typing the key for a menuitem, followed by return, will run
"function".
'''
heading = "\n"*5 + heading # A little vertical padding
while True:
keydict = {}
clear_screen()
print(heading)
for item in items:
menustring = " " + item["key"] + " " + item["text"]
keydict[item["key"]] = item["function"]
print(menustring)
key = input("\nType key and Return (q to quit): ").strip()
if key.lower() == "q":
return
else:
try:
ret = keydict[key]()
if ret: # If child returns non-false, exit menu.
return 1
except KeyError: # Handle garbage input.
continue | Takes list of dictionaries and prints a menu.
items parameter should be in the form of a list, containing
dictionaries with the keys: {"key", "text", "function"}.
Typing the key for a menuitem, followed by return, will run
    "function". | Below is the instruction that describes the task:
### Input:
Takes list of dictionaries and prints a menu.
items parameter should be in the form of a list, containing
dictionaries with the keys: {"key", "text", "function"}.
Typing the key for a menuitem, followed by return, will run
"function".
### Response:
def menu(items, heading):
'''Takes list of dictionaries and prints a menu.
items parameter should be in the form of a list, containing
dictionaries with the keys: {"key", "text", "function"}.
Typing the key for a menuitem, followed by return, will run
"function".
'''
heading = "\n"*5 + heading # A little vertical padding
while True:
keydict = {}
clear_screen()
print(heading)
for item in items:
menustring = " " + item["key"] + " " + item["text"]
keydict[item["key"]] = item["function"]
print(menustring)
key = input("\nType key and Return (q to quit): ").strip()
if key.lower() == "q":
return
else:
try:
ret = keydict[key]()
if ret: # If child returns non-false, exit menu.
return 1
except KeyError: # Handle garbage input.
continue |
def at(self, p):
"""
Returns the set of all intervals that contain p.
Completes in O(m + log n) time, where:
* n = size of the tree
* m = number of matches
:rtype: set of Interval
"""
root = self.top_node
if not root:
return set()
return root.search_point(p, set()) | Returns the set of all intervals that contain p.
Completes in O(m + log n) time, where:
* n = size of the tree
* m = number of matches
    :rtype: set of Interval | Below is the instruction that describes the task:
### Input:
Returns the set of all intervals that contain p.
Completes in O(m + log n) time, where:
* n = size of the tree
* m = number of matches
:rtype: set of Interval
### Response:
def at(self, p):
"""
Returns the set of all intervals that contain p.
Completes in O(m + log n) time, where:
* n = size of the tree
* m = number of matches
:rtype: set of Interval
"""
root = self.top_node
if not root:
return set()
return root.search_point(p, set()) |
def checkIsMember(self, CorpNum):
""" 회원가입여부 확인
args
CorpNum : 회원 사업자번호
return
회원가입여부 True/False
raise
PopbillException
"""
if CorpNum == None or CorpNum == '':
raise PopbillException(-99999999, "사업자번호가 입력되지 않았습니다.")
return self._httpget('/Join?CorpNum=' + CorpNum + '&LID=' + self.__linkID, None, None) | 회원가입여부 확인
args
CorpNum : 회원 사업자번호
return
회원가입여부 True/False
raise
            PopbillException | Below is the instruction that describes the task:
### Input:
회원가입여부 확인
args
CorpNum : 회원 사업자번호
return
회원가입여부 True/False
raise
PopbillException
### Response:
def checkIsMember(self, CorpNum):
""" 회원가입여부 확인
args
CorpNum : 회원 사업자번호
return
회원가입여부 True/False
raise
PopbillException
"""
if CorpNum == None or CorpNum == '':
raise PopbillException(-99999999, "사업자번호가 입력되지 않았습니다.")
return self._httpget('/Join?CorpNum=' + CorpNum + '&LID=' + self.__linkID, None, None) |
def GetBlockHash(self, height):
"""
Get the block hash by its block height
Args:
height(int): height of the block to retrieve hash from.
Returns:
bytes: a non-raw block hash (e.g. b'6dd83ed8a3fc02e322f91f30431bf3662a8c8e8ebe976c3565f0d21c70620991', but not b'\x6d\xd8...etc'
"""
if self._current_block_height < height:
return
if len(self._header_index) <= height:
return
return self._header_index[height] | Get the block hash by its block height
Args:
height(int): height of the block to retrieve hash from.
Returns:
            bytes: a non-raw block hash (e.g. b'6dd83ed8a3fc02e322f91f30431bf3662a8c8e8ebe976c3565f0d21c70620991', but not b'\x6d\xd8...etc' | Below is the instruction that describes the task:
### Input:
Get the block hash by its block height
Args:
height(int): height of the block to retrieve hash from.
Returns:
bytes: a non-raw block hash (e.g. b'6dd83ed8a3fc02e322f91f30431bf3662a8c8e8ebe976c3565f0d21c70620991', but not b'\x6d\xd8...etc'
### Response:
def GetBlockHash(self, height):
"""
Get the block hash by its block height
Args:
height(int): height of the block to retrieve hash from.
Returns:
bytes: a non-raw block hash (e.g. b'6dd83ed8a3fc02e322f91f30431bf3662a8c8e8ebe976c3565f0d21c70620991', but not b'\x6d\xd8...etc'
"""
if self._current_block_height < height:
return
if len(self._header_index) <= height:
return
return self._header_index[height] |
def resize(image, width, height, channels=None, decode=False,
resample='nearest'):
"""
Resizes the image or SArray of Images to a specific width, height, and
number of channels.
Parameters
----------
image : turicreate.Image | SArray
The image or SArray of images to be resized.
width : int
The width the image is resized to.
height : int
The height the image is resized to.
channels : int, optional
The number of channels the image is resized to. 1 channel
corresponds to grayscale, 3 channels corresponds to RGB, and 4
channels corresponds to RGBA images.
decode : bool, optional
Whether to store the resized image in decoded format. Decoded takes
more space, but makes the resize and future operations on the image faster.
resample : 'nearest' or 'bilinear'
Specify the resampling filter:
- ``'nearest'``: Nearest neigbhor, extremely fast
- ``'bilinear'``: Bilinear, fast and with less aliasing artifacts
Returns
-------
out : turicreate.Image
Returns a resized Image object.
Notes
-----
Grayscale Images -> Images with one channel, representing a scale from
white to black
RGB Images -> Images with 3 channels, with each pixel having Green, Red,
and Blue values.
RGBA Images -> An RGB image with an opacity channel.
Examples
--------
Resize a single image
>>> img = turicreate.Image('https://static.turi.com/datasets/images/sample.jpg')
>>> resized_img = turicreate.image_analysis.resize(img,100,100,1)
Resize an SArray of images
>>> url ='https://static.turi.com/datasets/images/nested'
>>> image_sframe = turicreate.image_analysis.load_images(url, "auto", with_path=False,
... recursive=True)
>>> image_sarray = image_sframe["image"]
>>> resized_images = turicreate.image_analysis.resize(image_sarray, 100, 100, 1)
"""
if height < 0 or width < 0:
raise ValueError("Cannot resize to negative sizes")
if resample == 'nearest':
resample_method = 0
elif resample == 'bilinear':
resample_method = 1
else:
raise ValueError("Unknown resample option: '%s'" % resample)
from ...data_structures.sarray import SArray as _SArray
from ... import extensions as _extensions
if type(image) is _Image:
if channels is None:
channels = image.channels
if channels <= 0:
raise ValueError("cannot resize images to 0 or fewer channels")
return _extensions.resize_image(image, width, height, channels, decode, resample_method)
elif type(image) is _SArray:
if channels is None:
channels = 3
if channels <= 0:
raise ValueError("cannot resize images to 0 or fewer channels")
return image.apply(lambda x: _extensions.resize_image(x, width, height, channels, decode, resample_method))
else:
raise ValueError("Cannot call 'resize' on objects that are not either an Image or SArray of Images") | Resizes the image or SArray of Images to a specific width, height, and
number of channels.
Parameters
----------
image : turicreate.Image | SArray
The image or SArray of images to be resized.
width : int
The width the image is resized to.
height : int
The height the image is resized to.
channels : int, optional
The number of channels the image is resized to. 1 channel
corresponds to grayscale, 3 channels corresponds to RGB, and 4
channels corresponds to RGBA images.
decode : bool, optional
Whether to store the resized image in decoded format. Decoded takes
more space, but makes the resize and future operations on the image faster.
resample : 'nearest' or 'bilinear'
Specify the resampling filter:
- ``'nearest'``: Nearest neigbhor, extremely fast
- ``'bilinear'``: Bilinear, fast and with less aliasing artifacts
Returns
-------
out : turicreate.Image
Returns a resized Image object.
Notes
-----
Grayscale Images -> Images with one channel, representing a scale from
white to black
RGB Images -> Images with 3 channels, with each pixel having Green, Red,
and Blue values.
RGBA Images -> An RGB image with an opacity channel.
Examples
--------
Resize a single image
>>> img = turicreate.Image('https://static.turi.com/datasets/images/sample.jpg')
>>> resized_img = turicreate.image_analysis.resize(img,100,100,1)
Resize an SArray of images
>>> url ='https://static.turi.com/datasets/images/nested'
>>> image_sframe = turicreate.image_analysis.load_images(url, "auto", with_path=False,
... recursive=True)
>>> image_sarray = image_sframe["image"]
    >>> resized_images = turicreate.image_analysis.resize(image_sarray, 100, 100, 1) | Below is the instruction that describes the task:
### Input:
Resizes the image or SArray of Images to a specific width, height, and
number of channels.
Parameters
----------
image : turicreate.Image | SArray
The image or SArray of images to be resized.
width : int
The width the image is resized to.
height : int
The height the image is resized to.
channels : int, optional
The number of channels the image is resized to. 1 channel
corresponds to grayscale, 3 channels corresponds to RGB, and 4
channels corresponds to RGBA images.
decode : bool, optional
Whether to store the resized image in decoded format. Decoded takes
more space, but makes the resize and future operations on the image faster.
resample : 'nearest' or 'bilinear'
Specify the resampling filter:
- ``'nearest'``: Nearest neigbhor, extremely fast
- ``'bilinear'``: Bilinear, fast and with less aliasing artifacts
Returns
-------
out : turicreate.Image
Returns a resized Image object.
Notes
-----
Grayscale Images -> Images with one channel, representing a scale from
white to black
RGB Images -> Images with 3 channels, with each pixel having Green, Red,
and Blue values.
RGBA Images -> An RGB image with an opacity channel.
Examples
--------
Resize a single image
>>> img = turicreate.Image('https://static.turi.com/datasets/images/sample.jpg')
>>> resized_img = turicreate.image_analysis.resize(img,100,100,1)
Resize an SArray of images
>>> url ='https://static.turi.com/datasets/images/nested'
>>> image_sframe = turicreate.image_analysis.load_images(url, "auto", with_path=False,
... recursive=True)
>>> image_sarray = image_sframe["image"]
>>> resized_images = turicreate.image_analysis.resize(image_sarray, 100, 100, 1)
### Response:
def resize(image, width, height, channels=None, decode=False,
resample='nearest'):
"""
Resizes the image or SArray of Images to a specific width, height, and
number of channels.
Parameters
----------
image : turicreate.Image | SArray
The image or SArray of images to be resized.
width : int
The width the image is resized to.
height : int
The height the image is resized to.
channels : int, optional
The number of channels the image is resized to. 1 channel
corresponds to grayscale, 3 channels corresponds to RGB, and 4
channels corresponds to RGBA images.
decode : bool, optional
Whether to store the resized image in decoded format. Decoded takes
more space, but makes the resize and future operations on the image faster.
resample : 'nearest' or 'bilinear'
Specify the resampling filter:
- ``'nearest'``: Nearest neigbhor, extremely fast
- ``'bilinear'``: Bilinear, fast and with less aliasing artifacts
Returns
-------
out : turicreate.Image
Returns a resized Image object.
Notes
-----
Grayscale Images -> Images with one channel, representing a scale from
white to black
RGB Images -> Images with 3 channels, with each pixel having Green, Red,
and Blue values.
RGBA Images -> An RGB image with an opacity channel.
Examples
--------
Resize a single image
>>> img = turicreate.Image('https://static.turi.com/datasets/images/sample.jpg')
>>> resized_img = turicreate.image_analysis.resize(img,100,100,1)
Resize an SArray of images
>>> url ='https://static.turi.com/datasets/images/nested'
>>> image_sframe = turicreate.image_analysis.load_images(url, "auto", with_path=False,
... recursive=True)
>>> image_sarray = image_sframe["image"]
>>> resized_images = turicreate.image_analysis.resize(image_sarray, 100, 100, 1)
"""
if height < 0 or width < 0:
raise ValueError("Cannot resize to negative sizes")
if resample == 'nearest':
resample_method = 0
elif resample == 'bilinear':
resample_method = 1
else:
raise ValueError("Unknown resample option: '%s'" % resample)
from ...data_structures.sarray import SArray as _SArray
from ... import extensions as _extensions
if type(image) is _Image:
if channels is None:
channels = image.channels
if channels <= 0:
raise ValueError("cannot resize images to 0 or fewer channels")
return _extensions.resize_image(image, width, height, channels, decode, resample_method)
elif type(image) is _SArray:
if channels is None:
channels = 3
if channels <= 0:
raise ValueError("cannot resize images to 0 or fewer channels")
return image.apply(lambda x: _extensions.resize_image(x, width, height, channels, decode, resample_method))
else:
raise ValueError("Cannot call 'resize' on objects that are not either an Image or SArray of Images") |
def structured_mesh(shape = (2,2,2), dim = (1.,1.,1.)):
"""
Returns a structured mesh.
:arg shape: 2 or 3 integers (eg: shape = (10, 10, 10)).
:type shape: tuple
:arg dim: 2 or 3 floats (eg: dim = (4., 2., 1.))
:type dim: tuple
.. note::
This function does not use GMSH for mesh generation.
>>> import argiope as ag
>>> mesh = ag.mesh.structured_mesh(shape =(10,10,10), dim=(1.,1.,1.)))
"""
# PREPROCESSING
shape = np.array(shape)
dim = np.array(dim)
Ne = shape.prod()
Nn = (shape + 1).prod()
# LABELS
nindex = np.arange(Nn) + 1
eindex = np.arange(Ne) + 1
# COORDINATES
coords = [ np.linspace(0., dim[i], shape[i] + 1) for i in range(len(shape))]
coords = np.array(np.meshgrid(*coords))
coords = np.array([c.swapaxes(0,1).flatten("F") for c in coords]).T
if len(shape) == 2:
c = coords
coords = np.zeros((Nn, 3))
coords[:, :2] = c
# CONNECTIVITY
conn = _make_conn(shape)
# MESH INSTANCE
mesh = Mesh(nlabels = nindex,
coords = coords,
elabels = eindex,
conn = conn,)
if len(shape) == 2: mesh.elements[("type", "argiope")] = "quad4"
if len(shape) == 3: mesh.elements[("type", "argiope")] = "hexa8"
return mesh | Returns a structured mesh.
:arg shape: 2 or 3 integers (eg: shape = (10, 10, 10)).
:type shape: tuple
:arg dim: 2 or 3 floats (eg: dim = (4., 2., 1.))
:type dim: tuple
.. note::
This function does not use GMSH for mesh generation.
>>> import argiope as ag
    >>> mesh = ag.mesh.structured_mesh(shape =(10,10,10), dim=(1.,1.,1.))) | Below is the instruction that describes the task:
### Input:
Returns a structured mesh.
:arg shape: 2 or 3 integers (eg: shape = (10, 10, 10)).
:type shape: tuple
:arg dim: 2 or 3 floats (eg: dim = (4., 2., 1.))
:type dim: tuple
.. note::
This function does not use GMSH for mesh generation.
>>> import argiope as ag
>>> mesh = ag.mesh.structured_mesh(shape =(10,10,10), dim=(1.,1.,1.)))
### Response:
def structured_mesh(shape = (2,2,2), dim = (1.,1.,1.)):
"""
Returns a structured mesh.
:arg shape: 2 or 3 integers (eg: shape = (10, 10, 10)).
:type shape: tuple
:arg dim: 2 or 3 floats (eg: dim = (4., 2., 1.))
:type dim: tuple
.. note::
This function does not use GMSH for mesh generation.
>>> import argiope as ag
>>> mesh = ag.mesh.structured_mesh(shape =(10,10,10), dim=(1.,1.,1.)))
"""
# PREPROCESSING
shape = np.array(shape)
dim = np.array(dim)
Ne = shape.prod()
Nn = (shape + 1).prod()
# LABELS
nindex = np.arange(Nn) + 1
eindex = np.arange(Ne) + 1
# COORDINATES
coords = [ np.linspace(0., dim[i], shape[i] + 1) for i in range(len(shape))]
coords = np.array(np.meshgrid(*coords))
coords = np.array([c.swapaxes(0,1).flatten("F") for c in coords]).T
if len(shape) == 2:
c = coords
coords = np.zeros((Nn, 3))
coords[:, :2] = c
# CONNECTIVITY
conn = _make_conn(shape)
# MESH INSTANCE
mesh = Mesh(nlabels = nindex,
coords = coords,
elabels = eindex,
conn = conn,)
if len(shape) == 2: mesh.elements[("type", "argiope")] = "quad4"
if len(shape) == 3: mesh.elements[("type", "argiope")] = "hexa8"
return mesh |
def crpix(self):
"""
The location of the reference coordinate in the pixel frame.
First simple respond with the header values, if they don't exist try usnig the DETSEC values
@rtype: float, float
"""
try:
return self.wcs.crpix1, self.wcs.crpix2
except Exception as ex:
logging.debug("Couldn't get CRPIX from WCS: {}".format(ex))
logging.debug("Switching to use DATASEC for CRPIX value computation.")
try:
(x1, x2), (y1, y2) = util.get_pixel_bounds_from_datasec_keyword(self['DETSEC'])
dx = float(self['NAXIS1'])
dy = float(self['NAXIS2'])
except KeyError as ke:
raise KeyError("Header missing keyword: {}, required for CRPIX[12] computation".format(ke.args[0]))
crpix1 = self._DET_X_CEN - (x1 + x2) / 2. + dx / 2.
crpix2 = self._DET_Y_CEN - (y1 + y2) / 2. + dy / 2.
return crpix1, crpix2 | The location of the reference coordinate in the pixel frame.
First simple respond with the header values, if they don't exist try usnig the DETSEC values
        @rtype: float, float | Below is the instruction that describes the task:
### Input:
The location of the reference coordinate in the pixel frame.
First simple respond with the header values, if they don't exist try usnig the DETSEC values
@rtype: float, float
### Response:
def crpix(self):
"""
The location of the reference coordinate in the pixel frame.
First simple respond with the header values, if they don't exist try usnig the DETSEC values
@rtype: float, float
"""
try:
return self.wcs.crpix1, self.wcs.crpix2
except Exception as ex:
logging.debug("Couldn't get CRPIX from WCS: {}".format(ex))
logging.debug("Switching to use DATASEC for CRPIX value computation.")
try:
(x1, x2), (y1, y2) = util.get_pixel_bounds_from_datasec_keyword(self['DETSEC'])
dx = float(self['NAXIS1'])
dy = float(self['NAXIS2'])
except KeyError as ke:
raise KeyError("Header missing keyword: {}, required for CRPIX[12] computation".format(ke.args[0]))
crpix1 = self._DET_X_CEN - (x1 + x2) / 2. + dx / 2.
crpix2 = self._DET_Y_CEN - (y1 + y2) / 2. + dy / 2.
return crpix1, crpix2 |
def get_turbine_data_from_file(turbine_type, file_):
r"""
Fetches power (coefficient) curve data from a csv file.
See `example_power_curves.csv' and `example_power_coefficient_curves.csv`
in example/data for the required format of a csv file. The self-provided
csv file may contain more columns than the example files. Only columns
containing wind speed and the corresponding power or power coefficient as
well as the column 'nominal_power' are taken into account.
Parameters
----------
turbine_type : string
Specifies the turbine type data is fetched for.
file_ : string
Specifies the source of the turbine data.
See the example below for how to use the example data.
Returns
-------
Tuple (pandas.DataFrame, float)
Power curve or power coefficient curve (pandas.DataFrame) and nominal
power (float). Power (coefficient) curve DataFrame contains power
coefficient curve values (dimensionless) or power curve values in W
as column names with the corresponding wind speeds in m/s.
Examples
--------
>>> from windpowerlib import wind_turbine
>>> import os
>>> source = os.path.join(os.path.dirname(__file__), '../example/data',
... 'example_power_curves.csv')
>>> example_turbine = {
... 'hub_height': 100,
... 'rotor_diameter': 70,
... 'name': 'DUMMY 3',
... 'fetch_curve': 'power_curve',
... 'data_source': source}
>>> e_t_1 = wind_turbine.WindTurbine(**example_turbine)
>>> print(e_t_1.power_curve['value'][7])
18000.0
>>> print(e_t_1.nominal_power)
150000
"""
def isfloat(x):
try:
float(x)
return x
except ValueError:
return False
try:
df = pd.read_csv(file_, index_col=0)
except FileNotFoundError:
raise FileNotFoundError("The file '{}' was not found.".format(file_))
wpp_df = df[df.turbine_id == turbine_type]
# if turbine not in data file
if wpp_df.shape[0] == 0:
pd.set_option('display.max_rows', len(df))
logging.info('Possible types: \n{0}'.format(df.turbine_id))
pd.reset_option('display.max_rows')
sys.exit('Cannot find the wind converter type: {0}'.format(
turbine_type))
# if turbine in data file select power (coefficient) curve columns and
# drop nans
cols = [_ for _ in wpp_df.columns if isfloat(_)]
curve_data = wpp_df[cols].dropna(axis=1)
df = curve_data.transpose().reset_index()
df.columns = ['wind_speed', 'value']
df['wind_speed'] = df['wind_speed'].apply(lambda x: float(x))
nominal_power = wpp_df['p_nom'].iloc[0]
return df, nominal_power | r"""
Fetches power (coefficient) curve data from a csv file.
See `example_power_curves.csv' and `example_power_coefficient_curves.csv`
in example/data for the required format of a csv file. The self-provided
csv file may contain more columns than the example files. Only columns
containing wind speed and the corresponding power or power coefficient as
well as the column 'nominal_power' are taken into account.
Parameters
----------
turbine_type : string
Specifies the turbine type data is fetched for.
file_ : string
Specifies the source of the turbine data.
See the example below for how to use the example data.
Returns
-------
Tuple (pandas.DataFrame, float)
Power curve or power coefficient curve (pandas.DataFrame) and nominal
power (float). Power (coefficient) curve DataFrame contains power
coefficient curve values (dimensionless) or power curve values in W
as column names with the corresponding wind speeds in m/s.
Examples
--------
>>> from windpowerlib import wind_turbine
>>> import os
>>> source = os.path.join(os.path.dirname(__file__), '../example/data',
... 'example_power_curves.csv')
>>> example_turbine = {
... 'hub_height': 100,
... 'rotor_diameter': 70,
... 'name': 'DUMMY 3',
... 'fetch_curve': 'power_curve',
... 'data_source': source}
>>> e_t_1 = wind_turbine.WindTurbine(**example_turbine)
>>> print(e_t_1.power_curve['value'][7])
18000.0
>>> print(e_t_1.nominal_power)
    150000 | Below is the instruction that describes the task:
### Input:
r"""
Fetches power (coefficient) curve data from a csv file.
See `example_power_curves.csv' and `example_power_coefficient_curves.csv`
in example/data for the required format of a csv file. The self-provided
csv file may contain more columns than the example files. Only columns
containing wind speed and the corresponding power or power coefficient as
well as the column 'nominal_power' are taken into account.
Parameters
----------
turbine_type : string
Specifies the turbine type data is fetched for.
file_ : string
Specifies the source of the turbine data.
See the example below for how to use the example data.
Returns
-------
Tuple (pandas.DataFrame, float)
Power curve or power coefficient curve (pandas.DataFrame) and nominal
power (float). Power (coefficient) curve DataFrame contains power
coefficient curve values (dimensionless) or power curve values in W
as column names with the corresponding wind speeds in m/s.
Examples
--------
>>> from windpowerlib import wind_turbine
>>> import os
>>> source = os.path.join(os.path.dirname(__file__), '../example/data',
... 'example_power_curves.csv')
>>> example_turbine = {
... 'hub_height': 100,
... 'rotor_diameter': 70,
... 'name': 'DUMMY 3',
... 'fetch_curve': 'power_curve',
... 'data_source': source}
>>> e_t_1 = wind_turbine.WindTurbine(**example_turbine)
>>> print(e_t_1.power_curve['value'][7])
18000.0
>>> print(e_t_1.nominal_power)
150000
### Response:
def get_turbine_data_from_file(turbine_type, file_):
r"""
Fetches power (coefficient) curve data from a csv file.
See `example_power_curves.csv' and `example_power_coefficient_curves.csv`
in example/data for the required format of a csv file. The self-provided
csv file may contain more columns than the example files. Only columns
containing wind speed and the corresponding power or power coefficient as
well as the column 'nominal_power' are taken into account.
Parameters
----------
turbine_type : string
Specifies the turbine type data is fetched for.
file_ : string
Specifies the source of the turbine data.
See the example below for how to use the example data.
Returns
-------
Tuple (pandas.DataFrame, float)
Power curve or power coefficient curve (pandas.DataFrame) and nominal
power (float). Power (coefficient) curve DataFrame contains power
coefficient curve values (dimensionless) or power curve values in W
as column names with the corresponding wind speeds in m/s.
Examples
--------
>>> from windpowerlib import wind_turbine
>>> import os
>>> source = os.path.join(os.path.dirname(__file__), '../example/data',
... 'example_power_curves.csv')
>>> example_turbine = {
... 'hub_height': 100,
... 'rotor_diameter': 70,
... 'name': 'DUMMY 3',
... 'fetch_curve': 'power_curve',
... 'data_source': source}
>>> e_t_1 = wind_turbine.WindTurbine(**example_turbine)
>>> print(e_t_1.power_curve['value'][7])
18000.0
>>> print(e_t_1.nominal_power)
150000
"""
def isfloat(x):
try:
float(x)
return x
except ValueError:
return False
try:
df = pd.read_csv(file_, index_col=0)
except FileNotFoundError:
raise FileNotFoundError("The file '{}' was not found.".format(file_))
wpp_df = df[df.turbine_id == turbine_type]
# if turbine not in data file
if wpp_df.shape[0] == 0:
pd.set_option('display.max_rows', len(df))
logging.info('Possible types: \n{0}'.format(df.turbine_id))
pd.reset_option('display.max_rows')
sys.exit('Cannot find the wind converter type: {0}'.format(
turbine_type))
# if turbine in data file select power (coefficient) curve columns and
# drop nans
cols = [_ for _ in wpp_df.columns if isfloat(_)]
curve_data = wpp_df[cols].dropna(axis=1)
df = curve_data.transpose().reset_index()
df.columns = ['wind_speed', 'value']
df['wind_speed'] = df['wind_speed'].apply(lambda x: float(x))
nominal_power = wpp_df['p_nom'].iloc[0]
return df, nominal_power |
def _evaluate(self,R,phi=0.,t=0.):
"""
NAME:
_evaluate
PURPOSE:
evaluate the potential at R,phi,t
INPUT:
R - Galactocentric cylindrical radius
phi - azimuth
t - time
OUTPUT:
Phi(R,phi,t)
HISTORY:
2011-10-19 - Started - Bovy (IAS)
"""
#Calculate relevant time
if not self._tform is None:
if t < self._tform:
smooth= 0.
elif t < self._tsteady:
deltat= t-self._tform
xi= 2.*deltat/(self._tsteady-self._tform)-1.
smooth= (3./16.*xi**5.-5./8*xi**3.+15./16.*xi+.5)
else: #fully on
smooth= 1.
else:
smooth= 1.
return smooth*self._twophio/2.*R**self._p\
*m.cos(2.*(phi-self._phib)) | NAME:
_evaluate
PURPOSE:
evaluate the potential at R,phi,t
INPUT:
R - Galactocentric cylindrical radius
phi - azimuth
t - time
OUTPUT:
Phi(R,phi,t)
HISTORY:
       2011-10-19 - Started - Bovy (IAS) | Below is the instruction that describes the task:
### Input:
NAME:
_evaluate
PURPOSE:
evaluate the potential at R,phi,t
INPUT:
R - Galactocentric cylindrical radius
phi - azimuth
t - time
OUTPUT:
Phi(R,phi,t)
HISTORY:
2011-10-19 - Started - Bovy (IAS)
### Response:
def _evaluate(self,R,phi=0.,t=0.):
"""
NAME:
_evaluate
PURPOSE:
evaluate the potential at R,phi,t
INPUT:
R - Galactocentric cylindrical radius
phi - azimuth
t - time
OUTPUT:
Phi(R,phi,t)
HISTORY:
2011-10-19 - Started - Bovy (IAS)
"""
#Calculate relevant time
if not self._tform is None:
if t < self._tform:
smooth= 0.
elif t < self._tsteady:
deltat= t-self._tform
xi= 2.*deltat/(self._tsteady-self._tform)-1.
smooth= (3./16.*xi**5.-5./8*xi**3.+15./16.*xi+.5)
else: #fully on
smooth= 1.
else:
smooth= 1.
return smooth*self._twophio/2.*R**self._p\
*m.cos(2.*(phi-self._phib)) |
def get_head(name, line, releases):
"""
Checks if `line` is a head
:param name: str, package name
:param line: str, line
:param releases: list, releases
:return: str, version if this is a valid head, False otherwise
"""
if not line:
return False
# if this line begins with an invalid starting character, return early.
# invalid characters are those used by various markup languages to introduce a new
# new list item
for char in INVALID_LINE_START:
# markdown uses ** for bold text, we also want to make sure to not exclude lines
# that contain a release
if line.startswith(char) and not line.startswith("**") and not "release" in line.lower():
return False
# if this line ends with a "." this isn't a valid head, return early.
for char in INVALID_LINE_ENDS:
if line.endswith(char):
return False
# if the line contains "python", it is not a valid head. It's more likely that the mantainers
# are talking about a Python release in general.
# Note the leading ' ' to not exclude lines like python-foolibrary
if 'python ' in line.lower():
return False
# Our goal is to find a somewhat parseable line. For this to work, we need to remove all
# parts that are not needed so that:
# release (12/12/2016) v2.0.3
# becomes something like
# 12 12 2016 v2.0.3
# remove all needless clutter from the line, but preserve characters that are used as
# seperators like "/" and "\".
line = line.replace("/", " ").replace("\\", " ")
uncluttered = re.sub("[^0123456789. a-zA-Z]", "", line).strip().lower()
# we are looking for a valid head here. If the head contains "release" or "version" we are
# pretty close but don't want them included when we try to parse the version. Remove them.
for intro in COMMON_RELEASE_INTRODUCTION:
if uncluttered.startswith(intro):
uncluttered = uncluttered.replace(intro, "")
# some projects use the project name as a prefix, remove it
uncluttered_name = re.sub("[^0123456789. a-zA-Z]", "", name).strip().lower()
uncluttered = uncluttered.replace(uncluttered_name, "").strip()
# now that all the clutter is removed, the line should be relatively short. If this is a valid
# head the only thing left should be the version and possibly some datestamp. We are going
# to count the length and assume a length of 8 for the version part, 8 for the datestamp and
# 2 as a safety. Leaving us with a max line length of 18
#if len(uncluttered) > 40:
# return False
# split the line in parts and sort these parts by "." count in reversed order. This turns a
# line like "12 12 2016 v2.0.3 into ['v2.0.3', "12", "12", "2016"]
parts = uncluttered.split(" ")
# if a line contains more than 6 parts, it's unlikely that this is a valid head
if len(parts) >= 8:
# nevertheless: if there's a '.' in one of the first three items, we might be able to
# find a valid head here.
if not ("." in parts[0] or "." in parts[1] or "." in parts[2]):
return False
if len(parts) > 1:
parts = parts[::len(parts)-1]
parts.sort(key=lambda s: "." in s, reverse=True)
# loop over all our parts an find a parseable version
for part in parts:
# remove the "v" prefix as it is not parseable
if part.startswith("v"):
part = part[1:]
# if there is no "." in this part, continue with the next one
if "." not in part:
continue
# looking good so far, return the version if it is parseable
try:
Version(part)
return part
except InvalidVersion as e:
pass
return False | Checks if `line` is a head
:param name: str, package name
:param line: str, line
:param releases: list, releases
:return: str, version if this is a valid head, False otherwise | Below is the instruction that describes the task:
### Input:
Checks if `line` is a head
:param name: str, package name
:param line: str, line
:param releases: list, releases
:return: str, version if this is a valid head, False otherwise
### Response:
def get_head(name, line, releases):
    """
    Checks if `line` is a head
    :param name: str, package name
    :param line: str, line
    :param releases: list, releases
    :return: str, version if this is a valid head, False otherwise
    """
    # NOTE(review): the `releases` parameter is never read in this body —
    # confirm whether it is still needed by callers or is vestigial.
    if not line:
        return False
    # if this line begins with an invalid starting character, return early.
    # invalid characters are those used by various markup languages to introduce a
    # new list item
    for char in INVALID_LINE_START:
        # markdown uses ** for bold text, we also want to make sure to not exclude lines
        # that contain a release
        if line.startswith(char) and not line.startswith("**") and not "release" in line.lower():
            return False
    # if this line ends with a "." this isn't a valid head, return early.
    for char in INVALID_LINE_ENDS:
        if line.endswith(char):
            return False
    # if the line contains "python", it is not a valid head. It's more likely that the maintainers
    # are talking about a Python release in general.
    # Note the trailing ' ' to not exclude lines like python-foolibrary
    if 'python ' in line.lower():
        return False
    # Our goal is to find a somewhat parseable line. For this to work, we need to remove all
    # parts that are not needed so that:
    #   release (12/12/2016) v2.0.3
    # becomes something like
    #   12 12 2016 v2.0.3
    # remove all needless clutter from the line, but preserve characters that are used as
    # separators like "/" and "\" (turned into spaces first).
    line = line.replace("/", " ").replace("\\", " ")
    uncluttered = re.sub("[^0123456789. a-zA-Z]", "", line).strip().lower()
    # we are looking for a valid head here. If the head contains "release" or "version" we are
    # pretty close but don't want them included when we try to parse the version. Remove them.
    for intro in COMMON_RELEASE_INTRODUCTION:
        if uncluttered.startswith(intro):
            uncluttered = uncluttered.replace(intro, "")
    # some projects use the project name as a prefix, remove it
    uncluttered_name = re.sub("[^0123456789. a-zA-Z]", "", name).strip().lower()
    uncluttered = uncluttered.replace(uncluttered_name, "").strip()
    # now that all the clutter is removed, the line should be relatively short. If this is a valid
    # head the only thing left should be the version and possibly some datestamp. We are going
    # to count the length and assume a length of 8 for the version part, 8 for the datestamp and
    # 2 as a safety. Leaving us with a max line length of 18
    # (length check currently disabled — kept for reference)
    #if len(uncluttered) > 40:
    #    return False
    # split the line in parts and sort these parts by "." count in reversed order. This turns a
    # line like "12 12 2016 v2.0.3" into ['v2.0.3', "12", "12", "2016"]
    parts = uncluttered.split(" ")
    # if a line contains more than 6 parts, it's unlikely that this is a valid head
    if len(parts) >= 8:
        # nevertheless: if there's a '.' in one of the first three items, we might be able to
        # find a valid head here.
        if not ("." in parts[0] or "." in parts[1] or "." in parts[2]):
            return False
    if len(parts) > 1:
        # NOTE(review): a step of len(parts)-1 keeps only the FIRST and LAST
        # element when len(parts) > 2 (for exactly two parts the step is 1 and
        # nothing is dropped) — confirm discarding the middle parts is intended.
        parts = parts[::len(parts)-1]
    parts.sort(key=lambda s: "." in s, reverse=True)
    # loop over all our parts and find a parseable version
    for part in parts:
        # remove the "v" prefix as it is not parseable
        if part.startswith("v"):
            part = part[1:]
        # if there is no "." in this part, continue with the next one
        if "." not in part:
            continue
        # looking good so far, return the version if it is parseable
        try:
            Version(part)
            return part
        except InvalidVersion as e:
            # unparseable candidate; try the next part
            pass
    return False
def leaveEvent(self, event):
""" Reimplemented to start the hide timer.
"""
super(CallTipWidget, self).leaveEvent(event)
        self._leave_event_hide() | Reimplemented to start the hide timer. | Below is the instruction that describes the task:
### Input:
Reimplemented to start the hide timer.
### Response:
def leaveEvent(self, event):
    """ Reimplemented to start the hide timer.

    Qt delivers this event when the mouse cursor leaves the widget.
    After the standard QWidget handling, the call tip is scheduled to
    be hidden (deferred) rather than hidden immediately.

    :param event: the QEvent passed in by Qt's event system.
    """
    super(CallTipWidget, self).leaveEvent(event)
    # Kick off the deferred hide (see _leave_event_hide).
    self._leave_event_hide()
def authorize(self, me, state=None, next_url=None, scope='read'):
"""Authorize a user via Micropub.
Args:
me (string): the authing user's URL. if it does not begin with
https?://, http:// will be prepended.
state (string, optional): passed through the whole auth process,
useful if you want to maintain some state, e.g. the starting page
to return to when auth is complete.
next_url (string, optional): deprecated and replaced by the more
general "state". still here for backward compatibility.
scope (string, optional): a space-separated string of micropub
scopes. 'read' by default.
Returns:
a redirect to the user's specified authorization
https://indieauth.com/auth if none is provided.
"""
redirect_url = flask.url_for(
self.flask_endpoint_for_function(self._authorized_handler),
_external=True)
return self._start_indieauth(
me, redirect_url, state or next_url, scope) | Authorize a user via Micropub.
Args:
me (string): the authing user's URL. if it does not begin with
https?://, http:// will be prepended.
state (string, optional): passed through the whole auth process,
useful if you want to maintain some state, e.g. the starting page
to return to when auth is complete.
next_url (string, optional): deprecated and replaced by the more
general "state". still here for backward compatibility.
scope (string, optional): a space-separated string of micropub
scopes. 'read' by default.
Returns:
a redirect to the user's specified authorization
https://indieauth.com/auth if none is provided. | Below is the the instruction that describes the task:
### Input:
Authorize a user via Micropub.
Args:
me (string): the authing user's URL. if it does not begin with
https?://, http:// will be prepended.
state (string, optional): passed through the whole auth process,
useful if you want to maintain some state, e.g. the starting page
to return to when auth is complete.
next_url (string, optional): deprecated and replaced by the more
general "state". still here for backward compatibility.
scope (string, optional): a space-separated string of micropub
scopes. 'read' by default.
Returns:
a redirect to the user's specified authorization
https://indieauth.com/auth if none is provided.
### Response:
def authorize(self, me, state=None, next_url=None, scope='read'):
    """Authorize a user via Micropub.

    Args:
        me (string): the authing user's URL. if it does not begin with
            https?://, http:// will be prepended.
        state (string, optional): opaque value carried through the whole
            auth flow, e.g. the starting page to return to afterwards.
        next_url (string, optional): deprecated alias for the more general
            "state"; kept for backward compatibility.
        scope (string, optional): a space-separated string of micropub
            scopes. 'read' by default.

    Returns:
        a redirect to the user's specified authorization endpoint,
        https://indieauth.com/auth if none is provided.
    """
    # Resolve the Flask endpoint that will receive the auth callback.
    endpoint = self.flask_endpoint_for_function(self._authorized_handler)
    callback_url = flask.url_for(endpoint, _external=True)
    # `next_url` only applies when no `state` was given (legacy behavior).
    return self._start_indieauth(me, callback_url, state or next_url, scope)
def map_concepts_to_indicators(
self, n: int = 1, min_temporal_res: Optional[str] = None
):
""" Map each concept node in the AnalysisGraph instance to one or more
tangible quantities, known as 'indicators'.
Args:
n: Number of matches to keep
min_temporal_res: Minimum temporal resolution that the indicators
must have data for.
"""
for node in self.nodes(data=True):
query_parts = [
"select Indicator from concept_to_indicator_mapping",
f"where `Concept` like '{node[0]}'",
]
# TODO May need to delve into SQL/database stuff a bit more deeply
# for this. Foreign keys perhaps?
query = " ".join(query_parts)
results = engine.execute(query)
if min_temporal_res is not None:
if min_temporal_res not in ["month"]:
raise ValueError("min_temporal_res must be 'month'")
vars_with_required_temporal_resolution = [
r[0]
for r in engine.execute(
"select distinct `Variable` from indicator where "
f"`{min_temporal_res.capitalize()}` is not null"
)
]
results = [
r
for r in results
if r[0] in vars_with_required_temporal_resolution
]
node[1]["indicators"] = {
x: Indicator(x, "MITRE12")
for x in [r[0] for r in take(n, results)]
} | Map each concept node in the AnalysisGraph instance to one or more
tangible quantities, known as 'indicators'.
Args:
n: Number of matches to keep
min_temporal_res: Minimum temporal resolution that the indicators
must have data for. | Below is the the instruction that describes the task:
### Input:
Map each concept node in the AnalysisGraph instance to one or more
tangible quantities, known as 'indicators'.
Args:
n: Number of matches to keep
min_temporal_res: Minimum temporal resolution that the indicators
must have data for.
### Response:
def map_concepts_to_indicators(
    self, n: int = 1, min_temporal_res: Optional[str] = None
):
    """ Map each concept node in the AnalysisGraph instance to one or more
    tangible quantities, known as 'indicators'.

    Each node gains an ``indicators`` dict attribute mapping indicator
    names to Indicator objects (hard-coded source "MITRE12").

    Args:
        n: Number of matches to keep
        min_temporal_res: Minimum temporal resolution that the indicators
            must have data for. Only "month" is supported; any other
            non-None value raises ValueError.
    """
    for node in self.nodes(data=True):
        # NOTE(review): node[0] is interpolated directly into the SQL text.
        # Concept names are presumably internal/trusted; if they can contain
        # quotes this is injectable — consider a parameterized query.
        query_parts = [
            "select Indicator from concept_to_indicator_mapping",
            f"where `Concept` like '{node[0]}'",
        ]
        # TODO May need to delve into SQL/database stuff a bit more deeply
        # for this. Foreign keys perhaps?
        query = " ".join(query_parts)
        results = engine.execute(query)
        if min_temporal_res is not None:
            if min_temporal_res not in ["month"]:
                raise ValueError("min_temporal_res must be 'month'")
            # Indicators with data at the required resolution have a non-null
            # column named after the capitalized resolution (e.g. `Month`).
            vars_with_required_temporal_resolution = [
                r[0]
                for r in engine.execute(
                    "select distinct `Variable` from indicator where "
                    f"`{min_temporal_res.capitalize()}` is not null"
                )
            ]
            results = [
                r
                for r in results
                if r[0] in vars_with_required_temporal_resolution
            ]
        # Keep at most n matches; each becomes an Indicator keyed by name.
        node[1]["indicators"] = {
            x: Indicator(x, "MITRE12")
            for x in [r[0] for r in take(n, results)]
        }
def plot_entities(self, show=False, annotations=True, color=None):
"""
Plot the entities of the path, with no notion of topology
"""
import matplotlib.pyplot as plt
plt.axes().set_aspect('equal', 'datalim')
eformat = {'Line0': {'color': 'g', 'linewidth': 1},
'Line1': {'color': 'y', 'linewidth': 1},
'Arc0': {'color': 'r', 'linewidth': 1},
'Arc1': {'color': 'b', 'linewidth': 1},
'Bezier0': {'color': 'k', 'linewidth': 1},
'Bezier1': {'color': 'k', 'linewidth': 1},
'BSpline0': {'color': 'm', 'linewidth': 1},
'BSpline1': {'color': 'm', 'linewidth': 1}}
for entity in self.entities:
if annotations and hasattr(entity, 'plot'):
entity.plot(self.vertices)
continue
discrete = entity.discrete(self.vertices)
e_key = entity.__class__.__name__ + str(int(entity.closed))
fmt = eformat[e_key]
if color is not None:
# passed color will override other optons
fmt['color'] = color
elif hasattr(entity, 'color'):
# if entity has specified color use it
fmt['color'] = entity.color
plt.plot(*discrete.T, **fmt)
if show:
plt.show() | Plot the entities of the path, with no notion of topology | Below is the the instruction that describes the task:
### Input:
Plot the entities of the path, with no notion of topology
### Response:
def plot_entities(self, show=False, annotations=True, color=None):
    """
    Plot the entities of the path, with no notion of topology
    """
    import matplotlib.pyplot as plt
    plt.axes().set_aspect('equal', 'datalim')
    # Default line color per (entity class, closed-flag) key; every
    # style dict also carries a unit linewidth.
    key_colors = {'Line0': 'g', 'Line1': 'y',
                  'Arc0': 'r', 'Arc1': 'b',
                  'Bezier0': 'k', 'Bezier1': 'k',
                  'BSpline0': 'm', 'BSpline1': 'm'}
    styles = {key: {'color': value, 'linewidth': 1}
              for key, value in key_colors.items()}
    for entity in self.entities:
        if annotations and hasattr(entity, 'plot'):
            # Entity knows how to draw itself (with annotations).
            entity.plot(self.vertices)
            continue
        points = entity.discrete(self.vertices)
        key = type(entity).__name__ + str(int(entity.closed))
        # Note: this intentionally aliases the shared style dict, so a
        # color override sticks for later entities with the same key.
        kwargs = styles[key]
        if color is not None:
            # an explicitly passed color overrides everything else
            kwargs['color'] = color
        elif hasattr(entity, 'color'):
            # otherwise honor a per-entity color when present
            kwargs['color'] = entity.color
        plt.plot(*points.T, **kwargs)
    if show:
        plt.show()
def getUniqueFilename(dir=None, base=None):
"""
DESCRP: Generate a filename in the directory <dir> which is
unique (i.e. not in use at the moment)
PARAMS: dir -- the directory to look in. If None, use CWD
base -- use this as the base name for the filename
RETURN: string -- the filename generated
"""
while True:
fn = str(random.randint(0, 100000)) + ".tmp"
if not os.path.exists(fn):
break
return fn | DESCRP: Generate a filename in the directory <dir> which is
unique (i.e. not in use at the moment)
PARAMS: dir -- the directory to look in. If None, use CWD
base -- use this as the base name for the filename
RETURN: string -- the filename generated | Below is the instruction that describes the task:
### Input:
DESCRP: Generate a filename in the directory <dir> which is
unique (i.e. not in use at the moment)
PARAMS: dir -- the directory to look in. If None, use CWD
base -- use this as the base name for the filename
RETURN: string -- the filename generated
### Response:
def getUniqueFilename(dir=None, base=None):
    """
    DESCRP: Generate a filename in the directory <dir> which is
            unique (i.e. not in use at the moment)
    PARAMS: dir -- the directory to look in. If None, use CWD
            base -- use this as the base name for the filename
    RETURN: string -- the filename generated (name only, not a full path)

    NOTE: there is an inherent race between the existence check and any
    later creation of the file; callers that need atomicity should use
    tempfile.mkstemp() instead.
    """
    # BUG FIX: the original body ignored both parameters even though the
    # docstring promised them; `dir` and `base` are now honored.  The
    # defaults (None/None) preserve the old behavior exactly.
    directory = dir if dir is not None else os.getcwd()
    prefix = base if base is not None else ""
    while True:
        fn = prefix + str(random.randint(0, 100000)) + ".tmp"
        if not os.path.exists(os.path.join(directory, fn)):
            break
    return fn
def parse_args():
"""Parses command line arguments."""
parser = ArgumentParser(description="ModelBase builder")
subparsers = parser.add_subparsers()
sql_parser = subparsers.add_parser(
"get-query",
description="Usage: e.g. psql -c \"copy ($(python3 lib/generate_models.py get-query)) to " +
"stdout with csv header\" DB_NAME postgres")
sql_parser.set_defaults(func=print_sql_query)
gen_parser = subparsers.add_parser("generate")
gen_parser.add_argument("filename", nargs="?", help="Read this file for input, or STDIN if not " \
"given")
gen_parser.add_argument("-i", "--indent", default=" ")
gen_parser.add_argument("-c", "--created-at-col-name", default="created_at")
gen_parser.add_argument("-u", "--updated-at-col-name", default="updated_at")
gen_parser.set_defaults(func=generate_models)
args = parser.parse_args()
if hasattr(args, "func"):
return args
else:
arg_parser.print_help()
sys.exit(1) | Parses command line arguments. | Below is the the instruction that describes the task:
### Input:
Parses command line arguments.
### Response:
def parse_args():
    """Parses command line arguments.

    Builds an ArgumentParser with two subcommands:
      * get-query -- prints the SQL query (handled by print_sql_query)
      * generate  -- generates models (handled by generate_models)

    Returns:
        argparse.Namespace with ``func`` bound to the chosen handler.

    Exits:
        With status 1 (after printing help) when no subcommand was given.
    """
    parser = ArgumentParser(description="ModelBase builder")
    subparsers = parser.add_subparsers()
    sql_parser = subparsers.add_parser(
        "get-query",
        description="Usage: e.g. psql -c \"copy ($(python3 lib/generate_models.py get-query)) to " +
        "stdout with csv header\" DB_NAME postgres")
    sql_parser.set_defaults(func=print_sql_query)
    gen_parser = subparsers.add_parser("generate")
    gen_parser.add_argument("filename", nargs="?", help="Read this file for input, or STDIN if not " \
        "given")
    gen_parser.add_argument("-i", "--indent", default="    ")
    gen_parser.add_argument("-c", "--created-at-col-name", default="created_at")
    gen_parser.add_argument("-u", "--updated-at-col-name", default="updated_at")
    gen_parser.set_defaults(func=generate_models)
    args = parser.parse_args()
    if hasattr(args, "func"):
        return args
    # BUG FIX: the original referenced the undefined name `arg_parser` here,
    # which raised NameError instead of printing the help text.
    parser.print_help()
    sys.exit(1)
def uvalues(a, encoding='utf-8', fallback='iso-8859-1'):
"""Return a list of decoded values from an iterator.
If any of the values fail to decode, re-decode all values using the
fallback.
"""
try:
return encoding, [s.decode(encoding) for s in a]
except UnicodeError:
return fallback, [s.decode(fallback) for s in a] | Return a list of decoded values from an iterator.
If any of the values fail to decode, re-decode all values using the
fallback. | Below is the the instruction that describes the task:
### Input:
Return a list of decoded values from an iterator.
If any of the values fail to decode, re-decode all values using the
fallback.
### Response:
def uvalues(a, encoding='utf-8', fallback='iso-8859-1'):
    """Return ``(codec_used, decoded_values)`` for the byte strings in *a*.

    Every element is decoded with *encoding*; if any element fails to
    decode, the whole sequence is re-decoded with *fallback* instead.
    """
    decoded = []
    try:
        for raw in a:
            decoded.append(raw.decode(encoding))
    except UnicodeError:
        return fallback, [raw.decode(fallback) for raw in a]
    return encoding, decoded
def with_actions(actions_or_group_name, actions=None):
"""Executes the list of actions before/after the function
Actions should be a list where items are action names as
strings or a dict. See frasco.actions.loaders.load_action().
"""
group = None
if isinstance(actions_or_group_name, str):
group = actions_or_group_name
else:
actions = actions_or_group_name
def decorator(f):
if isinstance(f, WithActionsDecorator):
dec = f
else:
dec = WithActionsDecorator(f)
dec.actions.extend(load_actions(actions, group=group))
return dec
return decorator | Executes the list of actions before/after the function
Actions should be a list where items are action names as
strings or a dict. See frasco.actions.loaders.load_action(). | Below is the the instruction that describes the task:
### Input:
Executes the list of actions before/after the function
Actions should be a list where items are action names as
strings or a dict. See frasco.actions.loaders.load_action().
### Response:
def with_actions(actions_or_group_name, actions=None):
    """Executes the list of actions before/after the decorated function.

    Actions is a list whose items are action names as strings or dicts;
    see frasco.actions.loaders.load_action().
    """
    # Disambiguate the first positional: a string is a group name,
    # anything else is the action list itself.
    if isinstance(actions_or_group_name, str):
        group_name = actions_or_group_name
        action_list = actions
    else:
        group_name = None
        action_list = actions_or_group_name

    def decorator(func):
        # Reuse an existing decorator wrapper instead of double-wrapping.
        if isinstance(func, WithActionsDecorator):
            wrapped = func
        else:
            wrapped = WithActionsDecorator(func)
        wrapped.actions.extend(load_actions(action_list, group=group_name))
        return wrapped

    return decorator
def render_placeholder(self, placeholder, parent_object=None, template_name=None, cachable=None, limit_parent_language=True, fallback_language=None):
"""
The main rendering sequence for placeholders.
This will do all the magic for caching, and call :func:`render_items` in the end.
"""
placeholder_name = get_placeholder_debug_name(placeholder)
logger.debug("Rendering placeholder '%s'", placeholder_name)
# Determine whether the placeholder can be cached.
cachable = self._can_cache_merged_output(template_name, cachable)
try_cache = cachable and self.may_cache_placeholders()
logger.debug("- try_cache=%s cachable=%s template_name=%s", try_cache, cachable, template_name)
if parent_object is None:
# To support filtering the placeholders by parent language, the parent object needs to be known.
# Fortunately, the PlaceholderFieldDescriptor makes sure this doesn't require an additional query.
parent_object = placeholder.parent
# Fetch the placeholder output from cache.
language_code = get_parent_language_code(parent_object)
cache_key = None
output = None
if try_cache:
cache_key = get_placeholder_cache_key_for_parent(parent_object, placeholder.slot, language_code)
output = cache.get(cache_key)
if output:
logger.debug("- fetched cached output")
if output is None:
# Get the items, and render them
items, is_fallback = self._get_placeholder_items(placeholder, parent_object, limit_parent_language, fallback_language, try_cache)
output = self.render_items(placeholder, items, parent_object, template_name, cachable)
if is_fallback:
# Caching fallbacks is not supported yet,
# content could be rendered in a different gettext language domain.
output.cacheable = False
# Store the full-placeholder contents in the cache.
if try_cache and output.cacheable:
if output.cache_timeout is not DEFAULT_TIMEOUT:
# The timeout is based on the minimal timeout used in plugins.
cache.set(cache_key, output, output.cache_timeout)
else:
# Don't want to mix into the default 0/None issue.
cache.set(cache_key, output)
return output | The main rendering sequence for placeholders.
This will do all the magic for caching, and call :func:`render_items` in the end. | Below is the the instruction that describes the task:
### Input:
The main rendering sequence for placeholders.
This will do all the magic for caching, and call :func:`render_items` in the end.
### Response:
def render_placeholder(self, placeholder, parent_object=None, template_name=None, cachable=None, limit_parent_language=True, fallback_language=None):
    """
    The main rendering sequence for placeholders.
    This will do all the magic for caching, and call :func:`render_items` in the end.

    :param placeholder: the Placeholder model instance to render.
    :param parent_object: the object owning the placeholder; looked up
        from ``placeholder.parent`` when not given.
    :param template_name: optional template used to merge the items.
    :param cachable: tri-state cache hint; resolved via
        ``_can_cache_merged_output``.
    :param limit_parent_language: restrict items to the parent's language.
    :param fallback_language: language to fall back to when no items exist.
    :return: the rendered output object (with ``cacheable`` /
        ``cache_timeout`` attributes).
    """
    placeholder_name = get_placeholder_debug_name(placeholder)
    logger.debug("Rendering placeholder '%s'", placeholder_name)
    # Determine whether the placeholder can be cached.
    cachable = self._can_cache_merged_output(template_name, cachable)
    try_cache = cachable and self.may_cache_placeholders()
    logger.debug("- try_cache=%s cachable=%s template_name=%s", try_cache, cachable, template_name)
    if parent_object is None:
        # To support filtering the placeholders by parent language, the parent object needs to be known.
        # Fortunately, the PlaceholderFieldDescriptor makes sure this doesn't require an additional query.
        parent_object = placeholder.parent
    # Fetch the placeholder output from cache.
    language_code = get_parent_language_code(parent_object)
    cache_key = None
    output = None
    if try_cache:
        cache_key = get_placeholder_cache_key_for_parent(parent_object, placeholder.slot, language_code)
        output = cache.get(cache_key)
        if output:
            logger.debug("- fetched cached output")
    if output is None:
        # Cache miss (or caching disabled): get the items, and render them.
        items, is_fallback = self._get_placeholder_items(placeholder, parent_object, limit_parent_language, fallback_language, try_cache)
        output = self.render_items(placeholder, items, parent_object, template_name, cachable)
        if is_fallback:
            # Caching fallbacks is not supported yet,
            # content could be rendered in a different gettext language domain.
            output.cacheable = False
    # Store the full-placeholder contents in the cache.
    if try_cache and output.cacheable:
        # NOTE: identity check against the DEFAULT_TIMEOUT sentinel is
        # deliberate — 0/None are meaningful timeout values.
        if output.cache_timeout is not DEFAULT_TIMEOUT:
            # The timeout is based on the minimal timeout used in plugins.
            cache.set(cache_key, output, output.cache_timeout)
        else:
            # Don't want to mix into the default 0/None issue.
            cache.set(cache_key, output)
    return output
def haversine(lng1, lat1, lng2, lat2):
"""Compute km by geo-coordinates
See also: haversine define https://en.wikipedia.org/wiki/Haversine_formula
"""
# Convert coordinates to floats.
lng1, lat1, lng2, lat2 = map(float, [lng1, lat1, lng2, lat2])
# Convert to radians from degrees
lng1, lat1, lng2, lat2 = map(math.radians, [lng1, lat1, lng2, lat2])
# Compute distance
dlng = lng2 - lng1
dlat = lat2 - lat1
a = math.sin(dlat/2)**2 + math.cos(lat1) * math.cos(lat2) * math.sin(dlng/2)**2
c = 2 * math.asin(math.sqrt(a))
km = 6367 * c
return km | Compute km by geo-coordinates
See also: haversine define https://en.wikipedia.org/wiki/Haversine_formula | Below is the the instruction that describes the task:
### Input:
Compute km by geo-coordinates
See also: haversine define https://en.wikipedia.org/wiki/Haversine_formula
### Response:
def haversine(lng1, lat1, lng2, lat2):
    """Compute km by geo-coordinates

    Great-circle distance via the haversine formula, using an Earth
    radius of 6367 km.  Coordinates are in degrees and may be given as
    numbers or numeric strings.

    See also: haversine define https://en.wikipedia.org/wiki/Haversine_formula
    """
    # Coerce to float and convert to radians in a single pass.
    rlng1, rlat1, rlng2, rlat2 = (
        math.radians(float(value)) for value in (lng1, lat1, lng2, lat2)
    )
    half_dlat = (rlat2 - rlat1) / 2.0
    half_dlng = (rlng2 - rlng1) / 2.0
    a = (
        math.sin(half_dlat) ** 2
        + math.cos(rlat1) * math.cos(rlat2) * math.sin(half_dlng) ** 2
    )
    # Central angle times the Earth radius used throughout (6367 km).
    return 6367 * 2 * math.asin(math.sqrt(a))
def discharge_coefficient_to_K(D, Do, C):
r'''Converts a discharge coefficient to a standard loss coefficient,
for use in computation of the actual pressure drop of an orifice or other
device.
.. math::
K = \left[\frac{\sqrt{1-\beta^4(1-C^2)}}{C\beta^2} - 1\right]^2
Parameters
----------
D : float
Upstream internal pipe diameter, [m]
Do : float
Diameter of orifice at flow conditions, [m]
C : float
Coefficient of discharge of the orifice, [-]
Returns
-------
K : float
Loss coefficient with respect to the velocity and density of the fluid
just upstream of the orifice, [-]
Notes
-----
If expansibility is used in the orifice calculation, the result will not
match with the specified pressure drop formula in [1]_; it can almost
be matched by dividing the calculated mass flow by the expansibility factor
and using that mass flow with the loss coefficient.
Examples
--------
>>> discharge_coefficient_to_K(D=0.07366, Do=0.05, C=0.61512)
5.2314291729754
References
----------
.. [1] American Society of Mechanical Engineers. Mfc-3M-2004 Measurement
Of Fluid Flow In Pipes Using Orifice, Nozzle, And Venturi. ASME, 2001.
.. [2] ISO 5167-2:2003 - Measurement of Fluid Flow by Means of Pressure
Differential Devices Inserted in Circular Cross-Section Conduits Running
Full -- Part 2: Orifice Plates.
'''
beta = Do/D
beta2 = beta*beta
beta4 = beta2*beta2
return ((1.0 - beta4*(1.0 - C*C))**0.5/(C*beta2) - 1.0)**2 | r'''Converts a discharge coefficient to a standard loss coefficient,
for use in computation of the actual pressure drop of an orifice or other
device.
.. math::
K = \left[\frac{\sqrt{1-\beta^4(1-C^2)}}{C\beta^2} - 1\right]^2
Parameters
----------
D : float
Upstream internal pipe diameter, [m]
Do : float
Diameter of orifice at flow conditions, [m]
C : float
Coefficient of discharge of the orifice, [-]
Returns
-------
K : float
Loss coefficient with respect to the velocity and density of the fluid
just upstream of the orifice, [-]
Notes
-----
If expansibility is used in the orifice calculation, the result will not
match with the specified pressure drop formula in [1]_; it can almost
be matched by dividing the calculated mass flow by the expansibility factor
and using that mass flow with the loss coefficient.
Examples
--------
>>> discharge_coefficient_to_K(D=0.07366, Do=0.05, C=0.61512)
5.2314291729754
References
----------
.. [1] American Society of Mechanical Engineers. Mfc-3M-2004 Measurement
Of Fluid Flow In Pipes Using Orifice, Nozzle, And Venturi. ASME, 2001.
.. [2] ISO 5167-2:2003 - Measurement of Fluid Flow by Means of Pressure
Differential Devices Inserted in Circular Cross-Section Conduits Running
Full -- Part 2: Orifice Plates. | Below is the the instruction that describes the task:
### Input:
r'''Converts a discharge coefficient to a standard loss coefficient,
for use in computation of the actual pressure drop of an orifice or other
device.
.. math::
K = \left[\frac{\sqrt{1-\beta^4(1-C^2)}}{C\beta^2} - 1\right]^2
Parameters
----------
D : float
Upstream internal pipe diameter, [m]
Do : float
Diameter of orifice at flow conditions, [m]
C : float
Coefficient of discharge of the orifice, [-]
Returns
-------
K : float
Loss coefficient with respect to the velocity and density of the fluid
just upstream of the orifice, [-]
Notes
-----
If expansibility is used in the orifice calculation, the result will not
match with the specified pressure drop formula in [1]_; it can almost
be matched by dividing the calculated mass flow by the expansibility factor
and using that mass flow with the loss coefficient.
Examples
--------
>>> discharge_coefficient_to_K(D=0.07366, Do=0.05, C=0.61512)
5.2314291729754
References
----------
.. [1] American Society of Mechanical Engineers. Mfc-3M-2004 Measurement
Of Fluid Flow In Pipes Using Orifice, Nozzle, And Venturi. ASME, 2001.
.. [2] ISO 5167-2:2003 - Measurement of Fluid Flow by Means of Pressure
Differential Devices Inserted in Circular Cross-Section Conduits Running
Full -- Part 2: Orifice Plates.
### Response:
def discharge_coefficient_to_K(D, Do, C):
    r'''Converts a discharge coefficient to a standard loss coefficient,
    for use in computation of the actual pressure drop of an orifice or other
    device.

    .. math::
        K = \left[\frac{\sqrt{1-\beta^4(1-C^2)}}{C\beta^2} - 1\right]^2

    Parameters
    ----------
    D : float
        Upstream internal pipe diameter, [m]
    Do : float
        Diameter of orifice at flow conditions, [m]
    C : float
        Coefficient of discharge of the orifice, [-]

    Returns
    -------
    K : float
        Loss coefficient with respect to the velocity and density of the fluid
        just upstream of the orifice, [-]

    Notes
    -----
    If expansibility is used in the orifice calculation, the result will not
    match with the specified pressure drop formula in [1]_; it can almost
    be matched by dividing the calculated mass flow by the expansibility factor
    and using that mass flow with the loss coefficient.

    Examples
    --------
    >>> discharge_coefficient_to_K(D=0.07366, Do=0.05, C=0.61512)
    5.2314291729754

    References
    ----------
    .. [1] American Society of Mechanical Engineers. Mfc-3M-2004 Measurement
       Of Fluid Flow In Pipes Using Orifice, Nozzle, And Venturi. ASME, 2001.
    .. [2] ISO 5167-2:2003 - Measurement of Fluid Flow by Means of Pressure
       Differential Devices Inserted in Circular Cross-Section Conduits Running
       Full -- Part 2: Orifice Plates.
    '''
    # beta is the diameter ratio; only its square and fourth power appear.
    beta2 = (Do / D) ** 2
    discharge_sq = C * C
    numerator = (1.0 - beta2 * beta2 * (1.0 - discharge_sq)) ** 0.5
    loss_root = numerator / (C * beta2) - 1.0
    return loss_root * loss_root
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.