code stringlengths 75 104k | docstring stringlengths 1 46.9k | text stringlengths 164 112k |
|---|---|---|
def merge_segments(segments, exif=b""):
"""Merges Exif with APP0 and APP1 manipulations.
"""
if segments[1][0:2] == b"\xff\xe0" and \
segments[2][0:2] == b"\xff\xe1" and \
segments[2][4:10] == b"Exif\x00\x00":
if exif:
segments[2] = exif
segments.pop(1)
elif exif is None:
segments.pop(2)
else:
segments.pop(1)
elif segments[1][0:2] == b"\xff\xe0":
if exif:
segments[1] = exif
elif segments[1][0:2] == b"\xff\xe1" and \
segments[1][4:10] == b"Exif\x00\x00":
if exif:
segments[1] = exif
elif exif is None:
segments.pop(1)
else:
if exif:
segments.insert(1, exif)
return b"".join(segments) | Merges Exif with APP0 and APP1 manipulations. | Below is the the instruction that describes the task:
### Input:
Merges Exif with APP0 and APP1 manipulations.
### Response:
def merge_segments(segments, exif=b""):
"""Merges Exif with APP0 and APP1 manipulations.
"""
if segments[1][0:2] == b"\xff\xe0" and \
segments[2][0:2] == b"\xff\xe1" and \
segments[2][4:10] == b"Exif\x00\x00":
if exif:
segments[2] = exif
segments.pop(1)
elif exif is None:
segments.pop(2)
else:
segments.pop(1)
elif segments[1][0:2] == b"\xff\xe0":
if exif:
segments[1] = exif
elif segments[1][0:2] == b"\xff\xe1" and \
segments[1][4:10] == b"Exif\x00\x00":
if exif:
segments[1] = exif
elif exif is None:
segments.pop(1)
else:
if exif:
segments.insert(1, exif)
return b"".join(segments) |
def getParentElementCustomFilter(self, filterFunc):
'''
getParentElementCustomFilter - Runs through parent on up to document root, returning the
first tag which filterFunc(tag) returns True.
@param filterFunc <function/lambda> - A function or lambda expression that should return "True" if the passed node matches criteria.
@return <AdvancedTag/None> - First match, or None
@see getFirstElementCustomFilter for matches against children
'''
parentNode = self.parentNode
while parentNode:
if filterFunc(parentNode) is True:
return parentNode
parentNode = parentNode.parentNode
return None | getParentElementCustomFilter - Runs through parent on up to document root, returning the
first tag which filterFunc(tag) returns True.
@param filterFunc <function/lambda> - A function or lambda expression that should return "True" if the passed node matches criteria.
@return <AdvancedTag/None> - First match, or None
@see getFirstElementCustomFilter for matches against children | Below is the the instruction that describes the task:
### Input:
getParentElementCustomFilter - Runs through parent on up to document root, returning the
first tag which filterFunc(tag) returns True.
@param filterFunc <function/lambda> - A function or lambda expression that should return "True" if the passed node matches criteria.
@return <AdvancedTag/None> - First match, or None
@see getFirstElementCustomFilter for matches against children
### Response:
def getParentElementCustomFilter(self, filterFunc):
'''
getParentElementCustomFilter - Runs through parent on up to document root, returning the
first tag which filterFunc(tag) returns True.
@param filterFunc <function/lambda> - A function or lambda expression that should return "True" if the passed node matches criteria.
@return <AdvancedTag/None> - First match, or None
@see getFirstElementCustomFilter for matches against children
'''
parentNode = self.parentNode
while parentNode:
if filterFunc(parentNode) is True:
return parentNode
parentNode = parentNode.parentNode
return None |
def _build_preprocessed_function(func,
processors,
args_defaults,
varargs,
varkw):
"""
Build a preprocessed function with the same signature as `func`.
Uses `exec` internally to build a function that actually has the same
signature as `func.
"""
format_kwargs = {'func_name': func.__name__}
def mangle(name):
return 'a' + uuid4().hex + name
format_kwargs['mangled_func'] = mangled_funcname = mangle(func.__name__)
def make_processor_assignment(arg, processor_name):
template = "{arg} = {processor}({func}, '{arg}', {arg})"
return template.format(
arg=arg,
processor=processor_name,
func=mangled_funcname,
)
exec_globals = {mangled_funcname: func, 'wraps': wraps}
defaults_seen = 0
default_name_template = 'a' + uuid4().hex + '_%d'
signature = []
call_args = []
assignments = []
star_map = {
varargs: '*',
varkw: '**',
}
def name_as_arg(arg):
return star_map.get(arg, '') + arg
for arg, default in args_defaults:
if default is NO_DEFAULT:
signature.append(name_as_arg(arg))
else:
default_name = default_name_template % defaults_seen
exec_globals[default_name] = default
signature.append('='.join([name_as_arg(arg), default_name]))
defaults_seen += 1
if arg in processors:
procname = mangle('_processor_' + arg)
exec_globals[procname] = processors[arg]
assignments.append(make_processor_assignment(arg, procname))
call_args.append(name_as_arg(arg))
exec_str = dedent(
"""\
@wraps({wrapped_funcname})
def {func_name}({signature}):
{assignments}
return {wrapped_funcname}({call_args})
"""
).format(
func_name=func.__name__,
signature=', '.join(signature),
assignments='\n '.join(assignments),
wrapped_funcname=mangled_funcname,
call_args=', '.join(call_args),
)
compiled = compile(
exec_str,
func.__code__.co_filename,
mode='exec',
)
exec_locals = {}
exec_(compiled, exec_globals, exec_locals)
new_func = exec_locals[func.__name__]
code = new_func.__code__
args = {
attr: getattr(code, attr)
for attr in dir(code)
if attr.startswith('co_')
}
# Copy the firstlineno out of the underlying function so that exceptions
# get raised with the correct traceback.
# This also makes dynamic source inspection (like IPython `??` operator)
# work as intended.
try:
# Try to get the pycode object from the underlying function.
original_code = func.__code__
except AttributeError:
try:
# The underlying callable was not a function, try to grab the
# `__func__.__code__` which exists on method objects.
original_code = func.__func__.__code__
except AttributeError:
# The underlying callable does not have a `__code__`. There is
# nothing for us to correct.
return new_func
args['co_firstlineno'] = original_code.co_firstlineno
new_func.__code__ = CodeType(*map(getitem(args), _code_argorder))
return new_func | Build a preprocessed function with the same signature as `func`.
Uses `exec` internally to build a function that actually has the same
signature as `func. | Below is the the instruction that describes the task:
### Input:
Build a preprocessed function with the same signature as `func`.
Uses `exec` internally to build a function that actually has the same
signature as `func.
### Response:
def _build_preprocessed_function(func,
processors,
args_defaults,
varargs,
varkw):
"""
Build a preprocessed function with the same signature as `func`.
Uses `exec` internally to build a function that actually has the same
signature as `func.
"""
format_kwargs = {'func_name': func.__name__}
def mangle(name):
return 'a' + uuid4().hex + name
format_kwargs['mangled_func'] = mangled_funcname = mangle(func.__name__)
def make_processor_assignment(arg, processor_name):
template = "{arg} = {processor}({func}, '{arg}', {arg})"
return template.format(
arg=arg,
processor=processor_name,
func=mangled_funcname,
)
exec_globals = {mangled_funcname: func, 'wraps': wraps}
defaults_seen = 0
default_name_template = 'a' + uuid4().hex + '_%d'
signature = []
call_args = []
assignments = []
star_map = {
varargs: '*',
varkw: '**',
}
def name_as_arg(arg):
return star_map.get(arg, '') + arg
for arg, default in args_defaults:
if default is NO_DEFAULT:
signature.append(name_as_arg(arg))
else:
default_name = default_name_template % defaults_seen
exec_globals[default_name] = default
signature.append('='.join([name_as_arg(arg), default_name]))
defaults_seen += 1
if arg in processors:
procname = mangle('_processor_' + arg)
exec_globals[procname] = processors[arg]
assignments.append(make_processor_assignment(arg, procname))
call_args.append(name_as_arg(arg))
exec_str = dedent(
"""\
@wraps({wrapped_funcname})
def {func_name}({signature}):
{assignments}
return {wrapped_funcname}({call_args})
"""
).format(
func_name=func.__name__,
signature=', '.join(signature),
assignments='\n '.join(assignments),
wrapped_funcname=mangled_funcname,
call_args=', '.join(call_args),
)
compiled = compile(
exec_str,
func.__code__.co_filename,
mode='exec',
)
exec_locals = {}
exec_(compiled, exec_globals, exec_locals)
new_func = exec_locals[func.__name__]
code = new_func.__code__
args = {
attr: getattr(code, attr)
for attr in dir(code)
if attr.startswith('co_')
}
# Copy the firstlineno out of the underlying function so that exceptions
# get raised with the correct traceback.
# This also makes dynamic source inspection (like IPython `??` operator)
# work as intended.
try:
# Try to get the pycode object from the underlying function.
original_code = func.__code__
except AttributeError:
try:
# The underlying callable was not a function, try to grab the
# `__func__.__code__` which exists on method objects.
original_code = func.__func__.__code__
except AttributeError:
# The underlying callable does not have a `__code__`. There is
# nothing for us to correct.
return new_func
args['co_firstlineno'] = original_code.co_firstlineno
new_func.__code__ = CodeType(*map(getitem(args), _code_argorder))
return new_func |
def ancestors(self, start, generations=None):
"""
Return the subgraph of all nodes from which the given vertex is
reachable, including that vertex.
If specified, the optional `generations` argument specifies how
many generations to limit to.
"""
visited = self.vertex_set()
visited.add(start)
to_visit = deque([(start, 0)])
while to_visit:
vertex, depth = to_visit.popleft()
if depth == generations:
continue
for parent in self.parents(vertex):
if parent not in visited:
visited.add(parent)
to_visit.append((parent, depth+1))
return self.full_subgraph(visited) | Return the subgraph of all nodes from which the given vertex is
reachable, including that vertex.
If specified, the optional `generations` argument specifies how
many generations to limit to. | Below is the the instruction that describes the task:
### Input:
Return the subgraph of all nodes from which the given vertex is
reachable, including that vertex.
If specified, the optional `generations` argument specifies how
many generations to limit to.
### Response:
def ancestors(self, start, generations=None):
"""
Return the subgraph of all nodes from which the given vertex is
reachable, including that vertex.
If specified, the optional `generations` argument specifies how
many generations to limit to.
"""
visited = self.vertex_set()
visited.add(start)
to_visit = deque([(start, 0)])
while to_visit:
vertex, depth = to_visit.popleft()
if depth == generations:
continue
for parent in self.parents(vertex):
if parent not in visited:
visited.add(parent)
to_visit.append((parent, depth+1))
return self.full_subgraph(visited) |
def _pstoref16(ins):
""" Stores 2nd parameter at stack pointer (SP) + X, being
X 1st parameter.
1st operand must be a SIGNED integer.
"""
value = ins.quad[2]
offset = ins.quad[1]
indirect = offset[0] == '*'
if indirect:
offset = offset[1:]
I = int(offset)
if I >= 0:
I += 4 # Return Address + "push IX"
output = _f16_oper(value)
if indirect:
output.append('ld bc, %i' % I)
output.append('call __PISTORE32')
REQUIRES.add('pistore32.asm')
return output
# direct store
output.append('ld bc, %i' % I)
output.append('call __PSTORE32')
REQUIRES.add('pstore32.asm')
return output | Stores 2nd parameter at stack pointer (SP) + X, being
X 1st parameter.
1st operand must be a SIGNED integer. | Below is the the instruction that describes the task:
### Input:
Stores 2nd parameter at stack pointer (SP) + X, being
X 1st parameter.
1st operand must be a SIGNED integer.
### Response:
def _pstoref16(ins):
""" Stores 2nd parameter at stack pointer (SP) + X, being
X 1st parameter.
1st operand must be a SIGNED integer.
"""
value = ins.quad[2]
offset = ins.quad[1]
indirect = offset[0] == '*'
if indirect:
offset = offset[1:]
I = int(offset)
if I >= 0:
I += 4 # Return Address + "push IX"
output = _f16_oper(value)
if indirect:
output.append('ld bc, %i' % I)
output.append('call __PISTORE32')
REQUIRES.add('pistore32.asm')
return output
# direct store
output.append('ld bc, %i' % I)
output.append('call __PSTORE32')
REQUIRES.add('pstore32.asm')
return output |
def apply_inheritance(self):
"""Apply inheritance over templates
Template can be used in the following objects::
* hosts
* contacts
* services
* servicedependencies
* hostdependencies
* timeperiods
* hostsextinfo
* servicesextinfo
* serviceescalations
* hostescalations
* escalations
:return: None
"""
# inheritance properties by template
self.hosts.apply_inheritance()
self.contacts.apply_inheritance()
self.services.apply_inheritance()
self.servicedependencies.apply_inheritance()
self.hostdependencies.apply_inheritance()
# Also timeperiods
self.timeperiods.apply_inheritance()
# Also "Hostextinfo"
self.hostsextinfo.apply_inheritance()
# Also "Serviceextinfo"
self.servicesextinfo.apply_inheritance()
# Now escalations too
self.serviceescalations.apply_inheritance()
self.hostescalations.apply_inheritance()
self.escalations.apply_inheritance() | Apply inheritance over templates
Template can be used in the following objects::
* hosts
* contacts
* services
* servicedependencies
* hostdependencies
* timeperiods
* hostsextinfo
* servicesextinfo
* serviceescalations
* hostescalations
* escalations
:return: None | Below is the the instruction that describes the task:
### Input:
Apply inheritance over templates
Template can be used in the following objects::
* hosts
* contacts
* services
* servicedependencies
* hostdependencies
* timeperiods
* hostsextinfo
* servicesextinfo
* serviceescalations
* hostescalations
* escalations
:return: None
### Response:
def apply_inheritance(self):
"""Apply inheritance over templates
Template can be used in the following objects::
* hosts
* contacts
* services
* servicedependencies
* hostdependencies
* timeperiods
* hostsextinfo
* servicesextinfo
* serviceescalations
* hostescalations
* escalations
:return: None
"""
# inheritance properties by template
self.hosts.apply_inheritance()
self.contacts.apply_inheritance()
self.services.apply_inheritance()
self.servicedependencies.apply_inheritance()
self.hostdependencies.apply_inheritance()
# Also timeperiods
self.timeperiods.apply_inheritance()
# Also "Hostextinfo"
self.hostsextinfo.apply_inheritance()
# Also "Serviceextinfo"
self.servicesextinfo.apply_inheritance()
# Now escalations too
self.serviceescalations.apply_inheritance()
self.hostescalations.apply_inheritance()
self.escalations.apply_inheritance() |
def get_catalogs(self):
"""Gets the catalog list resulting from the search.
return: (osid.cataloging.CatalogList) - the catalogs list
raise: IllegalState - list has already been retrieved
*compliance: mandatory -- This method must be implemented.*
"""
if self.retrieved:
raise errors.IllegalState('List has already been retrieved.')
self.retrieved = True
return objects.CatalogList(self._results, runtime=self._runtime) | Gets the catalog list resulting from the search.
return: (osid.cataloging.CatalogList) - the catalogs list
raise: IllegalState - list has already been retrieved
*compliance: mandatory -- This method must be implemented.* | Below is the the instruction that describes the task:
### Input:
Gets the catalog list resulting from the search.
return: (osid.cataloging.CatalogList) - the catalogs list
raise: IllegalState - list has already been retrieved
*compliance: mandatory -- This method must be implemented.*
### Response:
def get_catalogs(self):
"""Gets the catalog list resulting from the search.
return: (osid.cataloging.CatalogList) - the catalogs list
raise: IllegalState - list has already been retrieved
*compliance: mandatory -- This method must be implemented.*
"""
if self.retrieved:
raise errors.IllegalState('List has already been retrieved.')
self.retrieved = True
return objects.CatalogList(self._results, runtime=self._runtime) |
def get_charset(message, default="utf-8"):
"""Get the message charset"""
if message.get_content_charset():
return message.get_content_charset()
if message.get_charset():
return message.get_charset()
return default | Get the message charset | Below is the the instruction that describes the task:
### Input:
Get the message charset
### Response:
def get_charset(message, default="utf-8"):
"""Get the message charset"""
if message.get_content_charset():
return message.get_content_charset()
if message.get_charset():
return message.get_charset()
return default |
def _collection(self):
"""
Effectively retrieve data according to lazy_collection.
If we have a stored collection, without any result, return an empty list
"""
old_sort_limits_and_len_mode = None if self._sort_limits is None else self._sort_limits.copy(), self._len_mode
old_sorts = None if self._sort is None else self._sort.copy(),\
None if self._sort_by_sortedset is None else self._sort_by_sortedset.copy()
try:
if self.stored_key and not self._stored_len:
if self._len_mode:
self._len = 0
self._len_mode = False
self._sort_limits = {}
return []
# Manage sort desc added by original `__getitem__` when we sort by score
if self._sort_by_sortedset and self._sort and self._sort.get('desc'):
self._sort = None
self._sort_by_sortedset['desc'] = not self._sort_by_sortedset.get('desc', False)
return super(ExtendedCollectionManager, self)._collection
finally:
self._sort_limits, self._len_mode = old_sort_limits_and_len_mode
self._sort, self._sort_by_sortedset = old_sorts | Effectively retrieve data according to lazy_collection.
If we have a stored collection, without any result, return an empty list | Below is the the instruction that describes the task:
### Input:
Effectively retrieve data according to lazy_collection.
If we have a stored collection, without any result, return an empty list
### Response:
def _collection(self):
"""
Effectively retrieve data according to lazy_collection.
If we have a stored collection, without any result, return an empty list
"""
old_sort_limits_and_len_mode = None if self._sort_limits is None else self._sort_limits.copy(), self._len_mode
old_sorts = None if self._sort is None else self._sort.copy(),\
None if self._sort_by_sortedset is None else self._sort_by_sortedset.copy()
try:
if self.stored_key and not self._stored_len:
if self._len_mode:
self._len = 0
self._len_mode = False
self._sort_limits = {}
return []
# Manage sort desc added by original `__getitem__` when we sort by score
if self._sort_by_sortedset and self._sort and self._sort.get('desc'):
self._sort = None
self._sort_by_sortedset['desc'] = not self._sort_by_sortedset.get('desc', False)
return super(ExtendedCollectionManager, self)._collection
finally:
self._sort_limits, self._len_mode = old_sort_limits_and_len_mode
self._sort, self._sort_by_sortedset = old_sorts |
def get_program(self, n, timeout=2.0, max_retries=2):
""" Get a program from the drive.
Gets program 'n' from the drive and returns its commands.
Parameters
----------
n : int
Which program to get.
timeout : number, optional
Optional timeout in seconds to use when reading the
response. A negative value or ``None`` indicates that the
an infinite timeout should be used.
max_retries : int, optional
Maximum number of retries to do per command in the case of
errors.
Returns
-------
commands : list of str
``list`` of ``str`` commands making up the program. The
trailing 'END' is removed. Empty if there was an error.
Notes
-----
The command sent to the drive is '!TPROG PROGn'.
See Also
--------
set_program_profile : Sets a program or profile.
run_program_profile : Runs a program or profile.
"""
# Send the 'TPROG PROGn' command to read the program.
response = self.driver.send_command( \
'TPROG PROG' + str(int(n)), timeout=timeout, \
immediate=True, max_retries=max_retries)
# If there was an error, then return empty. Otherwise, return
# the response lines but strip the leading '*' first and the
# 'END' at the end of the list.
if self.driver.command_error(response) \
or len(response[4]) == 0:
return []
else:
if '*END' in response[4]:
response[4].remove('*END')
return [line[1:] for line in response[4]] | Get a program from the drive.
Gets program 'n' from the drive and returns its commands.
Parameters
----------
n : int
Which program to get.
timeout : number, optional
Optional timeout in seconds to use when reading the
response. A negative value or ``None`` indicates that the
an infinite timeout should be used.
max_retries : int, optional
Maximum number of retries to do per command in the case of
errors.
Returns
-------
commands : list of str
``list`` of ``str`` commands making up the program. The
trailing 'END' is removed. Empty if there was an error.
Notes
-----
The command sent to the drive is '!TPROG PROGn'.
See Also
--------
set_program_profile : Sets a program or profile.
run_program_profile : Runs a program or profile. | Below is the the instruction that describes the task:
### Input:
Get a program from the drive.
Gets program 'n' from the drive and returns its commands.
Parameters
----------
n : int
Which program to get.
timeout : number, optional
Optional timeout in seconds to use when reading the
response. A negative value or ``None`` indicates that the
an infinite timeout should be used.
max_retries : int, optional
Maximum number of retries to do per command in the case of
errors.
Returns
-------
commands : list of str
``list`` of ``str`` commands making up the program. The
trailing 'END' is removed. Empty if there was an error.
Notes
-----
The command sent to the drive is '!TPROG PROGn'.
See Also
--------
set_program_profile : Sets a program or profile.
run_program_profile : Runs a program or profile.
### Response:
def get_program(self, n, timeout=2.0, max_retries=2):
""" Get a program from the drive.
Gets program 'n' from the drive and returns its commands.
Parameters
----------
n : int
Which program to get.
timeout : number, optional
Optional timeout in seconds to use when reading the
response. A negative value or ``None`` indicates that the
an infinite timeout should be used.
max_retries : int, optional
Maximum number of retries to do per command in the case of
errors.
Returns
-------
commands : list of str
``list`` of ``str`` commands making up the program. The
trailing 'END' is removed. Empty if there was an error.
Notes
-----
The command sent to the drive is '!TPROG PROGn'.
See Also
--------
set_program_profile : Sets a program or profile.
run_program_profile : Runs a program or profile.
"""
# Send the 'TPROG PROGn' command to read the program.
response = self.driver.send_command( \
'TPROG PROG' + str(int(n)), timeout=timeout, \
immediate=True, max_retries=max_retries)
# If there was an error, then return empty. Otherwise, return
# the response lines but strip the leading '*' first and the
# 'END' at the end of the list.
if self.driver.command_error(response) \
or len(response[4]) == 0:
return []
else:
if '*END' in response[4]:
response[4].remove('*END')
return [line[1:] for line in response[4]] |
def skill_delete(self, skill_id, **kwargs):
"https://developer.zendesk.com/rest_api/docs/chat/skills#delete-skill-by-id"
api_path = "/api/v2/skills/{skill_id}"
api_path = api_path.format(skill_id=skill_id)
return self.call(api_path, method="DELETE", **kwargs) | https://developer.zendesk.com/rest_api/docs/chat/skills#delete-skill-by-id | Below is the the instruction that describes the task:
### Input:
https://developer.zendesk.com/rest_api/docs/chat/skills#delete-skill-by-id
### Response:
def skill_delete(self, skill_id, **kwargs):
"https://developer.zendesk.com/rest_api/docs/chat/skills#delete-skill-by-id"
api_path = "/api/v2/skills/{skill_id}"
api_path = api_path.format(skill_id=skill_id)
return self.call(api_path, method="DELETE", **kwargs) |
def extract_user_keywords_generator(twitter_lists_gen, lemmatizing="wordnet"):
"""
Based on the user-related lists I have downloaded, annotate the users.
Inputs: - twitter_lists_gen: A python generator that yields a user Twitter id and a generator of Twitter lists.
- lemmatizing: A string containing one of the following: "porter", "snowball" or "wordnet".
Yields: - user_twitter_id: A Twitter user id.
- user_annotation: A python dictionary that contains two dicts:
* bag_of_lemmas: Maps emmas to multiplicity.
* lemma_to_keywordbag: A python dictionary that maps stems/lemmas to original topic keywords.
"""
####################################################################################################################
# Extract keywords serially.
####################################################################################################################
for user_twitter_id, twitter_lists_list in twitter_lists_gen:
if twitter_lists_list is not None:
if "lists" in twitter_lists_list.keys():
twitter_lists_list = twitter_lists_list["lists"]
bag_of_lemmas, lemma_to_keywordbag = user_twitter_list_bag_of_words(twitter_lists_list, lemmatizing)
for lemma, keywordbag in lemma_to_keywordbag.items():
lemma_to_keywordbag[lemma] = dict(keywordbag)
lemma_to_keywordbag = dict(lemma_to_keywordbag)
user_annotation = dict()
user_annotation["bag_of_lemmas"] = bag_of_lemmas
user_annotation["lemma_to_keywordbag"] = lemma_to_keywordbag
yield user_twitter_id, user_annotation | Based on the user-related lists I have downloaded, annotate the users.
Inputs: - twitter_lists_gen: A python generator that yields a user Twitter id and a generator of Twitter lists.
- lemmatizing: A string containing one of the following: "porter", "snowball" or "wordnet".
Yields: - user_twitter_id: A Twitter user id.
- user_annotation: A python dictionary that contains two dicts:
* bag_of_lemmas: Maps emmas to multiplicity.
* lemma_to_keywordbag: A python dictionary that maps stems/lemmas to original topic keywords. | Below is the the instruction that describes the task:
### Input:
Based on the user-related lists I have downloaded, annotate the users.
Inputs: - twitter_lists_gen: A python generator that yields a user Twitter id and a generator of Twitter lists.
- lemmatizing: A string containing one of the following: "porter", "snowball" or "wordnet".
Yields: - user_twitter_id: A Twitter user id.
- user_annotation: A python dictionary that contains two dicts:
* bag_of_lemmas: Maps emmas to multiplicity.
* lemma_to_keywordbag: A python dictionary that maps stems/lemmas to original topic keywords.
### Response:
def extract_user_keywords_generator(twitter_lists_gen, lemmatizing="wordnet"):
"""
Based on the user-related lists I have downloaded, annotate the users.
Inputs: - twitter_lists_gen: A python generator that yields a user Twitter id and a generator of Twitter lists.
- lemmatizing: A string containing one of the following: "porter", "snowball" or "wordnet".
Yields: - user_twitter_id: A Twitter user id.
- user_annotation: A python dictionary that contains two dicts:
* bag_of_lemmas: Maps emmas to multiplicity.
* lemma_to_keywordbag: A python dictionary that maps stems/lemmas to original topic keywords.
"""
####################################################################################################################
# Extract keywords serially.
####################################################################################################################
for user_twitter_id, twitter_lists_list in twitter_lists_gen:
if twitter_lists_list is not None:
if "lists" in twitter_lists_list.keys():
twitter_lists_list = twitter_lists_list["lists"]
bag_of_lemmas, lemma_to_keywordbag = user_twitter_list_bag_of_words(twitter_lists_list, lemmatizing)
for lemma, keywordbag in lemma_to_keywordbag.items():
lemma_to_keywordbag[lemma] = dict(keywordbag)
lemma_to_keywordbag = dict(lemma_to_keywordbag)
user_annotation = dict()
user_annotation["bag_of_lemmas"] = bag_of_lemmas
user_annotation["lemma_to_keywordbag"] = lemma_to_keywordbag
yield user_twitter_id, user_annotation |
def list_types_view(request, semester, profile=None):
"""
View the details of a particular WorkshiftType.
"""
page_name = "Workshift Types"
full_management = utils.can_manage(request.user, semester)
any_management = utils.can_manage(request.user, semester, any_pool=True)
types = WorkshiftType.objects.all()
type_shifts = [
RegularWorkshift.objects.filter(
workshift_type=i,
pool__semester=semester,
).order_by("day")
for i in types
]
shift_edits = [
[
(
shift,
full_management or
utils.can_manage(request.user, semester, pool=shift.pool),
)
for shift in shifts
]
for shifts in type_shifts
]
return render_to_response("list_types.html", {
"page_name": page_name,
"type_tuples": zip(types, shift_edits),
"can_edit": any_management,
}, context_instance=RequestContext(request)) | View the details of a particular WorkshiftType. | Below is the the instruction that describes the task:
### Input:
View the details of a particular WorkshiftType.
### Response:
def list_types_view(request, semester, profile=None):
"""
View the details of a particular WorkshiftType.
"""
page_name = "Workshift Types"
full_management = utils.can_manage(request.user, semester)
any_management = utils.can_manage(request.user, semester, any_pool=True)
types = WorkshiftType.objects.all()
type_shifts = [
RegularWorkshift.objects.filter(
workshift_type=i,
pool__semester=semester,
).order_by("day")
for i in types
]
shift_edits = [
[
(
shift,
full_management or
utils.can_manage(request.user, semester, pool=shift.pool),
)
for shift in shifts
]
for shifts in type_shifts
]
return render_to_response("list_types.html", {
"page_name": page_name,
"type_tuples": zip(types, shift_edits),
"can_edit": any_management,
}, context_instance=RequestContext(request)) |
def outer(vector1, vector2=None):
"""
Construct the outer product of two vectors.
The second vector argument is optional, if absent the projector
of the first vector will be returned.
Args:
vector1 (ndarray): the first vector.
vector2 (ndarray): the (optional) second vector.
Returns:
np.array: The matrix |v1><v2|.
"""
if vector2 is None:
vector2 = np.array(vector1).conj()
else:
vector2 = np.array(vector2).conj()
return np.outer(vector1, vector2) | Construct the outer product of two vectors.
The second vector argument is optional, if absent the projector
of the first vector will be returned.
Args:
vector1 (ndarray): the first vector.
vector2 (ndarray): the (optional) second vector.
Returns:
np.array: The matrix |v1><v2|. | Below is the the instruction that describes the task:
### Input:
Construct the outer product of two vectors.
The second vector argument is optional, if absent the projector
of the first vector will be returned.
Args:
vector1 (ndarray): the first vector.
vector2 (ndarray): the (optional) second vector.
Returns:
np.array: The matrix |v1><v2|.
### Response:
def outer(vector1, vector2=None):
"""
Construct the outer product of two vectors.
The second vector argument is optional, if absent the projector
of the first vector will be returned.
Args:
vector1 (ndarray): the first vector.
vector2 (ndarray): the (optional) second vector.
Returns:
np.array: The matrix |v1><v2|.
"""
if vector2 is None:
vector2 = np.array(vector1).conj()
else:
vector2 = np.array(vector2).conj()
return np.outer(vector1, vector2) |
def salt_proxy():
'''
Start a proxy minion.
'''
import salt.cli.daemons
import salt.utils.platform
import multiprocessing
if '' in sys.path:
sys.path.remove('')
if salt.utils.platform.is_windows():
proxyminion = salt.cli.daemons.ProxyMinion()
proxyminion.start()
return
if '--disable-keepalive' in sys.argv:
sys.argv.remove('--disable-keepalive')
proxyminion = salt.cli.daemons.ProxyMinion()
proxyminion.start()
return
# keep one minion subprocess running
while True:
try:
queue = multiprocessing.Queue()
except Exception:
# This breaks in containers
proxyminion = salt.cli.daemons.ProxyMinion()
proxyminion.start()
return
process = multiprocessing.Process(target=proxy_minion_process, args=(queue,))
process.start()
try:
process.join()
try:
restart_delay = queue.get(block=False)
except Exception:
if process.exitcode == 0:
# Minion process ended naturally, Ctrl+C or --version
break
restart_delay = 60
if restart_delay == 0:
# Minion process ended naturally, Ctrl+C, --version, etc.
sys.exit(process.exitcode)
# delay restart to reduce flooding and allow network resources to close
time.sleep(restart_delay)
except KeyboardInterrupt:
break
# need to reset logging because new minion objects
# cause extra log handlers to accumulate
rlogger = logging.getLogger()
for handler in rlogger.handlers:
rlogger.removeHandler(handler)
logging.basicConfig() | Start a proxy minion. | Below is the the instruction that describes the task:
### Input:
Start a proxy minion.
### Response:
def salt_proxy():
'''
Start a proxy minion.
'''
import salt.cli.daemons
import salt.utils.platform
import multiprocessing
if '' in sys.path:
sys.path.remove('')
if salt.utils.platform.is_windows():
proxyminion = salt.cli.daemons.ProxyMinion()
proxyminion.start()
return
if '--disable-keepalive' in sys.argv:
sys.argv.remove('--disable-keepalive')
proxyminion = salt.cli.daemons.ProxyMinion()
proxyminion.start()
return
# keep one minion subprocess running
while True:
try:
queue = multiprocessing.Queue()
except Exception:
# This breaks in containers
proxyminion = salt.cli.daemons.ProxyMinion()
proxyminion.start()
return
process = multiprocessing.Process(target=proxy_minion_process, args=(queue,))
process.start()
try:
process.join()
try:
restart_delay = queue.get(block=False)
except Exception:
if process.exitcode == 0:
# Minion process ended naturally, Ctrl+C or --version
break
restart_delay = 60
if restart_delay == 0:
# Minion process ended naturally, Ctrl+C, --version, etc.
sys.exit(process.exitcode)
# delay restart to reduce flooding and allow network resources to close
time.sleep(restart_delay)
except KeyboardInterrupt:
break
# need to reset logging because new minion objects
# cause extra log handlers to accumulate
rlogger = logging.getLogger()
for handler in rlogger.handlers:
rlogger.removeHandler(handler)
logging.basicConfig() |
def deflections_of_galaxies_from_sub_grid(sub_grid, galaxies):
"""Compute the deflections of a list of galaxies from an input sub-grid, by summing the individual deflections \
of each galaxy's mass profile.
The deflections are calculated on the sub-grid and binned-up to the original regular grid by taking the mean value \
of every set of sub-pixels.
If no galaxies are entered into the function, an array of all zeros is returned.
Parameters
-----------
sub_grid : RegularGrid
The grid (regular or sub) of (y,x) arc-second coordinates at the centre of every unmasked pixel which the \
deflections is calculated on.
galaxies : [galaxy.Galaxy]
The galaxies whose mass profiles are used to compute the surface densities.
"""
if galaxies:
return sum(map(lambda galaxy: galaxy.deflections_from_grid(sub_grid), galaxies))
else:
return np.full((sub_grid.shape[0], 2), 0.0) | Compute the deflections of a list of galaxies from an input sub-grid, by summing the individual deflections \
of each galaxy's mass profile.
The deflections are calculated on the sub-grid and binned-up to the original regular grid by taking the mean value \
of every set of sub-pixels.
If no galaxies are entered into the function, an array of all zeros is returned.
Parameters
-----------
sub_grid : RegularGrid
The grid (regular or sub) of (y,x) arc-second coordinates at the centre of every unmasked pixel which the \
deflections is calculated on.
galaxies : [galaxy.Galaxy]
The galaxies whose mass profiles are used to compute the surface densities. | Below is the the instruction that describes the task:
### Input:
Compute the deflections of a list of galaxies from an input sub-grid, by summing the individual deflections \
of each galaxy's mass profile.
The deflections are calculated on the sub-grid and binned-up to the original regular grid by taking the mean value \
of every set of sub-pixels.
If no galaxies are entered into the function, an array of all zeros is returned.
Parameters
-----------
sub_grid : RegularGrid
The grid (regular or sub) of (y,x) arc-second coordinates at the centre of every unmasked pixel which the \
deflections is calculated on.
galaxies : [galaxy.Galaxy]
The galaxies whose mass profiles are used to compute the surface densities.
### Response:
def deflections_of_galaxies_from_sub_grid(sub_grid, galaxies):
"""Compute the deflections of a list of galaxies from an input sub-grid, by summing the individual deflections \
of each galaxy's mass profile.
The deflections are calculated on the sub-grid and binned-up to the original regular grid by taking the mean value \
of every set of sub-pixels.
If no galaxies are entered into the function, an array of all zeros is returned.
Parameters
-----------
sub_grid : RegularGrid
The grid (regular or sub) of (y,x) arc-second coordinates at the centre of every unmasked pixel which the \
deflections is calculated on.
galaxies : [galaxy.Galaxy]
The galaxies whose mass profiles are used to compute the surface densities.
"""
if galaxies:
return sum(map(lambda galaxy: galaxy.deflections_from_grid(sub_grid), galaxies))
else:
return np.full((sub_grid.shape[0], 2), 0.0) |
def get_column_type(column, column_values):
"""
Returns the type of the given column based on its row values on the given RunSetResult.
@param column: the column to return the correct ColumnType for
@param column_values: the column values to consider
@return: a tuple of a type object describing the column - the concrete ColumnType is stored in the attribute 'type',
the display unit of the column, which may be None,
the source unit of the column, which may be None,
and the scale factor to convert from the source unit to the display unit.
If no scaling is necessary for conversion, this value is 1.
"""
try:
return _get_column_type_heur(column, column_values)
except util.TableDefinitionError as e:
logging.error("Column type couldn't be determined: {}".format(e.message))
return ColumnType.text, None, None, 1 | Returns the type of the given column based on its row values on the given RunSetResult.
@param column: the column to return the correct ColumnType for
@param column_values: the column values to consider
@return: a tuple of a type object describing the column - the concrete ColumnType is stored in the attribute 'type',
the display unit of the column, which may be None,
the source unit of the column, which may be None,
and the scale factor to convert from the source unit to the display unit.
If no scaling is necessary for conversion, this value is 1. | Below is the the instruction that describes the task:
### Input:
Returns the type of the given column based on its row values on the given RunSetResult.
@param column: the column to return the correct ColumnType for
@param column_values: the column values to consider
@return: a tuple of a type object describing the column - the concrete ColumnType is stored in the attribute 'type',
the display unit of the column, which may be None,
the source unit of the column, which may be None,
and the scale factor to convert from the source unit to the display unit.
If no scaling is necessary for conversion, this value is 1.
### Response:
def get_column_type(column, column_values):
"""
Returns the type of the given column based on its row values on the given RunSetResult.
@param column: the column to return the correct ColumnType for
@param column_values: the column values to consider
@return: a tuple of a type object describing the column - the concrete ColumnType is stored in the attribute 'type',
the display unit of the column, which may be None,
the source unit of the column, which may be None,
and the scale factor to convert from the source unit to the display unit.
If no scaling is necessary for conversion, this value is 1.
"""
try:
return _get_column_type_heur(column, column_values)
except util.TableDefinitionError as e:
logging.error("Column type couldn't be determined: {}".format(e.message))
return ColumnType.text, None, None, 1 |
def _get_ipv4_from_binary(self, bin_addr):
"""Converts binary address to Ipv4 format."""
return socket.inet_ntop(socket.AF_INET, struct.pack("!L", bin_addr)) | Converts binary address to Ipv4 format. | Below is the the instruction that describes the task:
### Input:
Converts binary address to Ipv4 format.
### Response:
def _get_ipv4_from_binary(self, bin_addr):
"""Converts binary address to Ipv4 format."""
return socket.inet_ntop(socket.AF_INET, struct.pack("!L", bin_addr)) |
def start(self):
""" SubFinder 入口,开始函数
"""
self.logger.info('开始')
videofiles = self._filter_path(self.path)
l = len(videofiles)
if l == 0:
self.logger.info(
'在 {} 下没有发现视频文件'.format(self.path))
return
else:
self.logger.info('找到 {} 个视频文件'.format(l))
for f in videofiles:
self._history[f] = []
self.pool.spawn(self._download, f)
self.pool.join()
self.logger.info('='*20 + '下载完成' + '='*20)
for v, subs in self._history.items():
basename = os.path.basename(v)
self.logger.info(
'{}: 下载 {} 个字幕'.format(basename, len(subs))) | SubFinder 入口,开始函数 | Below is the the instruction that describes the task:
### Input:
SubFinder 入口,开始函数
### Response:
def start(self):
""" SubFinder 入口,开始函数
"""
self.logger.info('开始')
videofiles = self._filter_path(self.path)
l = len(videofiles)
if l == 0:
self.logger.info(
'在 {} 下没有发现视频文件'.format(self.path))
return
else:
self.logger.info('找到 {} 个视频文件'.format(l))
for f in videofiles:
self._history[f] = []
self.pool.spawn(self._download, f)
self.pool.join()
self.logger.info('='*20 + '下载完成' + '='*20)
for v, subs in self._history.items():
basename = os.path.basename(v)
self.logger.info(
'{}: 下载 {} 个字幕'.format(basename, len(subs))) |
def truncate(text, length=255):
"""
Splits the message into a list of strings of of length `length`
Args:
text (str): The text to be divided
length (int, optional): The length of the chunks of text. \
Defaults to 255.
Returns:
list: Text divided into chunks of length `length`
"""
lines = []
i = 0
while i < len(text) - 1:
try:
lines.append(text[i:i+length])
i += length
except IndexError as e:
lines.append(text[i:])
return lines | Splits the message into a list of strings of of length `length`
Args:
text (str): The text to be divided
length (int, optional): The length of the chunks of text. \
Defaults to 255.
Returns:
list: Text divided into chunks of length `length` | Below is the the instruction that describes the task:
### Input:
Splits the message into a list of strings of of length `length`
Args:
text (str): The text to be divided
length (int, optional): The length of the chunks of text. \
Defaults to 255.
Returns:
list: Text divided into chunks of length `length`
### Response:
def truncate(text, length=255):
"""
Splits the message into a list of strings of of length `length`
Args:
text (str): The text to be divided
length (int, optional): The length of the chunks of text. \
Defaults to 255.
Returns:
list: Text divided into chunks of length `length`
"""
lines = []
i = 0
while i < len(text) - 1:
try:
lines.append(text[i:i+length])
i += length
except IndexError as e:
lines.append(text[i:])
return lines |
def extract_(dstore, dspath):
"""
Extracts an HDF5 path object from the datastore, for instance
extract(dstore, 'sitecol').
"""
obj = dstore[dspath]
if isinstance(obj, Dataset):
return ArrayWrapper(obj.value, obj.attrs)
elif isinstance(obj, Group):
return ArrayWrapper(numpy.array(list(obj)), obj.attrs)
else:
return obj | Extracts an HDF5 path object from the datastore, for instance
extract(dstore, 'sitecol'). | Below is the the instruction that describes the task:
### Input:
Extracts an HDF5 path object from the datastore, for instance
extract(dstore, 'sitecol').
### Response:
def extract_(dstore, dspath):
"""
Extracts an HDF5 path object from the datastore, for instance
extract(dstore, 'sitecol').
"""
obj = dstore[dspath]
if isinstance(obj, Dataset):
return ArrayWrapper(obj.value, obj.attrs)
elif isinstance(obj, Group):
return ArrayWrapper(numpy.array(list(obj)), obj.attrs)
else:
return obj |
def plot(self, fmt=None):
"""
Make a simple plot of the legend.
Simply calls Decor.plot() on all of its members.
TODO: Build a more attractive plot.
"""
for d in self.__list:
d.plot(fmt=fmt)
return None | Make a simple plot of the legend.
Simply calls Decor.plot() on all of its members.
TODO: Build a more attractive plot. | Below is the the instruction that describes the task:
### Input:
Make a simple plot of the legend.
Simply calls Decor.plot() on all of its members.
TODO: Build a more attractive plot.
### Response:
def plot(self, fmt=None):
"""
Make a simple plot of the legend.
Simply calls Decor.plot() on all of its members.
TODO: Build a more attractive plot.
"""
for d in self.__list:
d.plot(fmt=fmt)
return None |
def mutate_add_connection(self, config):
'''
Attempt to add a new connection, the only restriction being that the output
node cannot be one of the network input pins.
'''
possible_outputs = list(iterkeys(self.nodes))
out_node = choice(possible_outputs)
possible_inputs = possible_outputs + config.input_keys
in_node = choice(possible_inputs)
if in_node == out_node:
return
# # Don't duplicate connections.
# key = (in_node, out_node)
# if key in self.connections:
# return
cg = self.create_connection(config, in_node, out_node)
self.connections[cg.key] = cg | Attempt to add a new connection, the only restriction being that the output
node cannot be one of the network input pins. | Below is the the instruction that describes the task:
### Input:
Attempt to add a new connection, the only restriction being that the output
node cannot be one of the network input pins.
### Response:
def mutate_add_connection(self, config):
'''
Attempt to add a new connection, the only restriction being that the output
node cannot be one of the network input pins.
'''
possible_outputs = list(iterkeys(self.nodes))
out_node = choice(possible_outputs)
possible_inputs = possible_outputs + config.input_keys
in_node = choice(possible_inputs)
if in_node == out_node:
return
# # Don't duplicate connections.
# key = (in_node, out_node)
# if key in self.connections:
# return
cg = self.create_connection(config, in_node, out_node)
self.connections[cg.key] = cg |
def Canonicalize(node, output=None, **kw):
'''Canonicalize(node, output=None, **kw) -> UTF-8
Canonicalize a DOM document/element node and all descendents.
Return the text; if output is specified then output.write will
be called to output the text and None will be returned
Keyword parameters:
nsdict: a dictionary of prefix:uri namespace entries
assumed to exist in the surrounding context
comments: keep comments if non-zero (default is 0)
subset: Canonical XML subsetting resulting from XPath
(default is [])
unsuppressedPrefixes: do exclusive C14N, and this specifies the
prefixes that should be inherited.
'''
if output:
apply(_implementation, (node, output.write), kw)
else:
s = StringIO.StringIO()
apply(_implementation, (node, s.write), kw)
return s.getvalue() | Canonicalize(node, output=None, **kw) -> UTF-8
Canonicalize a DOM document/element node and all descendents.
Return the text; if output is specified then output.write will
be called to output the text and None will be returned
Keyword parameters:
nsdict: a dictionary of prefix:uri namespace entries
assumed to exist in the surrounding context
comments: keep comments if non-zero (default is 0)
subset: Canonical XML subsetting resulting from XPath
(default is [])
unsuppressedPrefixes: do exclusive C14N, and this specifies the
prefixes that should be inherited. | Below is the the instruction that describes the task:
### Input:
Canonicalize(node, output=None, **kw) -> UTF-8
Canonicalize a DOM document/element node and all descendents.
Return the text; if output is specified then output.write will
be called to output the text and None will be returned
Keyword parameters:
nsdict: a dictionary of prefix:uri namespace entries
assumed to exist in the surrounding context
comments: keep comments if non-zero (default is 0)
subset: Canonical XML subsetting resulting from XPath
(default is [])
unsuppressedPrefixes: do exclusive C14N, and this specifies the
prefixes that should be inherited.
### Response:
def Canonicalize(node, output=None, **kw):
'''Canonicalize(node, output=None, **kw) -> UTF-8
Canonicalize a DOM document/element node and all descendents.
Return the text; if output is specified then output.write will
be called to output the text and None will be returned
Keyword parameters:
nsdict: a dictionary of prefix:uri namespace entries
assumed to exist in the surrounding context
comments: keep comments if non-zero (default is 0)
subset: Canonical XML subsetting resulting from XPath
(default is [])
unsuppressedPrefixes: do exclusive C14N, and this specifies the
prefixes that should be inherited.
'''
if output:
apply(_implementation, (node, output.write), kw)
else:
s = StringIO.StringIO()
apply(_implementation, (node, s.write), kw)
return s.getvalue() |
def run_fib_with_clear(r):
""" Run Fibonacci generator r times. """
for i in range(r):
if randint(RAND_MIN, RAND_MAX) == RAND_MIN:
fib.cache_clear()
fib2.cache_clear()
res = fib(PythonInt(FIB))
if RESULT != res:
raise ValueError("Expected %d, Got %d" % (RESULT, res)) | Run Fibonacci generator r times. | Below is the the instruction that describes the task:
### Input:
Run Fibonacci generator r times.
### Response:
def run_fib_with_clear(r):
""" Run Fibonacci generator r times. """
for i in range(r):
if randint(RAND_MIN, RAND_MAX) == RAND_MIN:
fib.cache_clear()
fib2.cache_clear()
res = fib(PythonInt(FIB))
if RESULT != res:
raise ValueError("Expected %d, Got %d" % (RESULT, res)) |
def next_paragraph_style(self):
"""
|_ParagraphStyle| object representing the style to be applied
automatically to a new paragraph inserted after a paragraph of this
style. Returns self if no next paragraph style is defined. Assigning
|None| or *self* removes the setting such that new paragraphs are
created using this same style.
"""
next_style_elm = self._element.next_style
if next_style_elm is None:
return self
if next_style_elm.type != WD_STYLE_TYPE.PARAGRAPH:
return self
return StyleFactory(next_style_elm) | |_ParagraphStyle| object representing the style to be applied
automatically to a new paragraph inserted after a paragraph of this
style. Returns self if no next paragraph style is defined. Assigning
|None| or *self* removes the setting such that new paragraphs are
created using this same style. | Below is the the instruction that describes the task:
### Input:
|_ParagraphStyle| object representing the style to be applied
automatically to a new paragraph inserted after a paragraph of this
style. Returns self if no next paragraph style is defined. Assigning
|None| or *self* removes the setting such that new paragraphs are
created using this same style.
### Response:
def next_paragraph_style(self):
"""
|_ParagraphStyle| object representing the style to be applied
automatically to a new paragraph inserted after a paragraph of this
style. Returns self if no next paragraph style is defined. Assigning
|None| or *self* removes the setting such that new paragraphs are
created using this same style.
"""
next_style_elm = self._element.next_style
if next_style_elm is None:
return self
if next_style_elm.type != WD_STYLE_TYPE.PARAGRAPH:
return self
return StyleFactory(next_style_elm) |
def find_element(self, name, type=ElementType.ANY):
"""Find an elemnent in the document with the given name - could be a PartStudio, Assembly or blob.
Args:
name: str
the name of the element.
Returns:
- onshapepy.uri of the element
"""
for e in self.e_list:
# if a type is specified and this isn't it, move to the next loop.
if type.value and not e['elementType'] == type:
continue
if e["name"] == name:
uri = self.uri
uri.eid = e["id"]
return uri | Find an elemnent in the document with the given name - could be a PartStudio, Assembly or blob.
Args:
name: str
the name of the element.
Returns:
- onshapepy.uri of the element | Below is the the instruction that describes the task:
### Input:
Find an elemnent in the document with the given name - could be a PartStudio, Assembly or blob.
Args:
name: str
the name of the element.
Returns:
- onshapepy.uri of the element
### Response:
def find_element(self, name, type=ElementType.ANY):
"""Find an elemnent in the document with the given name - could be a PartStudio, Assembly or blob.
Args:
name: str
the name of the element.
Returns:
- onshapepy.uri of the element
"""
for e in self.e_list:
# if a type is specified and this isn't it, move to the next loop.
if type.value and not e['elementType'] == type:
continue
if e["name"] == name:
uri = self.uri
uri.eid = e["id"]
return uri |
def blackbody(self, T):
"""Calculate the contribution of a blackbody through this filter. *T* is the
blackbody temperature in Kelvin. Returns a band-averaged spectrum in
f_λ units.
We use the composite Simpson's rule to integrate over the points at
which the filter response is sampled. Note that this is a different
technique than used by `synphot`, and so may give slightly different
answers than that function.
"""
from scipy.integrate import simps
d = self._ensure_data()
# factor of pi is going from specific intensity (sr^-1) to unidirectional
# inner factor of 1e-8 is Å to cm
# outer factor of 1e-8 is f_λ in cm^-1 to f_λ in Å^-1
from .cgs import blambda
numer_samples = d.resp * np.pi * blambda(d.wlen * 1e-8, T) * 1e-8
numer = simps(numer_samples, d.wlen)
denom = simps(d.resp, d.wlen)
return numer / denom | Calculate the contribution of a blackbody through this filter. *T* is the
blackbody temperature in Kelvin. Returns a band-averaged spectrum in
f_λ units.
We use the composite Simpson's rule to integrate over the points at
which the filter response is sampled. Note that this is a different
technique than used by `synphot`, and so may give slightly different
answers than that function. | Below is the the instruction that describes the task:
### Input:
Calculate the contribution of a blackbody through this filter. *T* is the
blackbody temperature in Kelvin. Returns a band-averaged spectrum in
f_λ units.
We use the composite Simpson's rule to integrate over the points at
which the filter response is sampled. Note that this is a different
technique than used by `synphot`, and so may give slightly different
answers than that function.
### Response:
def blackbody(self, T):
"""Calculate the contribution of a blackbody through this filter. *T* is the
blackbody temperature in Kelvin. Returns a band-averaged spectrum in
f_λ units.
We use the composite Simpson's rule to integrate over the points at
which the filter response is sampled. Note that this is a different
technique than used by `synphot`, and so may give slightly different
answers than that function.
"""
from scipy.integrate import simps
d = self._ensure_data()
# factor of pi is going from specific intensity (sr^-1) to unidirectional
# inner factor of 1e-8 is Å to cm
# outer factor of 1e-8 is f_λ in cm^-1 to f_λ in Å^-1
from .cgs import blambda
numer_samples = d.resp * np.pi * blambda(d.wlen * 1e-8, T) * 1e-8
numer = simps(numer_samples, d.wlen)
denom = simps(d.resp, d.wlen)
return numer / denom |
def find_route_by_view_name(self, view_name):
"""Find a route in the router based on the specified view name.
:param view_name: string of view name to search by
:return: tuple containing (uri, Route)
"""
if not view_name:
return (None, None)
for uri, route in self.routes_all.items():
if route.name == view_name:
return uri, route
return (None, None) | Find a route in the router based on the specified view name.
:param view_name: string of view name to search by
:return: tuple containing (uri, Route) | Below is the the instruction that describes the task:
### Input:
Find a route in the router based on the specified view name.
:param view_name: string of view name to search by
:return: tuple containing (uri, Route)
### Response:
def find_route_by_view_name(self, view_name):
"""Find a route in the router based on the specified view name.
:param view_name: string of view name to search by
:return: tuple containing (uri, Route)
"""
if not view_name:
return (None, None)
for uri, route in self.routes_all.items():
if route.name == view_name:
return uri, route
return (None, None) |
def main():
"""
Main Entry Point
"""
args = parser.parse_args()
filehandle = args.filename
delim = args.delimiter
columns = args.columns[0]
if not columns:
for line in filehandle:
print line,
exit(0)
cs = list(chain.from_iterable(columns))
fields = (extract_fields(line, delim, cs) for line in filehandle)
for line in fields:
print ' '.join(line)
args.filename.close() | Main Entry Point | Below is the the instruction that describes the task:
### Input:
Main Entry Point
### Response:
def main():
"""
Main Entry Point
"""
args = parser.parse_args()
filehandle = args.filename
delim = args.delimiter
columns = args.columns[0]
if not columns:
for line in filehandle:
print line,
exit(0)
cs = list(chain.from_iterable(columns))
fields = (extract_fields(line, delim, cs) for line in filehandle)
for line in fields:
print ' '.join(line)
args.filename.close() |
def csep_close(ra, rb):
"""Return the closest separation vector between each point in one set,
and every point in a second set.
Parameters
----------
ra, rb: float array-like, shape (n, d) and (m, d) in d dimensions.
Two sets of points. `ra` is the set of points from which the closest
separation vectors to points `rb` are calculated.
Returns
-------
csep_close: float array-like, shape (n, m, d)
csep[i] is the closest separation vector from point ra[j]
to any point rb[i].
Note the un-intuitive vector direction.
"""
seps = csep(ra, rb)
seps_sq = np.sum(np.square(seps), axis=-1)
i_close = np.argmin(seps_sq, axis=-1)
i_all = list(range(len(seps)))
sep = seps[i_all, i_close]
sep_sq = seps_sq[i_all, i_close]
return sep, sep_sq | Return the closest separation vector between each point in one set,
and every point in a second set.
Parameters
----------
ra, rb: float array-like, shape (n, d) and (m, d) in d dimensions.
Two sets of points. `ra` is the set of points from which the closest
separation vectors to points `rb` are calculated.
Returns
-------
csep_close: float array-like, shape (n, m, d)
csep[i] is the closest separation vector from point ra[j]
to any point rb[i].
Note the un-intuitive vector direction. | Below is the the instruction that describes the task:
### Input:
Return the closest separation vector between each point in one set,
and every point in a second set.
Parameters
----------
ra, rb: float array-like, shape (n, d) and (m, d) in d dimensions.
Two sets of points. `ra` is the set of points from which the closest
separation vectors to points `rb` are calculated.
Returns
-------
csep_close: float array-like, shape (n, m, d)
csep[i] is the closest separation vector from point ra[j]
to any point rb[i].
Note the un-intuitive vector direction.
### Response:
def csep_close(ra, rb):
"""Return the closest separation vector between each point in one set,
and every point in a second set.
Parameters
----------
ra, rb: float array-like, shape (n, d) and (m, d) in d dimensions.
Two sets of points. `ra` is the set of points from which the closest
separation vectors to points `rb` are calculated.
Returns
-------
csep_close: float array-like, shape (n, m, d)
csep[i] is the closest separation vector from point ra[j]
to any point rb[i].
Note the un-intuitive vector direction.
"""
seps = csep(ra, rb)
seps_sq = np.sum(np.square(seps), axis=-1)
i_close = np.argmin(seps_sq, axis=-1)
i_all = list(range(len(seps)))
sep = seps[i_all, i_close]
sep_sq = seps_sq[i_all, i_close]
return sep, sep_sq |
def which_bin(exes):
'''
Scan over some possible executables and return the first one that is found
'''
if not isinstance(exes, Iterable):
return None
for exe in exes:
path = which(exe)
if not path:
continue
return path
return None | Scan over some possible executables and return the first one that is found | Below is the the instruction that describes the task:
### Input:
Scan over some possible executables and return the first one that is found
### Response:
def which_bin(exes):
'''
Scan over some possible executables and return the first one that is found
'''
if not isinstance(exes, Iterable):
return None
for exe in exes:
path = which(exe)
if not path:
continue
return path
return None |
def ising_to_qubo(h, J, offset=0.0):
"""Convert an Ising problem to a QUBO problem.
Map an Ising model defined on spins (variables with {-1, +1} values) to quadratic
unconstrained binary optimization (QUBO) formulation :math:`x' Q x` defined over
binary variables (0 or 1 values), where the linear term is contained along the diagonal of Q.
Return matrix Q that defines the model as well as the offset in energy between the two
problem formulations:
.. math::
s' J s + h' s = offset + x' Q x
See :meth:`~dimod.utilities.qubo_to_ising` for the inverse function.
Args:
h (dict[variable, bias]):
Linear biases as a dict of the form {v: bias, ...}, where keys are variables of
the model and values are biases.
J (dict[(variable, variable), bias]):
Quadratic biases as a dict of the form {(u, v): bias, ...}, where keys
are 2-tuples of variables of the model and values are quadratic biases
associated with the pair of variables (the interaction).
offset (numeric, optional, default=0):
Constant offset to be applied to the energy. Default 0.
Returns:
(dict, float): A 2-tuple containing:
dict: QUBO coefficients.
float: New energy offset.
Examples:
This example converts an Ising problem of two variables that have positive
biases of value 1 and are positively coupled with an interaction of value 1
to a QUBO problem.
>>> import dimod
>>> h = {1: 1, 2: 1}
>>> J = {(1, 2): 1}
>>> dimod.ising_to_qubo(h, J, 0.5) # doctest: +SKIP
({(1, 1): 0.0, (1, 2): 4.0, (2, 2): 0.0}, -0.5)
"""
# the linear biases are the easiest
q = {(v, v): 2. * bias for v, bias in iteritems(h)}
# next the quadratic biases
for (u, v), bias in iteritems(J):
if bias == 0.0:
continue
q[(u, v)] = 4. * bias
q[(u, u)] -= 2. * bias
q[(v, v)] -= 2. * bias
# finally calculate the offset
offset += sum(itervalues(J)) - sum(itervalues(h))
return q, offset | Convert an Ising problem to a QUBO problem.
Map an Ising model defined on spins (variables with {-1, +1} values) to quadratic
unconstrained binary optimization (QUBO) formulation :math:`x' Q x` defined over
binary variables (0 or 1 values), where the linear term is contained along the diagonal of Q.
Return matrix Q that defines the model as well as the offset in energy between the two
problem formulations:
.. math::
s' J s + h' s = offset + x' Q x
See :meth:`~dimod.utilities.qubo_to_ising` for the inverse function.
Args:
h (dict[variable, bias]):
Linear biases as a dict of the form {v: bias, ...}, where keys are variables of
the model and values are biases.
J (dict[(variable, variable), bias]):
Quadratic biases as a dict of the form {(u, v): bias, ...}, where keys
are 2-tuples of variables of the model and values are quadratic biases
associated with the pair of variables (the interaction).
offset (numeric, optional, default=0):
Constant offset to be applied to the energy. Default 0.
Returns:
(dict, float): A 2-tuple containing:
dict: QUBO coefficients.
float: New energy offset.
Examples:
This example converts an Ising problem of two variables that have positive
biases of value 1 and are positively coupled with an interaction of value 1
to a QUBO problem.
>>> import dimod
>>> h = {1: 1, 2: 1}
>>> J = {(1, 2): 1}
>>> dimod.ising_to_qubo(h, J, 0.5) # doctest: +SKIP
({(1, 1): 0.0, (1, 2): 4.0, (2, 2): 0.0}, -0.5) | Below is the the instruction that describes the task:
### Input:
Convert an Ising problem to a QUBO problem.
Map an Ising model defined on spins (variables with {-1, +1} values) to quadratic
unconstrained binary optimization (QUBO) formulation :math:`x' Q x` defined over
binary variables (0 or 1 values), where the linear term is contained along the diagonal of Q.
Return matrix Q that defines the model as well as the offset in energy between the two
problem formulations:
.. math::
s' J s + h' s = offset + x' Q x
See :meth:`~dimod.utilities.qubo_to_ising` for the inverse function.
Args:
h (dict[variable, bias]):
Linear biases as a dict of the form {v: bias, ...}, where keys are variables of
the model and values are biases.
J (dict[(variable, variable), bias]):
Quadratic biases as a dict of the form {(u, v): bias, ...}, where keys
are 2-tuples of variables of the model and values are quadratic biases
associated with the pair of variables (the interaction).
offset (numeric, optional, default=0):
Constant offset to be applied to the energy. Default 0.
Returns:
(dict, float): A 2-tuple containing:
dict: QUBO coefficients.
float: New energy offset.
Examples:
This example converts an Ising problem of two variables that have positive
biases of value 1 and are positively coupled with an interaction of value 1
to a QUBO problem.
>>> import dimod
>>> h = {1: 1, 2: 1}
>>> J = {(1, 2): 1}
>>> dimod.ising_to_qubo(h, J, 0.5) # doctest: +SKIP
({(1, 1): 0.0, (1, 2): 4.0, (2, 2): 0.0}, -0.5)
### Response:
def ising_to_qubo(h, J, offset=0.0):
"""Convert an Ising problem to a QUBO problem.
Map an Ising model defined on spins (variables with {-1, +1} values) to quadratic
unconstrained binary optimization (QUBO) formulation :math:`x' Q x` defined over
binary variables (0 or 1 values), where the linear term is contained along the diagonal of Q.
Return matrix Q that defines the model as well as the offset in energy between the two
problem formulations:
.. math::
s' J s + h' s = offset + x' Q x
See :meth:`~dimod.utilities.qubo_to_ising` for the inverse function.
Args:
h (dict[variable, bias]):
Linear biases as a dict of the form {v: bias, ...}, where keys are variables of
the model and values are biases.
J (dict[(variable, variable), bias]):
Quadratic biases as a dict of the form {(u, v): bias, ...}, where keys
are 2-tuples of variables of the model and values are quadratic biases
associated with the pair of variables (the interaction).
offset (numeric, optional, default=0):
Constant offset to be applied to the energy. Default 0.
Returns:
(dict, float): A 2-tuple containing:
dict: QUBO coefficients.
float: New energy offset.
Examples:
This example converts an Ising problem of two variables that have positive
biases of value 1 and are positively coupled with an interaction of value 1
to a QUBO problem.
>>> import dimod
>>> h = {1: 1, 2: 1}
>>> J = {(1, 2): 1}
>>> dimod.ising_to_qubo(h, J, 0.5) # doctest: +SKIP
({(1, 1): 0.0, (1, 2): 4.0, (2, 2): 0.0}, -0.5)
"""
# the linear biases are the easiest
q = {(v, v): 2. * bias for v, bias in iteritems(h)}
# next the quadratic biases
for (u, v), bias in iteritems(J):
if bias == 0.0:
continue
q[(u, v)] = 4. * bias
q[(u, u)] -= 2. * bias
q[(v, v)] -= 2. * bias
# finally calculate the offset
offset += sum(itervalues(J)) - sum(itervalues(h))
return q, offset |
def _AddClearFieldMethod(message_descriptor, cls):
"""Helper for _AddMessageMethods()."""
def ClearField(self, field_name):
try:
field = message_descriptor.fields_by_name[field_name]
except KeyError:
try:
field = message_descriptor.oneofs_by_name[field_name]
if field in self._oneofs:
field = self._oneofs[field]
else:
return
except KeyError:
raise ValueError('Protocol message %s() has no "%s" field.' %
(message_descriptor.name, field_name))
if field in self._fields:
# To match the C++ implementation, we need to invalidate iterators
# for map fields when ClearField() happens.
if hasattr(self._fields[field], 'InvalidateIterators'):
self._fields[field].InvalidateIterators()
# Note: If the field is a sub-message, its listener will still point
# at us. That's fine, because the worst than can happen is that it
# will call _Modified() and invalidate our byte size. Big deal.
del self._fields[field]
if self._oneofs.get(field.containing_oneof, None) is field:
del self._oneofs[field.containing_oneof]
# Always call _Modified() -- even if nothing was changed, this is
# a mutating method, and thus calling it should cause the field to become
# present in the parent message.
self._Modified()
cls.ClearField = ClearField | Helper for _AddMessageMethods(). | Below is the the instruction that describes the task:
### Input:
Helper for _AddMessageMethods().
### Response:
def _AddClearFieldMethod(message_descriptor, cls):
"""Helper for _AddMessageMethods()."""
def ClearField(self, field_name):
try:
field = message_descriptor.fields_by_name[field_name]
except KeyError:
try:
field = message_descriptor.oneofs_by_name[field_name]
if field in self._oneofs:
field = self._oneofs[field]
else:
return
except KeyError:
raise ValueError('Protocol message %s() has no "%s" field.' %
(message_descriptor.name, field_name))
if field in self._fields:
# To match the C++ implementation, we need to invalidate iterators
# for map fields when ClearField() happens.
if hasattr(self._fields[field], 'InvalidateIterators'):
self._fields[field].InvalidateIterators()
# Note: If the field is a sub-message, its listener will still point
# at us. That's fine, because the worst than can happen is that it
# will call _Modified() and invalidate our byte size. Big deal.
del self._fields[field]
if self._oneofs.get(field.containing_oneof, None) is field:
del self._oneofs[field.containing_oneof]
# Always call _Modified() -- even if nothing was changed, this is
# a mutating method, and thus calling it should cause the field to become
# present in the parent message.
self._Modified()
cls.ClearField = ClearField |
def log(self, from_date=None, to_date=None, branches=None, encoding='utf-8'):
"""Read the commit log from the repository.
The method returns the Git log of the repository using the
following options:
git log --raw --numstat --pretty=fuller --decorate=full
--all --reverse --topo-order --parents -M -C -c
--remotes=origin
When `from_date` is given, it gets the commits equal or older
than that date. This date is given in a datetime object.
The list of branches is a list of strings, with the names of the
branches to fetch. If the list of branches is empty, no commit
is fetched. If the list of branches is None, all commits
for all branches will be fetched.
:param from_date: fetch commits newer than a specific
date (inclusive)
:param branches: names of branches to fetch from (default: None)
:param encoding: encode the log using this format
:returns: a generator where each item is a line from the log
:raises EmptyRepositoryError: when the repository is empty and
the action cannot be performed
:raises RepositoryError: when an error occurs fetching the log
"""
if self.is_empty():
logger.warning("Git %s repository is empty; unable to get the log",
self.uri)
raise EmptyRepositoryError(repository=self.uri)
cmd_log = ['git', 'log', '--reverse', '--topo-order']
cmd_log.extend(self.GIT_PRETTY_OUTPUT_OPTS)
if from_date:
dt = from_date.strftime("%Y-%m-%d %H:%M:%S %z")
cmd_log.append('--since=' + dt)
if to_date:
dt = to_date.strftime("%Y-%m-%d %H:%M:%S %z")
cmd_log.append('--until=' + dt)
if branches is None:
cmd_log.extend(['--branches', '--tags', '--remotes=origin'])
elif len(branches) == 0:
cmd_log.append('--max-count=0')
else:
branches = ['refs/heads/' + branch for branch in branches]
cmd_log.extend(branches)
for line in self._exec_nb(cmd_log, cwd=self.dirpath, env=self.gitenv):
yield line
logger.debug("Git log fetched from %s repository (%s)",
self.uri, self.dirpath) | Read the commit log from the repository.
The method returns the Git log of the repository using the
following options:
git log --raw --numstat --pretty=fuller --decorate=full
--all --reverse --topo-order --parents -M -C -c
--remotes=origin
When `from_date` is given, it gets the commits equal or older
than that date. This date is given in a datetime object.
The list of branches is a list of strings, with the names of the
branches to fetch. If the list of branches is empty, no commit
is fetched. If the list of branches is None, all commits
for all branches will be fetched.
:param from_date: fetch commits newer than a specific
date (inclusive)
:param branches: names of branches to fetch from (default: None)
:param encoding: encode the log using this format
:returns: a generator where each item is a line from the log
:raises EmptyRepositoryError: when the repository is empty and
the action cannot be performed
:raises RepositoryError: when an error occurs fetching the log | Below is the the instruction that describes the task:
### Input:
Read the commit log from the repository.
The method returns the Git log of the repository using the
following options:
git log --raw --numstat --pretty=fuller --decorate=full
--all --reverse --topo-order --parents -M -C -c
--remotes=origin
When `from_date` is given, it gets the commits equal or older
than that date. This date is given in a datetime object.
The list of branches is a list of strings, with the names of the
branches to fetch. If the list of branches is empty, no commit
is fetched. If the list of branches is None, all commits
for all branches will be fetched.
:param from_date: fetch commits newer than a specific
date (inclusive)
:param branches: names of branches to fetch from (default: None)
:param encoding: encode the log using this format
:returns: a generator where each item is a line from the log
:raises EmptyRepositoryError: when the repository is empty and
the action cannot be performed
:raises RepositoryError: when an error occurs fetching the log
### Response:
def log(self, from_date=None, to_date=None, branches=None, encoding='utf-8'):
"""Read the commit log from the repository.
The method returns the Git log of the repository using the
following options:
git log --raw --numstat --pretty=fuller --decorate=full
--all --reverse --topo-order --parents -M -C -c
--remotes=origin
When `from_date` is given, it gets the commits equal or older
than that date. This date is given in a datetime object.
The list of branches is a list of strings, with the names of the
branches to fetch. If the list of branches is empty, no commit
is fetched. If the list of branches is None, all commits
for all branches will be fetched.
:param from_date: fetch commits newer than a specific
date (inclusive)
:param branches: names of branches to fetch from (default: None)
:param encoding: encode the log using this format
:returns: a generator where each item is a line from the log
:raises EmptyRepositoryError: when the repository is empty and
the action cannot be performed
:raises RepositoryError: when an error occurs fetching the log
"""
if self.is_empty():
logger.warning("Git %s repository is empty; unable to get the log",
self.uri)
raise EmptyRepositoryError(repository=self.uri)
cmd_log = ['git', 'log', '--reverse', '--topo-order']
cmd_log.extend(self.GIT_PRETTY_OUTPUT_OPTS)
if from_date:
dt = from_date.strftime("%Y-%m-%d %H:%M:%S %z")
cmd_log.append('--since=' + dt)
if to_date:
dt = to_date.strftime("%Y-%m-%d %H:%M:%S %z")
cmd_log.append('--until=' + dt)
if branches is None:
cmd_log.extend(['--branches', '--tags', '--remotes=origin'])
elif len(branches) == 0:
cmd_log.append('--max-count=0')
else:
branches = ['refs/heads/' + branch for branch in branches]
cmd_log.extend(branches)
for line in self._exec_nb(cmd_log, cwd=self.dirpath, env=self.gitenv):
yield line
logger.debug("Git log fetched from %s repository (%s)",
self.uri, self.dirpath) |
def log_transition(self, transition, from_state, instance, *args, **kwargs):
"""Generic transition logging."""
save = kwargs.pop('save', True)
log = kwargs.pop('log', True)
super(Workflow, self).log_transition(
transition, from_state, instance, *args, **kwargs)
if save:
instance.save()
if log:
self.db_log(transition, from_state, instance, *args, **kwargs) | Generic transition logging. | Below is the the instruction that describes the task:
### Input:
Generic transition logging.
### Response:
def log_transition(self, transition, from_state, instance, *args, **kwargs):
"""Generic transition logging."""
save = kwargs.pop('save', True)
log = kwargs.pop('log', True)
super(Workflow, self).log_transition(
transition, from_state, instance, *args, **kwargs)
if save:
instance.save()
if log:
self.db_log(transition, from_state, instance, *args, **kwargs) |
def get_requisite_objectives(self, objective_id):
"""Gets a list of ``Objectives`` that are the immediate requisites for the given ``Objective``.
In plenary mode, the returned list contains all of the immediate
requisites, or an error results if an ``Objective`` is not found
or inaccessible. Otherwise, inaccessible ``Objectives`` may be
omitted from the list and may present the elements in any order
including returning a unique set.
arg: objective_id (osid.id.Id): ``Id`` of the ``Objective``
return: (osid.learning.ObjectiveList) - the returned requisite
``Objectives``
raise: NotFound - ``objective_id`` not found
raise: NullArgument - ``objective_id`` is ``null``
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure
*compliance: mandatory -- This method is must be implemented.*
"""
# Implemented from template for
# osid.learning.ObjectiveRequisiteSession.get_requisite_objectives_template
# NOTE: This implementation currently ignores plenary view
requisite_type = Type(**Relationship().get_type_data('OBJECTIVE.REQUISITE'))
relm = self._get_provider_manager('RELATIONSHIP')
rls = relm.get_relationship_lookup_session(proxy=self._proxy)
rls.use_federated_family_view()
requisite_relationships = rls.get_relationships_by_genus_type_for_source(objective_id,
requisite_type)
destination_ids = [ObjectId(r.get_destination_id().identifier)
for r in requisite_relationships]
collection = JSONClientValidated('learning',
collection='Objective',
runtime=self._runtime)
result = collection.find({'_id': {'$in': destination_ids}})
return objects.ObjectiveList(result, runtime=self._runtime) | Gets a list of ``Objectives`` that are the immediate requisites for the given ``Objective``.
In plenary mode, the returned list contains all of the immediate
requisites, or an error results if an ``Objective`` is not found
or inaccessible. Otherwise, inaccessible ``Objectives`` may be
omitted from the list and may present the elements in any order
including returning a unique set.
arg: objective_id (osid.id.Id): ``Id`` of the ``Objective``
return: (osid.learning.ObjectiveList) - the returned requisite
``Objectives``
raise: NotFound - ``objective_id`` not found
raise: NullArgument - ``objective_id`` is ``null``
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure
*compliance: mandatory -- This method is must be implemented.* | Below is the the instruction that describes the task:
### Input:
Gets a list of ``Objectives`` that are the immediate requisites for the given ``Objective``.
In plenary mode, the returned list contains all of the immediate
requisites, or an error results if an ``Objective`` is not found
or inaccessible. Otherwise, inaccessible ``Objectives`` may be
omitted from the list and may present the elements in any order
including returning a unique set.
arg: objective_id (osid.id.Id): ``Id`` of the ``Objective``
return: (osid.learning.ObjectiveList) - the returned requisite
``Objectives``
raise: NotFound - ``objective_id`` not found
raise: NullArgument - ``objective_id`` is ``null``
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure
*compliance: mandatory -- This method is must be implemented.*
### Response:
def get_requisite_objectives(self, objective_id):
"""Gets a list of ``Objectives`` that are the immediate requisites for the given ``Objective``.
In plenary mode, the returned list contains all of the immediate
requisites, or an error results if an ``Objective`` is not found
or inaccessible. Otherwise, inaccessible ``Objectives`` may be
omitted from the list and may present the elements in any order
including returning a unique set.
arg: objective_id (osid.id.Id): ``Id`` of the ``Objective``
return: (osid.learning.ObjectiveList) - the returned requisite
``Objectives``
raise: NotFound - ``objective_id`` not found
raise: NullArgument - ``objective_id`` is ``null``
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure
*compliance: mandatory -- This method is must be implemented.*
"""
# Implemented from template for
# osid.learning.ObjectiveRequisiteSession.get_requisite_objectives_template
# NOTE: This implementation currently ignores plenary view
requisite_type = Type(**Relationship().get_type_data('OBJECTIVE.REQUISITE'))
relm = self._get_provider_manager('RELATIONSHIP')
rls = relm.get_relationship_lookup_session(proxy=self._proxy)
rls.use_federated_family_view()
requisite_relationships = rls.get_relationships_by_genus_type_for_source(objective_id,
requisite_type)
destination_ids = [ObjectId(r.get_destination_id().identifier)
for r in requisite_relationships]
collection = JSONClientValidated('learning',
collection='Objective',
runtime=self._runtime)
result = collection.find({'_id': {'$in': destination_ids}})
return objects.ObjectiveList(result, runtime=self._runtime) |
def find_1wf_files(self):
"""
Abinit adds the idir-ipert index at the end of the 1WF file and this breaks the extension
e.g. out_1WF4. This method scans the files in the directories and returns a list of namedtuple
Each named tuple gives the `path` of the 1FK file and the `pertcase` index.
"""
regex = re.compile(r"out_1WF(\d+)(\.nc)?$")
wf_paths = [f for f in self.list_filepaths() if regex.match(os.path.basename(f))]
if not wf_paths: return None
# Build list of (pertcase, path) tuples.
pertfile_list = []
for path in wf_paths:
name = os.path.basename(path)
match = regex.match(name)
pertcase, ncext = match.groups()
pertfile_list.append((int(pertcase), path))
# DSU sort.
pertfile_list = sorted(pertfile_list, key=lambda t: t[0])
return [dict2namedtuple(pertcase=item[0], path=item[1]) for item in pertfile_list] | Abinit adds the idir-ipert index at the end of the 1WF file and this breaks the extension
e.g. out_1WF4. This method scans the files in the directories and returns a list of namedtuple
Each named tuple gives the `path` of the 1FK file and the `pertcase` index. | Below is the the instruction that describes the task:
### Input:
Abinit adds the idir-ipert index at the end of the 1WF file and this breaks the extension
e.g. out_1WF4. This method scans the files in the directories and returns a list of namedtuple
Each named tuple gives the `path` of the 1FK file and the `pertcase` index.
### Response:
def find_1wf_files(self):
"""
Abinit adds the idir-ipert index at the end of the 1WF file and this breaks the extension
e.g. out_1WF4. This method scans the files in the directories and returns a list of namedtuple
Each named tuple gives the `path` of the 1FK file and the `pertcase` index.
"""
regex = re.compile(r"out_1WF(\d+)(\.nc)?$")
wf_paths = [f for f in self.list_filepaths() if regex.match(os.path.basename(f))]
if not wf_paths: return None
# Build list of (pertcase, path) tuples.
pertfile_list = []
for path in wf_paths:
name = os.path.basename(path)
match = regex.match(name)
pertcase, ncext = match.groups()
pertfile_list.append((int(pertcase), path))
# DSU sort.
pertfile_list = sorted(pertfile_list, key=lambda t: t[0])
return [dict2namedtuple(pertcase=item[0], path=item[1]) for item in pertfile_list] |
def read_resource_list(self, uri):
"""Read resource list from specified URI else raise exception."""
self.logger.info("Reading resource list %s" % (uri))
try:
resource_list = ResourceList(allow_multifile=self.allow_multifile,
mapper=self.mapper)
resource_list.read(uri=uri)
except Exception as e:
raise ClientError("Can't read source resource list from %s (%s)" %
(uri, str(e)))
self.logger.debug("Finished reading resource list")
return(resource_list) | Read resource list from specified URI else raise exception. | Below is the the instruction that describes the task:
### Input:
Read resource list from specified URI else raise exception.
### Response:
def read_resource_list(self, uri):
"""Read resource list from specified URI else raise exception."""
self.logger.info("Reading resource list %s" % (uri))
try:
resource_list = ResourceList(allow_multifile=self.allow_multifile,
mapper=self.mapper)
resource_list.read(uri=uri)
except Exception as e:
raise ClientError("Can't read source resource list from %s (%s)" %
(uri, str(e)))
self.logger.debug("Finished reading resource list")
return(resource_list) |
def inside(points, polygons, short_circuit='any', precision=0.001):
"""
Test whether each of the points is within the given set of polygons.
Parameters
----------
points : array-like[N][2] or list of array-like[N][2]
Coordinates of the points to be tested or groups of points to be
tested together.
polygons : polygon or array-like
Polygons to be tested against. Must be a ``PolygonSet``,
``CellReference``, ``CellArray``, or an array. The array may
contain any of the previous objects or an array-like[N][2] of
vertices of a polygon.
short_circuit : {'any', 'all'}
If `points` is a list of point groups, testing within each group
will be short-circuited if any of the points in the group is
inside ('any') or outside ('all') the polygons. If `points` is
simply a list of points, this parameter has no effect.
precision : float
Desired precision for rounding vertice coordinates.
Returns
-------
out : tuple
Tuple of booleans indicating if each of the points or point
groups is inside the set of polygons.
"""
poly = []
if isinstance(polygons, PolygonSet):
poly.extend(polygons.polygons)
elif isinstance(polygons, CellReference) or isinstance(
polygons, CellArray):
poly.extend(polygons.get_polygons())
else:
for obj in polygons:
if isinstance(obj, PolygonSet):
poly.extend(obj.polygons)
elif isinstance(obj, CellReference) or isinstance(obj, CellArray):
poly.extend(obj.get_polygons())
else:
poly.append(obj)
if hasattr(points[0][0], '__iter__'):
pts = points
sc = 1 if short_circuit == 'any' else -1
else:
pts = (points, )
sc = 0
return clipper.inside(pts, poly, sc, 1 / precision) | Test whether each of the points is within the given set of polygons.
Parameters
----------
points : array-like[N][2] or list of array-like[N][2]
Coordinates of the points to be tested or groups of points to be
tested together.
polygons : polygon or array-like
Polygons to be tested against. Must be a ``PolygonSet``,
``CellReference``, ``CellArray``, or an array. The array may
contain any of the previous objects or an array-like[N][2] of
vertices of a polygon.
short_circuit : {'any', 'all'}
If `points` is a list of point groups, testing within each group
will be short-circuited if any of the points in the group is
inside ('any') or outside ('all') the polygons. If `points` is
simply a list of points, this parameter has no effect.
precision : float
Desired precision for rounding vertice coordinates.
Returns
-------
out : tuple
Tuple of booleans indicating if each of the points or point
groups is inside the set of polygons. | Below is the the instruction that describes the task:
### Input:
Test whether each of the points is within the given set of polygons.
Parameters
----------
points : array-like[N][2] or list of array-like[N][2]
Coordinates of the points to be tested or groups of points to be
tested together.
polygons : polygon or array-like
Polygons to be tested against. Must be a ``PolygonSet``,
``CellReference``, ``CellArray``, or an array. The array may
contain any of the previous objects or an array-like[N][2] of
vertices of a polygon.
short_circuit : {'any', 'all'}
If `points` is a list of point groups, testing within each group
will be short-circuited if any of the points in the group is
inside ('any') or outside ('all') the polygons. If `points` is
simply a list of points, this parameter has no effect.
precision : float
Desired precision for rounding vertice coordinates.
Returns
-------
out : tuple
Tuple of booleans indicating if each of the points or point
groups is inside the set of polygons.
### Response:
def inside(points, polygons, short_circuit='any', precision=0.001):
"""
Test whether each of the points is within the given set of polygons.
Parameters
----------
points : array-like[N][2] or list of array-like[N][2]
Coordinates of the points to be tested or groups of points to be
tested together.
polygons : polygon or array-like
Polygons to be tested against. Must be a ``PolygonSet``,
``CellReference``, ``CellArray``, or an array. The array may
contain any of the previous objects or an array-like[N][2] of
vertices of a polygon.
short_circuit : {'any', 'all'}
If `points` is a list of point groups, testing within each group
will be short-circuited if any of the points in the group is
inside ('any') or outside ('all') the polygons. If `points` is
simply a list of points, this parameter has no effect.
precision : float
Desired precision for rounding vertice coordinates.
Returns
-------
out : tuple
Tuple of booleans indicating if each of the points or point
groups is inside the set of polygons.
"""
poly = []
if isinstance(polygons, PolygonSet):
poly.extend(polygons.polygons)
elif isinstance(polygons, CellReference) or isinstance(
polygons, CellArray):
poly.extend(polygons.get_polygons())
else:
for obj in polygons:
if isinstance(obj, PolygonSet):
poly.extend(obj.polygons)
elif isinstance(obj, CellReference) or isinstance(obj, CellArray):
poly.extend(obj.get_polygons())
else:
poly.append(obj)
if hasattr(points[0][0], '__iter__'):
pts = points
sc = 1 if short_circuit == 'any' else -1
else:
pts = (points, )
sc = 0
return clipper.inside(pts, poly, sc, 1 / precision) |
def _cov_for(self, imls):
"""
Clip `imls` to the range associated with the support of the
vulnerability function and returns the corresponding
covariance values by linear interpolation. For instance
if the range is [0.005, 0.0269] and the imls are
[0.0049, 0.006, 0.027], the clipped imls are
[0.005, 0.006, 0.0269].
"""
return self._covs_i1d(
numpy.piecewise(
imls,
[imls > self.imls[-1], imls < self.imls[0]],
[self.imls[-1], self.imls[0], lambda x: x])) | Clip `imls` to the range associated with the support of the
vulnerability function and returns the corresponding
covariance values by linear interpolation. For instance
if the range is [0.005, 0.0269] and the imls are
[0.0049, 0.006, 0.027], the clipped imls are
[0.005, 0.006, 0.0269]. | Below is the the instruction that describes the task:
### Input:
Clip `imls` to the range associated with the support of the
vulnerability function and returns the corresponding
covariance values by linear interpolation. For instance
if the range is [0.005, 0.0269] and the imls are
[0.0049, 0.006, 0.027], the clipped imls are
[0.005, 0.006, 0.0269].
### Response:
def _cov_for(self, imls):
"""
Clip `imls` to the range associated with the support of the
vulnerability function and returns the corresponding
covariance values by linear interpolation. For instance
if the range is [0.005, 0.0269] and the imls are
[0.0049, 0.006, 0.027], the clipped imls are
[0.005, 0.006, 0.0269].
"""
return self._covs_i1d(
numpy.piecewise(
imls,
[imls > self.imls[-1], imls < self.imls[0]],
[self.imls[-1], self.imls[0], lambda x: x])) |
def get_facts(self):
"""Return facts of the device."""
output = self.device.facts
uptime = self.device.uptime or -1
interfaces = junos_views.junos_iface_table(self.device)
interfaces.get()
interface_list = interfaces.keys()
return {
'vendor': u'Juniper',
'model': py23_compat.text_type(output['model']),
'serial_number': py23_compat.text_type(output['serialnumber']),
'os_version': py23_compat.text_type(output['version']),
'hostname': py23_compat.text_type(output['hostname']),
'fqdn': py23_compat.text_type(output['fqdn']),
'uptime': uptime,
'interface_list': interface_list
} | Return facts of the device. | Below is the the instruction that describes the task:
### Input:
Return facts of the device.
### Response:
def get_facts(self):
"""Return facts of the device."""
output = self.device.facts
uptime = self.device.uptime or -1
interfaces = junos_views.junos_iface_table(self.device)
interfaces.get()
interface_list = interfaces.keys()
return {
'vendor': u'Juniper',
'model': py23_compat.text_type(output['model']),
'serial_number': py23_compat.text_type(output['serialnumber']),
'os_version': py23_compat.text_type(output['version']),
'hostname': py23_compat.text_type(output['hostname']),
'fqdn': py23_compat.text_type(output['fqdn']),
'uptime': uptime,
'interface_list': interface_list
} |
def get_root_families(self):
"""Gets the root families in the family hierarchy.
A node with no parents is an orphan. While all family ``Ids``
are known to the hierarchy, an orphan does not appear in the
hierarchy unless explicitly added as a root node or child of
another node.
return: (osid.relationship.FamilyList) - the root families
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure
*compliance: mandatory -- This method is must be implemented.*
"""
# Implemented from template for
# osid.resource.BinHierarchySession.get_root_bins
if self._catalog_session is not None:
return self._catalog_session.get_root_catalogs()
return FamilyLookupSession(
self._proxy,
self._runtime).get_families_by_ids(list(self.get_root_family_ids())) | Gets the root families in the family hierarchy.
A node with no parents is an orphan. While all family ``Ids``
are known to the hierarchy, an orphan does not appear in the
hierarchy unless explicitly added as a root node or child of
another node.
return: (osid.relationship.FamilyList) - the root families
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure
*compliance: mandatory -- This method is must be implemented.* | Below is the the instruction that describes the task:
### Input:
Gets the root families in the family hierarchy.
A node with no parents is an orphan. While all family ``Ids``
are known to the hierarchy, an orphan does not appear in the
hierarchy unless explicitly added as a root node or child of
another node.
return: (osid.relationship.FamilyList) - the root families
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure
*compliance: mandatory -- This method is must be implemented.*
### Response:
def get_root_families(self):
"""Gets the root families in the family hierarchy.
A node with no parents is an orphan. While all family ``Ids``
are known to the hierarchy, an orphan does not appear in the
hierarchy unless explicitly added as a root node or child of
another node.
return: (osid.relationship.FamilyList) - the root families
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure
*compliance: mandatory -- This method is must be implemented.*
"""
# Implemented from template for
# osid.resource.BinHierarchySession.get_root_bins
if self._catalog_session is not None:
return self._catalog_session.get_root_catalogs()
return FamilyLookupSession(
self._proxy,
self._runtime).get_families_by_ids(list(self.get_root_family_ids())) |
def detail_view(self, request, module, preview):
"""
Looks up a preview in the index, returning a detail view response.
"""
try:
preview = self.__previews[module][preview]
except KeyError:
raise Http404 # The provided module/preview does not exist in the index.
return preview.detail_view(request) | Looks up a preview in the index, returning a detail view response. | Below is the the instruction that describes the task:
### Input:
Looks up a preview in the index, returning a detail view response.
### Response:
def detail_view(self, request, module, preview):
"""
Looks up a preview in the index, returning a detail view response.
"""
try:
preview = self.__previews[module][preview]
except KeyError:
raise Http404 # The provided module/preview does not exist in the index.
return preview.detail_view(request) |
def query_metric_stats(self, metric_type, metric_id=None, start=None, end=None, bucketDuration=None, **query_options):
"""
Query for metric aggregates from the server. This is called buckets in the Hawkular-Metrics documentation.
:param metric_type: MetricType to be matched (required)
:param metric_id: Exact string matching metric id or None for tags matching only
:param start: Milliseconds since epoch or datetime instance
:param end: Milliseconds since epoch or datetime instance
:param bucketDuration: The timedelta or duration of buckets. Can be a string presentation or timedelta object
:param query_options: For possible query_options, see the Hawkular-Metrics documentation.
"""
if start is not None:
if type(start) is datetime:
query_options['start'] = datetime_to_time_millis(start)
else:
query_options['start'] = start
if end is not None:
if type(end) is datetime:
query_options['end'] = datetime_to_time_millis(end)
else:
query_options['end'] = end
if bucketDuration is not None:
if type(bucketDuration) is timedelta:
query_options['bucketDuration'] = timedelta_to_duration(bucketDuration)
else:
query_options['bucketDuration'] = bucketDuration
if metric_id is not None:
url = self._get_metrics_stats_url(self._get_metrics_single_url(metric_type, metric_id))
else:
if len(query_options) < 0:
raise HawkularError('Tags are required when querying without metric_id')
url = self._get_metrics_stats_url(self._get_url(metric_type))
return self._get(url, **query_options) | Query for metric aggregates from the server. This is called buckets in the Hawkular-Metrics documentation.
:param metric_type: MetricType to be matched (required)
:param metric_id: Exact string matching metric id or None for tags matching only
:param start: Milliseconds since epoch or datetime instance
:param end: Milliseconds since epoch or datetime instance
:param bucketDuration: The timedelta or duration of buckets. Can be a string presentation or timedelta object
:param query_options: For possible query_options, see the Hawkular-Metrics documentation. | Below is the the instruction that describes the task:
### Input:
Query for metric aggregates from the server. This is called buckets in the Hawkular-Metrics documentation.
:param metric_type: MetricType to be matched (required)
:param metric_id: Exact string matching metric id or None for tags matching only
:param start: Milliseconds since epoch or datetime instance
:param end: Milliseconds since epoch or datetime instance
:param bucketDuration: The timedelta or duration of buckets. Can be a string presentation or timedelta object
:param query_options: For possible query_options, see the Hawkular-Metrics documentation.
### Response:
def query_metric_stats(self, metric_type, metric_id=None, start=None, end=None, bucketDuration=None, **query_options):
"""
Query for metric aggregates from the server. This is called buckets in the Hawkular-Metrics documentation.
:param metric_type: MetricType to be matched (required)
:param metric_id: Exact string matching metric id or None for tags matching only
:param start: Milliseconds since epoch or datetime instance
:param end: Milliseconds since epoch or datetime instance
:param bucketDuration: The timedelta or duration of buckets. Can be a string presentation or timedelta object
:param query_options: For possible query_options, see the Hawkular-Metrics documentation.
"""
if start is not None:
if type(start) is datetime:
query_options['start'] = datetime_to_time_millis(start)
else:
query_options['start'] = start
if end is not None:
if type(end) is datetime:
query_options['end'] = datetime_to_time_millis(end)
else:
query_options['end'] = end
if bucketDuration is not None:
if type(bucketDuration) is timedelta:
query_options['bucketDuration'] = timedelta_to_duration(bucketDuration)
else:
query_options['bucketDuration'] = bucketDuration
if metric_id is not None:
url = self._get_metrics_stats_url(self._get_metrics_single_url(metric_type, metric_id))
else:
if len(query_options) < 0:
raise HawkularError('Tags are required when querying without metric_id')
url = self._get_metrics_stats_url(self._get_url(metric_type))
return self._get(url, **query_options) |
def post_has_mime_parts(request, parts):
"""Validate that a MMP POST contains all required sections.
:param request: Django Request
:param parts: [(part_type, part_name), ...]
:return: None or raises exception.
Where information is stored in the request:
part_type header: request.META['HTTP_<UPPER CASE NAME>']
part_type file: request.FILES['<name>']
part_type field: request.POST['<name>']
"""
missing = []
for part_type, part_name in parts:
if part_type == 'header':
if 'HTTP_' + part_name.upper() not in request.META:
missing.append('{}: {}'.format(part_type, part_name))
elif part_type == 'file':
if part_name not in list(request.FILES.keys()):
missing.append('{}: {}'.format(part_type, part_name))
elif part_type == 'field':
if part_name not in list(request.POST.keys()):
missing.append('{}: {}'.format(part_type, part_name))
else:
raise d1_common.types.exceptions.ServiceFailure(
0, 'Invalid part_type. part_type="{}"'.format(part_type)
)
if len(missing) > 0:
raise d1_common.types.exceptions.InvalidRequest(
0,
'Missing part(s) in MIME Multipart document. missing="{}"'.format(
', '.join(missing)
),
) | Validate that a MMP POST contains all required sections.
:param request: Django Request
:param parts: [(part_type, part_name), ...]
:return: None or raises exception.
Where information is stored in the request:
part_type header: request.META['HTTP_<UPPER CASE NAME>']
part_type file: request.FILES['<name>']
part_type field: request.POST['<name>'] | Below is the the instruction that describes the task:
### Input:
Validate that a MMP POST contains all required sections.
:param request: Django Request
:param parts: [(part_type, part_name), ...]
:return: None or raises exception.
Where information is stored in the request:
part_type header: request.META['HTTP_<UPPER CASE NAME>']
part_type file: request.FILES['<name>']
part_type field: request.POST['<name>']
### Response:
def post_has_mime_parts(request, parts):
"""Validate that a MMP POST contains all required sections.
:param request: Django Request
:param parts: [(part_type, part_name), ...]
:return: None or raises exception.
Where information is stored in the request:
part_type header: request.META['HTTP_<UPPER CASE NAME>']
part_type file: request.FILES['<name>']
part_type field: request.POST['<name>']
"""
missing = []
for part_type, part_name in parts:
if part_type == 'header':
if 'HTTP_' + part_name.upper() not in request.META:
missing.append('{}: {}'.format(part_type, part_name))
elif part_type == 'file':
if part_name not in list(request.FILES.keys()):
missing.append('{}: {}'.format(part_type, part_name))
elif part_type == 'field':
if part_name not in list(request.POST.keys()):
missing.append('{}: {}'.format(part_type, part_name))
else:
raise d1_common.types.exceptions.ServiceFailure(
0, 'Invalid part_type. part_type="{}"'.format(part_type)
)
if len(missing) > 0:
raise d1_common.types.exceptions.InvalidRequest(
0,
'Missing part(s) in MIME Multipart document. missing="{}"'.format(
', '.join(missing)
),
) |
def list_items(cls, repo, *args, **kwargs):
"""
Find all items of this type - subclasses can specify args and kwargs differently.
If no args are given, subclasses are obliged to return all items if no additional
arguments arg given.
:note: Favor the iter_items method as it will
:return:list(Item,...) list of item instances"""
out_list = IterableList(cls._id_attribute_)
out_list.extend(cls.iter_items(repo, *args, **kwargs))
return out_list | Find all items of this type - subclasses can specify args and kwargs differently.
If no args are given, subclasses are obliged to return all items if no additional
arguments arg given.
:note: Favor the iter_items method as it will
:return:list(Item,...) list of item instances | Below is the the instruction that describes the task:
### Input:
Find all items of this type - subclasses can specify args and kwargs differently.
If no args are given, subclasses are obliged to return all items if no additional
arguments arg given.
:note: Favor the iter_items method as it will
:return:list(Item,...) list of item instances
### Response:
def list_items(cls, repo, *args, **kwargs):
"""
Find all items of this type - subclasses can specify args and kwargs differently.
If no args are given, subclasses are obliged to return all items if no additional
arguments arg given.
:note: Favor the iter_items method as it will
:return:list(Item,...) list of item instances"""
out_list = IterableList(cls._id_attribute_)
out_list.extend(cls.iter_items(repo, *args, **kwargs))
return out_list |
def uri_tree_encode(uri_tree, type_host = HOST_REG_NAME):
"""
Percent/Query encode a raw URI tree.
"""
scheme, authority, path, query, fragment = uri_tree
if authority:
user, passwd, host, port = authority
if user:
user = pct_encode(user, USER_ENCDCT)
if passwd:
passwd = pct_encode(passwd, PASSWD_ENCDCT)
if host and type_host == HOST_REG_NAME:
host = pct_encode(host, REG_NAME_ENCDCT)
if isinstance(port, (int, long)):
port = str(port)
authority = (user, passwd, host, port)
if path:
path = pct_encode(path, P_ENCDCT)
if (not authority) and (not scheme):
# check for path-noscheme special case
sppath = path.split('/', 1)
if ':' in sppath[0]:
sppath[0] = sppath[0].replace(':', '%3A')
path = '/'.join(sppath)
if query:
query = tuple([(query_elt_encode(x, QUERY_KEY_ENCDCT),
query_elt_encode(y, QUERY_VAL_ENCDCT)) for (x, y) in query])
if fragment:
fragment = pct_encode(fragment, FRAG_ENCDCT)
return (scheme, authority, path, query, fragment) | Percent/Query encode a raw URI tree. | Below is the the instruction that describes the task:
### Input:
Percent/Query encode a raw URI tree.
### Response:
def uri_tree_encode(uri_tree, type_host = HOST_REG_NAME):
"""
Percent/Query encode a raw URI tree.
"""
scheme, authority, path, query, fragment = uri_tree
if authority:
user, passwd, host, port = authority
if user:
user = pct_encode(user, USER_ENCDCT)
if passwd:
passwd = pct_encode(passwd, PASSWD_ENCDCT)
if host and type_host == HOST_REG_NAME:
host = pct_encode(host, REG_NAME_ENCDCT)
if isinstance(port, (int, long)):
port = str(port)
authority = (user, passwd, host, port)
if path:
path = pct_encode(path, P_ENCDCT)
if (not authority) and (not scheme):
# check for path-noscheme special case
sppath = path.split('/', 1)
if ':' in sppath[0]:
sppath[0] = sppath[0].replace(':', '%3A')
path = '/'.join(sppath)
if query:
query = tuple([(query_elt_encode(x, QUERY_KEY_ENCDCT),
query_elt_encode(y, QUERY_VAL_ENCDCT)) for (x, y) in query])
if fragment:
fragment = pct_encode(fragment, FRAG_ENCDCT)
return (scheme, authority, path, query, fragment) |
def bb2hw(a:Collection[int])->np.ndarray:
"Convert bounding box points from (width,height,center) to (height,width,top,left)."
return np.array([a[1],a[0],a[3]-a[1],a[2]-a[0]]) | Convert bounding box points from (width,height,center) to (height,width,top,left). | Below is the the instruction that describes the task:
### Input:
Convert bounding box points from (width,height,center) to (height,width,top,left).
### Response:
def bb2hw(a:Collection[int])->np.ndarray:
"Convert bounding box points from (width,height,center) to (height,width,top,left)."
return np.array([a[1],a[0],a[3]-a[1],a[2]-a[0]]) |
def tags():
# type: () -> List[str]
""" Returns all tags in the repo.
Returns:
list[str]: List of all tags in the repo, sorted as versions.
All tags returned by this function will be parsed as if the contained
versions (using ``v:refname`` sorting).
"""
return shell.run(
'git tag --sort=v:refname',
capture=True,
never_pretend=True
).stdout.strip().splitlines() | Returns all tags in the repo.
Returns:
list[str]: List of all tags in the repo, sorted as versions.
All tags returned by this function will be parsed as if the contained
versions (using ``v:refname`` sorting). | Below is the the instruction that describes the task:
### Input:
Returns all tags in the repo.
Returns:
list[str]: List of all tags in the repo, sorted as versions.
All tags returned by this function will be parsed as if the contained
versions (using ``v:refname`` sorting).
### Response:
def tags():
# type: () -> List[str]
""" Returns all tags in the repo.
Returns:
list[str]: List of all tags in the repo, sorted as versions.
All tags returned by this function will be parsed as if the contained
versions (using ``v:refname`` sorting).
"""
return shell.run(
'git tag --sort=v:refname',
capture=True,
never_pretend=True
).stdout.strip().splitlines() |
def match(tgt, nodegroups=None, opts=None):
'''
This is a compatibility matcher and is NOT called when using
nodegroups for remote execution, but is called when the nodegroups
matcher is used in states
'''
if not opts:
opts = __opts__
if not nodegroups:
log.debug('Nodegroup matcher called with no nodegroups.')
return False
if tgt in nodegroups:
matchers = salt.loader.matchers(opts)
return matchers['compound_match.match'](
salt.utils.minions.nodegroup_comp(tgt, nodegroups)
)
return False | This is a compatibility matcher and is NOT called when using
nodegroups for remote execution, but is called when the nodegroups
matcher is used in states | Below is the the instruction that describes the task:
### Input:
This is a compatibility matcher and is NOT called when using
nodegroups for remote execution, but is called when the nodegroups
matcher is used in states
### Response:
def match(tgt, nodegroups=None, opts=None):
'''
This is a compatibility matcher and is NOT called when using
nodegroups for remote execution, but is called when the nodegroups
matcher is used in states
'''
if not opts:
opts = __opts__
if not nodegroups:
log.debug('Nodegroup matcher called with no nodegroups.')
return False
if tgt in nodegroups:
matchers = salt.loader.matchers(opts)
return matchers['compound_match.match'](
salt.utils.minions.nodegroup_comp(tgt, nodegroups)
)
return False |
def _cbGotHello(self, busName):
"""
Called in reply to the initial Hello remote method invocation
"""
self.busName = busName
# print 'Connection Bus Name = ', self.busName
self.factory._ok(self) | Called in reply to the initial Hello remote method invocation | Below is the the instruction that describes the task:
### Input:
Called in reply to the initial Hello remote method invocation
### Response:
def _cbGotHello(self, busName):
"""
Called in reply to the initial Hello remote method invocation
"""
self.busName = busName
# print 'Connection Bus Name = ', self.busName
self.factory._ok(self) |
def _load(self, data):
"""
Deserialize a dictionary of data into a ``pybrightcove.video.Video``
object.
"""
self.raw_data = data
self.creation_date = _convert_tstamp(data['creationDate'])
self.economics = data['economics']
self.id = data['id']
self.last_modified_date = _convert_tstamp(data['lastModifiedDate'])
self.length = data['length']
self.link_text = data['linkText']
self.link_url = data['linkURL']
self.long_description = data['longDescription']
self.name = data['name']
self.plays_total = data['playsTotal']
self.plays_trailing_week = data['playsTrailingWeek']
self.published_date = _convert_tstamp(data['publishedDate'])
self.start_date = _convert_tstamp(data.get('startDate', None))
self.end_date = _convert_tstamp(data.get('endDate', None))
self.reference_id = data['referenceId']
self.short_description = data['shortDescription']
self.tags = []
for tag in data['tags']:
self.tags.append(tag)
self.thumbnail_url = data['thumbnailURL']
self.video_still_url = data['videoStillURL'] | Deserialize a dictionary of data into a ``pybrightcove.video.Video``
object. | Below is the the instruction that describes the task:
### Input:
Deserialize a dictionary of data into a ``pybrightcove.video.Video``
object.
### Response:
def _load(self, data):
"""
Deserialize a dictionary of data into a ``pybrightcove.video.Video``
object.
"""
self.raw_data = data
self.creation_date = _convert_tstamp(data['creationDate'])
self.economics = data['economics']
self.id = data['id']
self.last_modified_date = _convert_tstamp(data['lastModifiedDate'])
self.length = data['length']
self.link_text = data['linkText']
self.link_url = data['linkURL']
self.long_description = data['longDescription']
self.name = data['name']
self.plays_total = data['playsTotal']
self.plays_trailing_week = data['playsTrailingWeek']
self.published_date = _convert_tstamp(data['publishedDate'])
self.start_date = _convert_tstamp(data.get('startDate', None))
self.end_date = _convert_tstamp(data.get('endDate', None))
self.reference_id = data['referenceId']
self.short_description = data['shortDescription']
self.tags = []
for tag in data['tags']:
self.tags.append(tag)
self.thumbnail_url = data['thumbnailURL']
self.video_still_url = data['videoStillURL'] |
def signal(*args, **kwargs):
from .core import Signal
"""A signal decorator designed to work both in the simpler way, like:
.. code:: python
@signal
def validation_function(arg1, ...):
'''Some doc'''
and also as a double-called decorator, like
.. code:: python
@signal(SignalOptions.EXEC_CONCURRENT)
def validation_function(arg1, ...):
'''Some doc'''
"""
if len(args) == 1 and len(kwargs) == 0 and callable(args[0]):
return Signal(fvalidation=args[0])
else:
sig = Signal(*args, **kwargs)
def wrapper(fvalidation):
sig._set_fvalidation(fvalidation)
return sig
return wrapper | A signal decorator designed to work both in the simpler way, like:
.. code:: python
@signal
def validation_function(arg1, ...):
'''Some doc'''
and also as a double-called decorator, like
.. code:: python
@signal(SignalOptions.EXEC_CONCURRENT)
def validation_function(arg1, ...):
'''Some doc''' | Below is the the instruction that describes the task:
### Input:
A signal decorator designed to work both in the simpler way, like:
.. code:: python
@signal
def validation_function(arg1, ...):
'''Some doc'''
and also as a double-called decorator, like
.. code:: python
@signal(SignalOptions.EXEC_CONCURRENT)
def validation_function(arg1, ...):
'''Some doc'''
### Response:
def signal(*args, **kwargs):
from .core import Signal
"""A signal decorator designed to work both in the simpler way, like:
.. code:: python
@signal
def validation_function(arg1, ...):
'''Some doc'''
and also as a double-called decorator, like
.. code:: python
@signal(SignalOptions.EXEC_CONCURRENT)
def validation_function(arg1, ...):
'''Some doc'''
"""
if len(args) == 1 and len(kwargs) == 0 and callable(args[0]):
return Signal(fvalidation=args[0])
else:
sig = Signal(*args, **kwargs)
def wrapper(fvalidation):
sig._set_fvalidation(fvalidation)
return sig
return wrapper |
def tabbedPane(self, req, tag):
"""
Render a tabbed pane tab for each top-level
L{xmantissa.ixmantissa.IPreferenceCollection} tab
"""
navigation = webnav.getTabs(self.aggregator.getPreferenceCollections())
pages = list()
for tab in navigation:
f = inevow.IRenderer(
self.aggregator.store.getItemByID(tab.storeID))
f.tab = tab
if hasattr(f, 'setFragmentParent'):
f.setFragmentParent(self)
pages.append((tab.name, f))
f = tabbedPane.TabbedPaneFragment(pages, name='preference-editor')
f.setFragmentParent(self)
return f | Render a tabbed pane tab for each top-level
L{xmantissa.ixmantissa.IPreferenceCollection} tab | Below is the the instruction that describes the task:
### Input:
Render a tabbed pane tab for each top-level
L{xmantissa.ixmantissa.IPreferenceCollection} tab
### Response:
def tabbedPane(self, req, tag):
"""
Render a tabbed pane tab for each top-level
L{xmantissa.ixmantissa.IPreferenceCollection} tab
"""
navigation = webnav.getTabs(self.aggregator.getPreferenceCollections())
pages = list()
for tab in navigation:
f = inevow.IRenderer(
self.aggregator.store.getItemByID(tab.storeID))
f.tab = tab
if hasattr(f, 'setFragmentParent'):
f.setFragmentParent(self)
pages.append((tab.name, f))
f = tabbedPane.TabbedPaneFragment(pages, name='preference-editor')
f.setFragmentParent(self)
return f |
def Shift(self, n):
"""
[
{
'self': [1, 2, 3, 4, 5],
'n': 3,
'assert': lambda ret: list(ret) == [4, 5, 1, 2, 3]
}
]
"""
headn = tuple(Take(self, n))
yield from self
yield from headn | [
{
'self': [1, 2, 3, 4, 5],
'n': 3,
'assert': lambda ret: list(ret) == [4, 5, 1, 2, 3]
}
] | Below is the the instruction that describes the task:
### Input:
[
{
'self': [1, 2, 3, 4, 5],
'n': 3,
'assert': lambda ret: list(ret) == [4, 5, 1, 2, 3]
}
]
### Response:
def Shift(self, n):
"""
[
{
'self': [1, 2, 3, 4, 5],
'n': 3,
'assert': lambda ret: list(ret) == [4, 5, 1, 2, 3]
}
]
"""
headn = tuple(Take(self, n))
yield from self
yield from headn |
def execute_substep(stmt,
global_def,
global_vars,
task='',
task_params='',
proc_vars={},
shared_vars=[],
config={}):
'''Execute a substep with specific input etc
Substep executed by this function should be self-contained. It can contain
tasks (which will be sent to the master process) but not nested workflows.
The executor checks step signatures and might skip the substep if it has
been executed and the signature matches.
The executor accepts connections to the controller, and a socket using
which the results will be returned. However, the calling process should
take care of the connection and disconnection of controller sockets and
this function only takes care of the connection and disconnection of
result socket.
stmt:
Main statement of the substep
global_def:
Global definitions, might define functions useful to the substep
task:
External task
proc_vars:
Environmental variables, signature variables etc
shared_vars:
Variables that should be returned after the execution
config:
Runmode, signature mode, verbosity, etc.
The return value should be a dictionary with the following keys:
index: index of the substep within the step
ret_code: (all) return code, 0 for successful
sig_skipped: (optional) return if the step is skipped due to signature
shared: (optional) shared variable as specified by 'shared_vars'
stdout: (optional) if in interactive mode
stderr: (optional) if in interactive mode
exception: (optional) if an exception occures
'''
assert not env.zmq_context.closed
assert 'workflow_id' in proc_vars
assert 'step_id' in proc_vars
assert '_input' in proc_vars
assert '_output' in proc_vars
assert '_depends' in proc_vars
assert 'step_output' in proc_vars
assert '_index' in proc_vars
assert 'result_push_socket' in config["sockets"]
# this should not happen but check nevertheless
if env.result_socket_port is not None and env.result_socket_port != config[
"sockets"]["result_push_socket"]:
close_socket(env.result_socket)
env.result_socket = None
if env.result_socket is None:
env.result_socket = create_socket(env.zmq_context, zmq.PUSH)
env.result_socket_port = config["sockets"]["result_push_socket"]
env.result_socket.connect(f'tcp://127.0.0.1:{env.result_socket_port}')
res = _execute_substep(
stmt=stmt,
global_def=global_def,
global_vars=global_vars,
task=task,
task_params=task_params,
proc_vars=proc_vars,
shared_vars=shared_vars,
config=config)
env.result_socket.send_pyobj(res) | Execute a substep with specific input etc
Substep executed by this function should be self-contained. It can contain
tasks (which will be sent to the master process) but not nested workflows.
The executor checks step signatures and might skip the substep if it has
been executed and the signature matches.
The executor accepts connections to the controller, and a socket using
which the results will be returned. However, the calling process should
take care of the connection and disconnection of controller sockets and
this function only takes care of the connection and disconnection of
result socket.
stmt:
Main statement of the substep
global_def:
Global definitions, might define functions useful to the substep
task:
External task
proc_vars:
Environmental variables, signature variables etc
shared_vars:
Variables that should be returned after the execution
config:
Runmode, signature mode, verbosity, etc.
The return value should be a dictionary with the following keys:
index: index of the substep within the step
ret_code: (all) return code, 0 for successful
sig_skipped: (optional) return if the step is skipped due to signature
shared: (optional) shared variable as specified by 'shared_vars'
stdout: (optional) if in interactive mode
stderr: (optional) if in interactive mode
exception: (optional) if an exception occures | Below is the the instruction that describes the task:
### Input:
Execute a substep with specific input etc
Substep executed by this function should be self-contained. It can contain
tasks (which will be sent to the master process) but not nested workflows.
The executor checks step signatures and might skip the substep if it has
been executed and the signature matches.
The executor accepts connections to the controller, and a socket using
which the results will be returned. However, the calling process should
take care of the connection and disconnection of controller sockets and
this function only takes care of the connection and disconnection of
result socket.
stmt:
Main statement of the substep
global_def:
Global definitions, might define functions useful to the substep
task:
External task
proc_vars:
Environmental variables, signature variables etc
shared_vars:
Variables that should be returned after the execution
config:
Runmode, signature mode, verbosity, etc.
The return value should be a dictionary with the following keys:
index: index of the substep within the step
ret_code: (all) return code, 0 for successful
sig_skipped: (optional) return if the step is skipped due to signature
shared: (optional) shared variable as specified by 'shared_vars'
stdout: (optional) if in interactive mode
stderr: (optional) if in interactive mode
exception: (optional) if an exception occures
### Response:
def execute_substep(stmt,
global_def,
global_vars,
task='',
task_params='',
proc_vars={},
shared_vars=[],
config={}):
'''Execute a substep with specific input etc
Substep executed by this function should be self-contained. It can contain
tasks (which will be sent to the master process) but not nested workflows.
The executor checks step signatures and might skip the substep if it has
been executed and the signature matches.
The executor accepts connections to the controller, and a socket using
which the results will be returned. However, the calling process should
take care of the connection and disconnection of controller sockets and
this function only takes care of the connection and disconnection of
result socket.
stmt:
Main statement of the substep
global_def:
Global definitions, might define functions useful to the substep
task:
External task
proc_vars:
Environmental variables, signature variables etc
shared_vars:
Variables that should be returned after the execution
config:
Runmode, signature mode, verbosity, etc.
The return value should be a dictionary with the following keys:
index: index of the substep within the step
ret_code: (all) return code, 0 for successful
sig_skipped: (optional) return if the step is skipped due to signature
shared: (optional) shared variable as specified by 'shared_vars'
stdout: (optional) if in interactive mode
stderr: (optional) if in interactive mode
exception: (optional) if an exception occures
'''
assert not env.zmq_context.closed
assert 'workflow_id' in proc_vars
assert 'step_id' in proc_vars
assert '_input' in proc_vars
assert '_output' in proc_vars
assert '_depends' in proc_vars
assert 'step_output' in proc_vars
assert '_index' in proc_vars
assert 'result_push_socket' in config["sockets"]
# this should not happen but check nevertheless
if env.result_socket_port is not None and env.result_socket_port != config[
"sockets"]["result_push_socket"]:
close_socket(env.result_socket)
env.result_socket = None
if env.result_socket is None:
env.result_socket = create_socket(env.zmq_context, zmq.PUSH)
env.result_socket_port = config["sockets"]["result_push_socket"]
env.result_socket.connect(f'tcp://127.0.0.1:{env.result_socket_port}')
res = _execute_substep(
stmt=stmt,
global_def=global_def,
global_vars=global_vars,
task=task,
task_params=task_params,
proc_vars=proc_vars,
shared_vars=shared_vars,
config=config)
env.result_socket.send_pyobj(res) |
def _handle_decoded_payload(self, data):
'''
Override this method if you wish to handle the decoded data
differently.
'''
# TODO: even do this??
data['to'] = int(data.get('to', self.opts['timeout'])) - 1
# Only forward the command if it didn't originate from ourselves
if data.get('master_id', 0) != self.opts.get('master_id', 1):
self.syndic_cmd(data) | Override this method if you wish to handle the decoded data
differently. | Below is the the instruction that describes the task:
### Input:
Override this method if you wish to handle the decoded data
differently.
### Response:
def _handle_decoded_payload(self, data):
'''
Override this method if you wish to handle the decoded data
differently.
'''
# TODO: even do this??
data['to'] = int(data.get('to', self.opts['timeout'])) - 1
# Only forward the command if it didn't originate from ourselves
if data.get('master_id', 0) != self.opts.get('master_id', 1):
self.syndic_cmd(data) |
def _post_resource(self, url, body):
"""
Canvas POST method.
"""
params = {}
self._set_as_user(params)
headers = {'Content-Type': 'application/json',
'Accept': 'application/json',
'Connection': 'keep-alive'}
url = url + self._params(params)
response = DAO.postURL(url, headers, json.dumps(body))
if not (response.status == 200 or response.status == 204):
raise DataFailureException(url, response.status, response.data)
return json.loads(response.data) | Canvas POST method. | Below is the the instruction that describes the task:
### Input:
Canvas POST method.
### Response:
def _post_resource(self, url, body):
"""
Canvas POST method.
"""
params = {}
self._set_as_user(params)
headers = {'Content-Type': 'application/json',
'Accept': 'application/json',
'Connection': 'keep-alive'}
url = url + self._params(params)
response = DAO.postURL(url, headers, json.dumps(body))
if not (response.status == 200 or response.status == 204):
raise DataFailureException(url, response.status, response.data)
return json.loads(response.data) |
def parse_substitutions(self, messages):
"""
Parse substitutions in a supplied message
:param messages: A tuple messages being parsed (normalized, case preserved, raw)
:type messages: tuple of (str, str, str)
:return: Substituted messages (normalized, case preserved, raw)
:rtype : tuple of (str, str, str)
"""
# If no substitutions have been defined, just normalize the message
if not self._substitutions:
self._log.info('No substitutions to process')
return messages
self._log.info('Processing message substitutions')
def substitute(sub_group, sub_message):
word, substitution = sub_group
return word.sub(substitution, sub_message)
normalized, preserve_case, raw = messages
for sub_normalized, sub_preserve_case, sub_raw in self._substitutions:
normalized = substitute(sub_normalized, normalized)
preserve_case = substitute(sub_preserve_case, preserve_case)
raw = substitute(sub_raw, raw)
return normalized, preserve_case, raw | Parse substitutions in a supplied message
:param messages: A tuple messages being parsed (normalized, case preserved, raw)
:type messages: tuple of (str, str, str)
:return: Substituted messages (normalized, case preserved, raw)
:rtype : tuple of (str, str, str) | Below is the the instruction that describes the task:
### Input:
Parse substitutions in a supplied message
:param messages: A tuple messages being parsed (normalized, case preserved, raw)
:type messages: tuple of (str, str, str)
:return: Substituted messages (normalized, case preserved, raw)
:rtype : tuple of (str, str, str)
### Response:
def parse_substitutions(self, messages):
"""
Parse substitutions in a supplied message
:param messages: A tuple messages being parsed (normalized, case preserved, raw)
:type messages: tuple of (str, str, str)
:return: Substituted messages (normalized, case preserved, raw)
:rtype : tuple of (str, str, str)
"""
# If no substitutions have been defined, just normalize the message
if not self._substitutions:
self._log.info('No substitutions to process')
return messages
self._log.info('Processing message substitutions')
def substitute(sub_group, sub_message):
word, substitution = sub_group
return word.sub(substitution, sub_message)
normalized, preserve_case, raw = messages
for sub_normalized, sub_preserve_case, sub_raw in self._substitutions:
normalized = substitute(sub_normalized, normalized)
preserve_case = substitute(sub_preserve_case, preserve_case)
raw = substitute(sub_raw, raw)
return normalized, preserve_case, raw |
def check( state_engine, nameop, block_id, checked_ops ):
"""
Revoke a name--make it available for registration.
* it must be well-formed
* its namespace must be ready.
* the name must be registered
* it must be sent by the name owner
NAME_REVOKE isn't allowed during an import, so the name's namespace must be ready.
Return True if accepted
Return False if not
"""
name = nameop['name']
sender = nameop['sender']
namespace_id = get_namespace_from_name( name )
# name must be well-formed
if not is_b40( name ) or "+" in name or name.count(".") > 1:
log.warning("Malformed name '%s': non-base-38 characters" % name)
return False
# name must exist
name_rec = state_engine.get_name( name )
if name_rec is None:
log.warning("Name '%s' does not exist" % name)
return False
# namespace must be ready
if not state_engine.is_namespace_ready( namespace_id ):
log.warning("Namespace '%s' is not ready" % namespace_id )
return False
# name must not be revoked
if state_engine.is_name_revoked( name ):
log.warning("Name '%s' is revoked" % name)
return False
# name must not be expired as of *this* block
if state_engine.is_name_expired( name, block_id ):
log.warning("Name '%s' is expired" % name)
return False
# name must not be in grace period in this block
if state_engine.is_name_in_grace_period(name, block_id):
log.warning("Name '{}' is in the renewal grace period. It can only be renewed at this time.".format(name))
return False
# the name must be registered
if not state_engine.is_name_registered( name ):
log.warning("Name '%s' is not registered" % name )
return False
# the sender must own this name
if not state_engine.is_name_owner( name, sender ):
log.warning("Name '%s' is not owned by %s" % (name, sender))
return False
# apply state transition
nameop['revoked'] = True
nameop['value_hash'] = None
return True | Revoke a name--make it available for registration.
* it must be well-formed
* its namespace must be ready.
* the name must be registered
* it must be sent by the name owner
NAME_REVOKE isn't allowed during an import, so the name's namespace must be ready.
Return True if accepted
Return False if not | Below is the the instruction that describes the task:
### Input:
Revoke a name--make it available for registration.
* it must be well-formed
* its namespace must be ready.
* the name must be registered
* it must be sent by the name owner
NAME_REVOKE isn't allowed during an import, so the name's namespace must be ready.
Return True if accepted
Return False if not
### Response:
def check( state_engine, nameop, block_id, checked_ops ):
    """
    Revoke a name--make it available for registration.
    * it must be well-formed
    * its namespace must be ready.
    * the name must be registered
    * it must be sent by the name owner
    NAME_REVOKE isn't allowed during an import, so the name's namespace must be ready.
    Return True if accepted
    Return False if not
    """
    name = nameop['name']
    sender = nameop['sender']
    namespace_id = get_namespace_from_name( name )
    # name must be well-formed
    # (b40 alphabet, no '+', and at most one '.' separating name from namespace)
    if not is_b40( name ) or "+" in name or name.count(".") > 1:
        log.warning("Malformed name '%s': non-base-38 characters" % name)
        return False
    # name must exist
    name_rec = state_engine.get_name( name )
    if name_rec is None:
        log.warning("Name '%s' does not exist" % name)
        return False
    # namespace must be ready
    if not state_engine.is_namespace_ready( namespace_id ):
        log.warning("Namespace '%s' is not ready" % namespace_id )
        return False
    # name must not be revoked
    if state_engine.is_name_revoked( name ):
        log.warning("Name '%s' is revoked" % name)
        return False
    # name must not be expired as of *this* block
    if state_engine.is_name_expired( name, block_id ):
        log.warning("Name '%s' is expired" % name)
        return False
    # name must not be in grace period in this block
    if state_engine.is_name_in_grace_period(name, block_id):
        log.warning("Name '{}' is in the renewal grace period. It can only be renewed at this time.".format(name))
        return False
    # the name must be registered
    if not state_engine.is_name_registered( name ):
        log.warning("Name '%s' is not registered" % name )
        return False
    # the sender must own this name
    if not state_engine.is_name_owner( name, sender ):
        log.warning("Name '%s' is not owned by %s" % (name, sender))
        return False
    # apply state transition
    # (nameop is mutated in place: mark it revoked and drop its value hash)
    nameop['revoked'] = True
    nameop['value_hash'] = None
    return True
def __cqt_response(y, n_fft, hop_length, fft_basis, mode):
'''Compute the filter response with a target STFT hop.'''
# Compute the STFT matrix
D = stft(y, n_fft=n_fft, hop_length=hop_length,
window='ones',
pad_mode=mode)
# And filter response energy
return fft_basis.dot(D) | Compute the filter response with a target STFT hop. | Below is the the instruction that describes the task:
### Input:
Compute the filter response with a target STFT hop.
### Response:
def __cqt_response(y, n_fft, hop_length, fft_basis, mode):
    '''Project the STFT of `y` onto the CQT filter basis.

    Computes an STFT with the requested FFT size and hop (flat window,
    given padding mode), then measures each filter's response energy
    via a matrix product with the basis.
    '''
    spectrogram = stft(y, n_fft=n_fft, hop_length=hop_length,
                       window='ones', pad_mode=mode)
    return fft_basis.dot(spectrogram)
def covar_rescaling_factor_efficient(C):
"""
Returns the rescaling factor for the Gower normalizion on covariance matrix C
the rescaled covariance matrix has sample variance of 1
"""
n = C.shape[0]
P = sp.eye(n) - sp.ones((n,n))/float(n)
CP = C - C.mean(0)[:, sp.newaxis]
trPCP = sp.sum(P * CP)
r = (n-1) / trPCP
return r | Returns the rescaling factor for the Gower normalizion on covariance matrix C
the rescaled covariance matrix has sample variance of 1 | Below is the the instruction that describes the task:
### Input:
Returns the rescaling factor for the Gower normalization on covariance matrix C
the rescaled covariance matrix has sample variance of 1
### Response:
def covar_rescaling_factor_efficient(C):
    """
    Return the Gower rescaling factor for covariance matrix ``C``.

    Scaling ``C`` by this factor yields a covariance matrix whose
    sample variance equals 1.
    """
    n = C.shape[0]
    # Centering projection: I - 11'/n
    center = sp.eye(n) - sp.ones((n, n)) / float(n)
    # Column-mean-centered covariance (mean over axis 0, broadcast down rows)
    col_centered = C - C.mean(0)[:, sp.newaxis]
    # trace(P C P) computed as an elementwise product-sum
    trace_pcp = sp.sum(center * col_centered)
    return (n - 1) / trace_pcp
def make_sqla_column_compatible(self, sqla_col, label=None):
"""Takes a sql alchemy column object and adds label info if supported by engine.
:param sqla_col: sql alchemy column instance
:param label: alias/label that column is expected to have
:return: either a sql alchemy column or label instance if supported by engine
"""
label_expected = label or sqla_col.name
db_engine_spec = self.database.db_engine_spec
if db_engine_spec.supports_column_aliases:
label = db_engine_spec.make_label_compatible(label_expected)
sqla_col = sqla_col.label(label)
sqla_col._df_label_expected = label_expected
return sqla_col | Takes a sql alchemy column object and adds label info if supported by engine.
:param sqla_col: sql alchemy column instance
:param label: alias/label that column is expected to have
:return: either a sql alchemy column or label instance if supported by engine | Below is the the instruction that describes the task:
### Input:
Takes a sql alchemy column object and adds label info if supported by engine.
:param sqla_col: sql alchemy column instance
:param label: alias/label that column is expected to have
:return: either a sql alchemy column or label instance if supported by engine
### Response:
def make_sqla_column_compatible(self, sqla_col, label=None):
    """Attach a label to a SQLAlchemy column when the engine supports aliases.

    :param sqla_col: sql alchemy column instance
    :param label: alias/label that column is expected to have
    :return: either a sql alchemy column or label instance if supported by engine
    """
    expected = label if label else sqla_col.name
    engine_spec = self.database.db_engine_spec
    if engine_spec.supports_column_aliases:
        sqla_col = sqla_col.label(engine_spec.make_label_compatible(expected))
    # Remember the label the resulting dataframe column should carry.
    sqla_col._df_label_expected = expected
    return sqla_col
def validate_otp(hsm, from_key):
"""
Try to validate an OTP from a YubiKey using the internal database
on the YubiHSM.
`from_key' is the modhex encoded string emitted when you press the
button on your YubiKey.
Will only return on succesfull validation. All failures will result
in an L{pyhsm.exception.YHSM_CommandFailed}.
@param hsm: The YHSM instance
@param from_key: The OTP from a YubiKey (in modhex)
@type hsm: L{pyhsm.YHSM}
@type from_key: string
@returns: validation response, if successful
@rtype: L{YHSM_ValidationResult}
@see: L{pyhsm.db_cmd.YHSM_Cmd_DB_Validate_OTP.parse_result}
"""
public_id, otp = split_id_otp(from_key)
return hsm.db_validate_yubikey_otp(modhex_decode(public_id).decode('hex'),
modhex_decode(otp).decode('hex')
) | Try to validate an OTP from a YubiKey using the internal database
on the YubiHSM.
`from_key' is the modhex encoded string emitted when you press the
button on your YubiKey.
Will only return on succesfull validation. All failures will result
in an L{pyhsm.exception.YHSM_CommandFailed}.
@param hsm: The YHSM instance
@param from_key: The OTP from a YubiKey (in modhex)
@type hsm: L{pyhsm.YHSM}
@type from_key: string
@returns: validation response, if successful
@rtype: L{YHSM_ValidationResult}
@see: L{pyhsm.db_cmd.YHSM_Cmd_DB_Validate_OTP.parse_result} | Below is the the instruction that describes the task:
### Input:
Try to validate an OTP from a YubiKey using the internal database
on the YubiHSM.
`from_key' is the modhex encoded string emitted when you press the
button on your YubiKey.
Will only return on successful validation. All failures will result
in an L{pyhsm.exception.YHSM_CommandFailed}.
@param hsm: The YHSM instance
@param from_key: The OTP from a YubiKey (in modhex)
@type hsm: L{pyhsm.YHSM}
@type from_key: string
@returns: validation response, if successful
@rtype: L{YHSM_ValidationResult}
@see: L{pyhsm.db_cmd.YHSM_Cmd_DB_Validate_OTP.parse_result}
### Response:
def validate_otp(hsm, from_key):
    """
    Validate a YubiKey OTP against the internal database on the YubiHSM.

    `from_key' is the modhex encoded string emitted when you press the
    button on your YubiKey.

    Returns only on successful validation; every failure raises
    L{pyhsm.exception.YHSM_CommandFailed}.

    @param hsm: The YHSM instance
    @param from_key: The OTP from a YubiKey (in modhex)
    @type hsm: L{pyhsm.YHSM}
    @type from_key: string
    @returns: validation response, if successful
    @rtype: L{YHSM_ValidationResult}
    @see: L{pyhsm.db_cmd.YHSM_Cmd_DB_Validate_OTP.parse_result}
    """
    prefix, token = split_id_otp(from_key)
    # Convert modhex to raw bytes before handing off to the HSM.
    # NOTE(review): str.decode('hex') is Python 2 only, as in the original.
    public_id_bytes = modhex_decode(prefix).decode('hex')
    otp_bytes = modhex_decode(token).decode('hex')
    return hsm.db_validate_yubikey_otp(public_id_bytes, otp_bytes)
def DbPutClassProperty(self, argin):
""" Create / Update class property(ies)
:param argin: Str[0] = Tango class name
Str[1] = Property number
Str[2] = Property name
Str[3] = Property value number
Str[4] = Property value 1
Str[n] = Property value n
....
:type: tango.DevVarStringArray
:return:
:rtype: tango.DevVoid """
self._log.debug("In DbPutClassProperty()")
class_name = argin[0]
nb_properties = int(argin[1])
self.db.put_class_property(class_name, nb_properties, argin[2:]) | Create / Update class property(ies)
:param argin: Str[0] = Tango class name
Str[1] = Property number
Str[2] = Property name
Str[3] = Property value number
Str[4] = Property value 1
Str[n] = Property value n
....
:type: tango.DevVarStringArray
:return:
:rtype: tango.DevVoid | Below is the the instruction that describes the task:
### Input:
Create / Update class property(ies)
:param argin: Str[0] = Tango class name
Str[1] = Property number
Str[2] = Property name
Str[3] = Property value number
Str[4] = Property value 1
Str[n] = Property value n
....
:type: tango.DevVarStringArray
:return:
:rtype: tango.DevVoid
### Response:
def DbPutClassProperty(self, argin):
    """ Create / Update class property(ies)

    :param argin: Str[0] = Tango class name
                  Str[1] = Property number
                  Str[2] = Property name
                  Str[3] = Property value number
                  Str[4] = Property value 1
                  Str[n] = Property value n
                  ....
    :type: tango.DevVarStringArray
    :return:
    :rtype: tango.DevVoid """
    self._log.debug("In DbPutClassProperty()")
    cls_name = argin[0]
    prop_count = int(argin[1])
    # Everything past the first two entries is the property name/value data.
    self.db.put_class_property(cls_name, prop_count, argin[2:])
def bind_bottom_up(lower, upper, __fval=None, **fval):
"""Bind 2 layers for dissection.
The upper layer will be chosen for dissection on top of the lower layer, if
ALL the passed arguments are validated. If multiple calls are made with the same # noqa: E501
layers, the last one will be used as default.
ex:
>>> bind_bottom_up(Ether, SNAP, type=0x1234)
>>> Ether(b'\xff\xff\xff\xff\xff\xff\xd0P\x99V\xdd\xf9\x124\x00\x00\x00\x00\x00') # noqa: E501
<Ether dst=ff:ff:ff:ff:ff:ff src=d0:50:99:56:dd:f9 type=0x1234 |<SNAP OUI=0x0 code=0x0 |>> # noqa: E501
"""
if __fval is not None:
fval.update(__fval)
lower.payload_guess = lower.payload_guess[:]
lower.payload_guess.append((fval, upper)) | Bind 2 layers for dissection.
The upper layer will be chosen for dissection on top of the lower layer, if
ALL the passed arguments are validated. If multiple calls are made with the same # noqa: E501
layers, the last one will be used as default.
ex:
>>> bind_bottom_up(Ether, SNAP, type=0x1234)
>>> Ether(b'\xff\xff\xff\xff\xff\xff\xd0P\x99V\xdd\xf9\x124\x00\x00\x00\x00\x00') # noqa: E501
<Ether dst=ff:ff:ff:ff:ff:ff src=d0:50:99:56:dd:f9 type=0x1234 |<SNAP OUI=0x0 code=0x0 |>> # noqa: E501 | Below is the the instruction that describes the task:
### Input:
Bind 2 layers for dissection.
The upper layer will be chosen for dissection on top of the lower layer, if
ALL the passed arguments are validated. If multiple calls are made with the same # noqa: E501
layers, the last one will be used as default.
ex:
>>> bind_bottom_up(Ether, SNAP, type=0x1234)
>>> Ether(b'\xff\xff\xff\xff\xff\xff\xd0P\x99V\xdd\xf9\x124\x00\x00\x00\x00\x00') # noqa: E501
<Ether dst=ff:ff:ff:ff:ff:ff src=d0:50:99:56:dd:f9 type=0x1234 |<SNAP OUI=0x0 code=0x0 |>> # noqa: E501
### Response:
def bind_bottom_up(lower, upper, __fval=None, **fval):
    """Bind 2 layers for dissection.

    Registers `upper` as a dissection candidate on top of `lower`: the
    upper layer is chosen when ALL of the given field values match.
    If multiple calls are made with the same layers, the last binding
    registered wins as the default.

    :param lower: lower-layer class whose payload_guess list is extended
    :param upper: upper-layer class to dissect when the fields match
    :param __fval: optional dict of field conditions (merged into **fval)
    """
    if __fval is not None:
        fval.update(__fval)
    # Rebind a fresh copy so we never mutate a list possibly shared with
    # (inherited from) another class.
    guesses = list(lower.payload_guess)
    guesses.append((fval, upper))
    lower.payload_guess = guesses
def array_size(x, axis):
"""Calculate the size of `x` along `axis` dimensions only."""
axis_shape = x.shape if axis is None else tuple(x.shape[a] for a in axis)
return max(numpy.prod(axis_shape), 1) | Calculate the size of `x` along `axis` dimensions only. | Below is the the instruction that describes the task:
### Input:
Calculate the size of `x` along `axis` dimensions only.
### Response:
def array_size(x, axis):
    """Calculate the size of `x` along `axis` dimensions only.

    ``axis`` may be None (use the whole shape) or an iterable of axis
    indices.  The result is never smaller than 1, so an empty axis
    selection yields 1.
    """
    if axis is None:
        selected = x.shape
    else:
        selected = tuple(x.shape[a] for a in axis)
    return max(numpy.prod(selected), 1)
def make_predicate_object_combinator(function, p, o):
""" Combinator to hold predicate object pairs until a subject is supplied and then
call a function that accepts a subject, predicate, and object.
Create a combinator to defer production of a triple until the missing pieces are supplied.
Note that the naming here tells you what is stored IN the combinator. The argument to the
combinator is the piece that is missing. """
def predicate_object_combinator(subject):
return function(subject, p, o)
return predicate_object_combinator | Combinator to hold predicate object pairs until a subject is supplied and then
call a function that accepts a subject, predicate, and object.
Create a combinator to defer production of a triple until the missing pieces are supplied.
Note that the naming here tells you what is stored IN the combinator. The argument to the
combinator is the piece that is missing. | Below is the the instruction that describes the task:
### Input:
Combinator to hold predicate object pairs until a subject is supplied and then
call a function that accepts a subject, predicate, and object.
Create a combinator to defer production of a triple until the missing pieces are supplied.
Note that the naming here tells you what is stored IN the combinator. The argument to the
combinator is the piece that is missing.
### Response:
def make_predicate_object_combinator(function, p, o):
    """ Build a combinator that stores a predicate/object pair and defers
    triple production until the missing subject is supplied; it then calls
    function(subject, p, o).

    The name tells you what is stored IN the combinator; the argument to
    the combinator is the piece that is missing. """
    return lambda subject: function(subject, p, o)
def convert_unicode_character_to_ascii_repr(match_obj):
"""
Converts a matched pattern from a unicode character to an ASCII representation
For example the emoji 🍆 would get converted to the literal <U+01F346>
"""
match = match_obj.group(0)
code_point = ord(match)
hex_repr = hex(code_point)
hex_code_point = hex_repr[2:]
hex_value = hex_code_point.zfill(6).upper()
return '<U+{}>'.format(hex_value) | Converts a matched pattern from a unicode character to an ASCII representation
For example the emoji 🍆 would get converted to the literal <U+01F346> | Below is the the instruction that describes the task:
### Input:
Converts a matched pattern from a unicode character to an ASCII representation
For example the emoji 🍆 would get converted to the literal <U+01F346>
### Response:
def convert_unicode_character_to_ascii_repr(match_obj):
    """
    Convert a matched unicode character to an ASCII code-point literal.

    For example the emoji at U+1F346 becomes the literal <U+01F346>.
    The code point is rendered as six upper-case, zero-padded hex digits.
    """
    ch = match_obj.group(0)
    return '<U+{:06X}>'.format(ord(ch))
def attributes_js(cls, attributes):
"""
Generates JS code to look up attributes on JS objects from
an attributes specification dictionary. If the specification
references a plotting particular plotting handle it will also
generate JS code to get the ID of the object.
Simple example (when referencing cb_data or cb_obj):
Input : {'x': 'cb_data.geometry.x'}
Output : data['x'] = cb_data['geometry']['x']
Example referencing plot handle:
Input : {'x0': 'x_range.attributes.start'}
Output : if ((x_range !== undefined)) {
data['x0'] = {id: x_range['id'], value: x_range['attributes']['start']}
}
"""
assign_template = '{assign}{{id: {obj_name}["id"], value: {obj_name}{attr_getters}}};\n'
conditional_template = 'if (({obj_name} != undefined)) {{ {assign} }}'
code = ''
for key, attr_path in sorted(attributes.items()):
data_assign = 'data["{key}"] = '.format(key=key)
attrs = attr_path.split('.')
obj_name = attrs[0]
attr_getters = ''.join(['["{attr}"]'.format(attr=attr)
for attr in attrs[1:]])
if obj_name not in ['cb_obj', 'cb_data']:
assign_str = assign_template.format(
assign=data_assign, obj_name=obj_name, attr_getters=attr_getters
)
code += conditional_template.format(
obj_name=obj_name, assign=assign_str
)
else:
assign_str = ''.join([data_assign, obj_name, attr_getters, ';\n'])
code += assign_str
return code | Generates JS code to look up attributes on JS objects from
an attributes specification dictionary. If the specification
references a plotting particular plotting handle it will also
generate JS code to get the ID of the object.
Simple example (when referencing cb_data or cb_obj):
Input : {'x': 'cb_data.geometry.x'}
Output : data['x'] = cb_data['geometry']['x']
Example referencing plot handle:
Input : {'x0': 'x_range.attributes.start'}
Output : if ((x_range !== undefined)) {
data['x0'] = {id: x_range['id'], value: x_range['attributes']['start']}
} | Below is the the instruction that describes the task:
### Input:
Generates JS code to look up attributes on JS objects from
an attributes specification dictionary. If the specification
references a plotting particular plotting handle it will also
generate JS code to get the ID of the object.
Simple example (when referencing cb_data or cb_obj):
Input : {'x': 'cb_data.geometry.x'}
Output : data['x'] = cb_data['geometry']['x']
Example referencing plot handle:
Input : {'x0': 'x_range.attributes.start'}
Output : if ((x_range !== undefined)) {
data['x0'] = {id: x_range['id'], value: x_range['attributes']['start']}
}
### Response:
def attributes_js(cls, attributes):
    """
    Generate JS code that copies attributes of JS objects into a
    ``data`` object, following an attribute-specification dictionary.

    Paths rooted at ``cb_obj``/``cb_data`` are read directly:
        Input  : {'x': 'cb_data.geometry.x'}
        Output : data['x'] = cb_data['geometry']['x']

    Paths rooted at a plot handle are wrapped in an existence guard and
    also record the handle's ID:
        Input  : {'x0': 'x_range.attributes.start'}
        Output : if ((x_range != undefined)) {
                   data['x0'] = {id: x_range['id'], value: x_range['attributes']['start']}
                 }
    """
    assign_template = '{assign}{{id: {obj_name}["id"], value: {obj_name}{attr_getters}}};\n'
    conditional_template = 'if (({obj_name} != undefined)) {{ {assign} }}'
    snippets = []
    for key, attr_path in sorted(attributes.items()):
        segments = attr_path.split('.')
        root = segments[0]
        getters = ''.join('["{attr}"]'.format(attr=segment)
                          for segment in segments[1:])
        target = 'data["{key}"] = '.format(key=key)
        if root in ('cb_obj', 'cb_data'):
            # Callback objects are always defined; read them directly.
            snippets.append(target + root + getters + ';\n')
        else:
            # Plot handles may be undefined, so guard the lookup.
            assignment = assign_template.format(
                assign=target, obj_name=root, attr_getters=getters)
            snippets.append(conditional_template.format(
                obj_name=root, assign=assignment))
    return ''.join(snippets)
def notCalled(cls, spy): #pylint: disable=invalid-name
"""
Checking the inspector is not called
Args: SinonSpy
"""
cls.__is_spy(spy)
if not (not spy.called):
raise cls.failException(cls.message) | Checking the inspector is not called
Args: SinonSpy | Below is the the instruction that describes the task:
### Input:
Checking the inspector is not called
Args: SinonSpy
### Response:
def notCalled(cls, spy): #pylint: disable=invalid-name
    """
    Assert that the given inspector was never called.
    Args: SinonSpy
    """
    cls.__is_spy(spy)
    # Double negation in the original (`not (not spy.called)`) reduces to
    # plain truthiness of `spy.called`.
    if spy.called:
        raise cls.failException(cls.message)
def get_single_conversation(self, id, auto_mark_as_read=None, filter=None, filter_mode=None, interleave_submissions=None, scope=None):
"""
Get a single conversation.
Returns information for a single conversation for the current user. Response includes all
fields that are present in the list/index action as well as messages
and extended participant information.
"""
path = {}
data = {}
params = {}
# REQUIRED - PATH - id
"""ID"""
path["id"] = id
# OPTIONAL - interleave_submissions
"""(Obsolete) Submissions are no
longer linked to conversations. This parameter is ignored."""
if interleave_submissions is not None:
params["interleave_submissions"] = interleave_submissions
# OPTIONAL - scope
"""Used when generating "visible" in the API response. See the explanation
under the {api:ConversationsController#index index API action}"""
if scope is not None:
self._validate_enum(scope, ["unread", "starred", "archived"])
params["scope"] = scope
# OPTIONAL - filter
"""Used when generating "visible" in the API response. See the explanation
under the {api:ConversationsController#index index API action}"""
if filter is not None:
params["filter"] = filter
# OPTIONAL - filter_mode
"""Used when generating "visible" in the API response. See the explanation
under the {api:ConversationsController#index index API action}"""
if filter_mode is not None:
self._validate_enum(filter_mode, ["and", "or", "default or"])
params["filter_mode"] = filter_mode
# OPTIONAL - auto_mark_as_read
"""Default true. If true, unread
conversations will be automatically marked as read. This will default
to false in a future API release, so clients should explicitly send
true if that is the desired behavior."""
if auto_mark_as_read is not None:
params["auto_mark_as_read"] = auto_mark_as_read
self.logger.debug("GET /api/v1/conversations/{id} with query params: {params} and form data: {data}".format(params=params, data=data, **path))
return self.generic_request("GET", "/api/v1/conversations/{id}".format(**path), data=data, params=params, no_data=True) | Get a single conversation.
Returns information for a single conversation for the current user. Response includes all
fields that are present in the list/index action as well as messages
and extended participant information. | Below is the the instruction that describes the task:
### Input:
Get a single conversation.
Returns information for a single conversation for the current user. Response includes all
fields that are present in the list/index action as well as messages
and extended participant information.
### Response:
def get_single_conversation(self, id, auto_mark_as_read=None, filter=None, filter_mode=None, interleave_submissions=None, scope=None):
    """
    Get a single conversation.
    Returns information for a single conversation for the current user. Response includes all
    fields that are present in the list/index action as well as messages
    and extended participant information.
    """
    # NOTE(review): `id` and `filter` shadow builtins, presumably to mirror
    # the HTTP parameter names of the upstream API.
    path = {}
    data = {}
    params = {}
    # REQUIRED - PATH - id
    """ID"""
    path["id"] = id
    # OPTIONAL - interleave_submissions
    """(Obsolete) Submissions are no
    longer linked to conversations. This parameter is ignored."""
    if interleave_submissions is not None:
        params["interleave_submissions"] = interleave_submissions
    # OPTIONAL - scope
    """Used when generating "visible" in the API response. See the explanation
    under the {api:ConversationsController#index index API action}"""
    if scope is not None:
        self._validate_enum(scope, ["unread", "starred", "archived"])
        params["scope"] = scope
    # OPTIONAL - filter
    """Used when generating "visible" in the API response. See the explanation
    under the {api:ConversationsController#index index API action}"""
    if filter is not None:
        params["filter"] = filter
    # OPTIONAL - filter_mode
    """Used when generating "visible" in the API response. See the explanation
    under the {api:ConversationsController#index index API action}"""
    if filter_mode is not None:
        self._validate_enum(filter_mode, ["and", "or", "default or"])
        params["filter_mode"] = filter_mode
    # OPTIONAL - auto_mark_as_read
    """Default true. If true, unread
    conversations will be automatically marked as read. This will default
    to false in a future API release, so clients should explicitly send
    true if that is the desired behavior."""
    if auto_mark_as_read is not None:
        params["auto_mark_as_read"] = auto_mark_as_read
    self.logger.debug("GET /api/v1/conversations/{id} with query params: {params} and form data: {data}".format(params=params, data=data, **path))
    return self.generic_request("GET", "/api/v1/conversations/{id}".format(**path), data=data, params=params, no_data=True)
def pif_multi_search(self, multi_query):
"""
Run each in a list of PIF queries against Citrination.
:param multi_query: :class:`MultiQuery` object to execute.
:return: :class:`PifMultiSearchResult` object with the results of the query.
"""
failure_message = "Error while making PIF multi search request"
response_dict = self._get_success_json(
self._post(routes.pif_multi_search, data=json.dumps(multi_query, cls=QueryEncoder),
failure_message=failure_message))
return PifMultiSearchResult(**keys_to_snake_case(response_dict['results'])) | Run each in a list of PIF queries against Citrination.
:param multi_query: :class:`MultiQuery` object to execute.
:return: :class:`PifMultiSearchResult` object with the results of the query. | Below is the the instruction that describes the task:
### Input:
Run each in a list of PIF queries against Citrination.
:param multi_query: :class:`MultiQuery` object to execute.
:return: :class:`PifMultiSearchResult` object with the results of the query.
### Response:
def pif_multi_search(self, multi_query):
    """
    Run each of a list of PIF queries against Citrination.

    :param multi_query: :class:`MultiQuery` object to execute.
    :return: :class:`PifMultiSearchResult` object with the results of the query.
    """
    payload = json.dumps(multi_query, cls=QueryEncoder)
    raw_response = self._post(
        routes.pif_multi_search, data=payload,
        failure_message="Error while making PIF multi search request")
    response_dict = self._get_success_json(raw_response)
    return PifMultiSearchResult(**keys_to_snake_case(response_dict['results']))
def truncate_file(f):
"""
Clear uploaded file and allow write to it.
Only for not too big files!!!
Also can clear simple opened file.
Examples:
truncate_file(request.FILES['file'])
with open('/tmp/file', 'rb+') as f:
truncate_file(f)
"""
if isinstance(f, InMemoryUploadedFile):
f.file = StringIO()
else:
f.seek(0)
f.truncate(0) | Clear uploaded file and allow write to it.
Only for not too big files!!!
Also can clear simple opened file.
Examples:
truncate_file(request.FILES['file'])
with open('/tmp/file', 'rb+') as f:
truncate_file(f) | Below is the the instruction that describes the task:
### Input:
Clear uploaded file and allow write to it.
Only for not too big files!!!
Also can clear simple opened file.
Examples:
truncate_file(request.FILES['file'])
with open('/tmp/file', 'rb+') as f:
truncate_file(f)
### Response:
def truncate_file(f):
    """
    Clear an uploaded file and allow writing to it again.

    Only for not too big files!!!
    Also works on a plain opened file object.

    Examples:
        truncate_file(request.FILES['file'])
        with open('/tmp/file', 'rb+') as f:
            truncate_file(f)
    """
    if not isinstance(f, InMemoryUploadedFile):
        # Plain file object: rewind, then drop everything after position 0.
        f.seek(0)
        f.truncate(0)
    else:
        # In-memory upload: swap in a brand-new empty buffer.
        f.file = StringIO()
def get_vnetwork_portgroups_output_vnetwork_pgs_datacenter(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
get_vnetwork_portgroups = ET.Element("get_vnetwork_portgroups")
config = get_vnetwork_portgroups
output = ET.SubElement(get_vnetwork_portgroups, "output")
vnetwork_pgs = ET.SubElement(output, "vnetwork-pgs")
datacenter = ET.SubElement(vnetwork_pgs, "datacenter")
datacenter.text = kwargs.pop('datacenter')
callback = kwargs.pop('callback', self._callback)
return callback(config) | Auto Generated Code | Below is the the instruction that describes the task:
### Input:
Auto Generated Code
### Response:
def get_vnetwork_portgroups_output_vnetwork_pgs_datacenter(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
get_vnetwork_portgroups = ET.Element("get_vnetwork_portgroups")
config = get_vnetwork_portgroups
output = ET.SubElement(get_vnetwork_portgroups, "output")
vnetwork_pgs = ET.SubElement(output, "vnetwork-pgs")
datacenter = ET.SubElement(vnetwork_pgs, "datacenter")
datacenter.text = kwargs.pop('datacenter')
callback = kwargs.pop('callback', self._callback)
return callback(config) |
def convolutional_barycenter2d(A, reg, weights=None, numItermax=10000, stopThr=1e-9, stabThr=1e-30, verbose=False, log=False):
"""Compute the entropic regularized wasserstein barycenter of distributions A
where A is a collection of 2D images.
The function solves the following optimization problem:
.. math::
\mathbf{a} = arg\min_\mathbf{a} \sum_i W_{reg}(\mathbf{a},\mathbf{a}_i)
where :
- :math:`W_{reg}(\cdot,\cdot)` is the entropic regularized Wasserstein distance (see ot.bregman.sinkhorn)
- :math:`\mathbf{a}_i` are training distributions (2D images) in the mast two dimensions of matrix :math:`\mathbf{A}`
- reg is the regularization strength scalar value
The algorithm used for solving the problem is the Sinkhorn-Knopp matrix scaling algorithm as proposed in [21]_
Parameters
----------
A : np.ndarray (n,w,h)
n distributions (2D images) of size w x h
reg : float
Regularization term >0
weights : np.ndarray (n,)
Weights of each image on the simplex (barycentric coodinates)
numItermax : int, optional
Max number of iterations
stopThr : float, optional
Stop threshol on error (>0)
stabThr : float, optional
Stabilization threshold to avoid numerical precision issue
verbose : bool, optional
Print information along iterations
log : bool, optional
record log if True
Returns
-------
a : (w,h) ndarray
2D Wasserstein barycenter
log : dict
log dictionary return only if log==True in parameters
References
----------
.. [21] Solomon, J., De Goes, F., Peyré, G., Cuturi, M., Butscher, A., Nguyen, A. & Guibas, L. (2015).
Convolutional wasserstein distances: Efficient optimal transportation on geometric domains
ACM Transactions on Graphics (TOG), 34(4), 66
"""
if weights is None:
weights = np.ones(A.shape[0]) / A.shape[0]
else:
assert(len(weights) == A.shape[0])
if log:
log = {'err': []}
b = np.zeros_like(A[0, :, :])
U = np.ones_like(A)
KV = np.ones_like(A)
cpt = 0
err = 1
# build the convolution operator
t = np.linspace(0, 1, A.shape[1])
[Y, X] = np.meshgrid(t, t)
xi1 = np.exp(-(X - Y)**2 / reg)
def K(x):
return np.dot(np.dot(xi1, x), xi1)
while (err > stopThr and cpt < numItermax):
bold = b
cpt = cpt + 1
b = np.zeros_like(A[0, :, :])
for r in range(A.shape[0]):
KV[r, :, :] = K(A[r, :, :] / np.maximum(stabThr, K(U[r, :, :])))
b += weights[r] * np.log(np.maximum(stabThr, U[r, :, :] * KV[r, :, :]))
b = np.exp(b)
for r in range(A.shape[0]):
U[r, :, :] = b / np.maximum(stabThr, KV[r, :, :])
if cpt % 10 == 1:
err = np.sum(np.abs(bold - b))
# log and verbose print
if log:
log['err'].append(err)
if verbose:
if cpt % 200 == 0:
print('{:5s}|{:12s}'.format('It.', 'Err') + '\n' + '-' * 19)
print('{:5d}|{:8e}|'.format(cpt, err))
if log:
log['niter'] = cpt
log['U'] = U
return b, log
else:
return b | Compute the entropic regularized wasserstein barycenter of distributions A
where A is a collection of 2D images.
The function solves the following optimization problem:
.. math::
\mathbf{a} = arg\min_\mathbf{a} \sum_i W_{reg}(\mathbf{a},\mathbf{a}_i)
where :
- :math:`W_{reg}(\cdot,\cdot)` is the entropic regularized Wasserstein distance (see ot.bregman.sinkhorn)
    - :math:`\mathbf{a}_i` are training distributions (2D images) in the last two dimensions of matrix :math:`\mathbf{A}`
- reg is the regularization strength scalar value
The algorithm used for solving the problem is the Sinkhorn-Knopp matrix scaling algorithm as proposed in [21]_
Parameters
----------
A : np.ndarray (n,w,h)
n distributions (2D images) of size w x h
reg : float
Regularization term >0
weights : np.ndarray (n,)
        Weights of each image on the simplex (barycentric coordinates)
numItermax : int, optional
Max number of iterations
stopThr : float, optional
        Stop threshold on error (>0)
stabThr : float, optional
Stabilization threshold to avoid numerical precision issue
verbose : bool, optional
Print information along iterations
log : bool, optional
record log if True
Returns
-------
a : (w,h) ndarray
2D Wasserstein barycenter
log : dict
log dictionary return only if log==True in parameters
References
----------
.. [21] Solomon, J., De Goes, F., Peyré, G., Cuturi, M., Butscher, A., Nguyen, A. & Guibas, L. (2015).
Convolutional wasserstein distances: Efficient optimal transportation on geometric domains
ACM Transactions on Graphics (TOG), 34(4), 66 | Below is the the instruction that describes the task:
### Input:
Compute the entropic regularized wasserstein barycenter of distributions A
where A is a collection of 2D images.
The function solves the following optimization problem:
.. math::
\mathbf{a} = arg\min_\mathbf{a} \sum_i W_{reg}(\mathbf{a},\mathbf{a}_i)
where :
- :math:`W_{reg}(\cdot,\cdot)` is the entropic regularized Wasserstein distance (see ot.bregman.sinkhorn)
    - :math:`\mathbf{a}_i` are training distributions (2D images) in the last two dimensions of matrix :math:`\mathbf{A}`
- reg is the regularization strength scalar value
The algorithm used for solving the problem is the Sinkhorn-Knopp matrix scaling algorithm as proposed in [21]_
Parameters
----------
A : np.ndarray (n,w,h)
n distributions (2D images) of size w x h
reg : float
Regularization term >0
weights : np.ndarray (n,)
        Weights of each image on the simplex (barycentric coordinates)
numItermax : int, optional
Max number of iterations
stopThr : float, optional
        Stop threshold on error (>0)
stabThr : float, optional
Stabilization threshold to avoid numerical precision issue
verbose : bool, optional
Print information along iterations
log : bool, optional
record log if True
Returns
-------
a : (w,h) ndarray
2D Wasserstein barycenter
log : dict
log dictionary return only if log==True in parameters
References
----------
.. [21] Solomon, J., De Goes, F., Peyré, G., Cuturi, M., Butscher, A., Nguyen, A. & Guibas, L. (2015).
Convolutional wasserstein distances: Efficient optimal transportation on geometric domains
ACM Transactions on Graphics (TOG), 34(4), 66
### Response:
def convolutional_barycenter2d(A, reg, weights=None, numItermax=10000, stopThr=1e-9, stabThr=1e-30, verbose=False, log=False):
    """Compute the entropic regularized Wasserstein barycenter of distributions A,
    where A is a collection of 2D images.
    The function solves the following optimization problem:
    .. math::
        \mathbf{a} = arg\min_\mathbf{a} \sum_i W_{reg}(\mathbf{a},\mathbf{a}_i)
    where :
    - :math:`W_{reg}(\cdot,\cdot)` is the entropic regularized Wasserstein distance (see ot.bregman.sinkhorn)
    - :math:`\mathbf{a}_i` are training distributions (2D images) in the last two dimensions of matrix :math:`\mathbf{A}`
    - reg is the regularization strength scalar value
    The algorithm used for solving the problem is the Sinkhorn-Knopp matrix scaling algorithm as proposed in [21]_
    Parameters
    ----------
    A : np.ndarray (n,w,h)
        n distributions (2D images) of size w x h
    reg : float
        Regularization term >0
    weights : np.ndarray (n,), optional
        Weights of each image on the simplex (barycentric coordinates);
        defaults to uniform weights.
    numItermax : int, optional
        Max number of iterations
    stopThr : float, optional
        Stop threshold on error (>0)
    stabThr : float, optional
        Stabilization threshold to avoid numerical precision issues
    verbose : bool, optional
        Print information along iterations
    log : bool, optional
        record log if True
    Returns
    -------
    a : (w,h) ndarray
        2D Wasserstein barycenter
    log : dict
        log dictionary returned only if log==True in parameters
    References
    ----------
    .. [21] Solomon, J., De Goes, F., Peyré, G., Cuturi, M., Butscher, A., Nguyen, A. & Guibas, L. (2015).
        Convolutional wasserstein distances: Efficient optimal transportation on geometric domains
        ACM Transactions on Graphics (TOG), 34(4), 66
    """
    if weights is None:
        weights = np.ones(A.shape[0]) / A.shape[0]
    else:
        assert(len(weights) == A.shape[0])
    if log:
        # NOTE: `log` is rebound from the boolean flag to the log dict;
        # the final `if log:` relies on a non-empty dict being truthy.
        log = {'err': []}
    b = np.zeros_like(A[0, :, :])  # barycenter estimate
    U = np.ones_like(A)            # per-distribution Sinkhorn scaling factors
    KV = np.ones_like(A)           # kernel applied to the rescaled inputs
    cpt = 0
    err = 1
    # build the convolution operator: a 1D Gaussian kernel applied along each
    # image axis via two matrix products, i.e. K(x) = xi1 @ x @ xi1
    t = np.linspace(0, 1, A.shape[1])
    [Y, X] = np.meshgrid(t, t)
    xi1 = np.exp(-(X - Y)**2 / reg)
    def K(x):
        # Separable 2D Gaussian convolution.
        # NOTE(review): xi1 is sized from A.shape[1] only, which assumes
        # square images (w == h) -- confirm before using non-square inputs.
        return np.dot(np.dot(xi1, x), xi1)
    while (err > stopThr and cpt < numItermax):
        bold = b
        cpt = cpt + 1
        b = np.zeros_like(A[0, :, :])
        for r in range(A.shape[0]):
            # Sinkhorn scaling step; np.maximum(stabThr, .) guards against
            # division by ~0 in low-density regions.
            KV[r, :, :] = K(A[r, :, :] / np.maximum(stabThr, K(U[r, :, :])))
            # weighted geometric mean of the marginals, accumulated in log space
            b += weights[r] * np.log(np.maximum(stabThr, U[r, :, :] * KV[r, :, :]))
        b = np.exp(b)
        for r in range(A.shape[0]):
            U[r, :, :] = b / np.maximum(stabThr, KV[r, :, :])
        if cpt % 10 == 1:
            # check convergence only every 10 iterations to save time
            err = np.sum(np.abs(bold - b))
            # log and verbose print
            if log:
                log['err'].append(err)
            if verbose:
                if cpt % 200 == 0:
                    print('{:5s}|{:12s}'.format('It.', 'Err') + '\n' + '-' * 19)
                print('{:5d}|{:8e}|'.format(cpt, err))
    if log:
        log['niter'] = cpt
        log['U'] = U
        return b, log
    else:
        return b
def _MakePackagePages(self, package, showprivate=False, nested=False, showinh=False):
"""An internal helper to generate all of the pages for a given package
Args:
package (module): The top-level package to document
showprivate (bool): A flag for whether or not to display private members
nested (bool): Foor internal use ONLY
Returns:
str: The file names ready to be appended to a top-level toctree
"""
def checkNoNested(mod):
try:
all = mod.__all__
except AttributeError:
return False
mems = inspect.getmembers(mod, inspect.ismodule)
mems = [m for m in mems if m[0] in mod.__all__]
if len(mems) > 0:
return False
return True
# Get package module members
mods = inspect.getmembers(package, inspect.ismodule)
# Split into modules and sub-packages
nmods, pvt, npkgs = [], [], []
for mod in mods:
# Deal with private modules
if checkNoNested(mod[1]):
if mod[0][0] == '_': pvt.append(mod)
else: nmods.append(mod)
else: npkgs.append(mod)
if showprivate: nmods += pvt
# for each member that has a nested module
# recurse and keep track of index files for that package
files = []
ignore = []
for pkg in npkgs:
pt = '%s/%s/%s' % (self.path, package.__name__.replace('.', '/'), pkg[1].__name__.split('.')[-1])
if os.path.exists(pt): shutil.rmtree(pt)
os.makedirs(pt)
ignore += inspect.getmembers(pkg[1])
f = self._MakePackagePages(pkg[1], showprivate=showprivate, nested=True, showinh=showinh)
files.append(f.split(package.__name__.replace('.', '/')+'/')[1])
if nested:
try:
name = package.__displayname__
except AttributeError:
name = package.__name__
# Create index file here
index = r'''
%s
%s
.. toctree::
:maxdepth: 5
''' % (name, '*' * len(name))
# include sub packages first
index += '\n '.join(files)
# then include modules
index += '\n ' + self._ProduceContent(nmods, showprivate=showprivate, showinh=showinh)
findex = 'content/%s/index.rst' % (package.__name__.replace('.', '/'))
# Write the file
with open(findex, 'w') as f:
if package.__doc__: f.write(package.__doc__)
f.write(index)
# return filename for index file at package level
return '\n ' + findex
# Not nested: return all files
names = '\n %s/%s/' % ( self.path, package.__name__.replace('.', '/'))
nmods = [m for m in nmods if m not in ignore]
return names.join(self._ProduceContent(nmods, showprivate=showprivate, showinh=showinh).split('\n ')+files) | An internal helper to generate all of the pages for a given package
Args:
package (module): The top-level package to document
showprivate (bool): A flag for whether or not to display private members
        nested (bool): For internal use ONLY
Returns:
str: The file names ready to be appended to a top-level toctree | Below is the the instruction that describes the task:
### Input:
An internal helper to generate all of the pages for a given package
Args:
package (module): The top-level package to document
showprivate (bool): A flag for whether or not to display private members
        nested (bool): For internal use ONLY
Returns:
str: The file names ready to be appended to a top-level toctree
### Response:
def _MakePackagePages(self, package, showprivate=False, nested=False, showinh=False):
    """An internal helper to generate all of the pages for a given package.

    Recurses into sub-packages, writing an ``index.rst`` for each nested
    level under ``content/<dotted/package/path>/``.

    Args:
        package (module): The top-level package to document
        showprivate (bool): A flag for whether or not to display private members
        nested (bool): For internal use ONLY
        showinh (bool): Whether to document inherited members
    Returns:
        str: The file names ready to be appended to a top-level toctree
    """
    def checkNoNested(mod):
        # True when `mod` is a leaf module: it either lacks __all__ or
        # exports no sub-modules of its own.
        try:
            # NOTE(review): `all` shadows the builtin and is unused; the
            # assignment only exists to trigger AttributeError when the
            # module has no __all__.
            all = mod.__all__
        except AttributeError:
            return False
        mems = inspect.getmembers(mod, inspect.ismodule)
        mems = [m for m in mems if m[0] in mod.__all__]
        if len(mems) > 0:
            return False
        return True
    # Get package module members
    mods = inspect.getmembers(package, inspect.ismodule)
    # Split into modules and sub-packages
    nmods, pvt, npkgs = [], [], []
    for mod in mods:
        # Deal with private modules
        if checkNoNested(mod[1]):
            if mod[0][0] == '_': pvt.append(mod)
            else: nmods.append(mod)
        else: npkgs.append(mod)
    if showprivate: nmods += pvt
    # for each member that has a nested module
    # recurse and keep track of index files for that package
    files = []
    ignore = []  # members already documented by a sub-package's pages
    for pkg in npkgs:
        # Recreate the output directory for this sub-package from scratch.
        pt = '%s/%s/%s' % (self.path, package.__name__.replace('.', '/'), pkg[1].__name__.split('.')[-1])
        if os.path.exists(pt): shutil.rmtree(pt)
        os.makedirs(pt)
        ignore += inspect.getmembers(pkg[1])
        f = self._MakePackagePages(pkg[1], showprivate=showprivate, nested=True, showinh=showinh)
        # Keep only the path portion below this package's directory.
        files.append(f.split(package.__name__.replace('.', '/')+'/')[1])
    if nested:
        # Prefer a human-readable display name when the package defines one.
        try:
            name = package.__displayname__
        except AttributeError:
            name = package.__name__
        # Create index file here (reST title + toctree skeleton)
        index = r'''
%s
%s
.. toctree::
   :maxdepth: 5
''' % (name, '*' * len(name))
        # include sub packages first
        index += '\n '.join(files)
        # then include modules
        index += '\n ' + self._ProduceContent(nmods, showprivate=showprivate, showinh=showinh)
        findex = 'content/%s/index.rst' % (package.__name__.replace('.', '/'))
        # Write the file
        with open(findex, 'w') as f:
            if package.__doc__: f.write(package.__doc__)
            f.write(index)
        # return filename for index file at package level
        return '\n ' + findex
    # Not nested: return all files
    names = '\n %s/%s/' % ( self.path, package.__name__.replace('.', '/'))
    nmods = [m for m in nmods if m not in ignore]
    return names.join(self._ProduceContent(nmods, showprivate=showprivate, showinh=showinh).split('\n ')+files)
def add(self, item, group_by=None):
"""General purpose class to group items by certain criteria."""
key = None
if not group_by:
group_by = self.group_by
if group_by:
# if group_by is a function, use it with item as argument
if hasattr(group_by, '__call__'):
key = group_by(item)
# if the item has attribute of group_by as string, use that as key
elif isinstance(group_by, str) and hasattr(item, group_by):
key = getattr(item, group_by)
else:
key = None
# try to match str(item) with regular expression
if isinstance(group_by, str):
match = re.search(group_by, str(item))
if match:
if len(match.groups()) > 0:
key = match.group(1)
else:
key = match.group()
self.groups.setdefault(key, list()).append(item) | General purpose class to group items by certain criteria. | Below is the the instruction that describes the task:
### Input:
General purpose class to group items by certain criteria.
### Response:
def add(self, item, group_by=None):
    """Append *item* to the group selected by the grouping criterion.

    The criterion is ``group_by`` if given, otherwise ``self.group_by``.
    A callable criterion is applied to the item to produce the key; a
    string criterion is first tried as an attribute name of the item and
    then as a regular expression matched against ``str(item)`` (the first
    capture group, or the whole match, becomes the key -- a successful
    match overrides an attribute-derived key). Items that match no
    criterion go into the ``None`` group.
    """
    criterion = group_by if group_by else self.group_by
    key = None
    if criterion:
        if callable(criterion):
            # Functions/lambdas compute the key directly from the item.
            key = criterion(item)
        elif isinstance(criterion, str) and hasattr(item, criterion):
            # String criterion naming an attribute of the item.
            key = getattr(item, criterion)
        if isinstance(criterion, str):
            # String criterion as a regex over the item's string form;
            # a match takes precedence over any attribute lookup above.
            found = re.search(criterion, str(item))
            if found:
                key = found.group(1) if found.groups() else found.group()
    self.groups.setdefault(key, []).append(item)
def editTemplate(id, data):
"""
Edit an existing template.
Args:
`id`: ID of the template to edit
`data`: json data to be updated
Returns:
Dictionary containing the updated details of the template.
"""
conn = Qubole.agent()
return conn.put(Template.element_path(id), data) | Edit an existing template.
Args:
`id`: ID of the template to edit
`data`: json data to be updated
Returns:
Dictionary containing the updated details of the template. | Below is the the instruction that describes the task:
### Input:
Edit an existing template.
Args:
`id`: ID of the template to edit
`data`: json data to be updated
Returns:
Dictionary containing the updated details of the template.
### Response:
def editTemplate(id, data):
    """
    Edit an existing template.
    Args:
        `id`: ID of the template to edit
        `data`: json data to be updated
    Returns:
        Dictionary containing the updated details of the template.
    """
    # Issue the update via a PUT on the template's resource path, using
    # the shared Qubole API connection.
    return Qubole.agent().put(Template.element_path(id), data)
def contour(self, *args, **kwargs):
"""Plot contours.
If a 3D or higher Data object is passed, a lower dimensional
channel can be plotted, provided the ``squeeze`` of the channel
has ``ndim==2`` and the first two axes do not span dimensions
other than those spanned by that channel.
Parameters
----------
data : 2D WrightTools.data.Data object
Data to plot.
channel : int or string (optional)
Channel index or name. Default is 0.
dynamic_range : boolean (optional)
Force plotting of all contours, overloading for major extent. Only applies to signed
data. Default is False.
autolabel : {'none', 'both', 'x', 'y'} (optional)
Parameterize application of labels directly from data object. Default is none.
xlabel : string (optional)
xlabel. Default is None.
ylabel : string (optional)
ylabel. Default is None.
**kwargs
matplotlib.axes.Axes.contour__ optional keyword arguments.
__ https://matplotlib.org/api/_as_gen/matplotlib.axes.Axes.contour.html
Returns
-------
matplotlib.contour.QuadContourSet
"""
args, kwargs = self._parse_plot_args(*args, **kwargs, plot_type="contour")
return super().contour(*args, **kwargs) | Plot contours.
If a 3D or higher Data object is passed, a lower dimensional
channel can be plotted, provided the ``squeeze`` of the channel
has ``ndim==2`` and the first two axes do not span dimensions
other than those spanned by that channel.
Parameters
----------
data : 2D WrightTools.data.Data object
Data to plot.
channel : int or string (optional)
Channel index or name. Default is 0.
dynamic_range : boolean (optional)
Force plotting of all contours, overloading for major extent. Only applies to signed
data. Default is False.
autolabel : {'none', 'both', 'x', 'y'} (optional)
Parameterize application of labels directly from data object. Default is none.
xlabel : string (optional)
xlabel. Default is None.
ylabel : string (optional)
ylabel. Default is None.
**kwargs
matplotlib.axes.Axes.contour__ optional keyword arguments.
__ https://matplotlib.org/api/_as_gen/matplotlib.axes.Axes.contour.html
Returns
-------
matplotlib.contour.QuadContourSet | Below is the the instruction that describes the task:
### Input:
Plot contours.
If a 3D or higher Data object is passed, a lower dimensional
channel can be plotted, provided the ``squeeze`` of the channel
has ``ndim==2`` and the first two axes do not span dimensions
other than those spanned by that channel.
Parameters
----------
data : 2D WrightTools.data.Data object
Data to plot.
channel : int or string (optional)
Channel index or name. Default is 0.
dynamic_range : boolean (optional)
Force plotting of all contours, overloading for major extent. Only applies to signed
data. Default is False.
autolabel : {'none', 'both', 'x', 'y'} (optional)
Parameterize application of labels directly from data object. Default is none.
xlabel : string (optional)
xlabel. Default is None.
ylabel : string (optional)
ylabel. Default is None.
**kwargs
matplotlib.axes.Axes.contour__ optional keyword arguments.
__ https://matplotlib.org/api/_as_gen/matplotlib.axes.Axes.contour.html
Returns
-------
matplotlib.contour.QuadContourSet
### Response:
def contour(self, *args, **kwargs):
    """Plot contours.
    If a 3D or higher Data object is passed, a lower dimensional
    channel can be plotted, provided the ``squeeze`` of the channel
    has ``ndim==2`` and the first two axes do not span dimensions
    other than those spanned by that channel.
    Parameters
    ----------
    data : 2D WrightTools.data.Data object
        Data to plot.
    channel : int or string (optional)
        Channel index or name. Default is 0.
    dynamic_range : boolean (optional)
        Force plotting of all contours, overloading for major extent. Only applies to signed
        data. Default is False.
    autolabel : {'none', 'both', 'x', 'y'} (optional)
        Parameterize application of labels directly from data object. Default is none.
    xlabel : string (optional)
        xlabel. Default is None.
    ylabel : string (optional)
        ylabel. Default is None.
    **kwargs
        matplotlib.axes.Axes.contour__ optional keyword arguments.
    __ https://matplotlib.org/api/_as_gen/matplotlib.axes.Axes.contour.html
    Returns
    -------
    matplotlib.contour.QuadContourSet
    """
    # Translate the WrightTools-specific arguments (data, channel,
    # dynamic_range, autolabel, ...) into the plain arrays/keywords that
    # the matplotlib-native Axes.contour expects.
    args, kwargs = self._parse_plot_args(*args, **kwargs, plot_type="contour")
    return super().contour(*args, **kwargs)
def get_abs_file_path(self, rel_file_path,
quit_on_error=None, check_relative_to_path=True):
"""
Returns the absolute file path of the given [relative] file path
to either this script or to the config file.
May throw a RuntimeError if quit_on_error is True.
:param str rel_file_path: relative file path
:param bool quit_on_error: determines if the script may throw an
exception
:return str: absolute file path of the given relative file path
:raises RuntimeError: if the file path does not exist and
quit_on_error is True
"""
if self.cfg_file_path is not None and \
check_relative_to_path and \
not self.cfg.section('Files')['relative_to_start_processes_file']:
script_dir = os.path.dirname(self.cfg_file_path)
else:
# absolute dir this script is in
script_dir = os.path.dirname(__file__)
abs_file_path = os.path.abspath(
os.path.join(script_dir, rel_file_path))
if not os.path.exists(abs_file_path):
self.log.error(abs_file_path + " does not exist.")
if quit_on_error is True:
raise RuntimeError("Imported file not found. Quit.")
return abs_file_path | Returns the absolute file path of the given [relative] file path
to either this script or to the config file.
May throw a RuntimeError if quit_on_error is True.
:param str rel_file_path: relative file path
:param bool quit_on_error: determines if the script may throw an
exception
:return str: absolute file path of the given relative file path
:raises RuntimeError: if the file path does not exist and
quit_on_error is True | Below is the the instruction that describes the task:
### Input:
Returns the absolute file path of the given [relative] file path
to either this script or to the config file.
May throw a RuntimeError if quit_on_error is True.
:param str rel_file_path: relative file path
:param bool quit_on_error: determines if the script may throw an
exception
:return str: absolute file path of the given relative file path
:raises RuntimeError: if the file path does not exist and
quit_on_error is True
### Response:
def get_abs_file_path(self, rel_file_path,
                      quit_on_error=None, check_relative_to_path=True):
    """
    Returns the absolute file path of the given [relative] file path
    to either this script or to the config file.
    May throw a RuntimeError if quit_on_error is True.
    :param str rel_file_path: relative file path
    :param bool quit_on_error: determines if the script may throw an
        exception
    :param bool check_relative_to_path: when True and a config file is
        loaded, resolve relative to the config file's directory unless
        the 'relative_to_start_processes_file' option is set
    :return str: absolute file path of the given relative file path
    :raises RuntimeError: if the file path does not exist and
        quit_on_error is True
    """
    # Resolve relative to the config file's directory when one is loaded
    # and the config does not ask for paths relative to the start script.
    if self.cfg_file_path is not None and \
            check_relative_to_path and \
            not self.cfg.section('Files')['relative_to_start_processes_file']:
        script_dir = os.path.dirname(self.cfg_file_path)
    else:
        # absolute dir this script is in
        script_dir = os.path.dirname(__file__)
    abs_file_path = os.path.abspath(
        os.path.join(script_dir, rel_file_path))
    if not os.path.exists(abs_file_path):
        self.log.error(abs_file_path + " does not exist.")
        # NOTE(review): the identity check means only the literal `True`
        # raises; any other truthy value merely logs the error -- confirm
        # this is intended.
        if quit_on_error is True:
            raise RuntimeError("Imported file not found. Quit.")
    return abs_file_path
def _wait_for_lock(self, lock_file):
"""
Just sleep until the lock_file does not exist or a lock_file-related dynamic recovery flag is spotted
:param str lock_file: Lock file to wait upon.
"""
sleeptime = .5
first_message_flag = False
dot_count = 0
recover_file = self._recoverfile_from_lockfile(lock_file)
while os.path.isfile(lock_file):
if first_message_flag is False:
self.timestamp("Waiting for file lock: " + lock_file)
self._set_status_flag(WAIT_FLAG)
first_message_flag = True
else:
sys.stdout.write(".")
dot_count = dot_count + 1
if dot_count % 60 == 0:
print("") # linefeed
# prevents the issue of pypier waiting for the lock file to be gone infinitely
# in case the recovery flag is sticked by other pipeline when it's interrupted
if os.path.isfile(recover_file):
sys.stdout.write(" Dynamic recovery flag found")
break
time.sleep(sleeptime)
sleeptime = min(sleeptime + 2.5, 60)
if first_message_flag:
self.timestamp("File unlocked.")
self._set_status_flag(RUN_FLAG) | Just sleep until the lock_file does not exist or a lock_file-related dynamic recovery flag is spotted
:param str lock_file: Lock file to wait upon. | Below is the the instruction that describes the task:
### Input:
Just sleep until the lock_file does not exist or a lock_file-related dynamic recovery flag is spotted
:param str lock_file: Lock file to wait upon.
### Response:
def _wait_for_lock(self, lock_file):
    """
    Just sleep until the lock_file does not exist or a lock_file-related
    dynamic recovery flag is spotted.
    :param str lock_file: Lock file to wait upon.
    """
    sleeptime = .5  # poll interval; grows by 2.5s per pass, capped at 60s
    first_message_flag = False
    dot_count = 0
    recover_file = self._recoverfile_from_lockfile(lock_file)
    while os.path.isfile(lock_file):
        if first_message_flag is False:
            # First pass: announce once and mark this pipeline as waiting.
            self.timestamp("Waiting for file lock: " + lock_file)
            self._set_status_flag(WAIT_FLAG)
            first_message_flag = True
        else:
            # Subsequent passes: print a progress dot, wrapping at 60/line.
            sys.stdout.write(".")
            dot_count = dot_count + 1
            if dot_count % 60 == 0:
                print("")  # linefeed
        # prevents the issue of pypiper waiting for the lock file to be gone
        # infinitely in case the recovery flag is set by another pipeline
        # when it's interrupted
        if os.path.isfile(recover_file):
            sys.stdout.write(" Dynamic recovery flag found")
            break
        time.sleep(sleeptime)
        sleeptime = min(sleeptime + 2.5, 60)
    if first_message_flag:
        # We actually waited at some point; report and restore running state.
        self.timestamp("File unlocked.")
        self._set_status_flag(RUN_FLAG)
def decode(tai64n):
"""
Convert TAI64N string to seconds since epoch.
Note that dates before 2013 may not decode accurately due to leap second
issues. If you need correct decoding for earlier dates you can try the
tai64n package available from PyPI (U{https://pypi.python.org/pypi/tai64n}).
@param tai64n: TAI64N-encoded time, as C{unicode}.
@return: Seconds since UTC Unix epoch as C{float}.
"""
seconds, nanoseconds = struct.unpack(_STRUCTURE, a2b_hex(tai64n[1:]))
seconds -= _OFFSET
return seconds + (nanoseconds / 1000000000.0) | Convert TAI64N string to seconds since epoch.
Note that dates before 2013 may not decode accurately due to leap second
issues. If you need correct decoding for earlier dates you can try the
tai64n package available from PyPI (U{https://pypi.python.org/pypi/tai64n}).
@param tai64n: TAI64N-encoded time, as C{unicode}.
@return: Seconds since UTC Unix epoch as C{float}. | Below is the the instruction that describes the task:
### Input:
Convert TAI64N string to seconds since epoch.
Note that dates before 2013 may not decode accurately due to leap second
issues. If you need correct decoding for earlier dates you can try the
tai64n package available from PyPI (U{https://pypi.python.org/pypi/tai64n}).
@param tai64n: TAI64N-encoded time, as C{unicode}.
@return: Seconds since UTC Unix epoch as C{float}.
### Response:
def decode(tai64n):
    """
    Convert TAI64N string to seconds since epoch.
    Note that dates before 2013 may not decode accurately due to leap second
    issues. If you need correct decoding for earlier dates you can try the
    tai64n package available from PyPI (U{https://pypi.python.org/pypi/tai64n}).
    @param tai64n: TAI64N-encoded time, as C{unicode}.
    @return: Seconds since UTC Unix epoch as C{float}.
    """
    # Drop the leading '@', hex-decode the payload, and split it into the
    # big-endian (seconds, nanoseconds) pair per the TAI64N layout.
    raw = a2b_hex(tai64n[1:])
    seconds, nanoseconds = struct.unpack(_STRUCTURE, raw)
    # Shift from the TAI64 epoch offset to Unix time and fold in the
    # sub-second component.
    return (seconds - _OFFSET) + nanoseconds / 1000000000.0
def make_new_projection_values(self,width=160):
"""Run do_step function until the diagramms have diverged from each other.
Also determines how big the figure is going to be by calculating the borders
from new residue coordinates. These are then added some buffer space.
"""
#Make gap between residues bigger if plots have a lot of rings - each ring after the 4th
#give extra 12.5px space
start = timer()
if self.topology_data.ring_number>4:
width = width + (self.topology_data.ring_number-4)*12.5
values = [v for v in self.nearest_points_projection.values()]
xy_values = [v for v in self.nearest_points_coords.values()]
coeff_value = [v for v in self.b_for_all.values()]
energy = 100
while energy > 0.2:
values, energy = self.do_step(values,xy_values,coeff_value, width)
time = timer() - start
i=0
xy_values =[]
for residue in self.nearest_points_coords:
b = self.a.boundary.parallel_offset(self.topology_data.closest_atoms[residue][0][1]*50+50,"left",join_style=2).convex_hull
self.nearest_points_projection[residue] = values[i]
self.nearest_points[residue] = b.boundary.interpolate(self.nearest_points_projection[residue] % b.boundary.length)
self.nearest_points_coords[residue] = self.nearest_points[residue].x, self.nearest_points[residue].y
xy_values.append(self.nearest_points_coords[residue])
i+=1
values = [v for v in self.nearest_points_projection.values()]
if time>30:
self.molsize1 = self.molsize1 + self.molsize1 * 0.2 #Increase molecule svg size
self.molsize2 = self.molsize2 + self.molsize2 * 0.2
self.draw_molecule()
break
#Calculate the borders of the final image
max_x = max(v[0] for k,v in self.nearest_points_coords.items())
min_x = min(v[0] for k,v in self.nearest_points_coords.items())
min_y = min(v[1] for k,v in self.nearest_points_coords.items())
max_y = max(v[1] for k,v in self.nearest_points_coords.items())
if min_x<0:
self.x_dim =(max_x-min_x)+600 #600 acts as buffer
elif max_x<self.molsize1 and min_x<0: #In case all residues are grouped on one end of the molecule
self.x_dim = (self.molsize1-min_x)+600
elif max_x<self.molsize1 and min_x>0:
self.x_dim = self.molsize1+600
else:
self.x_dim = max_x+600
if min_y<0:
self.y_dim = (max_y-min_y)+400 #400 acts as buffer
elif max_y<self.molsize2 and min_y<0:
self.y_dim = (self.molsize2-min_y)+400
elif max_y<self.molsize2 and min_y>0:
self.y_dim = self.molsize2+400
else:
self.y_dim = max_y+400
end = timer()
print "Drawing molecule:"+str(end-start) | Run do_step function until the diagramms have diverged from each other.
Also determines how big the figure is going to be by calculating the borders
from new residue coordinates. These are then added some buffer space. | Below is the the instruction that describes the task:
### Input:
Run do_step function until the diagramms have diverged from each other.
Also determines how big the figure is going to be by calculating the borders
from new residue coordinates. These are then added some buffer space.
### Response:
def make_new_projection_values(self,width=160):
    """Run do_step repeatedly until the residue diagrams have diverged
    (stopped overlapping) from each other.
    Also determines how big the figure is going to be by calculating the
    borders from the new residue coordinates. These are then padded with
    some buffer space.
    :param width: minimum gap between residue plots in px; grown by
        12.5 px for every ring beyond the fourth.
    """
    # NOTE(review): this module is Python 2 (print statement at the end).
    #Make gap between residues bigger if plots have a lot of rings - each ring after the 4th
    #gives extra 12.5px space
    start = timer()
    if self.topology_data.ring_number>4:
        width = width + (self.topology_data.ring_number-4)*12.5
    values = [v for v in self.nearest_points_projection.values()]
    xy_values = [v for v in self.nearest_points_coords.values()]
    coeff_value = [v for v in self.b_for_all.values()]
    energy = 100
    # Relax positions until the layout "energy" (overlap measure from
    # do_step) drops below threshold.
    while energy > 0.2:
        values, energy = self.do_step(values,xy_values,coeff_value, width)
        time = timer() - start
        i=0
        xy_values =[]
        for residue in self.nearest_points_coords:
            # Offset hull around the molecule outline at a distance scaled
            # by the residue's closest-atom distance; presumably a shapely
            # geometry -- TODO confirm.
            b = self.a.boundary.parallel_offset(self.topology_data.closest_atoms[residue][0][1]*50+50,"left",join_style=2).convex_hull
            self.nearest_points_projection[residue] = values[i]
            # Map the 1D projection value back to a point on the hull
            # boundary (modulo wraps around the closed outline).
            self.nearest_points[residue] = b.boundary.interpolate(self.nearest_points_projection[residue] % b.boundary.length)
            self.nearest_points_coords[residue] = self.nearest_points[residue].x, self.nearest_points[residue].y
            xy_values.append(self.nearest_points_coords[residue])
            i+=1
        values = [v for v in self.nearest_points_projection.values()]
        if time>30:
            # Bail out after 30s of relaxation: enlarge the molecule
            # drawing instead to make room for the residue plots.
            self.molsize1 = self.molsize1 + self.molsize1 * 0.2 #Increase molecule svg size
            self.molsize2 = self.molsize2 + self.molsize2 * 0.2
            self.draw_molecule()
            break
    #Calculate the borders of the final image
    max_x = max(v[0] for k,v in self.nearest_points_coords.items())
    min_x = min(v[0] for k,v in self.nearest_points_coords.items())
    min_y = min(v[1] for k,v in self.nearest_points_coords.items())
    max_y = max(v[1] for k,v in self.nearest_points_coords.items())
    if min_x<0:
        self.x_dim =(max_x-min_x)+600 #600 acts as buffer
    elif max_x<self.molsize1 and min_x<0: #In case all residues are grouped on one end of the molecule
        self.x_dim = (self.molsize1-min_x)+600
    elif max_x<self.molsize1 and min_x>0:
        self.x_dim = self.molsize1+600
    else:
        self.x_dim = max_x+600
    if min_y<0:
        self.y_dim = (max_y-min_y)+400 #400 acts as buffer
    elif max_y<self.molsize2 and min_y<0:
        self.y_dim = (self.molsize2-min_y)+400
    elif max_y<self.molsize2 and min_y>0:
        self.y_dim = self.molsize2+400
    else:
        self.y_dim = max_y+400
    end = timer()
    print "Drawing molecule:"+str(end-start)
def _join(verb):
"""
Join helper
"""
data = pd.merge(verb.x, verb.y, **verb.kwargs)
# Preserve x groups
if isinstance(verb.x, GroupedDataFrame):
data.plydata_groups = list(verb.x.plydata_groups)
return data | Join helper | Below is the the instruction that describes the task:
### Input:
Join helper
### Response:
def _join(verb):
    """
    Join helper: merge the verb's two frames and carry over the grouping
    of the left operand when it is grouped.
    """
    merged = pd.merge(verb.x, verb.y, **verb.kwargs)
    if isinstance(verb.x, GroupedDataFrame):
        # pd.merge returns a plain DataFrame, so re-attach x's groups.
        merged.plydata_groups = list(verb.x.plydata_groups)
    return merged
def cli(env, identifier):
"""Get details about a security group."""
mgr = SoftLayer.NetworkManager(env.client)
secgroup = mgr.get_securitygroup(identifier)
table = formatting.KeyValueTable(['name', 'value'])
table.align['name'] = 'r'
table.align['value'] = 'l'
table.add_row(['id', secgroup['id']])
table.add_row(['name', secgroup.get('name') or formatting.blank()])
table.add_row(['description',
secgroup.get('description') or formatting.blank()])
rule_table = formatting.Table(['id', 'remoteIp', 'remoteGroupId',
'direction', 'ethertype', 'portRangeMin',
'portRangeMax', 'protocol'])
for rule in secgroup.get('rules', []):
rg_id = rule.get('remoteGroup', {}).get('id') or formatting.blank()
port_min = rule.get('portRangeMin')
port_max = rule.get('portRangeMax')
if port_min is None:
port_min = formatting.blank()
if port_max is None:
port_max = formatting.blank()
rule_table.add_row([rule['id'],
rule.get('remoteIp') or formatting.blank(),
rule.get('remoteGroupId', rg_id),
rule['direction'],
rule.get('ethertype') or formatting.blank(),
port_min,
port_max,
rule.get('protocol') or formatting.blank()])
table.add_row(['rules', rule_table])
vsi_table = formatting.Table(['id', 'hostname', 'interface', 'ipAddress'])
for binding in secgroup.get('networkComponentBindings', []):
try:
vsi = binding['networkComponent']['guest']
vsi_id = vsi['id']
hostname = vsi['hostname']
interface = ('PRIVATE' if binding['networkComponent']['port'] == 0
else 'PUBLIC')
ip_address = (vsi['primaryBackendIpAddress']
if binding['networkComponent']['port'] == 0
else vsi['primaryIpAddress'])
except KeyError:
vsi_id = "N/A"
hostname = "Not enough permission to view"
interface = "N/A"
ip_address = "N/A"
vsi_table.add_row([vsi_id, hostname, interface, ip_address])
table.add_row(['servers', vsi_table])
env.fout(table) | Get details about a security group. | Below is the the instruction that describes the task:
### Input:
Get details about a security group.
### Response:
def cli(env, identifier):
"""Get details about a security group."""
mgr = SoftLayer.NetworkManager(env.client)
secgroup = mgr.get_securitygroup(identifier)
table = formatting.KeyValueTable(['name', 'value'])
table.align['name'] = 'r'
table.align['value'] = 'l'
table.add_row(['id', secgroup['id']])
table.add_row(['name', secgroup.get('name') or formatting.blank()])
table.add_row(['description',
secgroup.get('description') or formatting.blank()])
rule_table = formatting.Table(['id', 'remoteIp', 'remoteGroupId',
'direction', 'ethertype', 'portRangeMin',
'portRangeMax', 'protocol'])
for rule in secgroup.get('rules', []):
rg_id = rule.get('remoteGroup', {}).get('id') or formatting.blank()
port_min = rule.get('portRangeMin')
port_max = rule.get('portRangeMax')
if port_min is None:
port_min = formatting.blank()
if port_max is None:
port_max = formatting.blank()
rule_table.add_row([rule['id'],
rule.get('remoteIp') or formatting.blank(),
rule.get('remoteGroupId', rg_id),
rule['direction'],
rule.get('ethertype') or formatting.blank(),
port_min,
port_max,
rule.get('protocol') or formatting.blank()])
table.add_row(['rules', rule_table])
vsi_table = formatting.Table(['id', 'hostname', 'interface', 'ipAddress'])
for binding in secgroup.get('networkComponentBindings', []):
try:
vsi = binding['networkComponent']['guest']
vsi_id = vsi['id']
hostname = vsi['hostname']
interface = ('PRIVATE' if binding['networkComponent']['port'] == 0
else 'PUBLIC')
ip_address = (vsi['primaryBackendIpAddress']
if binding['networkComponent']['port'] == 0
else vsi['primaryIpAddress'])
except KeyError:
vsi_id = "N/A"
hostname = "Not enough permission to view"
interface = "N/A"
ip_address = "N/A"
vsi_table.add_row([vsi_id, hostname, interface, ip_address])
table.add_row(['servers', vsi_table])
env.fout(table) |
def get_name(cls):
"""Get the serializer name.
The name can be defined on the Meta class or will be generated
automatically from the model name.
"""
if not hasattr(cls.Meta, 'name'):
class_name = getattr(cls.get_model(), '__name__', None)
setattr(
cls.Meta,
'name',
inflection.underscore(class_name) if class_name else None
)
return cls.Meta.name | Get the serializer name.
The name can be defined on the Meta class or will be generated
automatically from the model name. | Below is the the instruction that describes the task:
### Input:
Get the serializer name.
The name can be defined on the Meta class or will be generated
automatically from the model name.
### Response:
def get_name(cls):
"""Get the serializer name.
The name can be defined on the Meta class or will be generated
automatically from the model name.
"""
if not hasattr(cls.Meta, 'name'):
class_name = getattr(cls.get_model(), '__name__', None)
setattr(
cls.Meta,
'name',
inflection.underscore(class_name) if class_name else None
)
return cls.Meta.name |
def _walk_all_files():
"""
Optimize the files from the arugments list in two batches.
One for absolute paths which are probably outside the current
working directory tree and one for relative files.
"""
# Init records
record_dirs = set()
result_set = set()
for filename in Settings.paths:
# Record dirs to put timestamps in later
filename_full = os.path.abspath(filename)
if Settings.recurse and os.path.isdir(filename_full):
record_dirs.add(filename_full)
walk_after = timestamp.get_walk_after(filename_full)
results = walk_file(filename_full, walk_after, Settings.recurse)
result_set = result_set.union(results)
bytes_in = 0
bytes_out = 0
nag_about_gifs = False
errors = []
for result in result_set:
res = result.get()
if res.error:
errors += [(res.final_filename, res.error)]
continue
bytes_in += res.bytes_in
bytes_out += res.bytes_out
nag_about_gifs = nag_about_gifs or res.nag_about_gifs
return record_dirs, bytes_in, bytes_out, nag_about_gifs, errors | Optimize the files from the arugments list in two batches.
One for absolute paths which are probably outside the current
working directory tree and one for relative files. | Below is the the instruction that describes the task:
### Input:
Optimize the files from the arugments list in two batches.
One for absolute paths which are probably outside the current
working directory tree and one for relative files.
### Response:
def _walk_all_files():
"""
Optimize the files from the arugments list in two batches.
One for absolute paths which are probably outside the current
working directory tree and one for relative files.
"""
# Init records
record_dirs = set()
result_set = set()
for filename in Settings.paths:
# Record dirs to put timestamps in later
filename_full = os.path.abspath(filename)
if Settings.recurse and os.path.isdir(filename_full):
record_dirs.add(filename_full)
walk_after = timestamp.get_walk_after(filename_full)
results = walk_file(filename_full, walk_after, Settings.recurse)
result_set = result_set.union(results)
bytes_in = 0
bytes_out = 0
nag_about_gifs = False
errors = []
for result in result_set:
res = result.get()
if res.error:
errors += [(res.final_filename, res.error)]
continue
bytes_in += res.bytes_in
bytes_out += res.bytes_out
nag_about_gifs = nag_about_gifs or res.nag_about_gifs
return record_dirs, bytes_in, bytes_out, nag_about_gifs, errors |
def rgev(xi, mu=0, sigma=1, size=None):
"""
Random generalized extreme value (GEV) variates.
"""
q = np.random.uniform(size=size)
z = flib.gev_ppf(q, xi)
return z * sigma + mu | Random generalized extreme value (GEV) variates. | Below is the the instruction that describes the task:
### Input:
Random generalized extreme value (GEV) variates.
### Response:
def rgev(xi, mu=0, sigma=1, size=None):
"""
Random generalized extreme value (GEV) variates.
"""
q = np.random.uniform(size=size)
z = flib.gev_ppf(q, xi)
return z * sigma + mu |
def bake(self, P, key='curr', closed=False, itemsize=None):
"""
Given a path P, return the baked vertices as they should be copied in
the collection if the path has already been appended.
Example:
--------
paths.append(P)
P *= 2
paths['prev'][0] = bake(P,'prev')
paths['curr'][0] = bake(P,'curr')
paths['next'][0] = bake(P,'next')
"""
itemsize = itemsize or len(P)
itemcount = len(P) / itemsize # noqa
n = itemsize
if closed:
I = np.arange(n + 3)
if key == 'prev':
I -= 2
I[0], I[1], I[-1] = n - 1, n - 1, n - 1
elif key == 'next':
I[0], I[-3], I[-2], I[-1] = 1, 0, 1, 1
else:
I -= 1
I[0], I[-1], I[n + 1] = 0, 0, 0
else:
I = np.arange(n + 2)
if key == 'prev':
I -= 2
I[0], I[1], I[-1] = 0, 0, n - 2
elif key == 'next':
I[0], I[-1], I[-2] = 1, n - 1, n - 1
else:
I -= 1
I[0], I[-1] = 0, n - 1
I = np.repeat(I, 2)
return P[I] | Given a path P, return the baked vertices as they should be copied in
the collection if the path has already been appended.
Example:
--------
paths.append(P)
P *= 2
paths['prev'][0] = bake(P,'prev')
paths['curr'][0] = bake(P,'curr')
paths['next'][0] = bake(P,'next') | Below is the the instruction that describes the task:
### Input:
Given a path P, return the baked vertices as they should be copied in
the collection if the path has already been appended.
Example:
--------
paths.append(P)
P *= 2
paths['prev'][0] = bake(P,'prev')
paths['curr'][0] = bake(P,'curr')
paths['next'][0] = bake(P,'next')
### Response:
def bake(self, P, key='curr', closed=False, itemsize=None):
"""
Given a path P, return the baked vertices as they should be copied in
the collection if the path has already been appended.
Example:
--------
paths.append(P)
P *= 2
paths['prev'][0] = bake(P,'prev')
paths['curr'][0] = bake(P,'curr')
paths['next'][0] = bake(P,'next')
"""
itemsize = itemsize or len(P)
itemcount = len(P) / itemsize # noqa
n = itemsize
if closed:
I = np.arange(n + 3)
if key == 'prev':
I -= 2
I[0], I[1], I[-1] = n - 1, n - 1, n - 1
elif key == 'next':
I[0], I[-3], I[-2], I[-1] = 1, 0, 1, 1
else:
I -= 1
I[0], I[-1], I[n + 1] = 0, 0, 0
else:
I = np.arange(n + 2)
if key == 'prev':
I -= 2
I[0], I[1], I[-1] = 0, 0, n - 2
elif key == 'next':
I[0], I[-1], I[-2] = 1, n - 1, n - 1
else:
I -= 1
I[0], I[-1] = 0, n - 1
I = np.repeat(I, 2)
return P[I] |
def copy(self):
"""Create a copy of the animation."""
animation = AnimationList()
animation.set_frame_rate(self.frame_rate)
animation.__coords = self.__coords
animation.__horizontal_flip = self.__horizontal_flip
animation.__vertical_flip = self.__vertical_flip
animation.should_repeat = self.should_repeat
animation.draw_order = self.draw_order
animation.update_order = self.update_order
for image in self.images:
new_image = Sprite()
new_image.coords = image.coords
new_image.apply_texture(image.image)
animation.images.append(new_image)
return animation | Create a copy of the animation. | Below is the the instruction that describes the task:
### Input:
Create a copy of the animation.
### Response:
def copy(self):
"""Create a copy of the animation."""
animation = AnimationList()
animation.set_frame_rate(self.frame_rate)
animation.__coords = self.__coords
animation.__horizontal_flip = self.__horizontal_flip
animation.__vertical_flip = self.__vertical_flip
animation.should_repeat = self.should_repeat
animation.draw_order = self.draw_order
animation.update_order = self.update_order
for image in self.images:
new_image = Sprite()
new_image.coords = image.coords
new_image.apply_texture(image.image)
animation.images.append(new_image)
return animation |
def create_uptime_check_config(
self,
parent,
uptime_check_config,
retry=google.api_core.gapic_v1.method.DEFAULT,
timeout=google.api_core.gapic_v1.method.DEFAULT,
metadata=None,
):
"""
Creates a new uptime check configuration.
Example:
>>> from google.cloud import monitoring_v3
>>>
>>> client = monitoring_v3.UptimeCheckServiceClient()
>>>
>>> parent = client.project_path('[PROJECT]')
>>>
>>> # TODO: Initialize `uptime_check_config`:
>>> uptime_check_config = {}
>>>
>>> response = client.create_uptime_check_config(parent, uptime_check_config)
Args:
parent (str): The project in which to create the uptime check. The format is
``projects/[PROJECT_ID]``.
uptime_check_config (Union[dict, ~google.cloud.monitoring_v3.types.UptimeCheckConfig]): The new uptime check configuration.
If a dict is provided, it must be of the same form as the protobuf
message :class:`~google.cloud.monitoring_v3.types.UptimeCheckConfig`
retry (Optional[google.api_core.retry.Retry]): A retry object used
to retry requests. If ``None`` is specified, requests will not
be retried.
timeout (Optional[float]): The amount of time, in seconds, to wait
for the request to complete. Note that if ``retry`` is
specified, the timeout applies to each individual attempt.
metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
that is provided to the method.
Returns:
A :class:`~google.cloud.monitoring_v3.types.UptimeCheckConfig` instance.
Raises:
google.api_core.exceptions.GoogleAPICallError: If the request
failed for any reason.
google.api_core.exceptions.RetryError: If the request failed due
to a retryable error and retry attempts failed.
ValueError: If the parameters are invalid.
"""
if metadata is None:
metadata = []
metadata = list(metadata)
# Wrap the transport method to add retry and timeout logic.
if "create_uptime_check_config" not in self._inner_api_calls:
self._inner_api_calls[
"create_uptime_check_config"
] = google.api_core.gapic_v1.method.wrap_method(
self.transport.create_uptime_check_config,
default_retry=self._method_configs["CreateUptimeCheckConfig"].retry,
default_timeout=self._method_configs["CreateUptimeCheckConfig"].timeout,
client_info=self._client_info,
)
request = uptime_service_pb2.CreateUptimeCheckConfigRequest(
parent=parent, uptime_check_config=uptime_check_config
)
if metadata is None:
metadata = []
metadata = list(metadata)
try:
routing_header = [("parent", parent)]
except AttributeError:
pass
else:
routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata(
routing_header
)
metadata.append(routing_metadata)
return self._inner_api_calls["create_uptime_check_config"](
request, retry=retry, timeout=timeout, metadata=metadata
) | Creates a new uptime check configuration.
Example:
>>> from google.cloud import monitoring_v3
>>>
>>> client = monitoring_v3.UptimeCheckServiceClient()
>>>
>>> parent = client.project_path('[PROJECT]')
>>>
>>> # TODO: Initialize `uptime_check_config`:
>>> uptime_check_config = {}
>>>
>>> response = client.create_uptime_check_config(parent, uptime_check_config)
Args:
parent (str): The project in which to create the uptime check. The format is
``projects/[PROJECT_ID]``.
uptime_check_config (Union[dict, ~google.cloud.monitoring_v3.types.UptimeCheckConfig]): The new uptime check configuration.
If a dict is provided, it must be of the same form as the protobuf
message :class:`~google.cloud.monitoring_v3.types.UptimeCheckConfig`
retry (Optional[google.api_core.retry.Retry]): A retry object used
to retry requests. If ``None`` is specified, requests will not
be retried.
timeout (Optional[float]): The amount of time, in seconds, to wait
for the request to complete. Note that if ``retry`` is
specified, the timeout applies to each individual attempt.
metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
that is provided to the method.
Returns:
A :class:`~google.cloud.monitoring_v3.types.UptimeCheckConfig` instance.
Raises:
google.api_core.exceptions.GoogleAPICallError: If the request
failed for any reason.
google.api_core.exceptions.RetryError: If the request failed due
to a retryable error and retry attempts failed.
ValueError: If the parameters are invalid. | Below is the the instruction that describes the task:
### Input:
Creates a new uptime check configuration.
Example:
>>> from google.cloud import monitoring_v3
>>>
>>> client = monitoring_v3.UptimeCheckServiceClient()
>>>
>>> parent = client.project_path('[PROJECT]')
>>>
>>> # TODO: Initialize `uptime_check_config`:
>>> uptime_check_config = {}
>>>
>>> response = client.create_uptime_check_config(parent, uptime_check_config)
Args:
parent (str): The project in which to create the uptime check. The format is
``projects/[PROJECT_ID]``.
uptime_check_config (Union[dict, ~google.cloud.monitoring_v3.types.UptimeCheckConfig]): The new uptime check configuration.
If a dict is provided, it must be of the same form as the protobuf
message :class:`~google.cloud.monitoring_v3.types.UptimeCheckConfig`
retry (Optional[google.api_core.retry.Retry]): A retry object used
to retry requests. If ``None`` is specified, requests will not
be retried.
timeout (Optional[float]): The amount of time, in seconds, to wait
for the request to complete. Note that if ``retry`` is
specified, the timeout applies to each individual attempt.
metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
that is provided to the method.
Returns:
A :class:`~google.cloud.monitoring_v3.types.UptimeCheckConfig` instance.
Raises:
google.api_core.exceptions.GoogleAPICallError: If the request
failed for any reason.
google.api_core.exceptions.RetryError: If the request failed due
to a retryable error and retry attempts failed.
ValueError: If the parameters are invalid.
### Response:
def create_uptime_check_config(
self,
parent,
uptime_check_config,
retry=google.api_core.gapic_v1.method.DEFAULT,
timeout=google.api_core.gapic_v1.method.DEFAULT,
metadata=None,
):
"""
Creates a new uptime check configuration.
Example:
>>> from google.cloud import monitoring_v3
>>>
>>> client = monitoring_v3.UptimeCheckServiceClient()
>>>
>>> parent = client.project_path('[PROJECT]')
>>>
>>> # TODO: Initialize `uptime_check_config`:
>>> uptime_check_config = {}
>>>
>>> response = client.create_uptime_check_config(parent, uptime_check_config)
Args:
parent (str): The project in which to create the uptime check. The format is
``projects/[PROJECT_ID]``.
uptime_check_config (Union[dict, ~google.cloud.monitoring_v3.types.UptimeCheckConfig]): The new uptime check configuration.
If a dict is provided, it must be of the same form as the protobuf
message :class:`~google.cloud.monitoring_v3.types.UptimeCheckConfig`
retry (Optional[google.api_core.retry.Retry]): A retry object used
to retry requests. If ``None`` is specified, requests will not
be retried.
timeout (Optional[float]): The amount of time, in seconds, to wait
for the request to complete. Note that if ``retry`` is
specified, the timeout applies to each individual attempt.
metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
that is provided to the method.
Returns:
A :class:`~google.cloud.monitoring_v3.types.UptimeCheckConfig` instance.
Raises:
google.api_core.exceptions.GoogleAPICallError: If the request
failed for any reason.
google.api_core.exceptions.RetryError: If the request failed due
to a retryable error and retry attempts failed.
ValueError: If the parameters are invalid.
"""
if metadata is None:
metadata = []
metadata = list(metadata)
# Wrap the transport method to add retry and timeout logic.
if "create_uptime_check_config" not in self._inner_api_calls:
self._inner_api_calls[
"create_uptime_check_config"
] = google.api_core.gapic_v1.method.wrap_method(
self.transport.create_uptime_check_config,
default_retry=self._method_configs["CreateUptimeCheckConfig"].retry,
default_timeout=self._method_configs["CreateUptimeCheckConfig"].timeout,
client_info=self._client_info,
)
request = uptime_service_pb2.CreateUptimeCheckConfigRequest(
parent=parent, uptime_check_config=uptime_check_config
)
if metadata is None:
metadata = []
metadata = list(metadata)
try:
routing_header = [("parent", parent)]
except AttributeError:
pass
else:
routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata(
routing_header
)
metadata.append(routing_metadata)
return self._inner_api_calls["create_uptime_check_config"](
request, retry=retry, timeout=timeout, metadata=metadata
) |
def configure(cls, name, config, prefix='depot.'):
"""Configures an application depot.
This configures the application wide depot from a settings dictionary.
The settings dictionary is usually loaded from an application configuration
file where all the depot options are specified with a given ``prefix``.
The default ``prefix`` is *depot.*, the minimum required setting
is ``depot.backend`` which specified the required backend for files storage.
Additional options depend on the choosen backend.
"""
if name in cls._depots:
raise RuntimeError('Depot %s has already been configured' % (name,))
if cls._default_depot is None:
cls._default_depot = name
cls._depots[name] = cls.from_config(config, prefix)
return cls._depots[name] | Configures an application depot.
This configures the application wide depot from a settings dictionary.
The settings dictionary is usually loaded from an application configuration
file where all the depot options are specified with a given ``prefix``.
The default ``prefix`` is *depot.*, the minimum required setting
is ``depot.backend`` which specified the required backend for files storage.
Additional options depend on the choosen backend. | Below is the the instruction that describes the task:
### Input:
Configures an application depot.
This configures the application wide depot from a settings dictionary.
The settings dictionary is usually loaded from an application configuration
file where all the depot options are specified with a given ``prefix``.
The default ``prefix`` is *depot.*, the minimum required setting
is ``depot.backend`` which specified the required backend for files storage.
Additional options depend on the choosen backend.
### Response:
def configure(cls, name, config, prefix='depot.'):
"""Configures an application depot.
This configures the application wide depot from a settings dictionary.
The settings dictionary is usually loaded from an application configuration
file where all the depot options are specified with a given ``prefix``.
The default ``prefix`` is *depot.*, the minimum required setting
is ``depot.backend`` which specified the required backend for files storage.
Additional options depend on the choosen backend.
"""
if name in cls._depots:
raise RuntimeError('Depot %s has already been configured' % (name,))
if cls._default_depot is None:
cls._default_depot = name
cls._depots[name] = cls.from_config(config, prefix)
return cls._depots[name] |
def set_time_rate(self, value):
'''
setter
Time rate.
'''
if isinstance(value, float) is False:
raise TypeError("The type of __time_rate must be float.")
if value <= 0.0:
raise ValueError("The value of __time_rate must be greater than 0.0")
self.__time_rate = value | setter
Time rate. | Below is the the instruction that describes the task:
### Input:
setter
Time rate.
### Response:
def set_time_rate(self, value):
'''
setter
Time rate.
'''
if isinstance(value, float) is False:
raise TypeError("The type of __time_rate must be float.")
if value <= 0.0:
raise ValueError("The value of __time_rate must be greater than 0.0")
self.__time_rate = value |
def init_app(application):
"""
Initialise an application
Set up whitenoise to handle static files.
"""
config = {k: v for k, v in application.config.items() if k in SCHEMA}
kwargs = {'autorefresh': application.debug}
kwargs.update((k[11:].lower(), v) for k, v in config.items())
instance = whitenoise.WhiteNoise(application.wsgi_app, **kwargs)
instance.add_files(application.static_folder, application.static_url_path)
if not hasattr(application, 'extensions'):
application.extensions = {}
application.extensions['whitenoise'] = instance
application.wsgi_app = instance | Initialise an application
Set up whitenoise to handle static files. | Below is the the instruction that describes the task:
### Input:
Initialise an application
Set up whitenoise to handle static files.
### Response:
def init_app(application):
"""
Initialise an application
Set up whitenoise to handle static files.
"""
config = {k: v for k, v in application.config.items() if k in SCHEMA}
kwargs = {'autorefresh': application.debug}
kwargs.update((k[11:].lower(), v) for k, v in config.items())
instance = whitenoise.WhiteNoise(application.wsgi_app, **kwargs)
instance.add_files(application.static_folder, application.static_url_path)
if not hasattr(application, 'extensions'):
application.extensions = {}
application.extensions['whitenoise'] = instance
application.wsgi_app = instance |
def parse_hex_color(value):
"""
Convert a CSS color in hexadecimal notation into its R, G, B components.
:param value: A CSS color in hexadecimal notation (a string like '#000000').
:return: A tuple with three integers (with values between 0 and 255)
corresponding to the R, G and B components of the color.
:raises: :exc:`~exceptions.ValueError` on values that can't be parsed.
"""
if value.startswith('#'):
value = value[1:]
if len(value) == 3:
return (
int(value[0] * 2, 16),
int(value[1] * 2, 16),
int(value[2] * 2, 16),
)
elif len(value) == 6:
return (
int(value[0:2], 16),
int(value[2:4], 16),
int(value[4:6], 16),
)
else:
raise ValueError() | Convert a CSS color in hexadecimal notation into its R, G, B components.
:param value: A CSS color in hexadecimal notation (a string like '#000000').
:return: A tuple with three integers (with values between 0 and 255)
corresponding to the R, G and B components of the color.
:raises: :exc:`~exceptions.ValueError` on values that can't be parsed. | Below is the the instruction that describes the task:
### Input:
Convert a CSS color in hexadecimal notation into its R, G, B components.
:param value: A CSS color in hexadecimal notation (a string like '#000000').
:return: A tuple with three integers (with values between 0 and 255)
corresponding to the R, G and B components of the color.
:raises: :exc:`~exceptions.ValueError` on values that can't be parsed.
### Response:
def parse_hex_color(value):
"""
Convert a CSS color in hexadecimal notation into its R, G, B components.
:param value: A CSS color in hexadecimal notation (a string like '#000000').
:return: A tuple with three integers (with values between 0 and 255)
corresponding to the R, G and B components of the color.
:raises: :exc:`~exceptions.ValueError` on values that can't be parsed.
"""
if value.startswith('#'):
value = value[1:]
if len(value) == 3:
return (
int(value[0] * 2, 16),
int(value[1] * 2, 16),
int(value[2] * 2, 16),
)
elif len(value) == 6:
return (
int(value[0:2], 16),
int(value[2:4], 16),
int(value[4:6], 16),
)
else:
raise ValueError() |
def consume(iterator, n):
"Advance the iterator n-steps ahead. If n is none, consume entirely."
# Use functions that consume iterators at C speed.
if n is None:
# feed the entire iterator into a zero-length deque
collections.deque(iterator, maxlen=0)
else:
# advance to the empty slice starting at position n
next(islice(iterator, n, n), None) | Advance the iterator n-steps ahead. If n is none, consume entirely. | Below is the the instruction that describes the task:
### Input:
Advance the iterator n-steps ahead. If n is none, consume entirely.
### Response:
def consume(iterator, n):
"Advance the iterator n-steps ahead. If n is none, consume entirely."
# Use functions that consume iterators at C speed.
if n is None:
# feed the entire iterator into a zero-length deque
collections.deque(iterator, maxlen=0)
else:
# advance to the empty slice starting at position n
next(islice(iterator, n, n), None) |
def mssql_get_pk_index_name(engine: Engine,
tablename: str,
schemaname: str = MSSQL_DEFAULT_SCHEMA) -> str:
"""
For Microsoft SQL Server specifically: fetch the name of the PK index
for the specified table (in the specified schema), or ``''`` if none is
found.
"""
# http://docs.sqlalchemy.org/en/latest/core/connections.html#sqlalchemy.engine.Connection.execute # noqa
# http://docs.sqlalchemy.org/en/latest/core/sqlelement.html#sqlalchemy.sql.expression.text # noqa
# http://docs.sqlalchemy.org/en/latest/core/sqlelement.html#sqlalchemy.sql.expression.TextClause.bindparams # noqa
# http://docs.sqlalchemy.org/en/latest/core/connections.html#sqlalchemy.engine.ResultProxy # noqa
query = text("""
SELECT
kc.name AS index_name
FROM
sys.key_constraints AS kc
INNER JOIN sys.tables AS ta ON ta.object_id = kc.parent_object_id
INNER JOIN sys.schemas AS s ON ta.schema_id = s.schema_id
WHERE
kc.[type] = 'PK'
AND ta.name = :tablename
AND s.name = :schemaname
""").bindparams(
tablename=tablename,
schemaname=schemaname,
)
with contextlib.closing(
engine.execute(query)) as result: # type: ResultProxy # noqa
row = result.fetchone()
return row[0] if row else '' | For Microsoft SQL Server specifically: fetch the name of the PK index
for the specified table (in the specified schema), or ``''`` if none is
found. | Below is the the instruction that describes the task:
### Input:
For Microsoft SQL Server specifically: fetch the name of the PK index
for the specified table (in the specified schema), or ``''`` if none is
found.
### Response:
def mssql_get_pk_index_name(engine: Engine,
tablename: str,
schemaname: str = MSSQL_DEFAULT_SCHEMA) -> str:
"""
For Microsoft SQL Server specifically: fetch the name of the PK index
for the specified table (in the specified schema), or ``''`` if none is
found.
"""
# http://docs.sqlalchemy.org/en/latest/core/connections.html#sqlalchemy.engine.Connection.execute # noqa
# http://docs.sqlalchemy.org/en/latest/core/sqlelement.html#sqlalchemy.sql.expression.text # noqa
# http://docs.sqlalchemy.org/en/latest/core/sqlelement.html#sqlalchemy.sql.expression.TextClause.bindparams # noqa
# http://docs.sqlalchemy.org/en/latest/core/connections.html#sqlalchemy.engine.ResultProxy # noqa
query = text("""
SELECT
kc.name AS index_name
FROM
sys.key_constraints AS kc
INNER JOIN sys.tables AS ta ON ta.object_id = kc.parent_object_id
INNER JOIN sys.schemas AS s ON ta.schema_id = s.schema_id
WHERE
kc.[type] = 'PK'
AND ta.name = :tablename
AND s.name = :schemaname
""").bindparams(
tablename=tablename,
schemaname=schemaname,
)
with contextlib.closing(
engine.execute(query)) as result: # type: ResultProxy # noqa
row = result.fetchone()
return row[0] if row else '' |
def save_vocabulary(self, vocab_path):
"""Save the tokenizer vocabulary to a directory or file."""
index = 0
if os.path.isdir(vocab_path):
vocab_file = os.path.join(vocab_path, VOCAB_NAME)
with open(vocab_file, "w", encoding="utf-8") as writer:
for token, token_index in sorted(self.vocab.items(), key=lambda kv: kv[1]):
if index != token_index:
logger.warning("Saving vocabulary to {}: vocabulary indices are not consecutive."
" Please check that the vocabulary is not corrupted!".format(vocab_file))
index = token_index
writer.write(token + u'\n')
index += 1
return vocab_file | Save the tokenizer vocabulary to a directory or file. | Below is the the instruction that describes the task:
### Input:
Save the tokenizer vocabulary to a directory or file.
### Response:
def save_vocabulary(self, vocab_path):
"""Save the tokenizer vocabulary to a directory or file."""
index = 0
if os.path.isdir(vocab_path):
vocab_file = os.path.join(vocab_path, VOCAB_NAME)
with open(vocab_file, "w", encoding="utf-8") as writer:
for token, token_index in sorted(self.vocab.items(), key=lambda kv: kv[1]):
if index != token_index:
logger.warning("Saving vocabulary to {}: vocabulary indices are not consecutive."
" Please check that the vocabulary is not corrupted!".format(vocab_file))
index = token_index
writer.write(token + u'\n')
index += 1
return vocab_file |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.