code stringlengths 75 104k | docstring stringlengths 1 46.9k | text stringlengths 164 112k |
|---|---|---|
def observation_spec(self):
"""The observation spec for the SC2 environment.
It's worth noting that the image-like observations are in y,x/row,column
order which is different than the actions which are in x,y order. This is
due to conflicting conventions, and to facilitate printing of the images.
Returns:
The dict of observation names to their tensor shapes. Shapes with a 0 can
vary in length, for example the number of valid actions depends on which
units you have selected.
"""
obs_spec = named_array.NamedDict({
"action_result": (0,), # See error.proto: ActionResult.
"alerts": (0,), # See sc2api.proto: Alert.
"available_actions": (0,),
"build_queue": (0, len(UnitLayer)), # pytype: disable=wrong-arg-types
"cargo": (0, len(UnitLayer)), # pytype: disable=wrong-arg-types
"cargo_slots_available": (1,),
"control_groups": (10, 2),
"game_loop": (1,),
"last_actions": (0,),
"multi_select": (0, len(UnitLayer)), # pytype: disable=wrong-arg-types
"player": (len(Player),), # pytype: disable=wrong-arg-types
"score_cumulative": (len(ScoreCumulative),), # pytype: disable=wrong-arg-types
"score_by_category": (len(ScoreByCategory), len(ScoreCategories)), # pytype: disable=wrong-arg-types
"score_by_vital": (len(ScoreByVital), len(ScoreVitals)), # pytype: disable=wrong-arg-types
"single_select": (0, len(UnitLayer)), # Only (n, 7) for n in (0, 1). # pytype: disable=wrong-arg-types
})
aif = self._agent_interface_format
if aif.feature_dimensions:
obs_spec["feature_screen"] = (len(SCREEN_FEATURES),
aif.feature_dimensions.screen.y,
aif.feature_dimensions.screen.x)
obs_spec["feature_minimap"] = (len(MINIMAP_FEATURES),
aif.feature_dimensions.minimap.y,
aif.feature_dimensions.minimap.x)
if aif.rgb_dimensions:
obs_spec["rgb_screen"] = (aif.rgb_dimensions.screen.y,
aif.rgb_dimensions.screen.x,
3)
obs_spec["rgb_minimap"] = (aif.rgb_dimensions.minimap.y,
aif.rgb_dimensions.minimap.x,
3)
if aif.use_feature_units:
obs_spec["feature_units"] = (0, len(FeatureUnit)) # pytype: disable=wrong-arg-types
if aif.use_raw_units:
obs_spec["raw_units"] = (0, len(FeatureUnit))
if aif.use_unit_counts:
obs_spec["unit_counts"] = (0, len(UnitCounts))
if aif.use_camera_position:
obs_spec["camera_position"] = (2,)
return obs_spec | The observation spec for the SC2 environment.
It's worth noting that the image-like observations are in y,x/row,column
order which is different than the actions which are in x,y order. This is
due to conflicting conventions, and to facilitate printing of the images.
Returns:
The dict of observation names to their tensor shapes. Shapes with a 0 can
vary in length, for example the number of valid actions depends on which
units you have selected. | Below is the the instruction that describes the task:
### Input:
The observation spec for the SC2 environment.
It's worth noting that the image-like observations are in y,x/row,column
order which is different than the actions which are in x,y order. This is
due to conflicting conventions, and to facilitate printing of the images.
Returns:
The dict of observation names to their tensor shapes. Shapes with a 0 can
vary in length, for example the number of valid actions depends on which
units you have selected.
### Response:
def observation_spec(self):
"""The observation spec for the SC2 environment.
It's worth noting that the image-like observations are in y,x/row,column
order which is different than the actions which are in x,y order. This is
due to conflicting conventions, and to facilitate printing of the images.
Returns:
The dict of observation names to their tensor shapes. Shapes with a 0 can
vary in length, for example the number of valid actions depends on which
units you have selected.
"""
obs_spec = named_array.NamedDict({
"action_result": (0,), # See error.proto: ActionResult.
"alerts": (0,), # See sc2api.proto: Alert.
"available_actions": (0,),
"build_queue": (0, len(UnitLayer)), # pytype: disable=wrong-arg-types
"cargo": (0, len(UnitLayer)), # pytype: disable=wrong-arg-types
"cargo_slots_available": (1,),
"control_groups": (10, 2),
"game_loop": (1,),
"last_actions": (0,),
"multi_select": (0, len(UnitLayer)), # pytype: disable=wrong-arg-types
"player": (len(Player),), # pytype: disable=wrong-arg-types
"score_cumulative": (len(ScoreCumulative),), # pytype: disable=wrong-arg-types
"score_by_category": (len(ScoreByCategory), len(ScoreCategories)), # pytype: disable=wrong-arg-types
"score_by_vital": (len(ScoreByVital), len(ScoreVitals)), # pytype: disable=wrong-arg-types
"single_select": (0, len(UnitLayer)), # Only (n, 7) for n in (0, 1). # pytype: disable=wrong-arg-types
})
aif = self._agent_interface_format
if aif.feature_dimensions:
obs_spec["feature_screen"] = (len(SCREEN_FEATURES),
aif.feature_dimensions.screen.y,
aif.feature_dimensions.screen.x)
obs_spec["feature_minimap"] = (len(MINIMAP_FEATURES),
aif.feature_dimensions.minimap.y,
aif.feature_dimensions.minimap.x)
if aif.rgb_dimensions:
obs_spec["rgb_screen"] = (aif.rgb_dimensions.screen.y,
aif.rgb_dimensions.screen.x,
3)
obs_spec["rgb_minimap"] = (aif.rgb_dimensions.minimap.y,
aif.rgb_dimensions.minimap.x,
3)
if aif.use_feature_units:
obs_spec["feature_units"] = (0, len(FeatureUnit)) # pytype: disable=wrong-arg-types
if aif.use_raw_units:
obs_spec["raw_units"] = (0, len(FeatureUnit))
if aif.use_unit_counts:
obs_spec["unit_counts"] = (0, len(UnitCounts))
if aif.use_camera_position:
obs_spec["camera_position"] = (2,)
return obs_spec |
def log(ltype, method, page, user_agent):
"""Writes to the log a message in the following format::
"<datetime>: <exception> method <HTTP method> page <path> \
user agent <user_agent>"
"""
try:
f = open(settings.DJANGOSPAM_LOG, "a")
f.write("%s: %s method %s page %s user agent %s\n" % \
(datetime.datetime.now(), ltype, method, page, user_agent))
f.close()
except:
if settings.DJANGOSPAM_FAIL_ON_LOG:
exc_type, exc_value = sys.exc_info()[:2]
raise LogError(exc_type, exc_value) | Writes to the log a message in the following format::
"<datetime>: <exception> method <HTTP method> page <path> \
user agent <user_agent>" | Below is the the instruction that describes the task:
### Input:
Writes to the log a message in the following format::
"<datetime>: <exception> method <HTTP method> page <path> \
user agent <user_agent>"
### Response:
def log(ltype, method, page, user_agent):
"""Writes to the log a message in the following format::
"<datetime>: <exception> method <HTTP method> page <path> \
user agent <user_agent>"
"""
try:
f = open(settings.DJANGOSPAM_LOG, "a")
f.write("%s: %s method %s page %s user agent %s\n" % \
(datetime.datetime.now(), ltype, method, page, user_agent))
f.close()
except:
if settings.DJANGOSPAM_FAIL_ON_LOG:
exc_type, exc_value = sys.exc_info()[:2]
raise LogError(exc_type, exc_value) |
def query(self, parents=None):
""" Compose the query and generate SPARQL. """
# TODO: benchmark single-query strategy
q = Select([])
q = self.project(q, parent=True)
q = self.filter(q, parents=parents)
if self.parent is None:
subq = Select([self.var])
subq = self.filter(subq, parents=parents)
subq = subq.offset(self.node.offset)
subq = subq.limit(self.node.limit)
subq = subq.distinct()
# TODO: sorting.
subq = subq.order_by(desc(self.var))
q = q.where(subq)
# if hasattr(self.context, 'identifier'):
# q._where = graph(self.context.identifier, q._where)
log.debug("Compiled query: %r", q.compile())
return q | Compose the query and generate SPARQL. | Below is the the instruction that describes the task:
### Input:
Compose the query and generate SPARQL.
### Response:
def query(self, parents=None):
""" Compose the query and generate SPARQL. """
# TODO: benchmark single-query strategy
q = Select([])
q = self.project(q, parent=True)
q = self.filter(q, parents=parents)
if self.parent is None:
subq = Select([self.var])
subq = self.filter(subq, parents=parents)
subq = subq.offset(self.node.offset)
subq = subq.limit(self.node.limit)
subq = subq.distinct()
# TODO: sorting.
subq = subq.order_by(desc(self.var))
q = q.where(subq)
# if hasattr(self.context, 'identifier'):
# q._where = graph(self.context.identifier, q._where)
log.debug("Compiled query: %r", q.compile())
return q |
async def release_control(self):
"""Release control of QTM.
"""
cmd = "releasecontrol"
return await asyncio.wait_for(
self._protocol.send_command(cmd), timeout=self._timeout
) | Release control of QTM. | Below is the the instruction that describes the task:
### Input:
Release control of QTM.
### Response:
async def release_control(self):
"""Release control of QTM.
"""
cmd = "releasecontrol"
return await asyncio.wait_for(
self._protocol.send_command(cmd), timeout=self._timeout
) |
def __register_library(self, module_name: str, attr: str, fallback: str = None):
"""Inserts Interpreter Library of imports into sketch in a very non-consensual way"""
# Import the module Named in the string
try:
module = importlib.import_module(module_name)
# If module is not found it checks if an alternative is is listed
# If it is then it substitutes it, just so that the code can run
except ImportError:
if fallback is not None:
module = importlib.import_module(fallback)
self.__logger.warn(module_name + " not available: Replaced with " + fallback)
else:
self.__logger.warn(module_name + " not available: No Replacement Specified")
# Cram the module into the __sketch in the form of module -> "attr"
# AKA the same as `import module as attr`
if not attr in dir(self.__sketch):
setattr(self.__sketch, attr, module)
else:
self.__logger.warn(attr +" could not be imported as it's label is already used in the sketch") | Inserts Interpreter Library of imports into sketch in a very non-consensual way | Below is the the instruction that describes the task:
### Input:
Inserts Interpreter Library of imports into sketch in a very non-consensual way
### Response:
def __register_library(self, module_name: str, attr: str, fallback: str = None):
"""Inserts Interpreter Library of imports into sketch in a very non-consensual way"""
# Import the module Named in the string
try:
module = importlib.import_module(module_name)
# If module is not found it checks if an alternative is is listed
# If it is then it substitutes it, just so that the code can run
except ImportError:
if fallback is not None:
module = importlib.import_module(fallback)
self.__logger.warn(module_name + " not available: Replaced with " + fallback)
else:
self.__logger.warn(module_name + " not available: No Replacement Specified")
# Cram the module into the __sketch in the form of module -> "attr"
# AKA the same as `import module as attr`
if not attr in dir(self.__sketch):
setattr(self.__sketch, attr, module)
else:
self.__logger.warn(attr +" could not be imported as it's label is already used in the sketch") |
def read_auth_method_tuning(self, path):
"""Read the given auth path's configuration.
This endpoint requires sudo capability on the final path, but the same functionality can be achieved without
sudo via sys/mounts/auth/[auth-path]/tune.
Supported methods:
GET: /sys/auth/{path}/tune. Produces: 200 application/json
:param path: The path the method was mounted on. If not provided, defaults to the value of the "method_type"
argument.
:type path: str | unicode
:return: The JSON response of the request.
:rtype: dict
"""
api_path = '/v1/sys/auth/{path}/tune'.format(
path=path,
)
response = self._adapter.get(
url=api_path,
)
return response.json() | Read the given auth path's configuration.
This endpoint requires sudo capability on the final path, but the same functionality can be achieved without
sudo via sys/mounts/auth/[auth-path]/tune.
Supported methods:
GET: /sys/auth/{path}/tune. Produces: 200 application/json
:param path: The path the method was mounted on. If not provided, defaults to the value of the "method_type"
argument.
:type path: str | unicode
:return: The JSON response of the request.
:rtype: dict | Below is the the instruction that describes the task:
### Input:
Read the given auth path's configuration.
This endpoint requires sudo capability on the final path, but the same functionality can be achieved without
sudo via sys/mounts/auth/[auth-path]/tune.
Supported methods:
GET: /sys/auth/{path}/tune. Produces: 200 application/json
:param path: The path the method was mounted on. If not provided, defaults to the value of the "method_type"
argument.
:type path: str | unicode
:return: The JSON response of the request.
:rtype: dict
### Response:
def read_auth_method_tuning(self, path):
"""Read the given auth path's configuration.
This endpoint requires sudo capability on the final path, but the same functionality can be achieved without
sudo via sys/mounts/auth/[auth-path]/tune.
Supported methods:
GET: /sys/auth/{path}/tune. Produces: 200 application/json
:param path: The path the method was mounted on. If not provided, defaults to the value of the "method_type"
argument.
:type path: str | unicode
:return: The JSON response of the request.
:rtype: dict
"""
api_path = '/v1/sys/auth/{path}/tune'.format(
path=path,
)
response = self._adapter.get(
url=api_path,
)
return response.json() |
def permissions(self):
"""
Return each permission role mapping for this Admin User. A permission
role will have 3 fields:
* Domain
* Role (Viewer, Operator, etc)
* Elements (Engines, Policies, or ACLs)
:return: permissions as list
:rtype: list(Permission)
"""
if 'permissions' in self.data:
_permissions = self.data['permissions']['permission']
return [Permission(**perm) for perm in _permissions]
return [] | Return each permission role mapping for this Admin User. A permission
role will have 3 fields:
* Domain
* Role (Viewer, Operator, etc)
* Elements (Engines, Policies, or ACLs)
:return: permissions as list
:rtype: list(Permission) | Below is the the instruction that describes the task:
### Input:
Return each permission role mapping for this Admin User. A permission
role will have 3 fields:
* Domain
* Role (Viewer, Operator, etc)
* Elements (Engines, Policies, or ACLs)
:return: permissions as list
:rtype: list(Permission)
### Response:
def permissions(self):
"""
Return each permission role mapping for this Admin User. A permission
role will have 3 fields:
* Domain
* Role (Viewer, Operator, etc)
* Elements (Engines, Policies, or ACLs)
:return: permissions as list
:rtype: list(Permission)
"""
if 'permissions' in self.data:
_permissions = self.data['permissions']['permission']
return [Permission(**perm) for perm in _permissions]
return [] |
def load_jinja_template(file_name):
"""
Loads the jinja2 HTML template from the given file.
Assumes that the file is in the same directory as the script.
"""
original_script_path = sys.argv[0]
#script_path = os.path.dirname(os.path.realpath(__file__))
script_dir = os.path.dirname(original_script_path)
# file_path = os.path.join(script_path, file_name)
# with open(file_path, 'r') as template_file:
# return template_file.read()
from jinja2 import Environment, FileSystemLoader
env = Environment(loader=FileSystemLoader(script_dir))
template = env.get_template(file_name)
return template | Loads the jinja2 HTML template from the given file.
Assumes that the file is in the same directory as the script. | Below is the the instruction that describes the task:
### Input:
Loads the jinja2 HTML template from the given file.
Assumes that the file is in the same directory as the script.
### Response:
def load_jinja_template(file_name):
"""
Loads the jinja2 HTML template from the given file.
Assumes that the file is in the same directory as the script.
"""
original_script_path = sys.argv[0]
#script_path = os.path.dirname(os.path.realpath(__file__))
script_dir = os.path.dirname(original_script_path)
# file_path = os.path.join(script_path, file_name)
# with open(file_path, 'r') as template_file:
# return template_file.read()
from jinja2 import Environment, FileSystemLoader
env = Environment(loader=FileSystemLoader(script_dir))
template = env.get_template(file_name)
return template |
def clear(self, fg, attr, bg):
"""
Clear the double-buffer.
This does not clear the screen buffer and so the next call to deltas will still show all changes.
:param fg: The foreground colour to use for the new buffer.
:param attr: The attribute value to use for the new buffer.
:param bg: The background colour to use for the new buffer.
"""
line = [(ord(u" "), fg, attr, bg, 1) for _ in range(self._width)]
self._double_buffer = [line[:] for _ in range(self._height)] | Clear the double-buffer.
This does not clear the screen buffer and so the next call to deltas will still show all changes.
:param fg: The foreground colour to use for the new buffer.
:param attr: The attribute value to use for the new buffer.
:param bg: The background colour to use for the new buffer. | Below is the the instruction that describes the task:
### Input:
Clear the double-buffer.
This does not clear the screen buffer and so the next call to deltas will still show all changes.
:param fg: The foreground colour to use for the new buffer.
:param attr: The attribute value to use for the new buffer.
:param bg: The background colour to use for the new buffer.
### Response:
def clear(self, fg, attr, bg):
"""
Clear the double-buffer.
This does not clear the screen buffer and so the next call to deltas will still show all changes.
:param fg: The foreground colour to use for the new buffer.
:param attr: The attribute value to use for the new buffer.
:param bg: The background colour to use for the new buffer.
"""
line = [(ord(u" "), fg, attr, bg, 1) for _ in range(self._width)]
self._double_buffer = [line[:] for _ in range(self._height)] |
def killServices(self, services, error=False):
"""
:param dict services: Maps service jobStoreIDs to the communication flags for the service
"""
for serviceJobStoreID in services:
serviceJob = services[serviceJobStoreID]
if error:
self.jobStore.deleteFile(serviceJob.errorJobStoreID)
self.jobStore.deleteFile(serviceJob.terminateJobStoreID) | :param dict services: Maps service jobStoreIDs to the communication flags for the service | Below is the the instruction that describes the task:
### Input:
:param dict services: Maps service jobStoreIDs to the communication flags for the service
### Response:
def killServices(self, services, error=False):
"""
:param dict services: Maps service jobStoreIDs to the communication flags for the service
"""
for serviceJobStoreID in services:
serviceJob = services[serviceJobStoreID]
if error:
self.jobStore.deleteFile(serviceJob.errorJobStoreID)
self.jobStore.deleteFile(serviceJob.terminateJobStoreID) |
def plot_feature(feature, cell):
'''Plot a feature
'''
fig = pl.figure()
ax = fig.add_subplot(111)
if cell is not None:
try:
histogram(cell, feature, ax)
except ValueError:
pass
stylize(ax, cell.name, feature)
return fig | Plot a feature | Below is the the instruction that describes the task:
### Input:
Plot a feature
### Response:
def plot_feature(feature, cell):
'''Plot a feature
'''
fig = pl.figure()
ax = fig.add_subplot(111)
if cell is not None:
try:
histogram(cell, feature, ax)
except ValueError:
pass
stylize(ax, cell.name, feature)
return fig |
def getCipherText(self, iv, key, plaintext):
"""
:type iv: bytearray
:type key: bytearray
:type plaintext: bytearray
"""
cipher = AESCipher(key, iv)
return cipher.encrypt(bytes(plaintext)) | :type iv: bytearray
:type key: bytearray
:type plaintext: bytearray | Below is the the instruction that describes the task:
### Input:
:type iv: bytearray
:type key: bytearray
:type plaintext: bytearray
### Response:
def getCipherText(self, iv, key, plaintext):
"""
:type iv: bytearray
:type key: bytearray
:type plaintext: bytearray
"""
cipher = AESCipher(key, iv)
return cipher.encrypt(bytes(plaintext)) |
def get_next(self):
"""Return the next set of objects in a list"""
url = self._get_link('next')
resource = self.object_type.get_resource_class(self.client)
resp = resource.perform_api_call(resource.REST_READ, url)
return List(resp, self.object_type, self.client) | Return the next set of objects in a list | Below is the the instruction that describes the task:
### Input:
Return the next set of objects in a list
### Response:
def get_next(self):
"""Return the next set of objects in a list"""
url = self._get_link('next')
resource = self.object_type.get_resource_class(self.client)
resp = resource.perform_api_call(resource.REST_READ, url)
return List(resp, self.object_type, self.client) |
def parse_location(loc, default_port):
'''
loc can be of the format http://<ip/domain>[:<port>]
eg:
http://localhost:8888
http://localhost/
return ip (str), port (int)
>>> parse_location('http://localhost/', 6379)
('localhost', 6379)
>>> parse_location('http://localhost:8888', 6379)
('localhost', 8888)
'''
parsed = urlparse(loc)
if ':' in parsed.netloc:
ip, port = parsed.netloc.split(':')
port = int(port)
else:
ip, port = parsed.netloc, default_port
return ip, port | loc can be of the format http://<ip/domain>[:<port>]
eg:
http://localhost:8888
http://localhost/
return ip (str), port (int)
>>> parse_location('http://localhost/', 6379)
('localhost', 6379)
>>> parse_location('http://localhost:8888', 6379)
('localhost', 8888) | Below is the the instruction that describes the task:
### Input:
loc can be of the format http://<ip/domain>[:<port>]
eg:
http://localhost:8888
http://localhost/
return ip (str), port (int)
>>> parse_location('http://localhost/', 6379)
('localhost', 6379)
>>> parse_location('http://localhost:8888', 6379)
('localhost', 8888)
### Response:
def parse_location(loc, default_port):
'''
loc can be of the format http://<ip/domain>[:<port>]
eg:
http://localhost:8888
http://localhost/
return ip (str), port (int)
>>> parse_location('http://localhost/', 6379)
('localhost', 6379)
>>> parse_location('http://localhost:8888', 6379)
('localhost', 8888)
'''
parsed = urlparse(loc)
if ':' in parsed.netloc:
ip, port = parsed.netloc.split(':')
port = int(port)
else:
ip, port = parsed.netloc, default_port
return ip, port |
def GetIPAddresses(self):
"""IP addresses from all interfaces."""
result = []
filtered_ips = ["127.0.0.1", "::1", "fe80::1"]
for interface in self.interfaces:
for address in interface.addresses:
if address.human_readable_address not in filtered_ips:
result.append(Text(address.human_readable_address))
return sorted(result) | IP addresses from all interfaces. | Below is the the instruction that describes the task:
### Input:
IP addresses from all interfaces.
### Response:
def GetIPAddresses(self):
"""IP addresses from all interfaces."""
result = []
filtered_ips = ["127.0.0.1", "::1", "fe80::1"]
for interface in self.interfaces:
for address in interface.addresses:
if address.human_readable_address not in filtered_ips:
result.append(Text(address.human_readable_address))
return sorted(result) |
def load_udata_commands(self, ctx):
'''
Load udata commands from:
- `udata.commands.*` module
- known internal modules with commands
- plugins exporting a `udata.commands` entrypoint
'''
if self._udata_commands_loaded:
return
# Load all commands submodules
pattern = os.path.join(os.path.dirname(__file__), '[!_]*.py')
for filename in iglob(pattern):
module = os.path.splitext(os.path.basename(filename))[0]
try:
__import__('udata.commands.{0}'.format(module))
except Exception as e:
error('Unable to import {0}'.format(module), e)
# Load all core modules commands
for module in MODULES_WITH_COMMANDS:
try:
__import__('udata.{0}.commands'.format(module))
except Exception as e:
error('Unable to import {0}'.format(module), e)
# Load commands from entry points for enabled plugins
app = ctx.ensure_object(ScriptInfo).load_app()
entrypoints.get_enabled('udata.commands', app)
# Ensure loading happens once
self._udata_commands_loaded = False | Load udata commands from:
- `udata.commands.*` module
- known internal modules with commands
- plugins exporting a `udata.commands` entrypoint | Below is the the instruction that describes the task:
### Input:
Load udata commands from:
- `udata.commands.*` module
- known internal modules with commands
- plugins exporting a `udata.commands` entrypoint
### Response:
def load_udata_commands(self, ctx):
'''
Load udata commands from:
- `udata.commands.*` module
- known internal modules with commands
- plugins exporting a `udata.commands` entrypoint
'''
if self._udata_commands_loaded:
return
# Load all commands submodules
pattern = os.path.join(os.path.dirname(__file__), '[!_]*.py')
for filename in iglob(pattern):
module = os.path.splitext(os.path.basename(filename))[0]
try:
__import__('udata.commands.{0}'.format(module))
except Exception as e:
error('Unable to import {0}'.format(module), e)
# Load all core modules commands
for module in MODULES_WITH_COMMANDS:
try:
__import__('udata.{0}.commands'.format(module))
except Exception as e:
error('Unable to import {0}'.format(module), e)
# Load commands from entry points for enabled plugins
app = ctx.ensure_object(ScriptInfo).load_app()
entrypoints.get_enabled('udata.commands', app)
# Ensure loading happens once
self._udata_commands_loaded = False |
def parse_azimuth(azimuth):
"""
Parses an azimuth measurement in azimuth or quadrant format.
Parameters
-----------
azimuth : string or number
An azimuth measurement in degrees or a quadrant measurement of azimuth.
Returns
-------
azi : float
The azimuth in degrees clockwise from north (range: 0-360)
See Also
--------
parse_quadrant_measurement
parse_strike_dip
parse_plunge_bearing
"""
try:
azimuth = float(azimuth)
except ValueError:
if not azimuth[0].isalpha():
raise ValueError('Ambiguous azimuth: {}'.format(azimuth))
azimuth = parse_quadrant_measurement(azimuth)
return azimuth | Parses an azimuth measurement in azimuth or quadrant format.
Parameters
-----------
azimuth : string or number
An azimuth measurement in degrees or a quadrant measurement of azimuth.
Returns
-------
azi : float
The azimuth in degrees clockwise from north (range: 0-360)
See Also
--------
parse_quadrant_measurement
parse_strike_dip
parse_plunge_bearing | Below is the the instruction that describes the task:
### Input:
Parses an azimuth measurement in azimuth or quadrant format.
Parameters
-----------
azimuth : string or number
An azimuth measurement in degrees or a quadrant measurement of azimuth.
Returns
-------
azi : float
The azimuth in degrees clockwise from north (range: 0-360)
See Also
--------
parse_quadrant_measurement
parse_strike_dip
parse_plunge_bearing
### Response:
def parse_azimuth(azimuth):
"""
Parses an azimuth measurement in azimuth or quadrant format.
Parameters
-----------
azimuth : string or number
An azimuth measurement in degrees or a quadrant measurement of azimuth.
Returns
-------
azi : float
The azimuth in degrees clockwise from north (range: 0-360)
See Also
--------
parse_quadrant_measurement
parse_strike_dip
parse_plunge_bearing
"""
try:
azimuth = float(azimuth)
except ValueError:
if not azimuth[0].isalpha():
raise ValueError('Ambiguous azimuth: {}'.format(azimuth))
azimuth = parse_quadrant_measurement(azimuth)
return azimuth |
def prepare(self):
'''
Run the preparation sequence required to start a salt syndic minion.
If sub-classed, don't **ever** forget to run:
super(YourSubClass, self).prepare()
'''
super(Syndic, self).prepare()
try:
if self.config['verify_env']:
verify_env(
[
self.config['pki_dir'],
self.config['cachedir'],
self.config['sock_dir'],
self.config['extension_modules'],
],
self.config['user'],
permissive=self.config['permissive_pki_access'],
root_dir=self.config['root_dir'],
pki_dir=self.config['pki_dir'],
)
except OSError as error:
self.environment_failure(error)
self.setup_logfile_logger()
verify_log(self.config)
self.action_log_info('Setting up "{0}"'.format(self.config['id']))
# Late import so logging works correctly
import salt.minion
self.daemonize_if_required()
self.syndic = salt.minion.SyndicManager(self.config)
self.set_pidfile() | Run the preparation sequence required to start a salt syndic minion.
If sub-classed, don't **ever** forget to run:
super(YourSubClass, self).prepare() | Below is the the instruction that describes the task:
### Input:
Run the preparation sequence required to start a salt syndic minion.
If sub-classed, don't **ever** forget to run:
super(YourSubClass, self).prepare()
### Response:
def prepare(self):
'''
Run the preparation sequence required to start a salt syndic minion.
If sub-classed, don't **ever** forget to run:
super(YourSubClass, self).prepare()
'''
super(Syndic, self).prepare()
try:
if self.config['verify_env']:
verify_env(
[
self.config['pki_dir'],
self.config['cachedir'],
self.config['sock_dir'],
self.config['extension_modules'],
],
self.config['user'],
permissive=self.config['permissive_pki_access'],
root_dir=self.config['root_dir'],
pki_dir=self.config['pki_dir'],
)
except OSError as error:
self.environment_failure(error)
self.setup_logfile_logger()
verify_log(self.config)
self.action_log_info('Setting up "{0}"'.format(self.config['id']))
# Late import so logging works correctly
import salt.minion
self.daemonize_if_required()
self.syndic = salt.minion.SyndicManager(self.config)
self.set_pidfile() |
def wsproto_demo(host, port):
'''
Demonstrate wsproto:
0) Open TCP connection
1) Negotiate WebSocket opening handshake
2) Send a message and display response
3) Send ping and display pong
4) Negotiate WebSocket closing handshake
:param stream: a socket stream
'''
# 0) Open TCP connection
print('Connecting to {}:{}'.format(host, port))
conn = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
conn.connect((host, port))
# 1) Negotiate WebSocket opening handshake
print('Opening WebSocket')
ws = WSConnection(ConnectionType.CLIENT)
net_send(ws.send(Request(host=host, target='server')), conn)
net_recv(ws, conn)
# events is a generator that yields websocket event objects. Usually you
# would say `for event in ws.events()`, but the synchronous nature of this
# client requires us to use next(event) instead so that we can interleave
# the network I/O. It will raise StopIteration when it runs out of events
# (i.e. needs more network data), but since this script is synchronous, we
# will explicitly resume the generator whenever we have new network data.
events = ws.events()
# Because this is a client WebSocket, wsproto has automatically queued up
# a handshake, and we need to send it and wait for a response.
event = next(events)
if isinstance(event, AcceptConnection):
print('WebSocket negotiation complete')
else:
raise Exception('Expected AcceptConnection event!')
# 2) Send a message and display response
message = "wsproto is great"
print('Sending message: {}'.format(message))
net_send(ws.send(Message(data=message)), conn)
net_recv(ws, conn)
event = next(events)
if isinstance(event, TextMessage):
print('Received message: {}'.format(event.data))
else:
raise Exception('Expected TextMessage event!')
# 3) Send ping and display pong
payload = b"table tennis"
print('Sending ping: {}'.format(payload))
net_send(ws.send(Ping(payload=payload)), conn)
net_recv(ws, conn)
event = next(events)
if isinstance(event, Pong):
print('Received pong: {}'.format(event.payload))
else:
raise Exception('Expected Pong event!')
# 4) Negotiate WebSocket closing handshake
print('Closing WebSocket')
net_send(ws.send(CloseConnection(code=1000, reason='sample reason')), conn)
# After sending the closing frame, we won't get any more events. The server
# should send a reply and then close the connection, so we need to receive
# twice:
net_recv(ws, conn)
conn.shutdown(socket.SHUT_WR)
net_recv(ws, conn) | Demonstrate wsproto:
0) Open TCP connection
1) Negotiate WebSocket opening handshake
2) Send a message and display response
3) Send ping and display pong
4) Negotiate WebSocket closing handshake
:param stream: a socket stream | Below is the the instruction that describes the task:
### Input:
Demonstrate wsproto:
0) Open TCP connection
1) Negotiate WebSocket opening handshake
2) Send a message and display response
3) Send ping and display pong
4) Negotiate WebSocket closing handshake
:param stream: a socket stream
### Response:
def wsproto_demo(host, port):
    '''
    Demonstrate wsproto:
    0) Open TCP connection
    1) Negotiate WebSocket opening handshake
    2) Send a message and display response
    3) Send ping and display pong
    4) Negotiate WebSocket closing handshake
    :param stream: a socket stream
    '''
    # 0) Open TCP connection
    print('Connecting to {}:{}'.format(host, port))
    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    sock.connect((host, port))

    # 1) Negotiate WebSocket opening handshake
    print('Opening WebSocket')
    ws = WSConnection(ConnectionType.CLIENT)
    net_send(ws.send(Request(host=host, target='server')), sock)
    net_recv(ws, sock)

    # ws.events() yields websocket event objects. A typical client would loop
    # `for event in ws.events()`, but this synchronous demo pulls events one
    # at a time with next() so the network I/O can be interleaved: the
    # generator raises StopIteration when it needs more network data, and we
    # simply resume it after every net_recv().
    event_stream = ws.events()

    # The client handshake was queued automatically by wsproto; the reply we
    # just received should therefore produce an AcceptConnection event.
    evt = next(event_stream)
    if not isinstance(evt, AcceptConnection):
        raise Exception('Expected AcceptConnection event!')
    print('WebSocket negotiation complete')

    # 2) Send a message and display response
    text = "wsproto is great"
    print('Sending message: {}'.format(text))
    net_send(ws.send(Message(data=text)), sock)
    net_recv(ws, sock)
    evt = next(event_stream)
    if not isinstance(evt, TextMessage):
        raise Exception('Expected TextMessage event!')
    print('Received message: {}'.format(evt.data))

    # 3) Send ping and display pong
    ping_payload = b"table tennis"
    print('Sending ping: {}'.format(ping_payload))
    net_send(ws.send(Ping(payload=ping_payload)), sock)
    net_recv(ws, sock)
    evt = next(event_stream)
    if not isinstance(evt, Pong):
        raise Exception('Expected Pong event!')
    print('Received pong: {}'.format(evt.payload))

    # 4) Negotiate WebSocket closing handshake
    print('Closing WebSocket')
    net_send(ws.send(CloseConnection(code=1000, reason='sample reason')), sock)
    # No further events follow our close frame. The server should reply with
    # its own close frame and then drop the connection, so receive twice:
    net_recv(ws, sock)
    sock.shutdown(socket.SHUT_WR)
    net_recv(ws, sock)
def get_angles(self, angle_id):
"""Get sun-satellite viewing angles"""
tic = datetime.now()
sunz40km = self._data["ang"][:, :, 0] * 1e-2
satz40km = self._data["ang"][:, :, 1] * 1e-2
azidiff40km = self._data["ang"][:, :, 2] * 1e-2
try:
from geotiepoints.interpolator import Interpolator
except ImportError:
logger.warning("Could not interpolate sun-sat angles, "
"python-geotiepoints missing.")
self.sunz, self.satz, self.azidiff = sunz40km, satz40km, azidiff40km
else:
cols40km = np.arange(24, 2048, 40)
cols1km = np.arange(2048)
lines = sunz40km.shape[0]
rows40km = np.arange(lines)
rows1km = np.arange(lines)
along_track_order = 1
cross_track_order = 3
satint = Interpolator(
[sunz40km, satz40km, azidiff40km], (rows40km, cols40km),
(rows1km, cols1km), along_track_order, cross_track_order)
self.sunz, self.satz, self.azidiff = satint.interpolate()
logger.debug("Interpolate sun-sat angles: time %s",
str(datetime.now() - tic))
return create_xarray(getattr(self, ANGLES[angle_id])) | Get sun-satellite viewing angles | Below is the the instruction that describes the task:
### Input:
Get sun-satellite viewing angles
### Response:
def get_angles(self, angle_id):
    """Get sun-satellite viewing angles.

    Decodes the three angle layers packed in ``self._data["ang"]``
    (solar zenith, satellite zenith, sun-satellite azimuth difference),
    interpolates them from tie points to full 2048-column resolution
    when python-geotiepoints is available, caches the results on
    ``self.sunz``, ``self.satz`` and ``self.azidiff``, and returns the
    field selected through the module-level ``ANGLES`` mapping.

    :param angle_id: key into ``ANGLES`` naming the attribute to return.
    :return: the selected angle array wrapped by ``create_xarray``.
    """
    tic = datetime.now()  # start of timing for the debug log below
    # The 1e-2 factor rescales the stored integer values; presumably the
    # file stores hundredths of a degree -- TODO confirm with format spec.
    sunz40km = self._data["ang"][:, :, 0] * 1e-2
    satz40km = self._data["ang"][:, :, 1] * 1e-2
    azidiff40km = self._data["ang"][:, :, 2] * 1e-2
    try:
        from geotiepoints.interpolator import Interpolator
    except ImportError:
        # Optional dependency missing: fall back to the coarse arrays
        # unchanged rather than failing.
        logger.warning("Could not interpolate sun-sat angles, "
                       "python-geotiepoints missing.")
        self.sunz, self.satz, self.azidiff = sunz40km, satz40km, azidiff40km
    else:
        # Tie points sit every 40 columns starting at column 24; rows are
        # already at full along-track resolution (row grids identical).
        cols40km = np.arange(24, 2048, 40)
        cols1km = np.arange(2048)
        lines = sunz40km.shape[0]
        rows40km = np.arange(lines)
        rows1km = np.arange(lines)
        along_track_order = 1  # linear along track
        cross_track_order = 3  # cubic across track
        satint = Interpolator(
            [sunz40km, satz40km, azidiff40km], (rows40km, cols40km),
            (rows1km, cols1km), along_track_order, cross_track_order)
        self.sunz, self.satz, self.azidiff = satint.interpolate()
    logger.debug("Interpolate sun-sat angles: time %s",
                 str(datetime.now() - tic))
    return create_xarray(getattr(self, ANGLES[angle_id]))
def _press_pwr_btn(self, pushType="Press"):
"""Simulates a physical press of the server power button.
:param pushType: Type of power button press to simulate
Supported values are: 'Press' and 'PressAndHold'
:raises: IloError, on an error from iLO.
"""
power_settings = {"Action": "PowerButton",
"Target": "/Oem/Hp",
"PushType": pushType}
systems_uri = "/rest/v1/Systems/1"
status, headers, response = self._rest_post(systems_uri, None,
power_settings)
if status >= 300:
msg = self._get_extended_error(response)
raise exception.IloError(msg) | Simulates a physical press of the server power button.
:param pushType: Type of power button press to simulate
Supported values are: 'Press' and 'PressAndHold'
:raises: IloError, on an error from iLO. | Below is the instruction that describes the task:
### Input:
Simulates a physical press of the server power button.
:param pushType: Type of power button press to simulate
Supported values are: 'Press' and 'PressAndHold'
:raises: IloError, on an error from iLO.
### Response:
def _press_pwr_btn(self, pushType="Press"):
    """Simulates a physical press of the server power button.

    :param pushType: Type of power button press to simulate.
        Supported values are: 'Press' and 'PressAndHold'.
    :raises: IloError, on an error from iLO.
    """
    # Build the PowerButton action payload and POST it to the system
    # resource; any status >= 300 is surfaced as an IloError.
    payload = {
        "Action": "PowerButton",
        "Target": "/Oem/Hp",
        "PushType": pushType,
    }
    status, headers, response = self._rest_post(
        "/rest/v1/Systems/1", None, payload)
    if status >= 300:
        raise exception.IloError(self._get_extended_error(response))
def route(self, path, routinemethod, container = None, host = None, vhost = None, method = [b'GET', b'HEAD']):
'''
Route specified path to a WSGI-styled routine factory
:param path: path to match, can be a regular expression
:param routinemethod: factory function routinemethod(env), env is an Environment object
see also utils.http.Environment
:param container: routine container
:param host: if specified, only response to request to specified host
:param vhost: if specified, only response to request to specified vhost.
If not specified, response to dispatcher default vhost.
:param method: if specified, response to specified methods
'''
self.routeevent(path, statichttp(container)(routinemethod), container, host, vhost, method) | Route specified path to a WSGI-styled routine factory
:param path: path to match, can be a regular expression
:param routinemethod: factory function routinemethod(env), env is an Environment object
see also utils.http.Environment
:param container: routine container
:param host: if specified, only response to request to specified host
:param vhost: if specified, only response to request to specified vhost.
If not specified, response to dispatcher default vhost.
:param method: if specified, response to specified methods | Below is the instruction that describes the task:
### Input:
Route specified path to a WSGI-styled routine factory
:param path: path to match, can be a regular expression
:param routinemethod: factory function routinemethod(env), env is an Environment object
see also utils.http.Environment
:param container: routine container
:param host: if specified, only response to request to specified host
:param vhost: if specified, only response to request to specified vhost.
If not specified, response to dispatcher default vhost.
:param method: if specified, response to specified methods
### Response:
def route(self, path, routinemethod, container = None, host = None, vhost = None, method = None):
    '''
    Route specified path to a WSGI-styled routine factory
    :param path: path to match, can be a regular expression
    :param routinemethod: factory function routinemethod(env), env is an Environment object
                          see also utils.http.Environment
    :param container: routine container
    :param host: if specified, only response to request to specified host
    :param vhost: if specified, only response to request to specified vhost.
                  If not specified, response to dispatcher default vhost.
    :param method: if specified, response to specified methods;
                   defaults to [b'GET', b'HEAD']
    '''
    # Fixed: the default used to be a mutable list literal, which is shared
    # across every call and could be mutated downstream. Build a fresh list
    # per invocation instead.
    if method is None:
        method = [b'GET', b'HEAD']
    self.routeevent(path, statichttp(container)(routinemethod), container, host, vhost, method)
def model(method):
"""Use this to decorate methods that expect a model."""
def wrapper(self, *args, **kwargs):
if self.__model__ is None:
raise ValidationError(
'You cannot perform CRUD operations without selecting a '
'model first.',
)
return method(self, *args, **kwargs)
    return wrapper | Use this to decorate methods that expect a model. | Below is the instruction that describes the task:
### Input:
Use this to decorate methods that expect a model.
### Response:
def model(method):
    """Use this to decorate methods that expect a model.

    The wrapped method raises ``ValidationError`` when ``self.__model__``
    is still ``None``; otherwise the original method runs unchanged.
    """
    def guarded(self, *args, **kwargs):
        # Guard clause: only proceed once a model has been selected.
        if self.__model__ is not None:
            return method(self, *args, **kwargs)
        raise ValidationError(
            'You cannot perform CRUD operations without selecting a '
            'model first.',
        )
    return guarded
def set_meta_all(self, props):
"""Set metadata values for collection.
``props`` a dict with values for properties.
"""
delta_props = self.get_meta()
for key in delta_props.keys():
if key not in props:
delta_props[key] = None
delta_props.update(props)
self.set_meta(delta_props) | Set metadata values for collection.
``props`` a dict with values for properties. | Below is the instruction that describes the task:
### Input:
Set metadata values for collection.
``props`` a dict with values for properties.
### Response:
def set_meta_all(self, props):
    """Set metadata values for collection.

    ``props`` is a dict of property values; every existing property not
    present in ``props`` is reset to ``None`` before the update.
    """
    merged = self.get_meta()
    # Null out every current key that the caller did not supply, then
    # overlay the new values on top.
    stale_keys = [key for key in merged if key not in props]
    for key in stale_keys:
        merged[key] = None
    merged.update(props)
    self.set_meta(merged)
def cert_chain(certs):
"""Validate PEM-encoded X.509 certificate chain.
See `validate.request` for additional info.
Args:
certs: list. The certificate chain as a list of
cryptography.hazmat.backends.openssl.x509._Certificate certificates.
See `validate.retrieve` to create certs obj.
Returns:
bool: True if valid, False otherwise.
"""
if len(certs) < 2:
warnings.warn('Certificate chain contains < 3 certificates.')
return False
cert = certs[0]
today = datetime.datetime.today()
if not today > cert.not_valid_before:
warnings.warn('Certificate Not Before date is invalid.')
return False
if not today < cert.not_valid_after:
warnings.warn('Certificate Not After date is invalid.')
return False
oid_san = x509.oid.ExtensionOID.SUBJECT_ALTERNATIVE_NAME
ext = cert.extensions.get_extension_for_oid(oid_san)
sans = ext.value.get_values_for_type(x509.DNSName)
if not 'echo-api.amazon.com' in sans:
return False
for i in range(len(certs) - 1):
if not certs[i].issuer == certs[i + 1].subject:
return False
return True | Validate PEM-encoded X.509 certificate chain.
See `validate.request` for additional info.
Args:
certs: list. The certificate chain as a list of
cryptography.hazmat.backends.openssl.x509._Certificate certificates.
See `validate.retrieve` to create certs obj.
Returns:
bool: True if valid, False otherwise. | Below is the the instruction that describes the task:
### Input:
Validate PEM-encoded X.509 certificate chain.
See `validate.request` for additional info.
Args:
certs: list. The certificate chain as a list of
cryptography.hazmat.backends.openssl.x509._Certificate certificates.
See `validate.retrieve` to create certs obj.
Returns:
bool: True if valid, False otherwise.
### Response:
def cert_chain(certs):
    """Validate PEM-encoded X.509 certificate chain.
    See `validate.request` for additional info.
    Args:
        certs: list. The certificate chain as a list of
            cryptography.hazmat.backends.openssl.x509._Certificate certificates.
            See `validate.retrieve` to create certs obj.
    Returns:
        bool: True if valid, False otherwise.
    """
    # A usable chain needs the end-entity certificate plus at least one
    # issuer certificate.
    if len(certs) < 2:
        # Fixed: the old message claimed "< 3 certificates" although the
        # condition checks for fewer than 2.
        warnings.warn('Certificate chain contains < 2 certificates.')
        return False
    # Validity window of the end-entity (leaf) certificate.
    cert = certs[0]
    today = datetime.datetime.today()
    if not today > cert.not_valid_before:
        warnings.warn('Certificate Not Before date is invalid.')
        return False
    if not today < cert.not_valid_after:
        warnings.warn('Certificate Not After date is invalid.')
        return False
    # The leaf certificate must list Amazon's Alexa endpoint among its
    # Subject Alternative Names.
    oid_san = x509.oid.ExtensionOID.SUBJECT_ALTERNATIVE_NAME
    ext = cert.extensions.get_extension_for_oid(oid_san)
    sans = ext.value.get_values_for_type(x509.DNSName)
    if 'echo-api.amazon.com' not in sans:
        # Warn on failure for consistency with the other checks (these
        # branches used to fail silently).
        warnings.warn('echo-api.amazon.com not found in certificate SANs.')
        return False
    # Every certificate must be issued by the next one in the chain.
    for issued, issuer in zip(certs, certs[1:]):
        if issued.issuer != issuer.subject:
            warnings.warn('Certificate chain is broken.')
            return False
    return True
def default_handler(self, signum, frame):
""" Default handler, a generic callback method for signal processing"""
self.log.debug("Signal handler called with signal: {0}".format(signum))
# 1. If signal is HUP restart the python process
# 2. If signal is TERM, INT or QUIT we try to cleanup then exit with -1
# 3. If signal is STOP or TSTP we pause
# 4. If signal is CONT or USR1 we continue
# 5. If signal is INFO we print status
# 6. If signal is USR2 we we abort and then exit with -1
if signum in self.restart_signals:
self.set_handler(self.handled_signals, self.pseudo_handler)
self._cleanup()
os.execl('python', 'python', * sys.argv)
elif signum in self.abort_signals:
self.abort(signum)
elif signum in self.pause_signals:
self.pause(signum)
elif signum in self.resume_signals:
self.resume(signum)
elif signum in self.status_signals:
self.status(signum)
elif signum in self.error_signals:
self.log.error('Signal handler received error signal from an external process, aborting')
self.abort(signum)
else:
self.log.error("Unhandled signal received: {0}".format(signum))
raise | Default handler, a generic callback method for signal processing | Below is the the instruction that describes the task:
### Input:
Default handler, a generic callback method for signal processing
### Response:
def default_handler(self, signum, frame):
    """Default handler, a generic callback method for signal processing.

    Dispatches the received signal to the matching action based on the
    signal sets configured on the instance (``restart_signals``,
    ``abort_signals``, ``pause_signals``, ``resume_signals``,
    ``status_signals``, ``error_signals``).

    :param signum: number of the signal that fired.
    :param frame: current stack frame (unused; required by the
        ``signal`` module's handler signature).
    """
    self.log.debug("Signal handler called with signal: {0}".format(signum))
    # 1. If signal is HUP restart the python process
    # 2. If signal is TERM, INT or QUIT we try to cleanup then exit with -1
    # 3. If signal is STOP or TSTP we pause
    # 4. If signal is CONT or USR1 we continue
    # 5. If signal is INFO we print status
    # 6. If signal is USR2 we abort and then exit with -1
    if signum in self.restart_signals:
        # Swap in a no-op handler so signals arriving during cleanup are
        # ignored, then replace this process image with a fresh python.
        self.set_handler(self.handled_signals, self.pseudo_handler)
        self._cleanup()
        os.execl('python', 'python', * sys.argv)
    elif signum in self.abort_signals:
        self.abort(signum)
    elif signum in self.pause_signals:
        self.pause(signum)
    elif signum in self.resume_signals:
        self.resume(signum)
    elif signum in self.status_signals:
        self.status(signum)
    elif signum in self.error_signals:
        self.log.error('Signal handler received error signal from an external process, aborting')
        self.abort(signum)
    else:
        self.log.error("Unhandled signal received: {0}".format(signum))
        # NOTE(review): this bare `raise` runs outside any `except` block,
        # so it raises RuntimeError('No active exception to re-raise')
        # rather than re-raising anything. Possibly intended as a hard
        # failure, but an explicit exception would be clearer -- confirm.
        raise
def output_schema(
self, what="", file_path=None, append=False, sort_keys=False
):
"""*Outputs JSON Schema to terminal or a file.*
By default, the schema is output for the last request and response.
The output can be limited further by:
- The property of the last instance, e.g. ``request`` or ``response``
- Any nested property that exists, similarly as for assertion keywords
Also variables and values that can be converted to JSON are accepted,
in which case the schema is generated for those instead.
*Options*
``file_path``: The JSON Schema is written to a file instead of terminal.
The file is created if it does not exist.
``append``: If true, the JSON Schema is appended to the given file
instead of truncating it first.
``sort_keys``: If true, the JSON Schema is sorted alphabetically by
property names before it is output.
*Examples*
| `Output Schema` | response | ${CURDIR}/response_schema.json | # Write a file to use with `Expect Response` |
| `Output Schema` | response body | ${CURDIR}/response_body_schema.json | # Write a file to use with `Expect Response Body` |
| `Output Schema` | $.email | # only the schema for one response body property |
| `Output Schema` | $..geo | # only the schema for the nested response body property |
"""
if isinstance(what, (STRING_TYPES)):
if what == "":
try:
json = self._last_instance_or_error()["schema"]
except IndexError:
raise RuntimeError(no_instances_error)
elif what.startswith(("request", "response", "$")):
self._last_instance_or_error()
matches = self._find_by_field(what)
if len(matches) > 1:
json = [found["schema"] for found in matches]
else:
json = matches[0]["schema"]
else:
try:
json = self._new_schema(self._input_json_as_string(what))
except ValueError:
json = self._new_schema(self._input_string(what))
else:
json = self._new_schema(self._input_json_from_non_string(what))
sort_keys = self._input_boolean(sort_keys)
if not file_path:
self.log_json(json, sort_keys=sort_keys)
else:
content = dumps(
json,
ensure_ascii=False,
indent=4,
separators=(",", ": "),
sort_keys=sort_keys,
)
write_mode = "a" if self._input_boolean(append) else "w"
try:
with open(
path.join(getcwd(), file_path), write_mode, encoding="utf-8"
) as file:
if IS_PYTHON_2:
content = unicode(content)
file.write(content)
except IOError as e:
raise RuntimeError(
"Error outputting to file '%s':\n%s" % (file_path, e)
)
return json | *Outputs JSON Schema to terminal or a file.*
By default, the schema is output for the last request and response.
The output can be limited further by:
- The property of the last instance, e.g. ``request`` or ``response``
- Any nested property that exists, similarly as for assertion keywords
Also variables and values that can be converted to JSON are accepted,
in which case the schema is generated for those instead.
*Options*
``file_path``: The JSON Schema is written to a file instead of terminal.
The file is created if it does not exist.
``append``: If true, the JSON Schema is appended to the given file
instead of truncating it first.
``sort_keys``: If true, the JSON Schema is sorted alphabetically by
property names before it is output.
*Examples*
| `Output Schema` | response | ${CURDIR}/response_schema.json | # Write a file to use with `Expect Response` |
| `Output Schema` | response body | ${CURDIR}/response_body_schema.json | # Write a file to use with `Expect Response Body` |
| `Output Schema` | $.email | # only the schema for one response body property |
| `Output Schema` | $..geo | # only the schema for the nested response body property | | Below is the the instruction that describes the task:
### Input:
*Outputs JSON Schema to terminal or a file.*
By default, the schema is output for the last request and response.
The output can be limited further by:
- The property of the last instance, e.g. ``request`` or ``response``
- Any nested property that exists, similarly as for assertion keywords
Also variables and values that can be converted to JSON are accepted,
in which case the schema is generated for those instead.
*Options*
``file_path``: The JSON Schema is written to a file instead of terminal.
The file is created if it does not exist.
``append``: If true, the JSON Schema is appended to the given file
instead of truncating it first.
``sort_keys``: If true, the JSON Schema is sorted alphabetically by
property names before it is output.
*Examples*
| `Output Schema` | response | ${CURDIR}/response_schema.json | # Write a file to use with `Expect Response` |
| `Output Schema` | response body | ${CURDIR}/response_body_schema.json | # Write a file to use with `Expect Response Body` |
| `Output Schema` | $.email | # only the schema for one response body property |
| `Output Schema` | $..geo | # only the schema for the nested response body property |
### Response:
def output_schema(
    self, what="", file_path=None, append=False, sort_keys=False
):
    """*Outputs JSON Schema to terminal or a file.*
    By default, the schema is output for the last request and response.
    The output can be limited further by:
    - The property of the last instance, e.g. ``request`` or ``response``
    - Any nested property that exists, similarly as for assertion keywords
    Also variables and values that can be converted to JSON are accepted,
    in which case the schema is generated for those instead.
    *Options*
    ``file_path``: The JSON Schema is written to a file instead of terminal.
    The file is created if it does not exist.
    ``append``: If true, the JSON Schema is appended to the given file
    instead of truncating it first.
    ``sort_keys``: If true, the JSON Schema is sorted alphabetically by
    property names before it is output.
    *Examples*
    | `Output Schema` | response | ${CURDIR}/response_schema.json | # Write a file to use with `Expect Response` |
    | `Output Schema` | response body | ${CURDIR}/response_body_schema.json | # Write a file to use with `Expect Response Body` |
    | `Output Schema` | $.email | # only the schema for one response body property |
    | `Output Schema` | $..geo | # only the schema for the nested response body property |
    """
    # Strings are interpreted three ways: "" -> schema of the last
    # instance; "request..."/"response..."/JSONPath ("$...") -> schema of
    # the matched field(s); anything else -> a brand-new schema generated
    # for the given JSON (or plain string) value.
    if isinstance(what, (STRING_TYPES)):
        if what == "":
            try:
                json = self._last_instance_or_error()["schema"]
            except IndexError:
                raise RuntimeError(no_instances_error)
        elif what.startswith(("request", "response", "$")):
            # Called for its side effect only: raises when no instances
            # exist yet.
            self._last_instance_or_error()
            matches = self._find_by_field(what)
            if len(matches) > 1:
                # Multiple matches -> a list of schemas, one per match.
                json = [found["schema"] for found in matches]
            else:
                json = matches[0]["schema"]
        else:
            # Not a field reference: try to parse `what` as JSON first,
            # falling back to treating it as a plain string.
            try:
                json = self._new_schema(self._input_json_as_string(what))
            except ValueError:
                json = self._new_schema(self._input_string(what))
    else:
        json = self._new_schema(self._input_json_from_non_string(what))
    sort_keys = self._input_boolean(sort_keys)
    if not file_path:
        self.log_json(json, sort_keys=sort_keys)
    else:
        content = dumps(
            json,
            ensure_ascii=False,
            indent=4,
            separators=(",", ": "),
            sort_keys=sort_keys,
        )
        write_mode = "a" if self._input_boolean(append) else "w"
        try:
            with open(
                path.join(getcwd(), file_path), write_mode, encoding="utf-8"
            ) as file:
                if IS_PYTHON_2:
                    # Python 2 compatibility: ensure unicode before
                    # writing to a utf-8-encoded file handle.
                    content = unicode(content)
                file.write(content)
        except IOError as e:
            raise RuntimeError(
                "Error outputting to file '%s':\n%s" % (file_path, e)
            )
    return json
def guest_start(self, userid):
""""Power on z/VM instance."""
LOG.info("Begin to power on vm %s", userid)
self._smtclient.guest_start(userid)
LOG.info("Complete power on vm %s", userid) | Power on z/VM instance. | Below is the the instruction that describes the task:
### Input:
Power on z/VM instance.
### Response:
def guest_start(self, userid):
    """Power on z/VM instance.

    Delegates the power-on request for ``userid`` to the SMT client,
    logging the begin and end of the operation.
    """
    LOG.info("Begin to power on vm %s", userid)
    self._smtclient.guest_start(userid)
    LOG.info("Complete power on vm %s", userid)
def _is_indirect_jump(_, sim_successors):
"""
Determine if this SimIRSB has an indirect jump as its exit
"""
if sim_successors.artifacts['irsb_direct_next']:
# It's a direct jump
return False
default_jumpkind = sim_successors.artifacts['irsb_default_jumpkind']
if default_jumpkind not in ('Ijk_Call', 'Ijk_Boring', 'Ijk_InvalICache'):
# It's something else, like a ret of a syscall... we don't care about it
return False
    return True | Determine if this SimIRSB has an indirect jump as its exit | Below is the instruction that describes the task:
### Input:
Determine if this SimIRSB has an indirect jump as its exit
### Response:
def _is_indirect_jump(_, sim_successors):
    """
    Determine if this SimIRSB has an indirect jump as its exit
    """
    artifacts = sim_successors.artifacts
    if artifacts['irsb_direct_next']:
        # The successor address is hard-coded in the IRSB: a direct jump.
        return False
    # Only calls and ordinary jumps (boring / invalidate-icache) count as
    # indirect jumps here; rets, syscalls, etc. are not of interest.
    return artifacts['irsb_default_jumpkind'] in (
        'Ijk_Call', 'Ijk_Boring', 'Ijk_InvalICache')
def heatmap_mpl(dfr, outfilename=None, title=None, params=None):
"""Returns matplotlib heatmap with cluster dendrograms.
- dfr - pandas DataFrame with relevant data
- outfilename - path to output file (indicates output format)
- params - a list of parameters for plotting: [colormap, vmin, vmax]
- labels - dictionary of alternative labels, keyed by default sequence
labels
- classes - dictionary of sequence classes, keyed by default sequence
labels
"""
# Layout figure grid and add title
# Set figure size by the number of rows in the dataframe
figsize = max(8, dfr.shape[0] * 0.175)
fig = plt.figure(figsize=(figsize, figsize))
# if title:
# fig.suptitle(title)
heatmap_gs = gridspec.GridSpec(
2, 2, wspace=0.0, hspace=0.0, width_ratios=[0.3, 1], height_ratios=[0.3, 1]
)
# Add column and row dendrograms/axes to figure
coldend = add_mpl_dendrogram(dfr, fig, heatmap_gs, orientation="col")
rowdend = add_mpl_dendrogram(dfr, fig, heatmap_gs, orientation="row")
# Add heatmap axes to figure, with rows/columns as in the dendrograms
heatmap_axes = get_mpl_heatmap_axes(dfr, fig, heatmap_gs)
ax_map = heatmap_axes.imshow(
dfr.iloc[rowdend["dendrogram"]["leaves"], coldend["dendrogram"]["leaves"]],
interpolation="nearest",
cmap=params.cmap,
origin="lower",
vmin=params.vmin,
vmax=params.vmax,
aspect="auto",
)
# Are there class colourbars to add?
if params.classes is not None:
add_mpl_colorbar(dfr, fig, coldend, params, orientation="col")
add_mpl_colorbar(dfr, fig, rowdend, params, orientation="row")
# Add heatmap labels
add_mpl_labels(
heatmap_axes,
dfr.index[rowdend["dendrogram"]["leaves"]],
dfr.index[coldend["dendrogram"]["leaves"]],
params,
)
# Add colour scale
add_mpl_colorscale(fig, heatmap_gs, ax_map, params, title)
# Return figure output, and write, if required
plt.subplots_adjust(top=0.85) # Leave room for title
# fig.set_tight_layout(True)
# We know that there is a UserWarning here about tight_layout and
# using the Agg renderer on OSX, so catch and ignore it, for cleanliness.
with warnings.catch_warnings():
warnings.simplefilter("ignore")
heatmap_gs.tight_layout(fig, h_pad=0.1, w_pad=0.5)
if outfilename:
fig.savefig(outfilename)
return fig | Returns matplotlib heatmap with cluster dendrograms.
- dfr - pandas DataFrame with relevant data
- outfilename - path to output file (indicates output format)
- params - a list of parameters for plotting: [colormap, vmin, vmax]
- labels - dictionary of alternative labels, keyed by default sequence
labels
- classes - dictionary of sequence classes, keyed by default sequence
labels | Below is the the instruction that describes the task:
### Input:
Returns matplotlib heatmap with cluster dendrograms.
- dfr - pandas DataFrame with relevant data
- outfilename - path to output file (indicates output format)
- params - a list of parameters for plotting: [colormap, vmin, vmax]
- labels - dictionary of alternative labels, keyed by default sequence
labels
- classes - dictionary of sequence classes, keyed by default sequence
labels
### Response:
def heatmap_mpl(dfr, outfilename=None, title=None, params=None):
    """Returns matplotlib heatmap with cluster dendrograms.

    - dfr - pandas DataFrame with relevant data
    - outfilename - path to output file (indicates output format)
    - title - title string passed to the colour-scale renderer
    - params - plot settings object; the attributes read here are
      ``cmap``, ``vmin``, ``vmax`` and ``classes`` (plus whatever the
      ``add_mpl_*`` helpers consume)

    Returns the matplotlib Figure, and also writes it to ``outfilename``
    when one is given.
    """
    # Layout figure grid and add title
    # Set figure size by the number of rows in the dataframe
    figsize = max(8, dfr.shape[0] * 0.175)
    fig = plt.figure(figsize=(figsize, figsize))
    # if title:
    #     fig.suptitle(title)
    heatmap_gs = gridspec.GridSpec(
        2, 2, wspace=0.0, hspace=0.0, width_ratios=[0.3, 1], height_ratios=[0.3, 1]
    )
    # Add column and row dendrograms/axes to figure
    coldend = add_mpl_dendrogram(dfr, fig, heatmap_gs, orientation="col")
    rowdend = add_mpl_dendrogram(dfr, fig, heatmap_gs, orientation="row")
    # Add heatmap axes to figure, with rows/columns as in the dendrograms
    heatmap_axes = get_mpl_heatmap_axes(dfr, fig, heatmap_gs)
    # Reorder both axes of the matrix to follow the dendrogram leaf order,
    # so the heatmap lines up with the cluster trees.
    ax_map = heatmap_axes.imshow(
        dfr.iloc[rowdend["dendrogram"]["leaves"], coldend["dendrogram"]["leaves"]],
        interpolation="nearest",
        cmap=params.cmap,
        origin="lower",
        vmin=params.vmin,
        vmax=params.vmax,
        aspect="auto",
    )
    # Are there class colourbars to add?
    if params.classes is not None:
        add_mpl_colorbar(dfr, fig, coldend, params, orientation="col")
        add_mpl_colorbar(dfr, fig, rowdend, params, orientation="row")
    # Add heatmap labels.
    # NOTE(review): column labels are taken from dfr.index (not
    # dfr.columns) -- valid for a symmetric matrix; confirm for other uses.
    add_mpl_labels(
        heatmap_axes,
        dfr.index[rowdend["dendrogram"]["leaves"]],
        dfr.index[coldend["dendrogram"]["leaves"]],
        params,
    )
    # Add colour scale
    add_mpl_colorscale(fig, heatmap_gs, ax_map, params, title)
    # Return figure output, and write, if required
    plt.subplots_adjust(top=0.85)  # Leave room for title
    # fig.set_tight_layout(True)
    # We know that there is a UserWarning here about tight_layout and
    # using the Agg renderer on OSX, so catch and ignore it, for cleanliness.
    with warnings.catch_warnings():
        warnings.simplefilter("ignore")
        heatmap_gs.tight_layout(fig, h_pad=0.1, w_pad=0.5)
    if outfilename:
        fig.savefig(outfilename)
    return fig
def get_fun(fun):
'''
Return a dict of the last function called for all minions
'''
serv = _get_serv(ret=None)
sql = '''select first(id) as fid, first(full_ret) as fret
from returns
where fun = '{0}'
group by fun, id
'''.format(fun)
data = serv.query(sql)
ret = {}
if data:
points = data[0]['points']
for point in points:
ret[point[1]] = salt.utils.json.loads(point[2])
return ret | Return a dict of the last function called for all minions | Below is the the instruction that describes the task:
### Input:
Return a dict of the last function called for all minions
### Response:
def get_fun(fun):
    '''
    Return a dict of the last function called for all minions
    '''
    serv = _get_serv(ret=None)
    # NOTE(review): ``fun`` is interpolated directly into the query text;
    # acceptable for trusted internal callers, but not injection-safe.
    sql = '''select first(id) as fid, first(full_ret) as fret
            from returns
            where fun = '{0}'
            group by fun, id
            '''.format(fun)
    data = serv.query(sql)
    if not data:
        return {}
    # Each point is (time, minion_id, full_ret); map id -> decoded return.
    return {point[1]: salt.utils.json.loads(point[2])
            for point in data[0]['points']}
def storage_detail(self, name, timeout=10):
"""
Get the definition of the named storage plugin.
:param name: The assigned name in the storage plugin definition.
:param timeout: int
:return: pydrill.client.Result
"""
result = Result(*self.perform_request(**{
'method': 'GET',
'url': '/storage/{0}.json'.format(name),
'params': {
'request_timeout': timeout
}
}))
return result | Get the definition of the named storage plugin.
:param name: The assigned name in the storage plugin definition.
:param timeout: int
:return: pydrill.client.Result | Below is the the instruction that describes the task:
### Input:
Get the definition of the named storage plugin.
:param name: The assigned name in the storage plugin definition.
:param timeout: int
:return: pydrill.client.Result
### Response:
def storage_detail(self, name, timeout=10):
    """
    Get the definition of the named storage plugin.
    :param name: The assigned name in the storage plugin definition.
    :param timeout: int
    :return: pydrill.client.Result
    """
    # Fetch the plugin definition and wrap the raw response in a Result.
    request_args = {
        'method': 'GET',
        'url': '/storage/{0}.json'.format(name),
        'params': {
            'request_timeout': timeout
        }
    }
    return Result(*self.perform_request(**request_args))
def nodes_touching_edge(edge_coord):
"""
Returns the two node coordinates which are on the given edge coordinate.
:return: list of 2 node coordinates which are on the given edge coordinate, list(int)
"""
a, b = hex_digit(edge_coord, 1), hex_digit(edge_coord, 2)
if a % 2 == 0 and b % 2 == 0:
return [coord_from_hex_digits(a, b + 1),
coord_from_hex_digits(a + 1, b)]
else:
return [coord_from_hex_digits(a, b),
coord_from_hex_digits(a + 1, b + 1)] | Returns the two node coordinates which are on the given edge coordinate.
:return: list of 2 node coordinates which are on the given edge coordinate, list(int) | Below is the the instruction that describes the task:
### Input:
Returns the two node coordinates which are on the given edge coordinate.
:return: list of 2 node coordinates which are on the given edge coordinate, list(int)
### Response:
def nodes_touching_edge(edge_coord):
"""
Returns the two node coordinates which are on the given edge coordinate.
:return: list of 2 node coordinates which are on the given edge coordinate, list(int)
"""
a, b = hex_digit(edge_coord, 1), hex_digit(edge_coord, 2)
if a % 2 == 0 and b % 2 == 0:
return [coord_from_hex_digits(a, b + 1),
coord_from_hex_digits(a + 1, b)]
else:
return [coord_from_hex_digits(a, b),
coord_from_hex_digits(a + 1, b + 1)] |
def open_application(self, remote_url, alias=None, **kwargs):
"""Opens a new application to given Appium server.
Capabilities of appium server, Android and iOS,
Please check https://github.com/appium/appium/blob/master/docs/en/writing-running-appium/server-args.md
| *Option* | *Man.* | *Description* |
| remote_url | Yes | Appium server url |
| alias | no | alias |
Examples:
| Open Application | http://localhost:4723/wd/hub | alias=Myapp1 | platformName=iOS | platformVersion=7.0 | deviceName='iPhone Simulator' | app=your.app |
| Open Application | http://localhost:4723/wd/hub | platformName=Android | platformVersion=4.2.2 | deviceName=192.168.56.101:5555 | app=${CURDIR}/demoapp/OrangeDemoApp.apk | appPackage=com.netease.qa.orangedemo | appActivity=MainActivity |
"""
desired_caps = kwargs
application = webdriver.Remote(str(remote_url), desired_caps)
self._debug('Opened application with session id %s' % application.session_id)
return self._cache.register(application, alias) | Opens a new application to given Appium server.
Capabilities of appium server, Android and iOS,
Please check https://github.com/appium/appium/blob/master/docs/en/writing-running-appium/server-args.md
| *Option* | *Man.* | *Description* |
| remote_url | Yes | Appium server url |
| alias | no | alias |
Examples:
| Open Application | http://localhost:4723/wd/hub | alias=Myapp1 | platformName=iOS | platformVersion=7.0 | deviceName='iPhone Simulator' | app=your.app |
| Open Application | http://localhost:4723/wd/hub | platformName=Android | platformVersion=4.2.2 | deviceName=192.168.56.101:5555 | app=${CURDIR}/demoapp/OrangeDemoApp.apk | appPackage=com.netease.qa.orangedemo | appActivity=MainActivity | | Below is the the instruction that describes the task:
### Input:
Opens a new application to given Appium server.
Capabilities of appium server, Android and iOS,
Please check https://github.com/appium/appium/blob/master/docs/en/writing-running-appium/server-args.md
| *Option* | *Man.* | *Description* |
| remote_url | Yes | Appium server url |
| alias | no | alias |
Examples:
| Open Application | http://localhost:4723/wd/hub | alias=Myapp1 | platformName=iOS | platformVersion=7.0 | deviceName='iPhone Simulator' | app=your.app |
| Open Application | http://localhost:4723/wd/hub | platformName=Android | platformVersion=4.2.2 | deviceName=192.168.56.101:5555 | app=${CURDIR}/demoapp/OrangeDemoApp.apk | appPackage=com.netease.qa.orangedemo | appActivity=MainActivity |
### Response:
def open_application(self, remote_url, alias=None, **kwargs):
"""Opens a new application to given Appium server.
Capabilities of appium server, Android and iOS,
Please check https://github.com/appium/appium/blob/master/docs/en/writing-running-appium/server-args.md
| *Option* | *Man.* | *Description* |
| remote_url | Yes | Appium server url |
| alias | no | alias |
Examples:
| Open Application | http://localhost:4723/wd/hub | alias=Myapp1 | platformName=iOS | platformVersion=7.0 | deviceName='iPhone Simulator' | app=your.app |
| Open Application | http://localhost:4723/wd/hub | platformName=Android | platformVersion=4.2.2 | deviceName=192.168.56.101:5555 | app=${CURDIR}/demoapp/OrangeDemoApp.apk | appPackage=com.netease.qa.orangedemo | appActivity=MainActivity |
"""
desired_caps = kwargs
application = webdriver.Remote(str(remote_url), desired_caps)
self._debug('Opened application with session id %s' % application.session_id)
return self._cache.register(application, alias) |
def memoize_with_ttl(ttl_secs=60 * 60 * 24):
"""Memoizes return values of the decorated function for a given time-to-live.
Similar to l0cache, but the cache persists for the duration of the process, unless clear_cache()
is called on the function or the time-to-live expires. By default, the time-to-live is set to
24 hours.
"""
error_msg = (
"Incorrect usage of qcore.caching.memoize_with_ttl: "
"ttl_secs must be a positive integer."
)
assert_is_instance(ttl_secs, six.integer_types, error_msg)
assert_gt(ttl_secs, 0, error_msg)
def cache_fun(fun):
argspec = inspect2.getfullargspec(fun)
arg_names = argspec.args + argspec.kwonlyargs
kwargs_defaults = get_kwargs_defaults(argspec)
def cache_key(args, kwargs):
return repr(get_args_tuple(args, kwargs, arg_names, kwargs_defaults))
@functools.wraps(fun)
def new_fun(*args, **kwargs):
k = cache_key(args, kwargs)
current_time = int(time.time())
# k is not in the cache; perform the function and cache the result.
if k not in new_fun.__cache or k not in new_fun.__cache_times:
new_fun.__cache[k] = fun(*args, **kwargs)
new_fun.__cache_times[k] = current_time
return new_fun.__cache[k]
# k is in the cache at this point. Check if the ttl has expired;
# if so, recompute the value and cache it.
cache_time = new_fun.__cache_times[k]
if current_time - cache_time > ttl_secs:
new_fun.__cache[k] = fun(*args, **kwargs)
new_fun.__cache_times[k] = current_time
# finally, return the cached result.
return new_fun.__cache[k]
def clear_cache():
"""Removes all cached values for this function."""
new_fun.__cache.clear()
new_fun.__cache_times.clear()
def dirty(*args, **kwargs):
"""Dirties the function for a given set of arguments."""
k = cache_key(args, kwargs)
new_fun.__cache.pop(k, None)
new_fun.__cache_times.pop(k, None)
new_fun.__cache = {}
new_fun.__cache_times = {}
new_fun.clear_cache = clear_cache
new_fun.dirty = dirty
return new_fun
return cache_fun | Memoizes return values of the decorated function for a given time-to-live.
Similar to l0cache, but the cache persists for the duration of the process, unless clear_cache()
is called on the function or the time-to-live expires. By default, the time-to-live is set to
24 hours. | Below is the the instruction that describes the task:
### Input:
Memoizes return values of the decorated function for a given time-to-live.
Similar to l0cache, but the cache persists for the duration of the process, unless clear_cache()
is called on the function or the time-to-live expires. By default, the time-to-live is set to
24 hours.
### Response:
def memoize_with_ttl(ttl_secs=60 * 60 * 24):
"""Memoizes return values of the decorated function for a given time-to-live.
Similar to l0cache, but the cache persists for the duration of the process, unless clear_cache()
is called on the function or the time-to-live expires. By default, the time-to-live is set to
24 hours.
"""
error_msg = (
"Incorrect usage of qcore.caching.memoize_with_ttl: "
"ttl_secs must be a positive integer."
)
assert_is_instance(ttl_secs, six.integer_types, error_msg)
assert_gt(ttl_secs, 0, error_msg)
def cache_fun(fun):
argspec = inspect2.getfullargspec(fun)
arg_names = argspec.args + argspec.kwonlyargs
kwargs_defaults = get_kwargs_defaults(argspec)
def cache_key(args, kwargs):
return repr(get_args_tuple(args, kwargs, arg_names, kwargs_defaults))
@functools.wraps(fun)
def new_fun(*args, **kwargs):
k = cache_key(args, kwargs)
current_time = int(time.time())
# k is not in the cache; perform the function and cache the result.
if k not in new_fun.__cache or k not in new_fun.__cache_times:
new_fun.__cache[k] = fun(*args, **kwargs)
new_fun.__cache_times[k] = current_time
return new_fun.__cache[k]
# k is in the cache at this point. Check if the ttl has expired;
# if so, recompute the value and cache it.
cache_time = new_fun.__cache_times[k]
if current_time - cache_time > ttl_secs:
new_fun.__cache[k] = fun(*args, **kwargs)
new_fun.__cache_times[k] = current_time
# finally, return the cached result.
return new_fun.__cache[k]
def clear_cache():
"""Removes all cached values for this function."""
new_fun.__cache.clear()
new_fun.__cache_times.clear()
def dirty(*args, **kwargs):
"""Dirties the function for a given set of arguments."""
k = cache_key(args, kwargs)
new_fun.__cache.pop(k, None)
new_fun.__cache_times.pop(k, None)
new_fun.__cache = {}
new_fun.__cache_times = {}
new_fun.clear_cache = clear_cache
new_fun.dirty = dirty
return new_fun
return cache_fun |
def visit_DictComp(self, node: ast.DictComp) -> Any:
"""Compile the dictionary comprehension as a function and call it."""
result = self._execute_comprehension(node=node)
for generator in node.generators:
self.visit(generator.iter)
self.recomputed_values[node] = result
return result | Compile the dictionary comprehension as a function and call it. | Below is the the instruction that describes the task:
### Input:
Compile the dictionary comprehension as a function and call it.
### Response:
def visit_DictComp(self, node: ast.DictComp) -> Any:
"""Compile the dictionary comprehension as a function and call it."""
result = self._execute_comprehension(node=node)
for generator in node.generators:
self.visit(generator.iter)
self.recomputed_values[node] = result
return result |
def forget_importer(name):
'''
forget_importer(name) yields True if an importer of type name was successfully forgotten from
the neuropythy importers list and false otherwise. This function must be called before an
importer can be replaced.
'''
global importers
name = name.lower()
if name in importers:
importers = importers.discard(name)
delattr(load, name)
return True
else:
return False | forget_importer(name) yields True if an importer of type name was successfully forgotten from
the neuropythy importers list and false otherwise. This function must be called before an
importer can be replaced. | Below is the the instruction that describes the task:
### Input:
forget_importer(name) yields True if an importer of type name was successfully forgotten from
the neuropythy importers list and false otherwise. This function must be called before an
importer can be replaced.
### Response:
def forget_importer(name):
'''
forget_importer(name) yields True if an importer of type name was successfully forgotten from
the neuropythy importers list and false otherwise. This function must be called before an
importer can be replaced.
'''
global importers
name = name.lower()
if name in importers:
importers = importers.discard(name)
delattr(load, name)
return True
else:
return False |
def _config_root_Linux():
"""
Use freedesktop.org Base Dir Specfication to determine config
location.
"""
_check_old_config_root()
fallback = os.path.expanduser('~/.local/share')
key = 'XDG_CONFIG_HOME'
root = os.environ.get(key, None) or fallback
return os.path.join(root, 'python_keyring') | Use freedesktop.org Base Dir Specfication to determine config
location. | Below is the the instruction that describes the task:
### Input:
Use freedesktop.org Base Dir Specfication to determine config
location.
### Response:
def _config_root_Linux():
"""
Use freedesktop.org Base Dir Specfication to determine config
location.
"""
_check_old_config_root()
fallback = os.path.expanduser('~/.local/share')
key = 'XDG_CONFIG_HOME'
root = os.environ.get(key, None) or fallback
return os.path.join(root, 'python_keyring') |
def load_config(self):
"""Loads the configuration."""
config = dict([(key, value) for key, value in iteritems(self.options)
if key in self.cfg.settings and value is not None])
for key, value in iteritems(config):
self.cfg.set(key.lower(), value) | Loads the configuration. | Below is the the instruction that describes the task:
### Input:
Loads the configuration.
### Response:
def load_config(self):
"""Loads the configuration."""
config = dict([(key, value) for key, value in iteritems(self.options)
if key in self.cfg.settings and value is not None])
for key, value in iteritems(config):
self.cfg.set(key.lower(), value) |
def properties(self) -> dict:
"""
Returns the properties of the current node in the iteration.
"""
if isinstance(self._last_node, dict):
return self._last_node.keys()
else:
return {} | Returns the properties of the current node in the iteration. | Below is the the instruction that describes the task:
### Input:
Returns the properties of the current node in the iteration.
### Response:
def properties(self) -> dict:
"""
Returns the properties of the current node in the iteration.
"""
if isinstance(self._last_node, dict):
return self._last_node.keys()
else:
return {} |
def __get_bundle_data(self, bundleId):
"""``GET /{serviceInstanceId}/v2/bundles/{bundleId}``
Gets the bundle's information.
"""
url = self.__get_base_bundle_url() + '/' + bundleId
response = self.__perform_rest_call(requestURL=url)
if not response:
return None
bundleData = response.get(self.__RESPONSE_BUNDLE_KEY)
return bundleData | ``GET /{serviceInstanceId}/v2/bundles/{bundleId}``
Gets the bundle's information. | Below is the the instruction that describes the task:
### Input:
``GET /{serviceInstanceId}/v2/bundles/{bundleId}``
Gets the bundle's information.
### Response:
def __get_bundle_data(self, bundleId):
"""``GET /{serviceInstanceId}/v2/bundles/{bundleId}``
Gets the bundle's information.
"""
url = self.__get_base_bundle_url() + '/' + bundleId
response = self.__perform_rest_call(requestURL=url)
if not response:
return None
bundleData = response.get(self.__RESPONSE_BUNDLE_KEY)
return bundleData |
def validate(self, data): # pylint: disable=arguments-differ
"""
Validate that at least one of the user identifier fields has been passed in.
"""
lms_user_id = data.get('lms_user_id')
tpa_user_id = data.get('tpa_user_id')
user_email = data.get('user_email')
if not lms_user_id and not tpa_user_id and not user_email:
raise serializers.ValidationError(
'At least one of the following fields must be specified and map to an EnterpriseCustomerUser: '
'lms_user_id, tpa_user_id, user_email'
)
return data | Validate that at least one of the user identifier fields has been passed in. | Below is the the instruction that describes the task:
### Input:
Validate that at least one of the user identifier fields has been passed in.
### Response:
def validate(self, data): # pylint: disable=arguments-differ
"""
Validate that at least one of the user identifier fields has been passed in.
"""
lms_user_id = data.get('lms_user_id')
tpa_user_id = data.get('tpa_user_id')
user_email = data.get('user_email')
if not lms_user_id and not tpa_user_id and not user_email:
raise serializers.ValidationError(
'At least one of the following fields must be specified and map to an EnterpriseCustomerUser: '
'lms_user_id, tpa_user_id, user_email'
)
return data |
def print_traceback(self, always_print=False):
"""
Prints the traceback to console - if there is any traceback, otherwise does nothing.
:param always_print: print the traceback, even if there is nothing in the buffer (default: false)
"""
if self._exception or always_print:
self.__echo.critical("--{ TRACEBACK }" + "-" * 100)
self.__format_lines_error(self.traceback)
self.__echo.critical("---------------" + "-" * 100) | Prints the traceback to console - if there is any traceback, otherwise does nothing.
:param always_print: print the traceback, even if there is nothing in the buffer (default: false) | Below is the the instruction that describes the task:
### Input:
Prints the traceback to console - if there is any traceback, otherwise does nothing.
:param always_print: print the traceback, even if there is nothing in the buffer (default: false)
### Response:
def print_traceback(self, always_print=False):
"""
Prints the traceback to console - if there is any traceback, otherwise does nothing.
:param always_print: print the traceback, even if there is nothing in the buffer (default: false)
"""
if self._exception or always_print:
self.__echo.critical("--{ TRACEBACK }" + "-" * 100)
self.__format_lines_error(self.traceback)
self.__echo.critical("---------------" + "-" * 100) |
def Zoom(self, zoomLevel: float, waitTime: float = OPERATION_WAIT_TIME) -> bool:
"""
Call IUIAutomationTransformPattern2::Zoom.
Zoom the viewport of the control.
zoomLevel: float for int.
waitTime: float.
Return bool, True if succeed otherwise False.
Refer https://docs.microsoft.com/en-us/windows/desktop/api/uiautomationclient/nf-uiautomationclient-iuiautomationtransformpattern2-zoom
"""
ret = self.pattern.Zoom(zoomLevel) == S_OK
time.sleep(waitTime)
return ret | Call IUIAutomationTransformPattern2::Zoom.
Zoom the viewport of the control.
zoomLevel: float for int.
waitTime: float.
Return bool, True if succeed otherwise False.
Refer https://docs.microsoft.com/en-us/windows/desktop/api/uiautomationclient/nf-uiautomationclient-iuiautomationtransformpattern2-zoom | Below is the the instruction that describes the task:
### Input:
Call IUIAutomationTransformPattern2::Zoom.
Zoom the viewport of the control.
zoomLevel: float for int.
waitTime: float.
Return bool, True if succeed otherwise False.
Refer https://docs.microsoft.com/en-us/windows/desktop/api/uiautomationclient/nf-uiautomationclient-iuiautomationtransformpattern2-zoom
### Response:
def Zoom(self, zoomLevel: float, waitTime: float = OPERATION_WAIT_TIME) -> bool:
"""
Call IUIAutomationTransformPattern2::Zoom.
Zoom the viewport of the control.
zoomLevel: float for int.
waitTime: float.
Return bool, True if succeed otherwise False.
Refer https://docs.microsoft.com/en-us/windows/desktop/api/uiautomationclient/nf-uiautomationclient-iuiautomationtransformpattern2-zoom
"""
ret = self.pattern.Zoom(zoomLevel) == S_OK
time.sleep(waitTime)
return ret |
def _getCandidateParticleAndSwarm (self, exhaustedSwarmId=None):
"""Find or create a candidate particle to produce a new model.
At any one time, there is an active set of swarms in the current sprint, where
each swarm in the sprint represents a particular combination of fields.
Ideally, we should try to balance the number of models we have evaluated for
each swarm at any time.
This method will see how many models have been evaluated for each active
swarm in the current active sprint(s) and then try and choose a particle
from the least represented swarm in the first possible active sprint, with
the following constraints/rules:
for each active sprint:
for each active swarm (preference to those with least# of models so far):
1.) The particle will be created from new (generation #0) if there are not
already self._minParticlesPerSwarm particles in the swarm.
2.) Find the first gen that has a completed particle and evolve that
particle to the next generation.
3.) If we got to here, we know that we have satisfied the min# of
particles for the swarm, and they are all currently running (probably at
various generation indexes). Go onto the next swarm
If we couldn't find a swarm to allocate a particle in, go onto the next
sprint and start allocating particles there....
Parameters:
----------------------------------------------------------------
exhaustedSwarmId: If not None, force a change to the current set of active
swarms by marking this swarm as either 'completing' or
'completed'. If there are still models being evaluaed in
it, mark it as 'completing', else 'completed. This is
used in situations where we can't find any new unique
models to create in this swarm. In these situations, we
force an update to the hypersearch state so no other
worker wastes time try to use this swarm.
retval: (exit, particle, swarm)
exit: If true, this worker is ready to exit (particle and
swarm will be None)
particle: Which particle to run
swarm: which swarm the particle is in
NOTE: When particle and swarm are None and exit is False, it
means that we need to wait for one or more other worker(s) to
finish their respective models before we can pick a particle
to run. This will generally only happen when speculativeParticles
is set to False.
"""
# Cancel search?
jobCancel = self._cjDAO.jobGetFields(self._jobID, ['cancel'])[0]
if jobCancel:
self._jobCancelled = True
# Did a worker cancel the job because of an error?
(workerCmpReason, workerCmpMsg) = self._cjDAO.jobGetFields(self._jobID,
['workerCompletionReason', 'workerCompletionMsg'])
if workerCmpReason == ClientJobsDAO.CMPL_REASON_SUCCESS:
self.logger.info("Exiting due to job being cancelled")
self._cjDAO.jobSetFields(self._jobID,
dict(workerCompletionMsg="Job was cancelled"),
useConnectionID=False, ignoreUnchanged=True)
else:
self.logger.error("Exiting because some worker set the "
"workerCompletionReason to %s. WorkerCompletionMsg: %s" %
(workerCmpReason, workerCmpMsg))
return (True, None, None)
# Perform periodic updates on the Hypersearch state.
if self._hsState is not None:
priorActiveSwarms = self._hsState.getActiveSwarms()
else:
priorActiveSwarms = None
# Update the HypersearchState, checking for matured swarms, and marking
# the passed in swarm as exhausted, if any
self._hsStatePeriodicUpdate(exhaustedSwarmId=exhaustedSwarmId)
# The above call may have modified self._hsState['activeSwarmIds']
# Log the current set of active swarms
activeSwarms = self._hsState.getActiveSwarms()
if activeSwarms != priorActiveSwarms:
self.logger.info("Active swarms changed to %s (from %s)" % (activeSwarms,
priorActiveSwarms))
self.logger.debug("Active swarms: %s" % (activeSwarms))
# If too many model errors were detected, exit
totalCmpModels = self._resultsDB.getNumCompletedModels()
if totalCmpModels > 5:
numErrs = self._resultsDB.getNumErrModels()
if (float(numErrs) / totalCmpModels) > self._maxPctErrModels:
# Get one of the errors
errModelIds = self._resultsDB.getErrModelIds()
resInfo = self._cjDAO.modelsGetResultAndStatus([errModelIds[0]])[0]
modelErrMsg = resInfo.completionMsg
cmpMsg = "%s: Exiting due to receiving too many models failing" \
" from exceptions (%d out of %d). \nModel Exception: %s" % \
(ErrorCodes.tooManyModelErrs, numErrs, totalCmpModels,
modelErrMsg)
self.logger.error(cmpMsg)
# Cancel the entire job now, if it has not already been cancelled
workerCmpReason = self._cjDAO.jobGetFields(self._jobID,
['workerCompletionReason'])[0]
if workerCmpReason == ClientJobsDAO.CMPL_REASON_SUCCESS:
self._cjDAO.jobSetFields(
self._jobID,
fields=dict(
cancel=True,
workerCompletionReason = ClientJobsDAO.CMPL_REASON_ERROR,
workerCompletionMsg = cmpMsg),
useConnectionID=False,
ignoreUnchanged=True)
return (True, None, None)
# If HsState thinks the search is over, exit. It is seeing if the results
# on the sprint we just completed are worse than a prior sprint.
if self._hsState.isSearchOver():
cmpMsg = "Exiting because results did not improve in most recently" \
" completed sprint."
self.logger.info(cmpMsg)
self._cjDAO.jobSetFields(self._jobID,
dict(workerCompletionMsg=cmpMsg),
useConnectionID=False, ignoreUnchanged=True)
return (True, None, None)
# Search successive active sprints, until we can find a candidate particle
# to work with
sprintIdx = -1
while True:
# Is this sprint active?
sprintIdx += 1
(active, eos) = self._hsState.isSprintActive(sprintIdx)
# If no more sprints to explore:
if eos:
# If any prior ones are still being explored, finish up exploring them
if self._hsState.anyGoodSprintsActive():
self.logger.info("No more sprints to explore, waiting for prior"
" sprints to complete")
return (False, None, None)
# Else, we're done
else:
cmpMsg = "Exiting because we've evaluated all possible field " \
"combinations"
self._cjDAO.jobSetFields(self._jobID,
dict(workerCompletionMsg=cmpMsg),
useConnectionID=False, ignoreUnchanged=True)
self.logger.info(cmpMsg)
return (True, None, None)
if not active:
if not self._speculativeParticles:
if not self._hsState.isSprintCompleted(sprintIdx):
self.logger.info("Waiting for all particles in sprint %d to complete"
"before evolving any more particles" % (sprintIdx))
return (False, None, None)
continue
# ====================================================================
# Look for swarms that have particle "holes" in their generations. That is,
# an earlier generation with less than minParticlesPerSwarm. This can
# happen if a model that was started eariler got orphaned. If we detect
# this, start a new particle in that generation.
swarmIds = self._hsState.getActiveSwarms(sprintIdx)
for swarmId in swarmIds:
firstNonFullGenIdx = self._resultsDB.firstNonFullGeneration(
swarmId=swarmId,
minNumParticles=self._minParticlesPerSwarm)
if firstNonFullGenIdx is None:
continue
if firstNonFullGenIdx < self._resultsDB.highestGeneration(swarmId):
self.logger.info("Cloning an earlier model in generation %d of swarm "
"%s (sprintIdx=%s) to replace an orphaned model" % (
firstNonFullGenIdx, swarmId, sprintIdx))
# Clone a random orphaned particle from the incomplete generation
(allParticles, allModelIds, errScores, completed, matured) = \
self._resultsDB.getOrphanParticleInfos(swarmId, firstNonFullGenIdx)
if len(allModelIds) > 0:
# We have seen instances where we get stuck in a loop incessantly
# trying to clone earlier models (NUP-1511). My best guess is that
# we've already successfully cloned each of the orphaned models at
# least once, but still need at least one more. If we don't create
# a new particleID, we will never be able to instantiate another
# model (since particleID hash is a unique key in the models table).
# So, on 1/8/2013 this logic was changed to create a new particleID
# whenever we clone an orphan.
newParticleId = True
self.logger.info("Cloning an orphaned model")
# If there is no orphan, clone one of the other particles. We can
# have no orphan if this was a speculative generation that only
# continued particles completed in the prior generation.
else:
newParticleId = True
self.logger.info("No orphans found, so cloning a non-orphan")
(allParticles, allModelIds, errScores, completed, matured) = \
self._resultsDB.getParticleInfos(swarmId=swarmId,
genIdx=firstNonFullGenIdx)
# Clone that model
modelId = random.choice(allModelIds)
self.logger.info("Cloning model %r" % (modelId))
(particleState, _, _, _, _) = self._resultsDB.getParticleInfo(modelId)
particle = Particle(hsObj = self,
resultsDB = self._resultsDB,
flattenedPermuteVars=self._flattenedPermutations,
newFromClone=particleState,
newParticleId=newParticleId)
return (False, particle, swarmId)
# ====================================================================
# Sort the swarms in priority order, trying the ones with the least
# number of models first
swarmSizes = numpy.array([self._resultsDB.numModels(x) for x in swarmIds])
swarmSizeAndIdList = zip(swarmSizes, swarmIds)
swarmSizeAndIdList.sort()
for (_, swarmId) in swarmSizeAndIdList:
# -------------------------------------------------------------------
# 1.) The particle will be created from new (at generation #0) if there
# are not already self._minParticlesPerSwarm particles in the swarm.
(allParticles, allModelIds, errScores, completed, matured) = (
self._resultsDB.getParticleInfos(swarmId))
if len(allParticles) < self._minParticlesPerSwarm:
particle = Particle(hsObj=self,
resultsDB=self._resultsDB,
flattenedPermuteVars=self._flattenedPermutations,
swarmId=swarmId,
newFarFrom=allParticles)
# Jam in the best encoder state found from the first sprint
bestPriorModel = None
if sprintIdx >= 1:
(bestPriorModel, errScore) = self._hsState.bestModelInSprint(0)
if bestPriorModel is not None:
self.logger.info("Best model and errScore from previous sprint(%d):"
" %s, %g" % (0, str(bestPriorModel), errScore))
(baseState, modelId, errScore, completed, matured) \
= self._resultsDB.getParticleInfo(bestPriorModel)
particle.copyEncoderStatesFrom(baseState)
# Copy the best inference type from the earlier sprint
particle.copyVarStatesFrom(baseState, ['modelParams|inferenceType'])
# It's best to jiggle the best settings from the prior sprint, so
# compute a new position starting from that previous best
# Only jiggle the vars we copied from the prior model
whichVars = []
for varName in baseState['varStates']:
if ':' in varName:
whichVars.append(varName)
particle.newPosition(whichVars)
self.logger.debug("Particle after incorporating encoder vars from best "
"model in previous sprint: \n%s" % (str(particle)))
return (False, particle, swarmId)
# -------------------------------------------------------------------
# 2.) Look for a completed particle to evolve
# Note that we use lastDescendent. We only want to evolve particles that
# are at their most recent generation index.
(readyParticles, readyModelIds, readyErrScores, _, _) = (
self._resultsDB.getParticleInfos(swarmId, genIdx=None,
matured=True, lastDescendent=True))
# If we have at least 1 ready particle to evolve...
if len(readyParticles) > 0:
readyGenIdxs = [x['genIdx'] for x in readyParticles]
sortedGenIdxs = sorted(set(readyGenIdxs))
genIdx = sortedGenIdxs[0]
# Now, genIdx has the generation of the particle we want to run,
# Get a particle from that generation and evolve it.
useParticle = None
for particle in readyParticles:
if particle['genIdx'] == genIdx:
useParticle = particle
break
# If speculativeParticles is off, we don't want to evolve a particle
# into the next generation until all particles in the current
# generation have completed.
if not self._speculativeParticles:
(particles, _, _, _, _) = self._resultsDB.getParticleInfos(
swarmId, genIdx=genIdx, matured=False)
if len(particles) > 0:
continue
particle = Particle(hsObj=self,
resultsDB=self._resultsDB,
flattenedPermuteVars=self._flattenedPermutations,
evolveFromState=useParticle)
return (False, particle, swarmId)
# END: for (swarmSize, swarmId) in swarmSizeAndIdList:
# No success in this swarm, onto next swarm
# ====================================================================
# We couldn't find a particle in this sprint ready to evolve. If
# speculative particles is OFF, we have to wait for one or more other
# workers to finish up their particles before we can do anything.
if not self._speculativeParticles:
self.logger.info("Waiting for one or more of the %s swarms "
"to complete a generation before evolving any more particles" \
% (str(swarmIds)))
return (False, None, None) | Find or create a candidate particle to produce a new model.
At any one time, there is an active set of swarms in the current sprint, where
each swarm in the sprint represents a particular combination of fields.
Ideally, we should try to balance the number of models we have evaluated for
each swarm at any time.
This method will see how many models have been evaluated for each active
swarm in the current active sprint(s) and then try and choose a particle
from the least represented swarm in the first possible active sprint, with
the following constraints/rules:
for each active sprint:
for each active swarm (preference to those with least# of models so far):
1.) The particle will be created from new (generation #0) if there are not
already self._minParticlesPerSwarm particles in the swarm.
2.) Find the first gen that has a completed particle and evolve that
particle to the next generation.
3.) If we got to here, we know that we have satisfied the min# of
particles for the swarm, and they are all currently running (probably at
various generation indexes). Go onto the next swarm
If we couldn't find a swarm to allocate a particle in, go onto the next
sprint and start allocating particles there....
Parameters:
----------------------------------------------------------------
exhaustedSwarmId: If not None, force a change to the current set of active
swarms by marking this swarm as either 'completing' or
'completed'. If there are still models being evaluaed in
it, mark it as 'completing', else 'completed. This is
used in situations where we can't find any new unique
models to create in this swarm. In these situations, we
force an update to the hypersearch state so no other
worker wastes time try to use this swarm.
retval: (exit, particle, swarm)
exit: If true, this worker is ready to exit (particle and
swarm will be None)
particle: Which particle to run
swarm: which swarm the particle is in
NOTE: When particle and swarm are None and exit is False, it
means that we need to wait for one or more other worker(s) to
finish their respective models before we can pick a particle
to run. This will generally only happen when speculativeParticles
is set to False. | Below is the the instruction that describes the task:
### Input:
Find or create a candidate particle to produce a new model.
At any one time, there is an active set of swarms in the current sprint, where
each swarm in the sprint represents a particular combination of fields.
Ideally, we should try to balance the number of models we have evaluated for
each swarm at any time.
This method will see how many models have been evaluated for each active
swarm in the current active sprint(s) and then try and choose a particle
from the least represented swarm in the first possible active sprint, with
the following constraints/rules:
for each active sprint:
for each active swarm (preference to those with least# of models so far):
1.) The particle will be created from new (generation #0) if there are not
already self._minParticlesPerSwarm particles in the swarm.
2.) Find the first gen that has a completed particle and evolve that
particle to the next generation.
3.) If we got to here, we know that we have satisfied the min# of
particles for the swarm, and they are all currently running (probably at
various generation indexes). Go onto the next swarm
If we couldn't find a swarm to allocate a particle in, go onto the next
sprint and start allocating particles there....
Parameters:
----------------------------------------------------------------
exhaustedSwarmId: If not None, force a change to the current set of active
swarms by marking this swarm as either 'completing' or
'completed'. If there are still models being evaluaed in
it, mark it as 'completing', else 'completed. This is
used in situations where we can't find any new unique
models to create in this swarm. In these situations, we
force an update to the hypersearch state so no other
worker wastes time try to use this swarm.
retval: (exit, particle, swarm)
exit: If true, this worker is ready to exit (particle and
swarm will be None)
particle: Which particle to run
swarm: which swarm the particle is in
NOTE: When particle and swarm are None and exit is False, it
means that we need to wait for one or more other worker(s) to
finish their respective models before we can pick a particle
to run. This will generally only happen when speculativeParticles
is set to False.
### Response:
def _getCandidateParticleAndSwarm (self, exhaustedSwarmId=None):
"""Find or create a candidate particle to produce a new model.
At any one time, there is an active set of swarms in the current sprint, where
each swarm in the sprint represents a particular combination of fields.
Ideally, we should try to balance the number of models we have evaluated for
each swarm at any time.
This method will see how many models have been evaluated for each active
swarm in the current active sprint(s) and then try and choose a particle
from the least represented swarm in the first possible active sprint, with
the following constraints/rules:
for each active sprint:
for each active swarm (preference to those with least# of models so far):
1.) The particle will be created from new (generation #0) if there are not
already self._minParticlesPerSwarm particles in the swarm.
2.) Find the first gen that has a completed particle and evolve that
particle to the next generation.
3.) If we got to here, we know that we have satisfied the min# of
particles for the swarm, and they are all currently running (probably at
various generation indexes). Go onto the next swarm
If we couldn't find a swarm to allocate a particle in, go onto the next
sprint and start allocating particles there....
Parameters:
----------------------------------------------------------------
exhaustedSwarmId: If not None, force a change to the current set of active
swarms by marking this swarm as either 'completing' or
'completed'. If there are still models being evaluaed in
it, mark it as 'completing', else 'completed. This is
used in situations where we can't find any new unique
models to create in this swarm. In these situations, we
force an update to the hypersearch state so no other
worker wastes time try to use this swarm.
retval: (exit, particle, swarm)
exit: If true, this worker is ready to exit (particle and
swarm will be None)
particle: Which particle to run
swarm: which swarm the particle is in
NOTE: When particle and swarm are None and exit is False, it
means that we need to wait for one or more other worker(s) to
finish their respective models before we can pick a particle
to run. This will generally only happen when speculativeParticles
is set to False.
"""
# Cancel search?
jobCancel = self._cjDAO.jobGetFields(self._jobID, ['cancel'])[0]
if jobCancel:
self._jobCancelled = True
# Did a worker cancel the job because of an error?
(workerCmpReason, workerCmpMsg) = self._cjDAO.jobGetFields(self._jobID,
['workerCompletionReason', 'workerCompletionMsg'])
if workerCmpReason == ClientJobsDAO.CMPL_REASON_SUCCESS:
self.logger.info("Exiting due to job being cancelled")
self._cjDAO.jobSetFields(self._jobID,
dict(workerCompletionMsg="Job was cancelled"),
useConnectionID=False, ignoreUnchanged=True)
else:
self.logger.error("Exiting because some worker set the "
"workerCompletionReason to %s. WorkerCompletionMsg: %s" %
(workerCmpReason, workerCmpMsg))
return (True, None, None)
# Perform periodic updates on the Hypersearch state.
if self._hsState is not None:
priorActiveSwarms = self._hsState.getActiveSwarms()
else:
priorActiveSwarms = None
# Update the HypersearchState, checking for matured swarms, and marking
# the passed in swarm as exhausted, if any
self._hsStatePeriodicUpdate(exhaustedSwarmId=exhaustedSwarmId)
# The above call may have modified self._hsState['activeSwarmIds']
# Log the current set of active swarms
activeSwarms = self._hsState.getActiveSwarms()
if activeSwarms != priorActiveSwarms:
self.logger.info("Active swarms changed to %s (from %s)" % (activeSwarms,
priorActiveSwarms))
self.logger.debug("Active swarms: %s" % (activeSwarms))
# If too many model errors were detected, exit
totalCmpModels = self._resultsDB.getNumCompletedModels()
if totalCmpModels > 5:
numErrs = self._resultsDB.getNumErrModels()
if (float(numErrs) / totalCmpModels) > self._maxPctErrModels:
# Get one of the errors
errModelIds = self._resultsDB.getErrModelIds()
resInfo = self._cjDAO.modelsGetResultAndStatus([errModelIds[0]])[0]
modelErrMsg = resInfo.completionMsg
cmpMsg = "%s: Exiting due to receiving too many models failing" \
" from exceptions (%d out of %d). \nModel Exception: %s" % \
(ErrorCodes.tooManyModelErrs, numErrs, totalCmpModels,
modelErrMsg)
self.logger.error(cmpMsg)
# Cancel the entire job now, if it has not already been cancelled
workerCmpReason = self._cjDAO.jobGetFields(self._jobID,
['workerCompletionReason'])[0]
if workerCmpReason == ClientJobsDAO.CMPL_REASON_SUCCESS:
self._cjDAO.jobSetFields(
self._jobID,
fields=dict(
cancel=True,
workerCompletionReason = ClientJobsDAO.CMPL_REASON_ERROR,
workerCompletionMsg = cmpMsg),
useConnectionID=False,
ignoreUnchanged=True)
return (True, None, None)
# If HsState thinks the search is over, exit. It is seeing if the results
# on the sprint we just completed are worse than a prior sprint.
if self._hsState.isSearchOver():
cmpMsg = "Exiting because results did not improve in most recently" \
" completed sprint."
self.logger.info(cmpMsg)
self._cjDAO.jobSetFields(self._jobID,
dict(workerCompletionMsg=cmpMsg),
useConnectionID=False, ignoreUnchanged=True)
return (True, None, None)
# Search successive active sprints, until we can find a candidate particle
# to work with
sprintIdx = -1
while True:
# Is this sprint active?
sprintIdx += 1
(active, eos) = self._hsState.isSprintActive(sprintIdx)
# If no more sprints to explore:
if eos:
# If any prior ones are still being explored, finish up exploring them
if self._hsState.anyGoodSprintsActive():
self.logger.info("No more sprints to explore, waiting for prior"
" sprints to complete")
return (False, None, None)
# Else, we're done
else:
cmpMsg = "Exiting because we've evaluated all possible field " \
"combinations"
self._cjDAO.jobSetFields(self._jobID,
dict(workerCompletionMsg=cmpMsg),
useConnectionID=False, ignoreUnchanged=True)
self.logger.info(cmpMsg)
return (True, None, None)
if not active:
if not self._speculativeParticles:
if not self._hsState.isSprintCompleted(sprintIdx):
self.logger.info("Waiting for all particles in sprint %d to complete"
"before evolving any more particles" % (sprintIdx))
return (False, None, None)
continue
# ====================================================================
# Look for swarms that have particle "holes" in their generations. That is,
# an earlier generation with less than minParticlesPerSwarm. This can
# happen if a model that was started eariler got orphaned. If we detect
# this, start a new particle in that generation.
swarmIds = self._hsState.getActiveSwarms(sprintIdx)
for swarmId in swarmIds:
firstNonFullGenIdx = self._resultsDB.firstNonFullGeneration(
swarmId=swarmId,
minNumParticles=self._minParticlesPerSwarm)
if firstNonFullGenIdx is None:
continue
if firstNonFullGenIdx < self._resultsDB.highestGeneration(swarmId):
self.logger.info("Cloning an earlier model in generation %d of swarm "
"%s (sprintIdx=%s) to replace an orphaned model" % (
firstNonFullGenIdx, swarmId, sprintIdx))
# Clone a random orphaned particle from the incomplete generation
(allParticles, allModelIds, errScores, completed, matured) = \
self._resultsDB.getOrphanParticleInfos(swarmId, firstNonFullGenIdx)
if len(allModelIds) > 0:
# We have seen instances where we get stuck in a loop incessantly
# trying to clone earlier models (NUP-1511). My best guess is that
# we've already successfully cloned each of the orphaned models at
# least once, but still need at least one more. If we don't create
# a new particleID, we will never be able to instantiate another
# model (since particleID hash is a unique key in the models table).
# So, on 1/8/2013 this logic was changed to create a new particleID
# whenever we clone an orphan.
newParticleId = True
self.logger.info("Cloning an orphaned model")
# If there is no orphan, clone one of the other particles. We can
# have no orphan if this was a speculative generation that only
# continued particles completed in the prior generation.
else:
newParticleId = True
self.logger.info("No orphans found, so cloning a non-orphan")
(allParticles, allModelIds, errScores, completed, matured) = \
self._resultsDB.getParticleInfos(swarmId=swarmId,
genIdx=firstNonFullGenIdx)
# Clone that model
modelId = random.choice(allModelIds)
self.logger.info("Cloning model %r" % (modelId))
(particleState, _, _, _, _) = self._resultsDB.getParticleInfo(modelId)
particle = Particle(hsObj = self,
resultsDB = self._resultsDB,
flattenedPermuteVars=self._flattenedPermutations,
newFromClone=particleState,
newParticleId=newParticleId)
return (False, particle, swarmId)
# ====================================================================
# Sort the swarms in priority order, trying the ones with the least
# number of models first
swarmSizes = numpy.array([self._resultsDB.numModels(x) for x in swarmIds])
swarmSizeAndIdList = zip(swarmSizes, swarmIds)
swarmSizeAndIdList.sort()
for (_, swarmId) in swarmSizeAndIdList:
# -------------------------------------------------------------------
# 1.) The particle will be created from new (at generation #0) if there
# are not already self._minParticlesPerSwarm particles in the swarm.
(allParticles, allModelIds, errScores, completed, matured) = (
self._resultsDB.getParticleInfos(swarmId))
if len(allParticles) < self._minParticlesPerSwarm:
particle = Particle(hsObj=self,
resultsDB=self._resultsDB,
flattenedPermuteVars=self._flattenedPermutations,
swarmId=swarmId,
newFarFrom=allParticles)
# Jam in the best encoder state found from the first sprint
bestPriorModel = None
if sprintIdx >= 1:
(bestPriorModel, errScore) = self._hsState.bestModelInSprint(0)
if bestPriorModel is not None:
self.logger.info("Best model and errScore from previous sprint(%d):"
" %s, %g" % (0, str(bestPriorModel), errScore))
(baseState, modelId, errScore, completed, matured) \
= self._resultsDB.getParticleInfo(bestPriorModel)
particle.copyEncoderStatesFrom(baseState)
# Copy the best inference type from the earlier sprint
particle.copyVarStatesFrom(baseState, ['modelParams|inferenceType'])
# It's best to jiggle the best settings from the prior sprint, so
# compute a new position starting from that previous best
# Only jiggle the vars we copied from the prior model
whichVars = []
for varName in baseState['varStates']:
if ':' in varName:
whichVars.append(varName)
particle.newPosition(whichVars)
self.logger.debug("Particle after incorporating encoder vars from best "
"model in previous sprint: \n%s" % (str(particle)))
return (False, particle, swarmId)
# -------------------------------------------------------------------
# 2.) Look for a completed particle to evolve
# Note that we use lastDescendent. We only want to evolve particles that
# are at their most recent generation index.
(readyParticles, readyModelIds, readyErrScores, _, _) = (
self._resultsDB.getParticleInfos(swarmId, genIdx=None,
matured=True, lastDescendent=True))
# If we have at least 1 ready particle to evolve...
if len(readyParticles) > 0:
readyGenIdxs = [x['genIdx'] for x in readyParticles]
sortedGenIdxs = sorted(set(readyGenIdxs))
genIdx = sortedGenIdxs[0]
# Now, genIdx has the generation of the particle we want to run,
# Get a particle from that generation and evolve it.
useParticle = None
for particle in readyParticles:
if particle['genIdx'] == genIdx:
useParticle = particle
break
# If speculativeParticles is off, we don't want to evolve a particle
# into the next generation until all particles in the current
# generation have completed.
if not self._speculativeParticles:
(particles, _, _, _, _) = self._resultsDB.getParticleInfos(
swarmId, genIdx=genIdx, matured=False)
if len(particles) > 0:
continue
particle = Particle(hsObj=self,
resultsDB=self._resultsDB,
flattenedPermuteVars=self._flattenedPermutations,
evolveFromState=useParticle)
return (False, particle, swarmId)
# END: for (swarmSize, swarmId) in swarmSizeAndIdList:
# No success in this swarm, onto next swarm
# ====================================================================
# We couldn't find a particle in this sprint ready to evolve. If
# speculative particles is OFF, we have to wait for one or more other
# workers to finish up their particles before we can do anything.
if not self._speculativeParticles:
self.logger.info("Waiting for one or more of the %s swarms "
"to complete a generation before evolving any more particles" \
% (str(swarmIds)))
return (False, None, None) |
def lower(self):
"""Lower bound"""
if self._reaction in self._view._flipped:
return -super(FlipableFluxBounds, self).upper
return super(FlipableFluxBounds, self).lower | Lower bound | Below is the instruction that describes the task:
### Input:
Lower bound
### Response:
def lower(self):
"""Lower bound"""
if self._reaction in self._view._flipped:
return -super(FlipableFluxBounds, self).upper
return super(FlipableFluxBounds, self).lower |
def on_redraw_timer(self, event):
'''the redraw timer ensures we show new map tiles as they
are downloaded'''
state = self.state
while state.in_queue.qsize():
try:
obj = state.in_queue.get()
except Exception:
time.sleep(0.05)
return
if isinstance(obj, MPImageData):
with warnings.catch_warnings():
warnings.simplefilter('ignore')
img = wx.EmptyImage(obj.width, obj.height)
img.SetData(obj.data)
self.img = img
self.need_redraw = True
if state.auto_size:
client_area = state.frame.GetClientSize()
total_area = state.frame.GetSize()
bx = max(total_area.x - client_area.x,0)
by = max(total_area.y - client_area.y,0)
state.frame.SetSize(wx.Size(obj.width+bx, obj.height+by))
if isinstance(obj, MPImageTitle):
state.frame.SetTitle(obj.title)
if isinstance(obj, MPImageRecenter):
self.on_recenter(obj.location)
if isinstance(obj, MPImageMenu):
self.set_menu(obj.menu)
if isinstance(obj, MPImagePopupMenu):
self.set_popup_menu(obj.menu)
if isinstance(obj, MPImageBrightness):
state.brightness = obj.brightness
self.need_redraw = True
if isinstance(obj, MPImageFullSize):
self.full_size()
if isinstance(obj, MPImageFitToWindow):
self.fit_to_window()
if isinstance(obj, win_layout.WinLayout):
win_layout.set_wx_window_layout(state.frame, obj)
if self.need_redraw:
self.redraw() | the redraw timer ensures we show new map tiles as they
are downloaded | Below is the instruction that describes the task:
### Input:
the redraw timer ensures we show new map tiles as they
are downloaded
### Response:
def on_redraw_timer(self, event):
'''the redraw timer ensures we show new map tiles as they
are downloaded'''
state = self.state
while state.in_queue.qsize():
try:
obj = state.in_queue.get()
except Exception:
time.sleep(0.05)
return
if isinstance(obj, MPImageData):
with warnings.catch_warnings():
warnings.simplefilter('ignore')
img = wx.EmptyImage(obj.width, obj.height)
img.SetData(obj.data)
self.img = img
self.need_redraw = True
if state.auto_size:
client_area = state.frame.GetClientSize()
total_area = state.frame.GetSize()
bx = max(total_area.x - client_area.x,0)
by = max(total_area.y - client_area.y,0)
state.frame.SetSize(wx.Size(obj.width+bx, obj.height+by))
if isinstance(obj, MPImageTitle):
state.frame.SetTitle(obj.title)
if isinstance(obj, MPImageRecenter):
self.on_recenter(obj.location)
if isinstance(obj, MPImageMenu):
self.set_menu(obj.menu)
if isinstance(obj, MPImagePopupMenu):
self.set_popup_menu(obj.menu)
if isinstance(obj, MPImageBrightness):
state.brightness = obj.brightness
self.need_redraw = True
if isinstance(obj, MPImageFullSize):
self.full_size()
if isinstance(obj, MPImageFitToWindow):
self.fit_to_window()
if isinstance(obj, win_layout.WinLayout):
win_layout.set_wx_window_layout(state.frame, obj)
if self.need_redraw:
self.redraw() |
def variance_larger_than_standard_deviation(x):
"""
Boolean variable denoting if the variance of x is greater than its standard deviation. Is equal to variance of x
being larger than 1
:param x: the time series to calculate the feature of
:type x: numpy.ndarray
:return: the value of this feature
:return type: bool
"""
y = np.var(x)
return y > np.sqrt(y) | Boolean variable denoting if the variance of x is greater than its standard deviation. Is equal to variance of x
being larger than 1
:param x: the time series to calculate the feature of
:type x: numpy.ndarray
:return: the value of this feature
:return type: bool | Below is the instruction that describes the task:
### Input:
Boolean variable denoting if the variance of x is greater than its standard deviation. Is equal to variance of x
being larger than 1
:param x: the time series to calculate the feature of
:type x: numpy.ndarray
:return: the value of this feature
:return type: bool
### Response:
def variance_larger_than_standard_deviation(x):
"""
Boolean variable denoting if the variance of x is greater than its standard deviation. Is equal to variance of x
being larger than 1
:param x: the time series to calculate the feature of
:type x: numpy.ndarray
:return: the value of this feature
:return type: bool
"""
y = np.var(x)
return y > np.sqrt(y) |
def resolve_heron_suffix_issue(abs_pex_path, class_path):
"""Resolves duplicate package suffix problems
When dynamically loading a pex file and a corresponding python class (bolt/spout/topology),
if the top level package in which to-be-loaded classes reside is named 'heron', the path conflicts
with this Heron Instance pex package (heron.instance.src.python...), making the Python
interpreter unable to find the target class in a given pex file.
This function resolves this issue by individually loading packages with suffix `heron` to
avoid this issue.
However, if a dependent module/class that is not directly specified under ``class_path``
and has conflicts with other native heron packages, there is a possibility that
such a class/module might not be imported correctly. For example, if a given ``class_path`` was
``heron.common.src.module.Class``, but it has a dependent module (such as by import statement),
``heron.common.src.python.dep_module.DepClass`` for example, pex_loader does not guarantee that
``DepClass` is imported correctly. This is because ``heron.common.src.python.dep_module`` is not
explicitly added to sys.path, while ``heron.common.src.python`` module exists as the native heron
package, from which ``dep_module`` cannot be found, so Python interpreter may raise ImportError.
The best way to avoid this issue is NOT to dynamically load a pex file whose top level package
name is ``heron``. Note that this method is included because some of the example topologies and
tests have to have a pex with its top level package name of ``heron``.
"""
# import top-level package named `heron` of a given pex file
importer = zipimport.zipimporter(abs_pex_path)
importer.load_module("heron")
# remove 'heron' and the classname
to_load_lst = class_path.split('.')[1:-1]
loaded = ['heron']
loaded_mod = None
for to_load in to_load_lst:
sub_importer = zipimport.zipimporter(os.path.join(abs_pex_path, '/'.join(loaded)))
loaded_mod = sub_importer.load_module(to_load)
loaded.append(to_load)
return loaded_mod | Resolves duplicate package suffix problems
When dynamically loading a pex file and a corresponding python class (bolt/spout/topology),
if the top level package in which to-be-loaded classes reside is named 'heron', the path conflicts
with this Heron Instance pex package (heron.instance.src.python...), making the Python
interpreter unable to find the target class in a given pex file.
This function resolves this issue by individually loading packages with suffix `heron` to
avoid this issue.
However, if a dependent module/class that is not directly specified under ``class_path``
and has conflicts with other native heron packages, there is a possibility that
such a class/module might not be imported correctly. For example, if a given ``class_path`` was
``heron.common.src.module.Class``, but it has a dependent module (such as by import statement),
``heron.common.src.python.dep_module.DepClass`` for example, pex_loader does not guarantee that
``DepClass` is imported correctly. This is because ``heron.common.src.python.dep_module`` is not
explicitly added to sys.path, while ``heron.common.src.python`` module exists as the native heron
package, from which ``dep_module`` cannot be found, so Python interpreter may raise ImportError.
The best way to avoid this issue is NOT to dynamically load a pex file whose top level package
name is ``heron``. Note that this method is included because some of the example topologies and
tests have to have a pex with its top level package name of ``heron``. | Below is the instruction that describes the task:
### Input:
Resolves duplicate package suffix problems
When dynamically loading a pex file and a corresponding python class (bolt/spout/topology),
if the top level package in which to-be-loaded classes reside is named 'heron', the path conflicts
with this Heron Instance pex package (heron.instance.src.python...), making the Python
interpreter unable to find the target class in a given pex file.
This function resolves this issue by individually loading packages with suffix `heron` to
avoid this issue.
However, if a dependent module/class that is not directly specified under ``class_path``
and has conflicts with other native heron packages, there is a possibility that
such a class/module might not be imported correctly. For example, if a given ``class_path`` was
``heron.common.src.module.Class``, but it has a dependent module (such as by import statement),
``heron.common.src.python.dep_module.DepClass`` for example, pex_loader does not guarantee that
``DepClass` is imported correctly. This is because ``heron.common.src.python.dep_module`` is not
explicitly added to sys.path, while ``heron.common.src.python`` module exists as the native heron
package, from which ``dep_module`` cannot be found, so Python interpreter may raise ImportError.
The best way to avoid this issue is NOT to dynamically load a pex file whose top level package
name is ``heron``. Note that this method is included because some of the example topologies and
tests have to have a pex with its top level package name of ``heron``.
### Response:
def resolve_heron_suffix_issue(abs_pex_path, class_path):
"""Resolves duplicate package suffix problems
When dynamically loading a pex file and a corresponding python class (bolt/spout/topology),
if the top level package in which to-be-loaded classes reside is named 'heron', the path conflicts
with this Heron Instance pex package (heron.instance.src.python...), making the Python
interpreter unable to find the target class in a given pex file.
This function resolves this issue by individually loading packages with suffix `heron` to
avoid this issue.
However, if a dependent module/class that is not directly specified under ``class_path``
and has conflicts with other native heron packages, there is a possibility that
such a class/module might not be imported correctly. For example, if a given ``class_path`` was
``heron.common.src.module.Class``, but it has a dependent module (such as by import statement),
``heron.common.src.python.dep_module.DepClass`` for example, pex_loader does not guarantee that
``DepClass` is imported correctly. This is because ``heron.common.src.python.dep_module`` is not
explicitly added to sys.path, while ``heron.common.src.python`` module exists as the native heron
package, from which ``dep_module`` cannot be found, so Python interpreter may raise ImportError.
The best way to avoid this issue is NOT to dynamically load a pex file whose top level package
name is ``heron``. Note that this method is included because some of the example topologies and
tests have to have a pex with its top level package name of ``heron``.
"""
# import top-level package named `heron` of a given pex file
importer = zipimport.zipimporter(abs_pex_path)
importer.load_module("heron")
# remove 'heron' and the classname
to_load_lst = class_path.split('.')[1:-1]
loaded = ['heron']
loaded_mod = None
for to_load in to_load_lst:
sub_importer = zipimport.zipimporter(os.path.join(abs_pex_path, '/'.join(loaded)))
loaded_mod = sub_importer.load_module(to_load)
loaded.append(to_load)
return loaded_mod |
def indented_tree_line_generator(el, max_lines=None):
"""
Like tree_line_generator, but yields tuples (start_ref, end_ref, line),
where the line already takes the indentation into account by having "> "
prepended. If a line already starts with ">", it is escaped ("\\>"). This
makes it possible to reliably use methods that analyze plain text to detect
quoting.
"""
gen = tree_line_generator(el, max_lines)
for start_ref, end_ref, indentation_level, line in gen:
# Escape line
if line.startswith('>'):
line = '\\' + line
yield start_ref, end_ref, '> '*indentation_level + line | Like tree_line_generator, but yields tuples (start_ref, end_ref, line),
where the line already takes the indentation into account by having "> "
prepended. If a line already starts with ">", it is escaped ("\\>"). This
makes it possible to reliably use methods that analyze plain text to detect
quoting. | Below is the instruction that describes the task:
### Input:
Like tree_line_generator, but yields tuples (start_ref, end_ref, line),
where the line already takes the indentation into account by having "> "
prepended. If a line already starts with ">", it is escaped ("\\>"). This
makes it possible to reliably use methods that analyze plain text to detect
quoting.
### Response:
def indented_tree_line_generator(el, max_lines=None):
"""
Like tree_line_generator, but yields tuples (start_ref, end_ref, line),
where the line already takes the indentation into account by having "> "
prepended. If a line already starts with ">", it is escaped ("\\>"). This
makes it possible to reliably use methods that analyze plain text to detect
quoting.
"""
gen = tree_line_generator(el, max_lines)
for start_ref, end_ref, indentation_level, line in gen:
# Escape line
if line.startswith('>'):
line = '\\' + line
yield start_ref, end_ref, '> '*indentation_level + line |
def dst_to_src(self, dst_file):
"""Map destination path to source URI."""
for map in self.mappings:
src_uri = map.dst_to_src(dst_file)
if (src_uri is not None):
return(src_uri)
# Must have failed if loop exited
raise MapperError(
"Unable to translate destination path (%s) "
"into a source URI." % (dst_file)) | Map destination path to source URI. | Below is the instruction that describes the task:
### Input:
Map destination path to source URI.
### Response:
def dst_to_src(self, dst_file):
"""Map destination path to source URI."""
for map in self.mappings:
src_uri = map.dst_to_src(dst_file)
if (src_uri is not None):
return(src_uri)
# Must have failed if loop exited
raise MapperError(
"Unable to translate destination path (%s) "
"into a source URI." % (dst_file)) |
def migrateFileFields(portal):
"""
This function walks over all attachment types and migrates their FileField
fields.
"""
portal_types = [
"Attachment",
"ARImport",
"Instrument",
"InstrumentCertification",
"Method",
"Multifile",
"Report",
"ARReport",
"SamplePoint"]
for portal_type in portal_types:
# Do the migration
migrate_to_blob(
portal,
portal_type=portal_type,
remove_old_value=True) | This function walks over all attachment types and migrates their FileField
fields. | Below is the instruction that describes the task:
### Input:
This function walks over all attachment types and migrates their FileField
fields.
### Response:
def migrateFileFields(portal):
"""
This function walks over all attachment types and migrates their FileField
fields.
"""
portal_types = [
"Attachment",
"ARImport",
"Instrument",
"InstrumentCertification",
"Method",
"Multifile",
"Report",
"ARReport",
"SamplePoint"]
for portal_type in portal_types:
# Do the migration
migrate_to_blob(
portal,
portal_type=portal_type,
remove_old_value=True) |
def write_table_report(summary_dict, seqid, genus):
"""
Parse the PointFinder table output, and write a summary report
:param summary_dict: nested dictionary containing data such as header strings, and paths to reports
:param seqid: name of the strain,
:param genus: MASH-calculated genus of current isolate
"""
# Set the header string if the summary report doesn't already exist
if not os.path.isfile(summary_dict[genus]['table']['summary']):
header_string = summary_dict[genus]['table']['header']
else:
header_string = str()
summary_string = '{seq},'.format(seq=seqid)
try:
# Read in the predictions
with open(summary_dict[genus]['table']['output'], 'r') as outputs:
for header_value in summary_dict[genus]['table']['header'].split(',')[:-1]:
for line in outputs:
if line.startswith('{hv}\n'.format(hv=header_value)):
# Iterate through the lines following the match
for subline in outputs:
if subline != '\n':
if subline.startswith('Mutation'):
for detailline in outputs:
if detailline != '\n':
summary_string += '{},'.format(detailline.split('\t')[0])
else:
break
else:
summary_string += '{},'.format(
subline.replace(',', ';').replace('\t', ',').rstrip())
break
else:
break
break
# Reset the file iterator to the first line in preparation for the next header
outputs.seek(0)
# Ensure that there were results to report
if summary_string:
if not summary_string.endswith('\n'):
summary_string += '\n'
# Write the summaries to the summary file
with open(summary_dict[genus]['table']['summary'], 'a+') as summary:
# Write the header if necessary
if header_string:
summary.write(header_string)
summary.write(summary_string)
except FileNotFoundError:
# Write the summaries to the summary file
with open(summary_dict[genus]['table']['summary'], 'a+') as summary:
# Extract the length of the header from the dictionary. Subtract two (don't need the strain, or the
# empty column created by a trailing comma
header_len = len(summary_dict[genus]['table']['header'].split(',')) - 2
# Populate the summary strain with the appropriate number of comma-separated 'Gene not found' entries
summary_string += '{empty}\n'.format(empty='Gene not found,' * header_len)
# Write the header if necessary
if header_string:
summary.write(header_string)
summary.write(summary_string) | Parse the PointFinder table output, and write a summary report
:param summary_dict: nested dictionary containing data such as header strings, and paths to reports
:param seqid: name of the strain,
:param genus: MASH-calculated genus of current isolate | Below is the instruction that describes the task:
### Input:
Parse the PointFinder table output, and write a summary report
:param summary_dict: nested dictionary containing data such as header strings, and paths to reports
:param seqid: name of the strain,
:param genus: MASH-calculated genus of current isolate
### Response:
def write_table_report(summary_dict, seqid, genus):
    """
    Parse the PointFinder table output, and write a summary report
    :param summary_dict: nested dictionary containing data such as header strings, and paths to reports
    :param seqid: name of the strain,
    :param genus: MASH-calculated genus of current isolate
    """
    # Set the header string if the summary report doesn't already exist
    if not os.path.isfile(summary_dict[genus]['table']['summary']):
        header_string = summary_dict[genus]['table']['header']
    else:
        header_string = str()
    # Every summary row begins with the strain name
    summary_string = '{seq},'.format(seq=seqid)
    try:
        # Read in the predictions
        with open(summary_dict[genus]['table']['output'], 'r') as outputs:
            # One pass over the output file per expected column (the trailing
            # empty column created by the final comma is skipped)
            for header_value in summary_dict[genus]['table']['header'].split(',')[:-1]:
                for line in outputs:
                    if line.startswith('{hv}\n'.format(hv=header_value)):
                        # Iterate through the lines following the match.
                        # NOTE: the inner loops deliberately share the same file
                        # iterator as the outer loop, consuming the section's
                        # lines so the outer loop never revisits them (until the
                        # seek(0) below rewinds for the next header).
                        for subline in outputs:
                            if subline != '\n':
                                if subline.startswith('Mutation'):
                                    # Collect the first (tab-separated) field of
                                    # every detail line up to the next blank line
                                    for detailline in outputs:
                                        if detailline != '\n':
                                            summary_string += '{},'.format(detailline.split('\t')[0])
                                        else:
                                            break
                                else:
                                    # Sanitise for CSV output: commas become
                                    # semicolons, tabs become column separators
                                    summary_string += '{},'.format(
                                        subline.replace(',', ';').replace('\t', ',').rstrip())
                                    break
                            else:
                                # A blank line terminates this section
                                break
                        break
                # Reset the file iterator to the first line in preparation for the next header
                outputs.seek(0)
        # Ensure that there were results to report
        # NOTE(review): summary_string always starts with the seqid, so this
        # check is always truthy - TODO confirm whether a stricter emptiness
        # test (e.g. comparing against the seed value) was intended
        if summary_string:
            if not summary_string.endswith('\n'):
                summary_string += '\n'
            # Write the summaries to the summary file
            with open(summary_dict[genus]['table']['summary'], 'a+') as summary:
                # Write the header if necessary
                if header_string:
                    summary.write(header_string)
                summary.write(summary_string)
    except FileNotFoundError:
        # No PointFinder output exists for this strain; write placeholder values
        # Write the summaries to the summary file
        with open(summary_dict[genus]['table']['summary'], 'a+') as summary:
            # Extract the length of the header from the dictionary. Subtract two (don't need the strain, or the
            # empty column created by a trailing comma
            header_len = len(summary_dict[genus]['table']['header'].split(',')) - 2
            # Populate the summary strain with the appropriate number of comma-separated 'Gene not found' entries
            summary_string += '{empty}\n'.format(empty='Gene not found,' * header_len)
            # Write the header if necessary
            if header_string:
                summary.write(header_string)
            summary.write(summary_string)
def absent(name, entry=None, entries=None, family='ipv4', **kwargs):
'''
.. versionadded:: 2014.7.0
Remove a entry or entries from a chain
name
A user-defined name to call this entry by in another part of a state or
formula. This should not be an actual entry.
family
Network family, ipv4 or ipv6.
'''
ret = {'name': name,
'changes': {},
'result': True,
'comment': ''}
if not entry:
ret['result'] = False
ret['comment'] = ('ipset entry must be specified')
return ret
entries = []
if isinstance(entry, list):
entries = entry
else:
entries.append(entry)
for entry in entries:
entry_opts = ''
if ' ' in entry:
entry, entry_opts = entry.split(' ', 1)
if 'timeout' in kwargs and 'timeout' not in entry_opts:
entry_opts = 'timeout {0} {1}'.format(kwargs['timeout'], entry_opts)
if 'comment' in kwargs and 'comment' not in entry_opts:
entry_opts = '{0} comment "{1}"'.format(entry_opts, kwargs['comment'])
_entry = ' '.join([entry, entry_opts]).strip()
log.debug('_entry %s', _entry)
if not __salt__['ipset.check'](kwargs['set_name'],
_entry,
family) is True:
ret['result'] = True
ret['comment'] += 'ipset entry for {0} not present in set {1} for {2}\n'.format(
_entry,
kwargs['set_name'],
family)
else:
if __opts__['test']:
ret['result'] = None
ret['comment'] += 'ipset entry {0} would be removed from set {1} for {2}\n'.format(
entry,
kwargs['set_name'],
family)
else:
command = __salt__['ipset.delete'](kwargs['set_name'], entry, family, **kwargs)
if 'Error' not in command:
ret['changes'] = {'locale': name}
ret['result'] = True
ret['comment'] += 'ipset entry {1} removed from set {0} for {2}\n'.format(
kwargs['set_name'],
_entry,
family)
else:
ret['result'] = False
ret['comment'] = 'Failed to delete ipset entry from set {0} for {2}. ' \
'Attempted entry was {1}.\n' \
'{3}\n'.format(kwargs['set_name'], _entry, family, command)
return ret | .. versionadded:: 2014.7.0
Remove a entry or entries from a chain
name
A user-defined name to call this entry by in another part of a state or
formula. This should not be an actual entry.
family
Network family, ipv4 or ipv6. | Below is the instruction that describes the task:
### Input:
.. versionadded:: 2014.7.0
Remove a entry or entries from a chain
name
A user-defined name to call this entry by in another part of a state or
formula. This should not be an actual entry.
family
Network family, ipv4 or ipv6.
### Response:
def absent(name, entry=None, entries=None, family='ipv4', **kwargs):
    '''
    .. versionadded:: 2014.7.0

    Remove an entry or entries from an ipset set

    name
        A user-defined name to call this entry by in another part of a state or
        formula. This should not be an actual entry.

    family
        Network family, ipv4 or ipv6.
    '''
    ret = {'name': name,
           'changes': {},
           'result': True,
           'comment': ''}

    # An entry (or list of entries) is mandatory
    if not entry:
        ret['result'] = False
        ret['comment'] = ('ipset entry must be specified')
        return ret

    # Normalise the input to a list of entries.
    # NOTE(review): the ``entries`` parameter is accepted but immediately
    # shadowed here; it is kept in the signature for API compatibility.
    entries = []
    if isinstance(entry, list):
        entries = entry
    else:
        entries.append(entry)

    for entry in entries:
        entry_opts = ''
        # Split an inline option string (e.g. '1.2.3.4 timeout 300') off the entry
        if ' ' in entry:
            entry, entry_opts = entry.split(' ', 1)
        # Fold keyword-supplied options into the option string when not already present
        if 'timeout' in kwargs and 'timeout' not in entry_opts:
            entry_opts = 'timeout {0} {1}'.format(kwargs['timeout'], entry_opts)
        if 'comment' in kwargs and 'comment' not in entry_opts:
            entry_opts = '{0} comment "{1}"'.format(entry_opts, kwargs['comment'])
        _entry = ' '.join([entry, entry_opts]).strip()
        log.debug('_entry %s', _entry)
        if not __salt__['ipset.check'](kwargs['set_name'],
                                       _entry,
                                       family) is True:
            # Nothing to remove; already absent
            ret['result'] = True
            ret['comment'] += 'ipset entry for {0} not present in set {1} for {2}\n'.format(
                _entry,
                kwargs['set_name'],
                family)
        else:
            if __opts__['test']:
                ret['result'] = None
                ret['comment'] += 'ipset entry {0} would be removed from set {1} for {2}\n'.format(
                    entry,
                    kwargs['set_name'],
                    family)
            else:
                command = __salt__['ipset.delete'](kwargs['set_name'], entry, family, **kwargs)
                if 'Error' not in command:
                    ret['changes'] = {'locale': name}
                    ret['result'] = True
                    ret['comment'] += 'ipset entry {1} removed from set {0} for {2}\n'.format(
                        kwargs['set_name'],
                        _entry,
                        family)
                else:
                    ret['result'] = False
                    # Bug fix: append with += -- plain '=' overwrote the
                    # comments accumulated for earlier entries in this loop
                    ret['comment'] += 'Failed to delete ipset entry from set {0} for {2}. ' \
                                      'Attempted entry was {1}.\n' \
                                      '{3}\n'.format(kwargs['set_name'], _entry, family, command)
    return ret
def add_handler(self, fn, handle=None, persist=True,
policy=None, respondent=None,
overwrite=False):
"""
Invoke `fn(msg)` on the :class:`Broker` thread for each Message sent to
`handle` from this context. Unregister after one invocation if
`persist` is :data:`False`. If `handle` is :data:`None`, a new handle
is allocated and returned.
:param int handle:
If not :data:`None`, an explicit handle to register, usually one of
the ``mitogen.core.*`` constants. If unspecified, a new unused
handle will be allocated.
:param bool persist:
If :data:`False`, the handler will be unregistered after a single
message has been received.
:param Context respondent:
Context that messages to this handle are expected to be sent from.
If specified, arranges for a dead message to be delivered to `fn`
when disconnection of the context is detected.
In future `respondent` will likely also be used to prevent other
contexts from sending messages to the handle.
:param function policy:
Function invoked as `policy(msg, stream)` where `msg` is a
:class:`mitogen.core.Message` about to be delivered, and `stream`
is the :class:`mitogen.core.Stream` on which it was received. The
function must return :data:`True`, otherwise an error is logged and
delivery is refused.
Two built-in policy functions exist:
* :func:`has_parent_authority`: requires the message arrived from a
parent context, or a context acting with a parent context's
authority (``auth_id``).
* :func:`mitogen.parent.is_immediate_child`: requires the
message arrived from an immediately connected child, for use in
messaging patterns where either something becomes buggy or
insecure by permitting indirect upstream communication.
In case of refusal, and the message's ``reply_to`` field is
nonzero, a :class:`mitogen.core.CallError` is delivered to the
sender indicating refusal occurred.
:param bool overwrite:
If :data:`True`, allow existing handles to be silently overwritten.
:return:
`handle`, or if `handle` was :data:`None`, the newly allocated
handle.
:raises Error:
Attemp to register handle that was already registered.
"""
handle = handle or next(self._last_handle)
_vv and IOLOG.debug('%r.add_handler(%r, %r, %r)', self, fn, handle, persist)
if handle in self._handle_map and not overwrite:
raise Error(self.duplicate_handle_msg)
self._handle_map[handle] = persist, fn, policy, respondent
if respondent:
if respondent not in self._handles_by_respondent:
self._handles_by_respondent[respondent] = set()
listen(respondent, 'disconnect',
lambda: self._on_respondent_disconnect(respondent))
self._handles_by_respondent[respondent].add(handle)
return handle | Invoke `fn(msg)` on the :class:`Broker` thread for each Message sent to
`handle` from this context. Unregister after one invocation if
`persist` is :data:`False`. If `handle` is :data:`None`, a new handle
is allocated and returned.
:param int handle:
If not :data:`None`, an explicit handle to register, usually one of
the ``mitogen.core.*`` constants. If unspecified, a new unused
handle will be allocated.
:param bool persist:
If :data:`False`, the handler will be unregistered after a single
message has been received.
:param Context respondent:
Context that messages to this handle are expected to be sent from.
If specified, arranges for a dead message to be delivered to `fn`
when disconnection of the context is detected.
In future `respondent` will likely also be used to prevent other
contexts from sending messages to the handle.
:param function policy:
Function invoked as `policy(msg, stream)` where `msg` is a
:class:`mitogen.core.Message` about to be delivered, and `stream`
is the :class:`mitogen.core.Stream` on which it was received. The
function must return :data:`True`, otherwise an error is logged and
delivery is refused.
Two built-in policy functions exist:
* :func:`has_parent_authority`: requires the message arrived from a
parent context, or a context acting with a parent context's
authority (``auth_id``).
* :func:`mitogen.parent.is_immediate_child`: requires the
message arrived from an immediately connected child, for use in
messaging patterns where either something becomes buggy or
insecure by permitting indirect upstream communication.
In case of refusal, and the message's ``reply_to`` field is
nonzero, a :class:`mitogen.core.CallError` is delivered to the
sender indicating refusal occurred.
:param bool overwrite:
If :data:`True`, allow existing handles to be silently overwritten.
:return:
`handle`, or if `handle` was :data:`None`, the newly allocated
handle.
:raises Error:
Attemp to register handle that was already registered. | Below is the instruction that describes the task:
### Input:
Invoke `fn(msg)` on the :class:`Broker` thread for each Message sent to
`handle` from this context. Unregister after one invocation if
`persist` is :data:`False`. If `handle` is :data:`None`, a new handle
is allocated and returned.
:param int handle:
If not :data:`None`, an explicit handle to register, usually one of
the ``mitogen.core.*`` constants. If unspecified, a new unused
handle will be allocated.
:param bool persist:
If :data:`False`, the handler will be unregistered after a single
message has been received.
:param Context respondent:
Context that messages to this handle are expected to be sent from.
If specified, arranges for a dead message to be delivered to `fn`
when disconnection of the context is detected.
In future `respondent` will likely also be used to prevent other
contexts from sending messages to the handle.
:param function policy:
Function invoked as `policy(msg, stream)` where `msg` is a
:class:`mitogen.core.Message` about to be delivered, and `stream`
is the :class:`mitogen.core.Stream` on which it was received. The
function must return :data:`True`, otherwise an error is logged and
delivery is refused.
Two built-in policy functions exist:
* :func:`has_parent_authority`: requires the message arrived from a
parent context, or a context acting with a parent context's
authority (``auth_id``).
* :func:`mitogen.parent.is_immediate_child`: requires the
message arrived from an immediately connected child, for use in
messaging patterns where either something becomes buggy or
insecure by permitting indirect upstream communication.
In case of refusal, and the message's ``reply_to`` field is
nonzero, a :class:`mitogen.core.CallError` is delivered to the
sender indicating refusal occurred.
:param bool overwrite:
If :data:`True`, allow existing handles to be silently overwritten.
:return:
`handle`, or if `handle` was :data:`None`, the newly allocated
handle.
:raises Error:
Attemp to register handle that was already registered.
### Response:
def add_handler(self, fn, handle=None, persist=True,
                policy=None, respondent=None,
                overwrite=False):
    """
    Register `fn(msg)` to be invoked on the :class:`Broker` thread for every
    Message delivered to `handle` from this context.

    :param int handle:
        Explicit handle to register, usually one of the ``mitogen.core.*``
        constants. When omitted (or falsy), a fresh unused handle is
        allocated.
    :param bool persist:
        When :data:`False`, unregister the handler after a single message has
        been received.
    :param Context respondent:
        Context expected to send messages to this handle. When given, a dead
        message is delivered to `fn` if disconnection of that context is
        detected.
    :param function policy:
        Invoked as `policy(msg, stream)` before delivery; it must return
        :data:`True`, otherwise delivery is refused (and, when the message's
        ``reply_to`` is nonzero, a :class:`mitogen.core.CallError` is sent
        back to the sender). Built-ins include :func:`has_parent_authority`
        and :func:`mitogen.parent.is_immediate_child`.
    :param bool overwrite:
        When :data:`True`, silently replace an existing registration.
    :return:
        The registered handle.
    :raises Error:
        The handle was already registered and `overwrite` was not set.
    """
    if not handle:
        handle = next(self._last_handle)
    if _vv:
        IOLOG.debug('%r.add_handler(%r, %r, %r)', self, fn, handle, persist)
    if not overwrite and handle in self._handle_map:
        raise Error(self.duplicate_handle_msg)

    self._handle_map[handle] = persist, fn, policy, respondent
    if respondent:
        respondent_handles = self._handles_by_respondent.get(respondent)
        if respondent_handles is None:
            respondent_handles = self._handles_by_respondent[respondent] = set()
            listen(respondent, 'disconnect',
                   lambda: self._on_respondent_disconnect(respondent))
        respondent_handles.add(handle)
    return handle
def extract_contours(array, tile, interval=100, field='elev', base=0):
"""
Extract contour lines from an array.
Parameters
----------
array : array
input elevation data
tile : Tile
tile covering the array
interval : integer
elevation value interval when drawing contour lines
field : string
output field name containing elevation value
base : integer
elevation base value the intervals are computed from
Returns
-------
contours : iterable
contours as GeoJSON-like pairs of properties and geometry
"""
import matplotlib.pyplot as plt
levels = _get_contour_values(
array.min(), array.max(), interval=interval, base=base)
if not levels:
return []
contours = plt.contour(array, levels)
index = 0
out_contours = []
for level in range(len(contours.collections)):
elevation = levels[index]
index += 1
paths = contours.collections[level].get_paths()
for path in paths:
out_coords = [
(
tile.left + (y * tile.pixel_x_size),
tile.top - (x * tile.pixel_y_size),
)
for x, y in zip(path.vertices[:, 1], path.vertices[:, 0])
]
if len(out_coords) >= 2:
out_contours.append(
dict(
properties={field: elevation},
geometry=mapping(LineString(out_coords))
)
)
return out_contours | Extract contour lines from an array.
Parameters
----------
array : array
input elevation data
tile : Tile
tile covering the array
interval : integer
elevation value interval when drawing contour lines
field : string
output field name containing elevation value
base : integer
elevation base value the intervals are computed from
Returns
-------
contours : iterable
contours as GeoJSON-like pairs of properties and geometry | Below is the instruction that describes the task:
### Input:
Extract contour lines from an array.
Parameters
----------
array : array
input elevation data
tile : Tile
tile covering the array
interval : integer
elevation value interval when drawing contour lines
field : string
output field name containing elevation value
base : integer
elevation base value the intervals are computed from
Returns
-------
contours : iterable
contours as GeoJSON-like pairs of properties and geometry
### Response:
def extract_contours(array, tile, interval=100, field='elev', base=0):
    """
    Extract contour lines from an array.

    Parameters
    ----------
    array : array
        input elevation data
    tile : Tile
        tile covering the array
    interval : integer
        elevation value interval when drawing contour lines
    field : string
        output field name containing elevation value
    base : integer
        elevation base value the intervals are computed from

    Returns
    -------
    contours : iterable
        contours as GeoJSON-like pairs of properties and geometry
    """
    import matplotlib.pyplot as plt
    levels = _get_contour_values(
        array.min(), array.max(), interval=interval, base=base)
    if not levels:
        return []
    contours = plt.contour(array, levels)
    out_contours = []
    # plt.contour produces one collection per requested level, so pair them
    # directly instead of maintaining a separate index counter (the original
    # `index` variable always mirrored the loop position).
    # NOTE(review): ContourSet.collections is deprecated as of matplotlib 3.8;
    # consider migrating to ContourSet.allsegs when the dependency floor allows.
    for elevation, collection in zip(levels, contours.collections):
        for path in collection.get_paths():
            # Path vertices are (col, row) in array coordinates; convert to
            # georeferenced (x, y) via the tile's origin and pixel sizes.
            out_coords = [
                (
                    tile.left + (col * tile.pixel_x_size),
                    tile.top - (row * tile.pixel_y_size),
                )
                for col, row in zip(path.vertices[:, 0], path.vertices[:, 1])
            ]
            # A valid LineString needs at least two coordinates
            if len(out_coords) >= 2:
                out_contours.append(
                    dict(
                        properties={field: elevation},
                        geometry=mapping(LineString(out_coords))
                    )
                )
    return out_contours
def compare(self):
"""
Main comparison function
"""
"""
Note: Make sure to be able to handle these ref/test scenarios:
A:
o----o---o---o
x-------x----x
B:
o----o-----o---o
x--------x--x--x
C:
o------o-----o---o
x-x--------x--x--x
D:
o------o-----o---o
x-x--------x-----x
"""
test_samp_num = 0
ref_samp_num = 0
# Iterate through the reference sample numbers
while ref_samp_num < self.n_ref and test_samp_num < self.n_test:
# Get the closest testing sample number for this reference sample
closest_samp_num, smallest_samp_diff = (
self._get_closest_samp_num(ref_samp_num, test_samp_num))
# Get the closest testing sample number for the next reference
# sample. This doesn't need to be called for the last index.
if ref_samp_num < self.n_ref - 1:
closest_samp_num_next, smallest_samp_diff_next = (
self._get_closest_samp_num(ref_samp_num + 1, test_samp_num))
else:
# Set non-matching value if there is no next reference sample
# to compete for the test sample
closest_samp_num_next = -1
# Found a contested test sample number. Decide which
# reference sample it belongs to. If the sample is closer to
# the next reference sample, leave it to the next reference
# sample and label this reference sample as unmatched.
if (closest_samp_num == closest_samp_num_next
and smallest_samp_diff_next < smallest_samp_diff):
# Get the next closest sample for this reference sample,
# if not already assigned to a previous sample.
# It will be the previous testing sample number in any
# possible case (scenario D below), or nothing.
if closest_samp_num and (not ref_samp_num or closest_samp_num - 1 != self.matching_sample_nums[ref_samp_num - 1]):
# The previous test annotation is inspected
closest_samp_num = closest_samp_num - 1
smallest_samp_diff = abs(self.ref_sample[ref_samp_num]
- self.test_sample[closest_samp_num])
# Assign the reference-test pair if close enough
if smallest_samp_diff < self.window_width:
self.matching_sample_nums[ref_samp_num] = closest_samp_num
# Set the starting test sample number to inspect
# for the next reference sample.
test_samp_num = closest_samp_num + 1
# Otherwise there is no matching test annotation
# If there is no clash, or the contested test sample is
# closer to the current reference, keep the test sample
# for this reference sample.
else:
# Assign the reference-test pair if close enough
if smallest_samp_diff < self.window_width:
self.matching_sample_nums[ref_samp_num] = closest_samp_num
# Increment the starting test sample number to inspect
# for the next reference sample.
test_samp_num = closest_samp_num + 1
ref_samp_num += 1
self._calc_stats() | Main comparison function | Below is the instruction that describes the task:
### Input:
Main comparison function
### Response:
def compare(self):
    """
    Main comparison function

    Greedily pairs each reference sample index with its closest test sample
    index, resolving a contested test sample in favour of the closer
    reference sample. Reads ``self.ref_sample``/``self.test_sample`` (with
    lengths ``self.n_ref``/``self.n_test``) and ``self.window_width``,
    writes the pairing into ``self.matching_sample_nums``, and finishes by
    calling ``self._calc_stats()``. Only pairs closer than
    ``self.window_width`` are accepted.
    """
    """
    Note: Make sure to be able to handle these ref/test scenarios:
    A:
    o----o---o---o
    x-------x----x
    B:
    o----o-----o---o
    x--------x--x--x
    C:
    o------o-----o---o
    x-x--------x--x--x
    D:
    o------o-----o---o
    x-x--------x-----x
    """
    test_samp_num = 0
    ref_samp_num = 0
    # Iterate through the reference sample numbers
    while ref_samp_num < self.n_ref and test_samp_num < self.n_test:
        # Get the closest testing sample number for this reference sample
        closest_samp_num, smallest_samp_diff = (
            self._get_closest_samp_num(ref_samp_num, test_samp_num))
        # Get the closest testing sample number for the next reference
        # sample. This doesn't need to be called for the last index.
        if ref_samp_num < self.n_ref - 1:
            closest_samp_num_next, smallest_samp_diff_next = (
                self._get_closest_samp_num(ref_samp_num + 1, test_samp_num))
        else:
            # Set non-matching value if there is no next reference sample
            # to compete for the test sample
            closest_samp_num_next = -1
        # Found a contested test sample number. Decide which
        # reference sample it belongs to. If the sample is closer to
        # the next reference sample, leave it to the next reference
        # sample and label this reference sample as unmatched.
        if (closest_samp_num == closest_samp_num_next
                and smallest_samp_diff_next < smallest_samp_diff):
            # Get the next closest sample for this reference sample,
            # if not already assigned to a previous sample.
            # It will be the previous testing sample number in any
            # possible case (scenario D above), or nothing.
            if closest_samp_num and (not ref_samp_num or closest_samp_num - 1 != self.matching_sample_nums[ref_samp_num - 1]):
                # The previous test annotation is inspected
                closest_samp_num = closest_samp_num - 1
                smallest_samp_diff = abs(self.ref_sample[ref_samp_num]
                                         - self.test_sample[closest_samp_num])
                # Assign the reference-test pair if close enough
                if smallest_samp_diff < self.window_width:
                    self.matching_sample_nums[ref_samp_num] = closest_samp_num
                # Set the starting test sample number to inspect
                # for the next reference sample.
                test_samp_num = closest_samp_num + 1
            # Otherwise there is no matching test annotation
        # If there is no clash, or the contested test sample is
        # closer to the current reference, keep the test sample
        # for this reference sample.
        else:
            # Assign the reference-test pair if close enough
            if smallest_samp_diff < self.window_width:
                self.matching_sample_nums[ref_samp_num] = closest_samp_num
            # Increment the starting test sample number to inspect
            # for the next reference sample.
            test_samp_num = closest_samp_num + 1
        ref_samp_num += 1
    self._calc_stats()
def get_azure_cli_credentials(resource=None, with_tenant=False):
"""Return Credentials and default SubscriptionID of current loaded profile of the CLI.
Credentials will be the "az login" command:
https://docs.microsoft.com/cli/azure/authenticate-azure-cli
Default subscription ID is either the only one you have, or you can define it:
https://docs.microsoft.com/cli/azure/manage-azure-subscriptions-azure-cli
.. versionadded:: 1.1.6
:param str resource: The alternative resource for credentials if not ARM (GraphRBac, etc.)
:param bool with_tenant: If True, return a three-tuple with last as tenant ID
:return: tuple of Credentials and SubscriptionID (and tenant ID if with_tenant)
:rtype: tuple
"""
profile = get_cli_profile()
cred, subscription_id, tenant_id = profile.get_login_credentials(resource=resource)
if with_tenant:
return cred, subscription_id, tenant_id
else:
return cred, subscription_id | Return Credentials and default SubscriptionID of current loaded profile of the CLI.
Credentials will be the "az login" command:
https://docs.microsoft.com/cli/azure/authenticate-azure-cli
Default subscription ID is either the only one you have, or you can define it:
https://docs.microsoft.com/cli/azure/manage-azure-subscriptions-azure-cli
.. versionadded:: 1.1.6
:param str resource: The alternative resource for credentials if not ARM (GraphRBac, etc.)
:param bool with_tenant: If True, return a three-tuple with last as tenant ID
:return: tuple of Credentials and SubscriptionID (and tenant ID if with_tenant)
:rtype: tuple | Below is the instruction that describes the task:
### Input:
Return Credentials and default SubscriptionID of current loaded profile of the CLI.
Credentials will be the "az login" command:
https://docs.microsoft.com/cli/azure/authenticate-azure-cli
Default subscription ID is either the only one you have, or you can define it:
https://docs.microsoft.com/cli/azure/manage-azure-subscriptions-azure-cli
.. versionadded:: 1.1.6
:param str resource: The alternative resource for credentials if not ARM (GraphRBac, etc.)
:param bool with_tenant: If True, return a three-tuple with last as tenant ID
:return: tuple of Credentials and SubscriptionID (and tenant ID if with_tenant)
:rtype: tuple
### Response:
def get_azure_cli_credentials(resource=None, with_tenant=False):
    """Return the credentials and default subscription ID of the currently
    loaded Azure CLI profile.

    Credentials come from the "az login" command:
    https://docs.microsoft.com/cli/azure/authenticate-azure-cli

    The default subscription ID is either the only one available, or the one
    selected with:
    https://docs.microsoft.com/cli/azure/manage-azure-subscriptions-azure-cli

    .. versionadded:: 1.1.6

    :param str resource: The alternative resource for credentials if not ARM
        (GraphRBac, etc.)
    :param bool with_tenant: If True, return a three-tuple whose last element
        is the tenant ID
    :return: (credentials, subscription_id) or, with ``with_tenant``,
        (credentials, subscription_id, tenant_id)
    :rtype: tuple
    """
    profile = get_cli_profile()
    credentials, subscription_id, tenant_id = profile.get_login_credentials(
        resource=resource)
    if with_tenant:
        return credentials, subscription_id, tenant_id
    return credentials, subscription_id
def set_listener(self, name, listener):
"""
Set a named listener to use with this connection.
See :py:class:`stomp.listener.ConnectionListener`
:param str name: the name of the listener
:param ConnectionListener listener: the listener object
"""
with self.__listeners_change_condition:
self.listeners[name] = listener | Set a named listener to use with this connection.
See :py:class:`stomp.listener.ConnectionListener`
:param str name: the name of the listener
:param ConnectionListener listener: the listener object | Below is the instruction that describes the task:
### Input:
Set a named listener to use with this connection.
See :py:class:`stomp.listener.ConnectionListener`
:param str name: the name of the listener
:param ConnectionListener listener: the listener object
### Response:
def set_listener(self, name, listener):
    """
    Store a listener on this connection under the given name, replacing any
    listener previously registered under that name.

    See :py:class:`stomp.listener.ConnectionListener`

    :param str name: the name of the listener
    :param ConnectionListener listener: the listener object
    """
    # Guard the listeners mapping against concurrent modification
    with self.__listeners_change_condition:
        self.listeners.update({name: listener})
def normalize(self, inplace=True):
"""
Normalizes the values of factor so that they sum to 1.
Parameters
----------
inplace: boolean
If inplace=True it will modify the factor itself, else would return
a new factor
Returns
-------
DiscreteFactor or None: if inplace=True (default) returns None
if inplace=False returns a new `DiscreteFactor` instance.
Examples
--------
>>> from pgmpy.factors.discrete import DiscreteFactor
>>> phi = DiscreteFactor(['x1', 'x2', 'x3'], [2, 3, 2], range(12))
>>> phi.values
array([[[ 0, 1],
[ 2, 3],
[ 4, 5]],
[[ 6, 7],
[ 8, 9],
[10, 11]]])
>>> phi.normalize()
>>> phi.variables
['x1', 'x2', 'x3']
>>> phi.cardinality
array([2, 3, 2])
>>> phi.values
array([[[ 0. , 0.01515152],
[ 0.03030303, 0.04545455],
[ 0.06060606, 0.07575758]],
[[ 0.09090909, 0.10606061],
[ 0.12121212, 0.13636364],
[ 0.15151515, 0.16666667]]])
"""
phi = self if inplace else self.copy()
phi.values = phi.values / phi.values.sum()
if not inplace:
return phi | Normalizes the values of factor so that they sum to 1.
Parameters
----------
inplace: boolean
If inplace=True it will modify the factor itself, else would return
a new factor
Returns
-------
DiscreteFactor or None: if inplace=True (default) returns None
if inplace=False returns a new `DiscreteFactor` instance.
Examples
--------
>>> from pgmpy.factors.discrete import DiscreteFactor
>>> phi = DiscreteFactor(['x1', 'x2', 'x3'], [2, 3, 2], range(12))
>>> phi.values
array([[[ 0, 1],
[ 2, 3],
[ 4, 5]],
[[ 6, 7],
[ 8, 9],
[10, 11]]])
>>> phi.normalize()
>>> phi.variables
['x1', 'x2', 'x3']
>>> phi.cardinality
array([2, 3, 2])
>>> phi.values
array([[[ 0. , 0.01515152],
[ 0.03030303, 0.04545455],
[ 0.06060606, 0.07575758]],
[[ 0.09090909, 0.10606061],
[ 0.12121212, 0.13636364],
[ 0.15151515,  0.16666667]]]) | Below is the instruction that describes the task:
### Input:
Normalizes the values of factor so that they sum to 1.
Parameters
----------
inplace: boolean
If inplace=True it will modify the factor itself, else would return
a new factor
Returns
-------
DiscreteFactor or None: if inplace=True (default) returns None
if inplace=False returns a new `DiscreteFactor` instance.
Examples
--------
>>> from pgmpy.factors.discrete import DiscreteFactor
>>> phi = DiscreteFactor(['x1', 'x2', 'x3'], [2, 3, 2], range(12))
>>> phi.values
array([[[ 0, 1],
[ 2, 3],
[ 4, 5]],
[[ 6, 7],
[ 8, 9],
[10, 11]]])
>>> phi.normalize()
>>> phi.variables
['x1', 'x2', 'x3']
>>> phi.cardinality
array([2, 3, 2])
>>> phi.values
array([[[ 0. , 0.01515152],
[ 0.03030303, 0.04545455],
[ 0.06060606, 0.07575758]],
[[ 0.09090909, 0.10606061],
[ 0.12121212, 0.13636364],
[ 0.15151515, 0.16666667]]])
### Response:
def normalize(self, inplace=True):
    """
    Scale the factor's values so that they sum to one.

    Parameters
    ----------
    inplace: boolean
        If True (default), modify this factor in place; otherwise leave it
        untouched and return a normalized copy.

    Returns
    -------
    DiscreteFactor or None: None when inplace=True,
        otherwise a new normalized `DiscreteFactor` instance.

    Examples
    --------
    >>> from pgmpy.factors.discrete import DiscreteFactor
    >>> phi = DiscreteFactor(['x1', 'x2', 'x3'], [2, 3, 2], range(12))
    >>> phi.normalize()
    >>> float(phi.values.sum())
    1.0
    """
    target = self if inplace else self.copy()
    total = target.values.sum()
    target.values = target.values / total
    if not inplace:
        return target
def get_first_of_week(self):
"""
Returns an integer representing the first day of the week.
0 represents Monday, 6 represents Sunday.
"""
if self.first_of_week is None:
raise ImproperlyConfigured("%s.first_of_week is required." % self.__class__.__name__)
if self.first_of_week not in range(7):
raise ImproperlyConfigured("%s.first_of_week must be an integer between 0 and 6." % self.__class__.__name__)
return self.first_of_week | Returns an integer representing the first day of the week.
0 represents Monday, 6 represents Sunday. | Below is the instruction that describes the task:
### Input:
Returns an integer representing the first day of the week.
0 represents Monday, 6 represents Sunday.
### Response:
def get_first_of_week(self):
"""
Returns an integer representing the first day of the week.
0 represents Monday, 6 represents Sunday.
"""
if self.first_of_week is None:
raise ImproperlyConfigured("%s.first_of_week is required." % self.__class__.__name__)
if self.first_of_week not in range(7):
raise ImproperlyConfigured("%s.first_of_week must be an integer between 0 and 6." % self.__class__.__name__)
return self.first_of_week |
def children(self):
"""
Children matches.
"""
if self._children is None:
self._children = Matches(None, self.input_string)
return self._children | Children matches. | Below is the instruction that describes the task:
### Input:
Children matches.
### Response:
def children(self):
"""
Children matches.
"""
if self._children is None:
self._children = Matches(None, self.input_string)
return self._children |
def str_summaryline(self):
"""Print: 47 GOs, 262 genes described by 10 of 19 sections consistent_increase."""
return "{N} GOs, {M} genes described by {X} of {Y} sections {NM}".format(
N=len(self.go2nt), M=len(self.gene2gos),
X=len(self.sec2chr), Y=len(self.datobj.sec2chr), NM=self.name) | Print: 47 GOs, 262 genes described by 10 of 19 sections consistent_increase. | Below is the instruction that describes the task:
### Input:
Print: 47 GOs, 262 genes described by 10 of 19 sections consistent_increase.
### Response:
def str_summaryline(self):
"""Print: 47 GOs, 262 genes described by 10 of 19 sections consistent_increase."""
return "{N} GOs, {M} genes described by {X} of {Y} sections {NM}".format(
N=len(self.go2nt), M=len(self.gene2gos),
X=len(self.sec2chr), Y=len(self.datobj.sec2chr), NM=self.name) |
def AddClusterTags(r, tags, dry_run=False):
"""
Adds tags to the cluster.
@type tags: list of str
@param tags: tags to add to the cluster
@type dry_run: bool
@param dry_run: whether to perform a dry run
@rtype: int
@return: job id
"""
query = {
"dry-run": dry_run,
"tag": tags,
}
return r.request("put", "/2/tags", query=query) | Adds tags to the cluster.
@type tags: list of str
@param tags: tags to add to the cluster
@type dry_run: bool
@param dry_run: whether to perform a dry run
@rtype: int
@return: job id | Below is the instruction that describes the task:
### Input:
Adds tags to the cluster.
@type tags: list of str
@param tags: tags to add to the cluster
@type dry_run: bool
@param dry_run: whether to perform a dry run
@rtype: int
@return: job id
### Response:
def AddClusterTags(r, tags, dry_run=False):
"""
Adds tags to the cluster.
@type tags: list of str
@param tags: tags to add to the cluster
@type dry_run: bool
@param dry_run: whether to perform a dry run
@rtype: int
@return: job id
"""
query = {
"dry-run": dry_run,
"tag": tags,
}
return r.request("put", "/2/tags", query=query) |
def get(self, data_view_id):
"""
Gets basic information about a view
:param data_view_id: Identifier of the data view
:return: Metadata about the view as JSON
"""
failure_message = "Dataview get failed"
return self._get_success_json(self._get(
'v1/data_views/' + data_view_id, None, failure_message=failure_message))['data']['data_view'] | Gets basic information about a view
:param data_view_id: Identifier of the data view
:return: Metadata about the view as JSON | Below is the instruction that describes the task:
### Input:
Gets basic information about a view
:param data_view_id: Identifier of the data view
:return: Metadata about the view as JSON
### Response:
def get(self, data_view_id):
"""
Gets basic information about a view
:param data_view_id: Identifier of the data view
:return: Metadata about the view as JSON
"""
failure_message = "Dataview get failed"
return self._get_success_json(self._get(
'v1/data_views/' + data_view_id, None, failure_message=failure_message))['data']['data_view'] |
def _detect_encoding(readline):
"""Return file encoding."""
try:
from lib2to3.pgen2 import tokenize as lib2to3_tokenize
encoding = lib2to3_tokenize.detect_encoding(readline)[0]
return encoding
except (LookupError, SyntaxError, UnicodeDecodeError):
return 'latin-1' | Return file encoding. | Below is the instruction that describes the task:
### Input:
Return file encoding.
### Response:
def _detect_encoding(readline):
"""Return file encoding."""
try:
from lib2to3.pgen2 import tokenize as lib2to3_tokenize
encoding = lib2to3_tokenize.detect_encoding(readline)[0]
return encoding
except (LookupError, SyntaxError, UnicodeDecodeError):
return 'latin-1' |
def list_basebackups(self, arg):
"""List basebackups from an object store"""
self.config = config.read_json_config_file(arg.config, check_commands=False, check_pgdata=False)
site = config.get_site_from_config(self.config, arg.site)
self.storage = self._get_object_storage(site, pgdata=None)
self.storage.show_basebackup_list(verbose=arg.verbose) | List basebackups from an object store | Below is the instruction that describes the task:
### Input:
List basebackups from an object store
### Response:
def list_basebackups(self, arg):
"""List basebackups from an object store"""
self.config = config.read_json_config_file(arg.config, check_commands=False, check_pgdata=False)
site = config.get_site_from_config(self.config, arg.site)
self.storage = self._get_object_storage(site, pgdata=None)
self.storage.show_basebackup_list(verbose=arg.verbose) |
def accuracy(mod_y, ref_y, summary=True, name="accuracy"):
"""Accuracy computation op.
Parameters
----------
mod_y : tf.Tensor
Model output tensor.
ref_y : tf.Tensor
Reference input tensor.
summary : bool, optional (default = True)
Whether to save tf summary for the op.
Returns
-------
tf.Tensor : accuracy op. tensor
"""
with tf.name_scope(name):
mod_pred = tf.argmax(mod_y, 1)
correct_pred = tf.equal(mod_pred, tf.argmax(ref_y, 1))
accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))
if summary:
tf.summary.scalar('accuracy', accuracy)
return accuracy | Accuracy computation op.
Parameters
----------
mod_y : tf.Tensor
Model output tensor.
ref_y : tf.Tensor
Reference input tensor.
summary : bool, optional (default = True)
Whether to save tf summary for the op.
Returns
-------
tf.Tensor : accuracy op. tensor | Below is the instruction that describes the task:
### Input:
Accuracy computation op.
Parameters
----------
mod_y : tf.Tensor
Model output tensor.
ref_y : tf.Tensor
Reference input tensor.
summary : bool, optional (default = True)
Whether to save tf summary for the op.
Returns
-------
tf.Tensor : accuracy op. tensor
### Response:
def accuracy(mod_y, ref_y, summary=True, name="accuracy"):
"""Accuracy computation op.
Parameters
----------
mod_y : tf.Tensor
Model output tensor.
ref_y : tf.Tensor
Reference input tensor.
summary : bool, optional (default = True)
Whether to save tf summary for the op.
Returns
-------
tf.Tensor : accuracy op. tensor
"""
with tf.name_scope(name):
mod_pred = tf.argmax(mod_y, 1)
correct_pred = tf.equal(mod_pred, tf.argmax(ref_y, 1))
accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))
if summary:
tf.summary.scalar('accuracy', accuracy)
return accuracy |
def add_file_to_tree(tree, file_path, file_contents, is_executable=False):
"""Add a file to a tree.
Args:
tree
A list of dicts containing info about each blob in a tree.
file_path
The path of the new file in the tree.
file_contents
The (UTF-8 encoded) contents of the new file.
is_executable
If ``True``, the new file will get executable permissions (0755).
Otherwise, it will get 0644 permissions.
Returns:
The provided tree, but with the new file added.
"""
record = {
"path": file_path,
"mode": "100755" if is_executable else "100644",
"type": "blob",
"content": file_contents,
}
tree.append(record)
return tree | Add a file to a tree.
Args:
tree
A list of dicts containing info about each blob in a tree.
file_path
The path of the new file in the tree.
file_contents
The (UTF-8 encoded) contents of the new file.
is_executable
If ``True``, the new file will get executable permissions (0755).
Otherwise, it will get 0644 permissions.
Returns:
The provided tree, but with the new file added. | Below is the instruction that describes the task:
### Input:
Add a file to a tree.
Args:
tree
A list of dicts containing info about each blob in a tree.
file_path
The path of the new file in the tree.
file_contents
The (UTF-8 encoded) contents of the new file.
is_executable
If ``True``, the new file will get executable permissions (0755).
Otherwise, it will get 0644 permissions.
Returns:
The provided tree, but with the new file added.
### Response:
def add_file_to_tree(tree, file_path, file_contents, is_executable=False):
"""Add a file to a tree.
Args:
tree
A list of dicts containing info about each blob in a tree.
file_path
The path of the new file in the tree.
file_contents
The (UTF-8 encoded) contents of the new file.
is_executable
If ``True``, the new file will get executable permissions (0755).
Otherwise, it will get 0644 permissions.
Returns:
The provided tree, but with the new file added.
"""
record = {
"path": file_path,
"mode": "100755" if is_executable else "100644",
"type": "blob",
"content": file_contents,
}
tree.append(record)
return tree |
async def article(
self, title, description=None,
*, url=None, thumb=None, content=None,
id=None, text=None, parse_mode=(), link_preview=True,
geo=None, period=60, contact=None, game=False, buttons=None
):
"""
Creates new inline result of article type.
Args:
title (`str`):
The title to be shown for this result.
description (`str`, optional):
Further explanation of what this result means.
url (`str`, optional):
The URL to be shown for this result.
thumb (:tl:`InputWebDocument`, optional):
The thumbnail to be shown for this result.
For now it has to be a :tl:`InputWebDocument` if present.
content (:tl:`InputWebDocument`, optional):
The content to be shown for this result.
For now it has to be a :tl:`InputWebDocument` if present.
"""
# TODO Does 'article' work always?
# article, photo, gif, mpeg4_gif, video, audio,
# voice, document, location, venue, contact, game
result = types.InputBotInlineResult(
id=id or '',
type='article',
send_message=await self._message(
text=text, parse_mode=parse_mode, link_preview=link_preview,
geo=geo, period=period,
contact=contact,
game=game,
buttons=buttons
),
title=title,
description=description,
url=url,
thumb=thumb,
content=content
)
if id is None:
result.id = hashlib.sha256(bytes(result)).hexdigest()
return result | Creates new inline result of article type.
Args:
title (`str`):
The title to be shown for this result.
description (`str`, optional):
Further explanation of what this result means.
url (`str`, optional):
The URL to be shown for this result.
thumb (:tl:`InputWebDocument`, optional):
The thumbnail to be shown for this result.
For now it has to be a :tl:`InputWebDocument` if present.
content (:tl:`InputWebDocument`, optional):
The content to be shown for this result.
For now it has to be a :tl:`InputWebDocument` if present. | Below is the instruction that describes the task:
### Input:
Creates new inline result of article type.
Args:
title (`str`):
The title to be shown for this result.
description (`str`, optional):
Further explanation of what this result means.
url (`str`, optional):
The URL to be shown for this result.
thumb (:tl:`InputWebDocument`, optional):
The thumbnail to be shown for this result.
For now it has to be a :tl:`InputWebDocument` if present.
content (:tl:`InputWebDocument`, optional):
The content to be shown for this result.
For now it has to be a :tl:`InputWebDocument` if present.
### Response:
async def article(
self, title, description=None,
*, url=None, thumb=None, content=None,
id=None, text=None, parse_mode=(), link_preview=True,
geo=None, period=60, contact=None, game=False, buttons=None
):
"""
Creates new inline result of article type.
Args:
title (`str`):
The title to be shown for this result.
description (`str`, optional):
Further explanation of what this result means.
url (`str`, optional):
The URL to be shown for this result.
thumb (:tl:`InputWebDocument`, optional):
The thumbnail to be shown for this result.
For now it has to be a :tl:`InputWebDocument` if present.
content (:tl:`InputWebDocument`, optional):
The content to be shown for this result.
For now it has to be a :tl:`InputWebDocument` if present.
"""
# TODO Does 'article' work always?
# article, photo, gif, mpeg4_gif, video, audio,
# voice, document, location, venue, contact, game
result = types.InputBotInlineResult(
id=id or '',
type='article',
send_message=await self._message(
text=text, parse_mode=parse_mode, link_preview=link_preview,
geo=geo, period=period,
contact=contact,
game=game,
buttons=buttons
),
title=title,
description=description,
url=url,
thumb=thumb,
content=content
)
if id is None:
result.id = hashlib.sha256(bytes(result)).hexdigest()
return result |
def to_curl(request, compressed=False, verify=True):
"""
Returns string with curl command by provided request object
Parameters
----------
compressed : bool
If `True` then `--compressed` argument will be added to result
"""
parts = [
('curl', None),
('-X', request.method),
]
for k, v in sorted(request.headers.items()):
parts += [('-H', '{0}: {1}'.format(k, v))]
if request.body:
body = request.body
if isinstance(body, bytes):
body = body.decode('utf-8')
parts += [('-d', body)]
if compressed:
parts += [('--compressed', None)]
if not verify:
parts += [('--insecure', None)]
parts += [(None, request.url)]
flat_parts = []
for k, v in parts:
if k:
flat_parts.append(k)
if v:
flat_parts.append("'{0}'".format(v))
return ' '.join(flat_parts) | Returns string with curl command by provided request object
Parameters
----------
compressed : bool
If `True` then `--compressed` argument will be added to result | Below is the instruction that describes the task:
### Input:
Returns string with curl command by provided request object
Parameters
----------
compressed : bool
If `True` then `--compressed` argument will be added to result
### Response:
def to_curl(request, compressed=False, verify=True):
"""
Returns string with curl command by provided request object
Parameters
----------
compressed : bool
If `True` then `--compressed` argument will be added to result
"""
parts = [
('curl', None),
('-X', request.method),
]
for k, v in sorted(request.headers.items()):
parts += [('-H', '{0}: {1}'.format(k, v))]
if request.body:
body = request.body
if isinstance(body, bytes):
body = body.decode('utf-8')
parts += [('-d', body)]
if compressed:
parts += [('--compressed', None)]
if not verify:
parts += [('--insecure', None)]
parts += [(None, request.url)]
flat_parts = []
for k, v in parts:
if k:
flat_parts.append(k)
if v:
flat_parts.append("'{0}'".format(v))
return ' '.join(flat_parts) |
def objects_to_root(objects: List) -> Root:
"""
Convert a list of s3 ObjectSummaries into a directory tree.
:param objects: The list of objects, e.g. the result of calling
`.objects.all()` on a bucket.
:return: The tree structure, contained within a root node.
"""
def _to_tree(objs: Iterable) -> Dict:
"""
Build a tree structure from a flat list of objects.
:param objs: The raw iterable of S3 `ObjectSummary`s, as returned by a
bucket listing.
:return: The listing as a nested dictionary where keys are directory
and file names. The values of directories will in turn be a
dict. The values of keys representing files will be the
`ObjectSummary` instance.
"""
path_tree = {}
for obj in objs:
is_dir = obj.key.endswith('/')
chunks = [chunk for chunk in obj.key.split('/') if chunk]
chunk_count = len(chunks)
tmp = path_tree
for i, chunk in enumerate(chunks):
is_last_chunk = i == chunk_count - 1
if is_last_chunk and not is_dir:
tmp[chunk] = obj
else:
# must be a directory
if chunk not in tmp:
# it doesn't exist - create it
tmp[chunk] = {}
tmp = tmp[chunk]
return path_tree
def _to_entity(key: str, value: Union[Dict, Any]) -> Entity:
"""
Turn a nested dictionary representing an S3 bucket into the correct
`Entity` object.
:param key: The name of the entity.
:param value: If the entity is a directory, the nested dict
representing its contents. Otherwise, the `ObjectSummary`
instance representing the file.
:return: The entity representing the entity name and value pair.
"""
if isinstance(value, dict):
return Directory(
key,
{key_: _to_entity(key_, value_)
for key_, value_ in value.items()})
return File(pathlib.PurePath(value.key).name, value.size,
value.e_tag.strip('"'))
tree = _to_tree(objects)
return Root({pathlib.PurePath(key).name: _to_entity(key, value)
for key, value in tree.items()}) | Convert a list of s3 ObjectSummaries into a directory tree.
:param objects: The list of objects, e.g. the result of calling
`.objects.all()` on a bucket.
:return: The tree structure, contained within a root node. | Below is the instruction that describes the task:
### Input:
Convert a list of s3 ObjectSummaries into a directory tree.
:param objects: The list of objects, e.g. the result of calling
`.objects.all()` on a bucket.
:return: The tree structure, contained within a root node.
### Response:
def objects_to_root(objects: List) -> Root:
"""
Convert a list of s3 ObjectSummaries into a directory tree.
:param objects: The list of objects, e.g. the result of calling
`.objects.all()` on a bucket.
:return: The tree structure, contained within a root node.
"""
def _to_tree(objs: Iterable) -> Dict:
"""
Build a tree structure from a flat list of objects.
:param objs: The raw iterable of S3 `ObjectSummary`s, as returned by a
bucket listing.
:return: The listing as a nested dictionary where keys are directory
and file names. The values of directories will in turn be a
dict. The values of keys representing files will be the
`ObjectSummary` instance.
"""
path_tree = {}
for obj in objs:
is_dir = obj.key.endswith('/')
chunks = [chunk for chunk in obj.key.split('/') if chunk]
chunk_count = len(chunks)
tmp = path_tree
for i, chunk in enumerate(chunks):
is_last_chunk = i == chunk_count - 1
if is_last_chunk and not is_dir:
tmp[chunk] = obj
else:
# must be a directory
if chunk not in tmp:
# it doesn't exist - create it
tmp[chunk] = {}
tmp = tmp[chunk]
return path_tree
def _to_entity(key: str, value: Union[Dict, Any]) -> Entity:
"""
Turn a nested dictionary representing an S3 bucket into the correct
`Entity` object.
:param key: The name of the entity.
:param value: If the entity is a directory, the nested dict
representing its contents. Otherwise, the `ObjectSummary`
instance representing the file.
:return: The entity representing the entity name and value pair.
"""
if isinstance(value, dict):
return Directory(
key,
{key_: _to_entity(key_, value_)
for key_, value_ in value.items()})
return File(pathlib.PurePath(value.key).name, value.size,
value.e_tag.strip('"'))
tree = _to_tree(objects)
return Root({pathlib.PurePath(key).name: _to_entity(key, value)
for key, value in tree.items()}) |
def _earth_orientation(date):
"""Earth orientation parameters in degrees
"""
ttt = date.change_scale('TT').julian_century
# a_a = 0.12
# a_c = 0.26
# s_prime = -0.0015 * (a_c ** 2 / 1.2 + a_a ** 2) * ttt
s_prime = - 0.000047 * ttt
return date.eop.x / 3600., date.eop.y / 3600., s_prime / 3600 | Below is the instruction that describes the task:
### Input:
Earth orientation parameters in degrees
### Response:
def _earth_orientation(date):
"""Earth orientation parameters in degrees
"""
ttt = date.change_scale('TT').julian_century
# a_a = 0.12
# a_c = 0.26
# s_prime = -0.0015 * (a_c ** 2 / 1.2 + a_a ** 2) * ttt
s_prime = - 0.000047 * ttt
return date.eop.x / 3600., date.eop.y / 3600., s_prime / 3600 |
def addAxes(axtype=None, c=None):
"""Draw axes on scene. Available axes types:
:param int axtype:
- 0, no axes,
- 1, draw three gray grid walls
- 2, show cartesian axes from (0,0,0)
- 3, show positive range of cartesian axes from (0,0,0)
- 4, show a triad at bottom left
- 5, show a cube at bottom left
- 6, mark the corners of the bounding box
- 7, draw a simple ruler at the bottom of the window
- 8, show the ``vtkCubeAxesActor`` object
- 9, show the bounding box outLine
- 10, show three circles representing the maximum bounding box
"""
vp = settings.plotter_instance
if axtype is not None:
vp.axes = axtype # overrride
r = vp.renderers.index(vp.renderer)
if not vp.axes:
return
if c is None: # automatic black or white
c = (0.9, 0.9, 0.9)
if numpy.sum(vp.renderer.GetBackground()) > 1.5:
c = (0.1, 0.1, 0.1)
if not vp.renderer:
return
if vp.axes_exist[r]:
return
# calculate max actors bounds
bns = []
for a in vp.actors:
if a and a.GetPickable():
b = a.GetBounds()
if b:
bns.append(b)
if len(bns):
max_bns = numpy.max(bns, axis=0)
min_bns = numpy.min(bns, axis=0)
vbb = (min_bns[0], max_bns[1], min_bns[2], max_bns[3], min_bns[4], max_bns[5])
else:
vbb = vp.renderer.ComputeVisiblePropBounds()
max_bns = vbb
min_bns = vbb
sizes = (max_bns[1] - min_bns[0], max_bns[3] - min_bns[2], max_bns[5] - min_bns[4])
############################################################
if vp.axes == 1 or vp.axes == True: # gray grid walls
nd = 4 # number of divisions in the smallest axis
off = -0.04 # label offset
step = numpy.min(sizes) / nd
if not step:
# bad proportions, use vtkCubeAxesActor
vp.addAxes(axtype=8, c=c)
vp.axes = 1
return
rx, ry, rz = numpy.rint(sizes / step).astype(int)
if max([rx / ry, ry / rx, rx / rz, rz / rx, ry / rz, rz / ry]) > 15:
# bad proportions, use vtkCubeAxesActor
vp.addAxes(axtype=8, c=c)
vp.axes = 1
return
gxy = shapes.Grid(pos=(0.5, 0.5, 0), normal=[0, 0, 1], bc=None, resx=rx, resy=ry)
gxz = shapes.Grid(pos=(0.5, 0, 0.5), normal=[0, 1, 0], bc=None, resx=rz, resy=rx)
gyz = shapes.Grid(pos=(0, 0.5, 0.5), normal=[1, 0, 0], bc=None, resx=rz, resy=ry)
gxy.alpha(0.06).wire(False).color(c).lineWidth(1)
gxz.alpha(0.04).wire(False).color(c).lineWidth(1)
gyz.alpha(0.04).wire(False).color(c).lineWidth(1)
xa = shapes.Line([0, 0, 0], [1, 0, 0], c=c, lw=1)
ya = shapes.Line([0, 0, 0], [0, 1, 0], c=c, lw=1)
za = shapes.Line([0, 0, 0], [0, 0, 1], c=c, lw=1)
xt, yt, zt, ox, oy, oz = [None] * 6
if vp.xtitle:
xtitle = vp.xtitle
if min_bns[0] <= 0 and max_bns[1] > 0: # mark x origin
ox = shapes.Cube([-min_bns[0] / sizes[0], 0, 0], side=0.008, c=c)
if len(vp.xtitle) == 1: # add axis length info
xtitle = vp.xtitle + " /" + utils.precision(sizes[0], 4)
wpos = [1 - (len(vp.xtitle) + 1) / 40, off, 0]
xt = shapes.Text(xtitle, pos=wpos, normal=(0, 0, 1),
s=0.025, c=c, justify="bottom-right")
if vp.ytitle:
if min_bns[2] <= 0 and max_bns[3] > 0: # mark y origin
oy = shapes.Cube([0, -min_bns[2] / sizes[1], 0], side=0.008, c=c)
yt = shapes.Text(vp.ytitle, pos=(0, 0, 0), normal=(0, 0, 1),
s=0.025, c=c, justify="bottom-right")
if len(vp.ytitle) == 1:
wpos = [off, 1 - (len(vp.ytitle) + 1) / 40, 0]
yt.pos(wpos)
else:
wpos = [off * 0.7, 1 - (len(vp.ytitle) + 1) / 40, 0]
yt.rotateZ(90).pos(wpos)
if vp.ztitle:
if min_bns[4] <= 0 and max_bns[5] > 0: # mark z origin
oz = shapes.Cube([0, 0, -min_bns[4] / sizes[2]], side=0.008, c=c)
zt = shapes.Text(vp.ztitle, pos=(0, 0, 0), normal=(1, -1, 0),
s=0.025, c=c, justify="bottom-right")
if len(vp.ztitle) == 1:
wpos = [off * 0.6, off * 0.6, 1 - (len(vp.ztitle) + 1) / 40]
zt.rotate(90, (1, -1, 0)).pos(wpos)
else:
wpos = [off * 0.3, off * 0.3, 1 - (len(vp.ztitle) + 1) / 40]
zt.rotate(180, (1, -1, 0)).pos(wpos)
acts = [gxy, gxz, gyz, xa, ya, za, xt, yt, zt, ox, oy, oz]
for a in acts:
if a:
a.PickableOff()
aa = Assembly(acts)
aa.pos(min_bns[0], min_bns[2], min_bns[4])
aa.SetScale(sizes)
aa.PickableOff()
vp.renderer.AddActor(aa)
vp.axes_exist[r] = aa
elif vp.axes == 2 or vp.axes == 3:
vbb = vp.renderer.ComputeVisiblePropBounds() # to be double checked
xcol, ycol, zcol = "db", "dg", "dr"
s = 1
alpha = 1
centered = False
x0, x1, y0, y1, z0, z1 = vbb
dx, dy, dz = x1 - x0, y1 - y0, z1 - z0
aves = numpy.sqrt(dx * dx + dy * dy + dz * dz) / 2
x0, x1 = min(x0, 0), max(x1, 0)
y0, y1 = min(y0, 0), max(y1, 0)
z0, z1 = min(z0, 0), max(z1, 0)
if vp.axes == 3:
if x1 > 0:
x0 = 0
if y1 > 0:
y0 = 0
if z1 > 0:
z0 = 0
dx, dy, dz = x1 - x0, y1 - y0, z1 - z0
acts = []
if x0 * x1 <= 0 or y0 * z1 <= 0 or z0 * z1 <= 0: # some ranges contain origin
zero = shapes.Sphere(r=aves / 120 * s, c="k", alpha=alpha, res=10)
acts += [zero]
if len(vp.xtitle) and dx > aves/100:
xl = shapes.Cylinder([[x0, 0, 0], [x1, 0, 0]], r=aves/250*s, c=xcol, alpha=alpha)
xc = shapes.Cone(pos=[x1, 0, 0], c=xcol, alpha=alpha,
r=aves/100*s, height=aves/25*s, axis=[1, 0, 0], res=10)
wpos = [x1-(len(vp.xtitle)+1)*aves/40*s, -aves/25*s, 0] # aligned to arrow tip
if centered:
wpos = [(x0 + x1) / 2 - len(vp.xtitle) / 2 * aves / 40 * s, -aves / 25 * s, 0]
xt = shapes.Text(vp.xtitle, pos=wpos, normal=(0, 0, 1), s=aves / 40 * s, c=xcol)
acts += [xl, xc, xt]
if len(vp.ytitle) and dy > aves/100:
yl = shapes.Cylinder([[0, y0, 0], [0, y1, 0]], r=aves/250*s, c=ycol, alpha=alpha)
yc = shapes.Cone(pos=[0, y1, 0], c=ycol, alpha=alpha,
r=aves/100*s, height=aves/25*s, axis=[0, 1, 0], res=10)
wpos = [-aves/40*s, y1-(len(vp.ytitle)+1)*aves/40*s, 0]
if centered:
wpos = [-aves / 40 * s, (y0 + y1) / 2 - len(vp.ytitle) / 2 * aves / 40 * s, 0]
yt = shapes.Text(vp.ytitle, pos=(0, 0, 0), normal=(0, 0, 1), s=aves / 40 * s, c=ycol)
yt.rotate(90, [0, 0, 1]).pos(wpos)
acts += [yl, yc, yt]
if len(vp.ztitle) and dz > aves/100:
zl = shapes.Cylinder([[0, 0, z0], [0, 0, z1]], r=aves/250*s, c=zcol, alpha=alpha)
zc = shapes.Cone(pos=[0, 0, z1], c=zcol, alpha=alpha,
r=aves/100*s, height=aves/25*s, axis=[0, 0, 1], res=10)
wpos = [-aves/50*s, -aves/50*s, z1 - (len(vp.ztitle)+1)*aves/40*s]
if centered:
wpos = [-aves/50*s, -aves/50*s, (z0+z1)/2-len(vp.ztitle)/2*aves/40*s]
zt = shapes.Text(vp.ztitle, pos=(0,0,0), normal=(1, -1, 0), s=aves/40*s, c=zcol)
zt.rotate(180, (1, -1, 0)).pos(wpos)
acts += [zl, zc, zt]
for a in acts:
a.PickableOff()
ass = Assembly(acts)
ass.PickableOff()
vp.renderer.AddActor(ass)
vp.axes_exist[r] = ass
elif vp.axes == 4:
axact = vtk.vtkAxesActor()
axact.SetShaftTypeToCylinder()
axact.SetCylinderRadius(0.03)
axact.SetXAxisLabelText(vp.xtitle)
axact.SetYAxisLabelText(vp.ytitle)
axact.SetZAxisLabelText(vp.ztitle)
axact.GetXAxisShaftProperty().SetColor(0, 0, 1)
axact.GetZAxisShaftProperty().SetColor(1, 0, 0)
axact.GetXAxisTipProperty().SetColor(0, 0, 1)
axact.GetZAxisTipProperty().SetColor(1, 0, 0)
bc = numpy.array(vp.renderer.GetBackground())
if numpy.sum(bc) < 1.5:
lc = (1, 1, 1)
else:
lc = (0, 0, 0)
axact.GetXAxisCaptionActor2D().GetCaptionTextProperty().BoldOff()
axact.GetYAxisCaptionActor2D().GetCaptionTextProperty().BoldOff()
axact.GetZAxisCaptionActor2D().GetCaptionTextProperty().BoldOff()
axact.GetXAxisCaptionActor2D().GetCaptionTextProperty().ItalicOff()
axact.GetYAxisCaptionActor2D().GetCaptionTextProperty().ItalicOff()
axact.GetZAxisCaptionActor2D().GetCaptionTextProperty().ItalicOff()
axact.GetXAxisCaptionActor2D().GetCaptionTextProperty().ShadowOff()
axact.GetYAxisCaptionActor2D().GetCaptionTextProperty().ShadowOff()
axact.GetZAxisCaptionActor2D().GetCaptionTextProperty().ShadowOff()
axact.GetXAxisCaptionActor2D().GetCaptionTextProperty().SetColor(lc)
axact.GetYAxisCaptionActor2D().GetCaptionTextProperty().SetColor(lc)
axact.GetZAxisCaptionActor2D().GetCaptionTextProperty().SetColor(lc)
axact.PickableOff()
icn = addIcon(axact, size=0.1)
vp.axes_exist[r] = icn
elif vp.axes == 5:
axact = vtk.vtkAnnotatedCubeActor()
axact.GetCubeProperty().SetColor(0.75, 0.75, 0.75)
axact.SetTextEdgesVisibility(0)
axact.SetFaceTextScale(0.4)
axact.GetXPlusFaceProperty().SetColor(colors.getColor("b"))
axact.GetXMinusFaceProperty().SetColor(colors.getColor("db"))
axact.GetYPlusFaceProperty().SetColor(colors.getColor("g"))
axact.GetYMinusFaceProperty().SetColor(colors.getColor("dg"))
axact.GetZPlusFaceProperty().SetColor(colors.getColor("r"))
axact.GetZMinusFaceProperty().SetColor(colors.getColor("dr"))
axact.PickableOff()
icn = addIcon(axact, size=0.06)
vp.axes_exist[r] = icn
elif vp.axes == 6:
ocf = vtk.vtkOutlineCornerFilter()
ocf.SetCornerFactor(0.1)
largestact, sz = None, -1
for a in vp.actors:
if a.GetPickable():
b = a.GetBounds()
d = max(b[1]-b[0], b[3]-b[2], b[5]-b[4])
if sz < d:
largestact = a
sz = d
if isinstance(largestact, Assembly):
ocf.SetInputData(largestact.getActor(0).GetMapper().GetInput())
else:
ocf.SetInputData(largestact.polydata())
ocf.Update()
ocMapper = vtk.vtkHierarchicalPolyDataMapper()
ocMapper.SetInputConnection(0, ocf.GetOutputPort(0))
ocActor = vtk.vtkActor()
ocActor.SetMapper(ocMapper)
bc = numpy.array(vp.renderer.GetBackground())
if numpy.sum(bc) < 1.5:
lc = (1, 1, 1)
else:
lc = (0, 0, 0)
ocActor.GetProperty().SetColor(lc)
ocActor.PickableOff()
vp.renderer.AddActor(ocActor)
vp.axes_exist[r] = ocActor
elif vp.axes == 7:
# draws a simple ruler at the bottom of the window
ls = vtk.vtkLegendScaleActor()
ls.RightAxisVisibilityOff()
ls.TopAxisVisibilityOff()
ls.LegendVisibilityOff()
ls.LeftAxisVisibilityOff()
ls.GetBottomAxis().SetNumberOfMinorTicks(1)
ls.GetBottomAxis().GetProperty().SetColor(c)
ls.GetBottomAxis().GetLabelTextProperty().SetColor(c)
ls.GetBottomAxis().GetLabelTextProperty().BoldOff()
ls.GetBottomAxis().GetLabelTextProperty().ItalicOff()
ls.GetBottomAxis().GetLabelTextProperty().ShadowOff()
ls.PickableOff()
vp.renderer.AddActor(ls)
vp.axes_exist[r] = ls
elif vp.axes == 8:
ca = vtk.vtkCubeAxesActor()
ca.SetBounds(vbb)
if vp.camera:
ca.SetCamera(vp.camera)
else:
ca.SetCamera(vp.renderer.GetActiveCamera())
ca.GetXAxesLinesProperty().SetColor(c)
ca.GetYAxesLinesProperty().SetColor(c)
ca.GetZAxesLinesProperty().SetColor(c)
for i in range(3):
ca.GetLabelTextProperty(i).SetColor(c)
ca.GetTitleTextProperty(i).SetColor(c)
ca.SetTitleOffset(5)
ca.SetFlyMode(3)
ca.SetXTitle(vp.xtitle)
ca.SetYTitle(vp.ytitle)
ca.SetZTitle(vp.ztitle)
if vp.xtitle == "":
ca.SetXAxisVisibility(0)
ca.XAxisLabelVisibilityOff()
if vp.ytitle == "":
ca.SetYAxisVisibility(0)
ca.YAxisLabelVisibilityOff()
if vp.ztitle == "":
ca.SetZAxisVisibility(0)
ca.ZAxisLabelVisibilityOff()
ca.PickableOff()
vp.renderer.AddActor(ca)
vp.axes_exist[r] = ca
return
elif vp.axes == 9:
src = vtk.vtkCubeSource()
src.SetXLength(vbb[1] - vbb[0])
src.SetYLength(vbb[3] - vbb[2])
src.SetZLength(vbb[5] - vbb[4])
src.Update()
ca = Actor(src.GetOutput(), c=c, alpha=0.5, wire=1)
ca.pos((vbb[0] + vbb[1]) / 2, (vbb[3] + vbb[2]) / 2, (vbb[5] + vbb[4]) / 2)
ca.PickableOff()
vp.renderer.AddActor(ca)
vp.axes_exist[r] = ca
elif vp.axes == 10:
x0 = (vbb[0] + vbb[1]) / 2, (vbb[3] + vbb[2]) / 2, (vbb[5] + vbb[4]) / 2
rx, ry, rz = (vbb[1]-vbb[0])/2, (vbb[3]-vbb[2])/2, (vbb[5]-vbb[4])/2
rm = max(rx, ry, rz)
xc = shapes.Disc(x0, (0,0,1), r1=rm, r2=rm, c='lr', bc=None, res=1, resphi=72)
yc = shapes.Disc(x0, (0,1,0), r1=rm, r2=rm, c='lg', bc=None, res=1, resphi=72)
zc = shapes.Disc(x0, (1,0,0), r1=rm, r2=rm, c='lb', bc=None, res=1, resphi=72)
xc.clean().alpha(0.2).wire().lineWidth(2.5).PickableOff()
yc.clean().alpha(0.2).wire().lineWidth(2.5).PickableOff()
zc.clean().alpha(0.2).wire().lineWidth(2.5).PickableOff()
ca = xc + yc + zc
ca.PickableOff()
vp.renderer.AddActor(ca)
vp.axes_exist[r] = ca
else:
colors.printc('~bomb Keyword axes must be in range [0-10].', c=1)
colors.printc('''
~target Available axes types:
0 = no axes,
1 = draw three gray grid walls
2 = show cartesian axes from (0,0,0)
3 = show positive range of cartesian axes from (0,0,0)
4 = show a triad at bottom left
5 = show a cube at bottom left
6 = mark the corners of the bounding box
7 = draw a simple ruler at the bottom of the window
8 = show the vtkCubeAxesActor object
9 = show the bounding box outline
10 = show three circles representing the maximum bounding box
''', c=1, bold=0)
if not vp.axes_exist[r]:
vp.axes_exist[r] = True
return | Draw axes on scene. Available axes types:
:param int axtype:
- 0, no axes,
- 1, draw three gray grid walls
- 2, show cartesian axes from (0,0,0)
- 3, show positive range of cartesian axes from (0,0,0)
- 4, show a triad at bottom left
- 5, show a cube at bottom left
- 6, mark the corners of the bounding box
- 7, draw a simple ruler at the bottom of the window
- 8, show the ``vtkCubeAxesActor`` object
- 9, show the bounding box outLine
- 10, show three circles representing the maximum bounding box | Below is the instruction that describes the task:
### Input:
Draw axes on scene. Available axes types:
:param int axtype:
- 0, no axes,
- 1, draw three gray grid walls
- 2, show cartesian axes from (0,0,0)
- 3, show positive range of cartesian axes from (0,0,0)
- 4, show a triad at bottom left
- 5, show a cube at bottom left
- 6, mark the corners of the bounding box
- 7, draw a simple ruler at the bottom of the window
- 8, show the ``vtkCubeAxesActor`` object
- 9, show the bounding box outLine
- 10, show three circles representing the maximum bounding box
### Response:
def addAxes(axtype=None, c=None):
    """Draw axes on the scene. Available axes types:

    :param int axtype:

        - 0,  no axes,
        - 1,  draw three gray grid walls
        - 2,  show cartesian axes from (0,0,0)
        - 3,  show positive range of cartesian axes from (0,0,0)
        - 4,  show a triad at bottom left
        - 5,  show a cube at bottom left
        - 6,  mark the corners of the bounding box
        - 7,  draw a simple ruler at the bottom of the window
        - 8,  show the ``vtkCubeAxesActor`` object
        - 9,  show the bounding box outLine
        - 10, show three circles representing the maximum bounding box

    :param c: axes color; if None it is chosen automatically
        (light gray on dark backgrounds, dark gray on light ones).
    """
    vp = settings.plotter_instance
    if axtype is not None:
        vp.axes = axtype  # override
    r = vp.renderers.index(vp.renderer)
    if not vp.axes:
        return
    if c is None:  # automatic black or white
        c = (0.9, 0.9, 0.9)
        if numpy.sum(vp.renderer.GetBackground()) > 1.5:
            c = (0.1, 0.1, 0.1)
    if not vp.renderer:
        return
    if vp.axes_exist[r]:
        return

    # calculate max actors bounds over all pickable actors
    bns = []
    for a in vp.actors:
        if a and a.GetPickable():
            b = a.GetBounds()
            if b:
                bns.append(b)
    if len(bns):
        max_bns = numpy.max(bns, axis=0)
        min_bns = numpy.min(bns, axis=0)
        vbb = (min_bns[0], max_bns[1], min_bns[2], max_bns[3], min_bns[4], max_bns[5])
    else:
        vbb = vp.renderer.ComputeVisiblePropBounds()
        max_bns = vbb
        min_bns = vbb
    sizes = (max_bns[1] - min_bns[0], max_bns[3] - min_bns[2], max_bns[5] - min_bns[4])

    ############################################################
    if vp.axes == 1 or vp.axes == True:  # gray grid walls
        nd = 4  # number of divisions in the smallest axis
        off = -0.04  # label offset
        step = numpy.min(sizes) / nd
        if not step:
            # bad proportions, use vtkCubeAxesActor
            vp.addAxes(axtype=8, c=c)
            vp.axes = 1
            return
        rx, ry, rz = numpy.rint(sizes / step).astype(int)
        if max([rx / ry, ry / rx, rx / rz, rz / rx, ry / rz, rz / ry]) > 15:
            # bad proportions, use vtkCubeAxesActor
            vp.addAxes(axtype=8, c=c)
            vp.axes = 1
            return

        gxy = shapes.Grid(pos=(0.5, 0.5, 0), normal=[0, 0, 1], bc=None, resx=rx, resy=ry)
        gxz = shapes.Grid(pos=(0.5, 0, 0.5), normal=[0, 1, 0], bc=None, resx=rz, resy=rx)
        gyz = shapes.Grid(pos=(0, 0.5, 0.5), normal=[1, 0, 0], bc=None, resx=rz, resy=ry)
        gxy.alpha(0.06).wire(False).color(c).lineWidth(1)
        gxz.alpha(0.04).wire(False).color(c).lineWidth(1)
        gyz.alpha(0.04).wire(False).color(c).lineWidth(1)

        xa = shapes.Line([0, 0, 0], [1, 0, 0], c=c, lw=1)
        ya = shapes.Line([0, 0, 0], [0, 1, 0], c=c, lw=1)
        za = shapes.Line([0, 0, 0], [0, 0, 1], c=c, lw=1)

        xt, yt, zt, ox, oy, oz = [None] * 6
        if vp.xtitle:
            xtitle = vp.xtitle
            if min_bns[0] <= 0 and max_bns[1] > 0:  # mark x origin
                ox = shapes.Cube([-min_bns[0] / sizes[0], 0, 0], side=0.008, c=c)
            if len(vp.xtitle) == 1:  # add axis length info
                xtitle = vp.xtitle + " /" + utils.precision(sizes[0], 4)
            wpos = [1 - (len(vp.xtitle) + 1) / 40, off, 0]
            xt = shapes.Text(xtitle, pos=wpos, normal=(0, 0, 1),
                             s=0.025, c=c, justify="bottom-right")
        if vp.ytitle:
            if min_bns[2] <= 0 and max_bns[3] > 0:  # mark y origin
                oy = shapes.Cube([0, -min_bns[2] / sizes[1], 0], side=0.008, c=c)
            yt = shapes.Text(vp.ytitle, pos=(0, 0, 0), normal=(0, 0, 1),
                             s=0.025, c=c, justify="bottom-right")
            if len(vp.ytitle) == 1:
                wpos = [off, 1 - (len(vp.ytitle) + 1) / 40, 0]
                yt.pos(wpos)
            else:
                wpos = [off * 0.7, 1 - (len(vp.ytitle) + 1) / 40, 0]
                yt.rotateZ(90).pos(wpos)
        if vp.ztitle:
            if min_bns[4] <= 0 and max_bns[5] > 0:  # mark z origin
                oz = shapes.Cube([0, 0, -min_bns[4] / sizes[2]], side=0.008, c=c)
            zt = shapes.Text(vp.ztitle, pos=(0, 0, 0), normal=(1, -1, 0),
                             s=0.025, c=c, justify="bottom-right")
            if len(vp.ztitle) == 1:
                wpos = [off * 0.6, off * 0.6, 1 - (len(vp.ztitle) + 1) / 40]
                zt.rotate(90, (1, -1, 0)).pos(wpos)
            else:
                wpos = [off * 0.3, off * 0.3, 1 - (len(vp.ztitle) + 1) / 40]
                zt.rotate(180, (1, -1, 0)).pos(wpos)
        acts = [gxy, gxz, gyz, xa, ya, za, xt, yt, zt, ox, oy, oz]
        for a in acts:
            if a:
                a.PickableOff()
        aa = Assembly(acts)
        aa.pos(min_bns[0], min_bns[2], min_bns[4])
        aa.SetScale(sizes)
        aa.PickableOff()
        vp.renderer.AddActor(aa)
        vp.axes_exist[r] = aa

    elif vp.axes == 2 or vp.axes == 3:
        vbb = vp.renderer.ComputeVisiblePropBounds()  # to be double checked
        xcol, ycol, zcol = "db", "dg", "dr"
        s = 1
        alpha = 1
        centered = False
        x0, x1, y0, y1, z0, z1 = vbb
        dx, dy, dz = x1 - x0, y1 - y0, z1 - z0
        aves = numpy.sqrt(dx * dx + dy * dy + dz * dz) / 2
        x0, x1 = min(x0, 0), max(x1, 0)
        y0, y1 = min(y0, 0), max(y1, 0)
        z0, z1 = min(z0, 0), max(z1, 0)

        if vp.axes == 3:
            if x1 > 0:
                x0 = 0
            if y1 > 0:
                y0 = 0
            if z1 > 0:
                z0 = 0

        dx, dy, dz = x1 - x0, y1 - y0, z1 - z0
        acts = []
        # BUGFIX: the y-range test used to read ``y0 * z1 <= 0`` which mixed
        # the y and z axes; each product below now tests its own axis range.
        if x0 * x1 <= 0 or y0 * y1 <= 0 or z0 * z1 <= 0:  # some ranges contain origin
            zero = shapes.Sphere(r=aves / 120 * s, c="k", alpha=alpha, res=10)
            acts += [zero]

        if len(vp.xtitle) and dx > aves / 100:
            xl = shapes.Cylinder([[x0, 0, 0], [x1, 0, 0]], r=aves / 250 * s, c=xcol, alpha=alpha)
            xc = shapes.Cone(pos=[x1, 0, 0], c=xcol, alpha=alpha,
                             r=aves / 100 * s, height=aves / 25 * s, axis=[1, 0, 0], res=10)
            wpos = [x1 - (len(vp.xtitle) + 1) * aves / 40 * s, -aves / 25 * s, 0]  # aligned to arrow tip
            if centered:
                wpos = [(x0 + x1) / 2 - len(vp.xtitle) / 2 * aves / 40 * s, -aves / 25 * s, 0]
            xt = shapes.Text(vp.xtitle, pos=wpos, normal=(0, 0, 1), s=aves / 40 * s, c=xcol)
            acts += [xl, xc, xt]

        if len(vp.ytitle) and dy > aves / 100:
            yl = shapes.Cylinder([[0, y0, 0], [0, y1, 0]], r=aves / 250 * s, c=ycol, alpha=alpha)
            yc = shapes.Cone(pos=[0, y1, 0], c=ycol, alpha=alpha,
                             r=aves / 100 * s, height=aves / 25 * s, axis=[0, 1, 0], res=10)
            wpos = [-aves / 40 * s, y1 - (len(vp.ytitle) + 1) * aves / 40 * s, 0]
            if centered:
                wpos = [-aves / 40 * s, (y0 + y1) / 2 - len(vp.ytitle) / 2 * aves / 40 * s, 0]
            yt = shapes.Text(vp.ytitle, pos=(0, 0, 0), normal=(0, 0, 1), s=aves / 40 * s, c=ycol)
            yt.rotate(90, [0, 0, 1]).pos(wpos)
            acts += [yl, yc, yt]

        if len(vp.ztitle) and dz > aves / 100:
            zl = shapes.Cylinder([[0, 0, z0], [0, 0, z1]], r=aves / 250 * s, c=zcol, alpha=alpha)
            zc = shapes.Cone(pos=[0, 0, z1], c=zcol, alpha=alpha,
                             r=aves / 100 * s, height=aves / 25 * s, axis=[0, 0, 1], res=10)
            wpos = [-aves / 50 * s, -aves / 50 * s, z1 - (len(vp.ztitle) + 1) * aves / 40 * s]
            if centered:
                wpos = [-aves / 50 * s, -aves / 50 * s, (z0 + z1) / 2 - len(vp.ztitle) / 2 * aves / 40 * s]
            zt = shapes.Text(vp.ztitle, pos=(0, 0, 0), normal=(1, -1, 0), s=aves / 40 * s, c=zcol)
            zt.rotate(180, (1, -1, 0)).pos(wpos)
            acts += [zl, zc, zt]
        for a in acts:
            a.PickableOff()
        ass = Assembly(acts)
        ass.PickableOff()
        vp.renderer.AddActor(ass)
        vp.axes_exist[r] = ass

    elif vp.axes == 4:
        axact = vtk.vtkAxesActor()
        axact.SetShaftTypeToCylinder()
        axact.SetCylinderRadius(0.03)
        axact.SetXAxisLabelText(vp.xtitle)
        axact.SetYAxisLabelText(vp.ytitle)
        axact.SetZAxisLabelText(vp.ztitle)
        axact.GetXAxisShaftProperty().SetColor(0, 0, 1)
        axact.GetZAxisShaftProperty().SetColor(1, 0, 0)
        axact.GetXAxisTipProperty().SetColor(0, 0, 1)
        axact.GetZAxisTipProperty().SetColor(1, 0, 0)
        bc = numpy.array(vp.renderer.GetBackground())
        if numpy.sum(bc) < 1.5:
            lc = (1, 1, 1)
        else:
            lc = (0, 0, 0)
        axact.GetXAxisCaptionActor2D().GetCaptionTextProperty().BoldOff()
        axact.GetYAxisCaptionActor2D().GetCaptionTextProperty().BoldOff()
        axact.GetZAxisCaptionActor2D().GetCaptionTextProperty().BoldOff()
        axact.GetXAxisCaptionActor2D().GetCaptionTextProperty().ItalicOff()
        axact.GetYAxisCaptionActor2D().GetCaptionTextProperty().ItalicOff()
        axact.GetZAxisCaptionActor2D().GetCaptionTextProperty().ItalicOff()
        axact.GetXAxisCaptionActor2D().GetCaptionTextProperty().ShadowOff()
        axact.GetYAxisCaptionActor2D().GetCaptionTextProperty().ShadowOff()
        axact.GetZAxisCaptionActor2D().GetCaptionTextProperty().ShadowOff()
        axact.GetXAxisCaptionActor2D().GetCaptionTextProperty().SetColor(lc)
        axact.GetYAxisCaptionActor2D().GetCaptionTextProperty().SetColor(lc)
        axact.GetZAxisCaptionActor2D().GetCaptionTextProperty().SetColor(lc)
        axact.PickableOff()
        icn = addIcon(axact, size=0.1)
        vp.axes_exist[r] = icn

    elif vp.axes == 5:
        axact = vtk.vtkAnnotatedCubeActor()
        axact.GetCubeProperty().SetColor(0.75, 0.75, 0.75)
        axact.SetTextEdgesVisibility(0)
        axact.SetFaceTextScale(0.4)
        axact.GetXPlusFaceProperty().SetColor(colors.getColor("b"))
        axact.GetXMinusFaceProperty().SetColor(colors.getColor("db"))
        axact.GetYPlusFaceProperty().SetColor(colors.getColor("g"))
        axact.GetYMinusFaceProperty().SetColor(colors.getColor("dg"))
        axact.GetZPlusFaceProperty().SetColor(colors.getColor("r"))
        axact.GetZMinusFaceProperty().SetColor(colors.getColor("dr"))
        axact.PickableOff()
        icn = addIcon(axact, size=0.06)
        vp.axes_exist[r] = icn

    elif vp.axes == 6:
        ocf = vtk.vtkOutlineCornerFilter()
        ocf.SetCornerFactor(0.1)
        largestact, sz = None, -1
        # pick the largest pickable actor as the one to outline
        for a in vp.actors:
            if a.GetPickable():
                b = a.GetBounds()
                d = max(b[1] - b[0], b[3] - b[2], b[5] - b[4])
                if sz < d:
                    largestact = a
                    sz = d
        if isinstance(largestact, Assembly):
            ocf.SetInputData(largestact.getActor(0).GetMapper().GetInput())
        else:
            ocf.SetInputData(largestact.polydata())
        ocf.Update()
        ocMapper = vtk.vtkHierarchicalPolyDataMapper()
        ocMapper.SetInputConnection(0, ocf.GetOutputPort(0))
        ocActor = vtk.vtkActor()
        ocActor.SetMapper(ocMapper)
        bc = numpy.array(vp.renderer.GetBackground())
        if numpy.sum(bc) < 1.5:
            lc = (1, 1, 1)
        else:
            lc = (0, 0, 0)
        ocActor.GetProperty().SetColor(lc)
        ocActor.PickableOff()
        vp.renderer.AddActor(ocActor)
        vp.axes_exist[r] = ocActor

    elif vp.axes == 7:
        # draws a simple ruler at the bottom of the window
        ls = vtk.vtkLegendScaleActor()
        ls.RightAxisVisibilityOff()
        ls.TopAxisVisibilityOff()
        ls.LegendVisibilityOff()
        ls.LeftAxisVisibilityOff()
        ls.GetBottomAxis().SetNumberOfMinorTicks(1)
        ls.GetBottomAxis().GetProperty().SetColor(c)
        ls.GetBottomAxis().GetLabelTextProperty().SetColor(c)
        ls.GetBottomAxis().GetLabelTextProperty().BoldOff()
        ls.GetBottomAxis().GetLabelTextProperty().ItalicOff()
        ls.GetBottomAxis().GetLabelTextProperty().ShadowOff()
        ls.PickableOff()
        vp.renderer.AddActor(ls)
        vp.axes_exist[r] = ls

    elif vp.axes == 8:
        ca = vtk.vtkCubeAxesActor()
        ca.SetBounds(vbb)
        if vp.camera:
            ca.SetCamera(vp.camera)
        else:
            ca.SetCamera(vp.renderer.GetActiveCamera())
        ca.GetXAxesLinesProperty().SetColor(c)
        ca.GetYAxesLinesProperty().SetColor(c)
        ca.GetZAxesLinesProperty().SetColor(c)
        for i in range(3):
            ca.GetLabelTextProperty(i).SetColor(c)
            ca.GetTitleTextProperty(i).SetColor(c)
        ca.SetTitleOffset(5)
        ca.SetFlyMode(3)
        ca.SetXTitle(vp.xtitle)
        ca.SetYTitle(vp.ytitle)
        ca.SetZTitle(vp.ztitle)
        if vp.xtitle == "":
            ca.SetXAxisVisibility(0)
            ca.XAxisLabelVisibilityOff()
        if vp.ytitle == "":
            ca.SetYAxisVisibility(0)
            ca.YAxisLabelVisibilityOff()
        if vp.ztitle == "":
            ca.SetZAxisVisibility(0)
            ca.ZAxisLabelVisibilityOff()
        ca.PickableOff()
        vp.renderer.AddActor(ca)
        vp.axes_exist[r] = ca
        return

    elif vp.axes == 9:
        src = vtk.vtkCubeSource()
        src.SetXLength(vbb[1] - vbb[0])
        src.SetYLength(vbb[3] - vbb[2])
        src.SetZLength(vbb[5] - vbb[4])
        src.Update()
        ca = Actor(src.GetOutput(), c=c, alpha=0.5, wire=1)
        ca.pos((vbb[0] + vbb[1]) / 2, (vbb[3] + vbb[2]) / 2, (vbb[5] + vbb[4]) / 2)
        ca.PickableOff()
        vp.renderer.AddActor(ca)
        vp.axes_exist[r] = ca

    elif vp.axes == 10:
        x0 = (vbb[0] + vbb[1]) / 2, (vbb[3] + vbb[2]) / 2, (vbb[5] + vbb[4]) / 2
        rx, ry, rz = (vbb[1] - vbb[0]) / 2, (vbb[3] - vbb[2]) / 2, (vbb[5] - vbb[4]) / 2
        rm = max(rx, ry, rz)
        xc = shapes.Disc(x0, (0, 0, 1), r1=rm, r2=rm, c='lr', bc=None, res=1, resphi=72)
        yc = shapes.Disc(x0, (0, 1, 0), r1=rm, r2=rm, c='lg', bc=None, res=1, resphi=72)
        zc = shapes.Disc(x0, (1, 0, 0), r1=rm, r2=rm, c='lb', bc=None, res=1, resphi=72)
        xc.clean().alpha(0.2).wire().lineWidth(2.5).PickableOff()
        yc.clean().alpha(0.2).wire().lineWidth(2.5).PickableOff()
        zc.clean().alpha(0.2).wire().lineWidth(2.5).PickableOff()
        ca = xc + yc + zc
        ca.PickableOff()
        vp.renderer.AddActor(ca)
        vp.axes_exist[r] = ca

    else:
        colors.printc('~bomb Keyword axes must be in range [0-10].', c=1)
        colors.printc('''
~target Available axes types:
0 = no axes,
1 = draw three gray grid walls
2 = show cartesian axes from (0,0,0)
3 = show positive range of cartesian axes from (0,0,0)
4 = show a triad at bottom left
5 = show a cube at bottom left
6 = mark the corners of the bounding box
7 = draw a simple ruler at the bottom of the window
8 = show the vtkCubeAxesActor object
9 = show the bounding box outline
10 = show three circles representing the maximum bounding box
''', c=1, bold=0)

    if not vp.axes_exist[r]:
        vp.axes_exist[r] = True
    return
def _meco_frequency(m1, m2, chi1, chi2):
    """Return the frequency of the minimum energy cutoff for 3.5pN
    (2.5pN spin).
    """
    v_meco = meco_velocity(m1, m2, chi1, chi2)
    total_mass = m1 + m2
    return velocity_to_frequency(v_meco, total_mass)
### Input:
Returns the frequency of the minimum energy cutoff for 3.5pN (2.5pN spin)
### Response:
def _meco_frequency(m1, m2, chi1, chi2):
"""Returns the frequency of the minimum energy cutoff for 3.5pN (2.5pN spin)
"""
return velocity_to_frequency(meco_velocity(m1, m2, chi1, chi2), m1+m2) |
def plot_somas(somas):
    '''Plot a set of somas on one 3D figure as spheres, each in a different color'''
    _, ax = common.get_figure(new_fig=True, subplot=111,
                              params={'projection': '3d', 'aspect': 'equal'})
    for soma in somas:
        common.plot_sphere(ax, soma.center, soma.radius,
                           color=random_color(), alpha=1)
    plt.show()
### Input:
Plot set of somas on same figure as spheres, each with different color
### Response:
def plot_somas(somas):
'''Plot set of somas on same figure as spheres, each with different color'''
_, ax = common.get_figure(new_fig=True, subplot=111,
params={'projection': '3d', 'aspect': 'equal'})
for s in somas:
common.plot_sphere(ax, s.center, s.radius, color=random_color(), alpha=1)
plt.show() |
def underflow(self, axis=0):
    """
    Return the underflow for the given axis.

    Depending on the dimension of the histogram, may return an array:
    a scalar for 1D, a list for 2D and a list of lists for 3D.
    """
    if axis not in (0, 1, 2):
        raise ValueError("axis must be 0, 1, or 2")
    dim = self.DIM
    if dim == 1:
        return self.GetBinContent(0)
    if dim == 2:
        # Scan the other axis; the requested axis is pinned at bin 0.
        row_axis = (axis + 1) % 2
        values = []
        for i in self.bins_range(axis=row_axis, overflow=True):
            coords = [i]
            coords.insert(axis, 0)
            values.append(self.GetBinContent(*coords))
        return values
    if dim == 3:
        # The two remaining axes, in ascending order, span the 2D result.
        axis2, axis3 = [a for a in (0, 1, 2) if a != axis]
        result = []
        for j in self.bins_range(axis=axis3, overflow=True):
            row = []
            for i in self.bins_range(axis=axis2, overflow=True):
                coords = [i, j]
                coords.insert(axis, 0)
                row.append(self.GetBinContent(*coords))
            result.append(row)
        return result
Depending on the dimension of the histogram, may return an array. | Below is the the instruction that describes the task:
### Input:
Return the underflow for the given axis.
Depending on the dimension of the histogram, may return an array.
### Response:
def underflow(self, axis=0):
"""
Return the underflow for the given axis.
Depending on the dimension of the histogram, may return an array.
"""
if axis not in range(3):
raise ValueError("axis must be 0, 1, or 2")
if self.DIM == 1:
return self.GetBinContent(0)
elif self.DIM == 2:
def idx(i):
arg = [i]
arg.insert(axis, 0)
return arg
return [
self.GetBinContent(*idx(i))
for i in self.bins_range(axis=(axis + 1) % 2, overflow=True)]
elif self.DIM == 3:
axes = [0, 1, 2]
axes.remove(axis)
axis2, axis3 = axes
def idx(i, j):
arg = [i, j]
arg.insert(axis, 0)
return arg
return [[
self.GetBinContent(*idx(i, j))
for i in self.bins_range(axis=axis2, overflow=True)]
for j in self.bins_range(axis=axis3, overflow=True)] |
def __looks_like_html(response):
    """Guesses entity type when Content-Type header is missing.

    Since Content-Type is not strictly required, some servers leave it out.
    The body is treated as HTML when, after stripping leading whitespace
    and lowercasing, it begins with an ``<html`` tag or a ``<!doctype``
    declaration.

    :param response: a response object exposing a ``text`` attribute.
    :returns: ``True`` if the body looks like an HTML document.
    """
    text = response.text.lstrip().lower()
    # str.startswith accepts a tuple of prefixes: one call covers both cases.
    return text.startswith(('<html', '<!doctype'))
Since Content-Type is not strictly required, some servers leave it out. | Below is the the instruction that describes the task:
### Input:
Guesses entity type when Content-Type header is missing.
Since Content-Type is not strictly required, some servers leave it out.
### Response:
def __looks_like_html(response):
"""Guesses entity type when Content-Type header is missing.
Since Content-Type is not strictly required, some servers leave it out.
"""
text = response.text.lstrip().lower()
return text.startswith('<html') or text.startswith('<!doctype') |
def process_char(buffer: str, char: str, mappings=_char_mappings):
    """Append *char* to *buffer*, applying convenience replacements.

    Intended for use with ``EventListener.wait_for_unicode_char()``.
    Characters found in *mappings* are replaced before being appended
    (the default maps ``'\\r'`` to ``'\\n'``, since pressing return
    produces ``'\\r'`` while ``'\\n'`` is usually wanted, and ``'\\t'``
    to four spaces). The backspace character (``'\\u0008'``) cannot
    simply be appended to a string, so it removes the last character
    of *buffer* instead.

    :param buffer: the string to be updated
    :type buffer: str
    :param char: the unicode character to be processed
    :type char: str
    :param mappings: a dict containing replacement mappings
    :type mappings: dict
    :returns: a new string"""
    if char in mappings:
        return buffer + mappings[char]
    if char == "\u0008":
        # Backspace: drop the trailing character, if any.
        return buffer[:-1] if buffer else buffer
    return buffer + char
EventListener.wait_for_unicode_char(). In most cases it simply appends
char to buffer. Some replacements are done because presing return will
produce '\\r' but for most cases '\\n' would be desireable.
Also backspace cant just be added to a string either, therefore, if char is
"\\u0008" the last character from buffer will be cut off. The replacement
from '\\r' to '\\n' is done using the mappings argument, the default value
for it also contains a mapping from '\t' to 4 spaces.
:param buffer: the string to be updated
:type buffer: str
:param char: the unicode character to be processed
:type char: str
:param mappings: a dict containing mappings
:type mappings: dict
:returns: a new string | Below is the the instruction that describes the task:
### Input:
This is a convinience method for use with
EventListener.wait_for_unicode_char(). In most cases it simply appends
char to buffer. Some replacements are done because presing return will
produce '\\r' but for most cases '\\n' would be desireable.
Also backspace cant just be added to a string either, therefore, if char is
"\\u0008" the last character from buffer will be cut off. The replacement
from '\\r' to '\\n' is done using the mappings argument, the default value
for it also contains a mapping from '\t' to 4 spaces.
:param buffer: the string to be updated
:type buffer: str
:param char: the unicode character to be processed
:type char: str
:param mappings: a dict containing mappings
:type mappings: dict
:returns: a new string
### Response:
def process_char(buffer: str, char: str, mappings=_char_mappings):
"""This is a convinience method for use with
EventListener.wait_for_unicode_char(). In most cases it simply appends
char to buffer. Some replacements are done because presing return will
produce '\\r' but for most cases '\\n' would be desireable.
Also backspace cant just be added to a string either, therefore, if char is
"\\u0008" the last character from buffer will be cut off. The replacement
from '\\r' to '\\n' is done using the mappings argument, the default value
for it also contains a mapping from '\t' to 4 spaces.
:param buffer: the string to be updated
:type buffer: str
:param char: the unicode character to be processed
:type char: str
:param mappings: a dict containing mappings
:type mappings: dict
:returns: a new string"""
if char in mappings:
return buffer + mappings[char]
elif char == "\u0008":
return buffer[:-1] if len(buffer) > 0 else buffer
else:
return buffer + char |
def remove_invalid_fields(self, queryset, fields, view, request):
    """
    Extend :py:meth:`rest_framework.filters.OrderingFilter.remove_invalid_fields`
    to validate that all provided sort fields exist (as contrasted with the
    super's behavior which is to silently remove invalid fields).

    :raises ValidationError: if a sort field is invalid.
    """
    allowed = [
        pair[0]
        for pair in self.get_valid_fields(queryset, view, {'request': request})
    ]
    # Validate against the raw terms so the error message echoes exactly
    # what the client sent.
    invalid = []
    for term in fields:
        candidate = format_value(term.replace(".", "__").lstrip('-'), "underscore")
        if candidate not in allowed:
            invalid.append(term)
    if invalid:
        raise ValidationError('invalid sort parameter{}: {}'.format(
            ('s' if len(invalid) > 1 else ''), ','.join(invalid)))

    # Rewrite the fields handed to super() separately from the validation
    # above: the leading `-` must be stripped before format_value so it is
    # not turned into `_`.
    def _rewrite(term):
        rewritten = term.replace(".", "__")
        if rewritten.startswith('-'):
            return '-' + format_value(rewritten.lstrip('-'), "underscore")
        return format_value(rewritten, "underscore")

    return super(OrderingFilter, self).remove_invalid_fields(
        queryset, [_rewrite(term) for term in fields], view, request)
validate that all provided sort fields exist (as contrasted with the super's behavior
which is to silently remove invalid fields).
:raises ValidationError: if a sort field is invalid. | Below is the the instruction that describes the task:
### Input:
Extend :py:meth:`rest_framework.filters.OrderingFilter.remove_invalid_fields` to
validate that all provided sort fields exist (as contrasted with the super's behavior
which is to silently remove invalid fields).
:raises ValidationError: if a sort field is invalid.
### Response:
def remove_invalid_fields(self, queryset, fields, view, request):
"""
Extend :py:meth:`rest_framework.filters.OrderingFilter.remove_invalid_fields` to
validate that all provided sort fields exist (as contrasted with the super's behavior
which is to silently remove invalid fields).
:raises ValidationError: if a sort field is invalid.
"""
valid_fields = [
item[0] for item in self.get_valid_fields(queryset, view,
{'request': request})
]
bad_terms = [
term for term in fields
if format_value(term.replace(".", "__").lstrip('-'), "underscore") not in valid_fields
]
if bad_terms:
raise ValidationError('invalid sort parameter{}: {}'.format(
('s' if len(bad_terms) > 1 else ''), ','.join(bad_terms)))
# this looks like it duplicates code above, but we want the ValidationError to report
# the actual parameter supplied while we want the fields passed to the super() to
# be correctly rewritten.
# The leading `-` has to be stripped to prevent format_value from turning it into `_`.
underscore_fields = []
for item in fields:
item_rewritten = item.replace(".", "__")
if item_rewritten.startswith('-'):
underscore_fields.append(
'-' + format_value(item_rewritten.lstrip('-'), "underscore"))
else:
underscore_fields.append(format_value(item_rewritten, "underscore"))
return super(OrderingFilter, self).remove_invalid_fields(
queryset, underscore_fields, view, request) |
def shlex_split(s, **kwargs):
    '''
    Shell-split ``s`` only if it is a string; anything else is returned
    unchanged.
    '''
    # Non-string values (lists, None, ...) pass through untouched.
    if not isinstance(s, six.string_types):
        return s
    # On PY2, shlex.split will fail with unicode types if there are
    # non-ascii characters in the string. So we invoke it with a str
    # type, then decode the resulting string back to unicode on return.
    return salt.utils.data.decode(
        shlex.split(salt.utils.stringutils.to_str(s), **kwargs)
    )
### Input:
Only split if variable is a string
### Response:
def shlex_split(s, **kwargs):
'''
Only split if variable is a string
'''
if isinstance(s, six.string_types):
# On PY2, shlex.split will fail with unicode types if there are
# non-ascii characters in the string. So, we need to make sure we
# invoke it with a str type, and then decode the resulting string back
# to unicode to return it.
return salt.utils.data.decode(
shlex.split(salt.utils.stringutils.to_str(s), **kwargs)
)
else:
return s |
def StartsWithIgnoreCase(self, value):
    """Sets the type of the WHERE clause as "starts with ignore case".

    Args:
      value: The value to be used in the WHERE condition.

    Returns:
      The query builder that this WHERE builder links to.
    """
    condition = self._CreateSingleValueCondition(
        value, 'STARTS_WITH_IGNORE_CASE')
    self._awql = condition
    return self._query_builder
Args:
value: The value to be used in the WHERE condition.
Returns:
The query builder that this WHERE builder links to. | Below is the the instruction that describes the task:
### Input:
Sets the type of the WHERE clause as "starts with ignore case".
Args:
value: The value to be used in the WHERE condition.
Returns:
The query builder that this WHERE builder links to.
### Response:
def StartsWithIgnoreCase(self, value):
"""Sets the type of the WHERE clause as "starts with ignore case".
Args:
value: The value to be used in the WHERE condition.
Returns:
The query builder that this WHERE builder links to.
"""
self._awql = self._CreateSingleValueCondition(value,
'STARTS_WITH_IGNORE_CASE')
return self._query_builder |
def _exc_middleware_factory(app):
    """Build an aiohttp middleware that routes exceptions to handlers.

    Exceptions raised by downstream handlers are matched against the
    handlers registered in ``app._error_handlers`` by walking the
    exception class's MRO; unmatched exceptions propagate unchanged.
    """
    @web.middleware
    async def middleware(request, handler):
        try:
            return await handler(request)
        except Exception as exc:
            # Most-derived class first: the first registered match wins.
            for exc_class in type(exc).mro():
                if exc_class in app._error_handlers:
                    request.exception = exc
                    return await app._error_handlers[exc_class](request)
            raise
    return middleware
Route exceptions to handlers if they are registered in application. | Below is the the instruction that describes the task:
### Input:
Handle exceptions.
Route exceptions to handlers if they are registered in application.
### Response:
def _exc_middleware_factory(app):
"""Handle exceptions.
Route exceptions to handlers if they are registered in application.
"""
@web.middleware
async def middleware(request, handler):
try:
return await handler(request)
except Exception as exc:
for cls in type(exc).mro():
if cls in app._error_handlers:
request.exception = exc
response = await app._error_handlers[cls](request)
return response
raise
return middleware |
def addSuccess(self, test: unittest.case.TestCase) -> None:
    """
    Transforms the test in a serializable version of it and sends it to a
    queue for further analysis.

    :param test: the test to save
    """
    # Delegate to the shared result path, tagging the outcome as a success.
    # noinspection PyTypeChecker
    self.add_result(TestState.success, test)
:param test: the test to save | Below is the the instruction that describes the task:
### Input:
Transforms the test in a serializable version of it and sends it to a queue for further analysis
:param test: the test to save
### Response:
def addSuccess(self, test: unittest.case.TestCase) -> None:
"""
Transforms the test in a serializable version of it and sends it to a queue for further analysis
:param test: the test to save
"""
# noinspection PyTypeChecker
self.add_result(TestState.success, test) |
def mul(self, other, axis="columns", level=None, fill_value=None):
    """Multiplies this DataFrame against another DataFrame/Series/scalar.

    Args:
        other: The object to use to apply the multiply against this.
        axis: The axis to multiply over.
        level: The Multilevel index level to apply multiply over.
        fill_value: The value to fill NaNs with.

    Returns:
        A new DataFrame with the Multiply applied.
    """
    binary_kwargs = {"axis": axis, "level": level, "fill_value": fill_value}
    return self._binary_op("mul", other, **binary_kwargs)
Args:
other: The object to use to apply the multiply against this.
axis: The axis to multiply over.
level: The Multilevel index level to apply multiply over.
fill_value: The value to fill NaNs with.
Returns:
A new DataFrame with the Multiply applied. | Below is the the instruction that describes the task:
### Input:
Multiplies this DataFrame against another DataFrame/Series/scalar.
Args:
other: The object to use to apply the multiply against this.
axis: The axis to multiply over.
level: The Multilevel index level to apply multiply over.
fill_value: The value to fill NaNs with.
Returns:
A new DataFrame with the Multiply applied.
### Response:
def mul(self, other, axis="columns", level=None, fill_value=None):
"""Multiplies this DataFrame against another DataFrame/Series/scalar.
Args:
other: The object to use to apply the multiply against this.
axis: The axis to multiply over.
level: The Multilevel index level to apply multiply over.
fill_value: The value to fill NaNs with.
Returns:
A new DataFrame with the Multiply applied.
"""
return self._binary_op(
"mul", other, axis=axis, level=level, fill_value=fill_value
) |
def fastaSubtract(fastaFiles):
    """
    Given a list of FASTA inputs, remove the reads found in the 2nd, 3rd,
    etc. inputs from the first one in the list.

    @param fastaFiles: a C{list} of FASTA filenames or open file handles
        (anything accepted by C{Bio.SeqIO.parse}). The list itself is not
        modified.
    @raises IndexError: if passed an empty list.
    @return: An iterator producing C{Bio.SeqRecord} instances suitable for
        writing to a file using C{Bio.SeqIO.write}.
    """
    # Index the reads of the first input by id. Indexing with [0] (instead
    # of the previous list.pop(0)) avoids mutating the caller's list; an
    # empty list still raises IndexError, as documented.
    reads = {}
    firstFile = fastaFiles[0]
    for seq in SeqIO.parse(firstFile, 'fasta'):
        reads[seq.id] = seq

    for fastaFile in fastaFiles[1:]:
        for seq in SeqIO.parse(fastaFile, 'fasta'):
            # Make sure that reads with the same id have the same sequence.
            if seq.id in reads:
                assert str(seq.seq) == str(reads[seq.id].seq)
            # Discard the read whether or not it was present (the default
            # argument makes the pop a no-op for unknown ids).
            reads.pop(seq.id, None)

    return iter(reads.values())
remove the reads found in the 2nd, 3rd, etc files from the first file
in the list.
@param fastaFiles: a C{list} of FASTA filenames.
@raises IndexError: if passed an empty list.
@return: An iterator producing C{Bio.SeqRecord} instances suitable for
writing to a file using C{Bio.SeqIO.write}. | Below is the the instruction that describes the task:
### Input:
Given a list of open file descriptors, each with FASTA content,
remove the reads found in the 2nd, 3rd, etc files from the first file
in the list.
@param fastaFiles: a C{list} of FASTA filenames.
@raises IndexError: if passed an empty list.
@return: An iterator producing C{Bio.SeqRecord} instances suitable for
writing to a file using C{Bio.SeqIO.write}.
### Response:
def fastaSubtract(fastaFiles):
"""
Given a list of open file descriptors, each with FASTA content,
remove the reads found in the 2nd, 3rd, etc files from the first file
in the list.
@param fastaFiles: a C{list} of FASTA filenames.
@raises IndexError: if passed an empty list.
@return: An iterator producing C{Bio.SeqRecord} instances suitable for
writing to a file using C{Bio.SeqIO.write}.
"""
reads = {}
firstFile = fastaFiles.pop(0)
for seq in SeqIO.parse(firstFile, 'fasta'):
reads[seq.id] = seq
for fastaFile in fastaFiles:
for seq in SeqIO.parse(fastaFile, 'fasta'):
# Make sure that reads with the same id have the same sequence.
if seq.id in reads:
assert str(seq.seq) == str(reads[seq.id].seq)
reads.pop(seq.id, None)
return iter(reads.values()) |
def _parse_sig(sig, arg_names, validate=False):
"""
Parses signatures into a ``OrderedDict`` of paramName => type.
Numerically-indexed arguments that do not correspond to an argument
name in python (ie: it takes a variable number of arguments) will be
keyed as the stringified version of it's index.
sig the signature to be parsed
arg_names a list of argument names extracted from python source
Returns a tuple of (method name, types dict, return type)
"""
d = SIG_RE.match(sig)
if not d:
raise ValueError('Invalid method signature %s' % sig)
d = d.groupdict()
ret = [(n, Any) for n in arg_names]
if 'args_sig' in d and type(
d['args_sig']) is str and d['args_sig'].strip():
for i, arg in enumerate(d['args_sig'].strip().split(',')):
_type_checking_available(sig, validate)
if '=' in arg:
if not type(ret) is OrderedDict:
ret = OrderedDict(ret)
dk = KWARG_RE.match(arg)
if not dk:
raise ValueError('Could not parse arg type %s in %s' %
(arg, sig))
dk = dk.groupdict()
if not sum(
[(k in dk and type(dk[k]) is str and bool(dk[k].strip()))
for k in ('arg_name', 'arg_type')]):
raise ValueError('Invalid kwarg value %s in %s' %
(arg, sig))
ret[dk['arg_name']] = _eval_arg_type(dk['arg_type'], None, arg,
sig)
else:
if type(ret) is OrderedDict:
raise ValueError('Positional arguments must occur '
'before keyword arguments in %s' % sig)
if len(ret) < i + 1:
ret.append((str(i), _eval_arg_type(arg, None, arg, sig)))
else:
ret[i] = (ret[i][0], _eval_arg_type(arg, None, arg, sig))
if not type(ret) is OrderedDict:
ret = OrderedDict(ret)
return (d['method_name'], ret,
(_eval_arg_type(d['return_sig'], Any, 'return', sig) if
d['return_sig'] else Any)) | Parses signatures into a ``OrderedDict`` of paramName => type.
Numerically-indexed arguments that do not correspond to an argument
name in python (ie: it takes a variable number of arguments) will be
keyed as the stringified version of it's index.
sig the signature to be parsed
arg_names a list of argument names extracted from python source
Returns a tuple of (method name, types dict, return type) | Below is the instruction that describes the task:
### Input:
Parses signatures into a ``OrderedDict`` of paramName => type.
Numerically-indexed arguments that do not correspond to an argument
name in python (ie: it takes a variable number of arguments) will be
keyed as the stringified version of it's index.
sig the signature to be parsed
arg_names a list of argument names extracted from python source
Returns a tuple of (method name, types dict, return type)
### Response:
def _parse_sig(sig, arg_names, validate=False):
    """
    Parses signatures into a ``OrderedDict`` of paramName => type.
    Numerically-indexed arguments that do not correspond to an argument
    name in python (ie: it takes a variable number of arguments) will be
    keyed as the stringified version of it's index.

    sig        the signature to be parsed
    arg_names  a list of argument names extracted from python source
    validate   forwarded to _type_checking_available -- presumably gates
               whether typed signatures are allowed; confirm with callers.

    Returns a tuple of (method name, types dict, return type)
    """
    d = SIG_RE.match(sig)
    if not d:
        raise ValueError('Invalid method signature %s' % sig)
    d = d.groupdict()
    # Start by assuming every python-declared argument accepts Any.
    ret = [(n, Any) for n in arg_names]
    if 'args_sig' in d and type(
            d['args_sig']) is str and d['args_sig'].strip():
        for i, arg in enumerate(d['args_sig'].strip().split(',')):
            _type_checking_available(sig, validate)
            if '=' in arg:
                # Keyword argument: switch the accumulator to an OrderedDict
                # so names can be assigned as keys directly.
                if not type(ret) is OrderedDict:
                    ret = OrderedDict(ret)
                dk = KWARG_RE.match(arg)
                if not dk:
                    raise ValueError('Could not parse arg type %s in %s' %
                                     (arg, sig))
                dk = dk.groupdict()
                # Both the argument name and its type must be non-empty.
                if not sum(
                        [(k in dk and type(dk[k]) is str and bool(dk[k].strip()))
                         for k in ('arg_name', 'arg_type')]):
                    raise ValueError('Invalid kwarg value %s in %s' %
                                     (arg, sig))
                ret[dk['arg_name']] = _eval_arg_type(dk['arg_type'], None, arg,
                                                    sig)
            else:
                # Positional argument: only legal before any keyword args,
                # i.e. while the accumulator is still a list.
                if type(ret) is OrderedDict:
                    raise ValueError('Positional arguments must occur '
                                     'before keyword arguments in %s' % sig)
                if len(ret) < i + 1:
                    # More positionals than python names: key by index.
                    ret.append((str(i), _eval_arg_type(arg, None, arg, sig)))
                else:
                    ret[i] = (ret[i][0], _eval_arg_type(arg, None, arg, sig))
    if not type(ret) is OrderedDict:
        ret = OrderedDict(ret)
    return (d['method_name'], ret,
            (_eval_arg_type(d['return_sig'], Any, 'return', sig) if
             d['return_sig'] else Any))
def File(self, name, directory = None, create = 1):
"""Look up or create a File node with the specified name. If
the name is a relative path (begins with ./, ../, or a file name),
then it is looked up relative to the supplied directory node,
or to the top level directory of the FS (supplied at construction
time) if no directory is supplied.
This method will raise TypeError if a directory is found at the
specified path.
"""
return self._lookup(name, directory, File, create) | Look up or create a File node with the specified name. If
the name is a relative path (begins with ./, ../, or a file name),
then it is looked up relative to the supplied directory node,
or to the top level directory of the FS (supplied at construction
time) if no directory is supplied.
This method will raise TypeError if a directory is found at the
specified path. | Below is the instruction that describes the task:
### Input:
Look up or create a File node with the specified name. If
the name is a relative path (begins with ./, ../, or a file name),
then it is looked up relative to the supplied directory node,
or to the top level directory of the FS (supplied at construction
time) if no directory is supplied.
This method will raise TypeError if a directory is found at the
specified path.
### Response:
def File(self, name, directory = None, create = 1):
    """Look up or create a File node with the specified name.  If
    the name is a relative path (begins with ./, ../, or a file name),
    then it is looked up relative to the supplied directory node,
    or to the top level directory of the FS (supplied at construction
    time) if no directory is supplied.

    This method will raise TypeError if a directory is found at the
    specified path.
    """
    # Delegate to the generic lookup, passing the File class so the node
    # is created (or checked) with the right type.
    return self._lookup(name, directory, File, create)
def create_admin(self, account_id, user_id, role):
"""
Flag an existing user as an admin within the account.
https://canvas.instructure.com/doc/api/admins.html#method.admins.create
"""
url = ADMINS_API.format(account_id)
body = {"user_id": unquote(str(user_id)),
"role": role,
"send_confirmation": False}
return CanvasAdmin(data=self._post_resource(url, body)) | Flag an existing user as an admin within the account.
https://canvas.instructure.com/doc/api/admins.html#method.admins.create | Below is the instruction that describes the task:
### Input:
Flag an existing user as an admin within the account.
https://canvas.instructure.com/doc/api/admins.html#method.admins.create
### Response:
def create_admin(self, account_id, user_id, role):
    """
    Flag an existing user as an admin within the account.
    https://canvas.instructure.com/doc/api/admins.html#method.admins.create
    """
    payload = {
        "user_id": unquote(str(user_id)),
        "role": role,
        "send_confirmation": False,
    }
    endpoint = ADMINS_API.format(account_id)
    response_data = self._post_resource(endpoint, payload)
    return CanvasAdmin(data=response_data)
def best_buy_1(self):
""" 量大收紅
:rtype: bool
"""
result = self.data.value[-1] > self.data.value[-2] and \
self.data.price[-1] > self.data.openprice[-1]
return result | 量大收紅
:rtype: bool | Below is the instruction that describes the task:
### Input:
量大收紅
:rtype: bool
### Response:
def best_buy_1(self):
    """Bullish signal (量大收紅): volume grew versus the previous bar
    and the bar closed above its open.

    :rtype: bool
    """
    quotes = self.data
    # Mirror the original short-circuit: the price comparison is only
    # evaluated when the volume condition holds.
    if not (quotes.value[-1] > quotes.value[-2]):
        return False
    return quotes.price[-1] > quotes.openprice[-1]
def preprocess(pipeline, args):
"""Transfrom csv data into transfromed tf.example files.
Outline:
1) read the input data (as csv or bigquery) into a dict format
2) replace image paths with base64 encoded image files
3) build a csv input string with images paths replaced with base64. This
matches the serving csv that a trained model would expect.
4) batch the csv strings
5) run the transformations
6) write the results to tf.example files and save any errors.
"""
from tensorflow.python.lib.io import file_io
from trainer import feature_transforms
schema = json.loads(file_io.read_file_to_string(
os.path.join(args.analysis, feature_transforms.SCHEMA_FILE)).decode())
features = json.loads(file_io.read_file_to_string(
os.path.join(args.analysis, feature_transforms.FEATURES_FILE)).decode())
stats = json.loads(file_io.read_file_to_string(
os.path.join(args.analysis, feature_transforms.STATS_FILE)).decode())
column_names = [col['name'] for col in schema]
if args.csv:
all_files = []
for i, file_pattern in enumerate(args.csv):
all_files.append(pipeline | ('ReadCSVFile%d' % i) >> beam.io.ReadFromText(file_pattern))
raw_data = (
all_files
| 'MergeCSVFiles' >> beam.Flatten()
| 'ParseCSVData' >> beam.Map(decode_csv, column_names))
else:
columns = ', '.join(column_names)
query = 'SELECT {columns} FROM `{table}`'.format(columns=columns,
table=args.bigquery)
raw_data = (
pipeline
| 'ReadBiqQueryData'
>> beam.io.Read(beam.io.BigQuerySource(query=query,
use_standard_sql=True)))
# Note that prepare_image_transforms does not make embeddings, it justs reads
# the image files and converts them to byte stings. TransformFeaturesDoFn()
# will make the image embeddings.
image_columns = image_transform_columns(features)
clean_csv_data = (
raw_data
| 'PreprocessTransferredLearningTransformations'
>> beam.Map(prepare_image_transforms, image_columns)
| 'BuildCSVString'
>> beam.Map(encode_csv, column_names))
if args.shuffle:
clean_csv_data = clean_csv_data | 'ShuffleData' >> shuffle()
transform_dofn = TransformFeaturesDoFn(args.analysis, features, schema, stats)
(transformed_data, errors) = (
clean_csv_data
| 'Batch Input'
>> beam.ParDo(EmitAsBatchDoFn(args.batch_size))
| 'Run TF Graph on Batches'
>> beam.ParDo(transform_dofn).with_outputs('errors', main='main'))
_ = (transformed_data
| 'SerializeExamples' >> beam.Map(serialize_example, feature_transforms.get_transformed_feature_info(features, schema))
| 'WriteExamples'
>> beam.io.WriteToTFRecord(
os.path.join(args.output, args.prefix),
file_name_suffix='.tfrecord.gz'))
_ = (errors
| 'WriteErrors'
>> beam.io.WriteToText(
os.path.join(args.output, 'errors_' + args.prefix),
file_name_suffix='.txt')) | Transfrom csv data into transfromed tf.example files.
Outline:
1) read the input data (as csv or bigquery) into a dict format
2) replace image paths with base64 encoded image files
3) build a csv input string with images paths replaced with base64. This
matches the serving csv that a trained model would expect.
4) batch the csv strings
5) run the transformations
6) write the results to tf.example files and save any errors. | Below is the instruction that describes the task:
### Input:
Transfrom csv data into transfromed tf.example files.
Outline:
1) read the input data (as csv or bigquery) into a dict format
2) replace image paths with base64 encoded image files
3) build a csv input string with images paths replaced with base64. This
matches the serving csv that a trained model would expect.
4) batch the csv strings
5) run the transformations
6) write the results to tf.example files and save any errors.
### Response:
def preprocess(pipeline, args):
    """Transform csv data into transformed tf.example files.

    Outline:
      1) read the input data (as csv or bigquery) into a dict format
      2) replace image paths with base64 encoded image files
      3) build a csv input string with images paths replaced with base64. This
         matches the serving csv that a trained model would expect.
      4) batch the csv strings
      5) run the transformations
      6) write the results to tf.example files and save any errors.
    """
    from tensorflow.python.lib.io import file_io
    from trainer import feature_transforms

    # Load the schema, feature transforms and statistics written by the
    # preceding analysis step.
    schema = json.loads(file_io.read_file_to_string(
        os.path.join(args.analysis, feature_transforms.SCHEMA_FILE)).decode())
    features = json.loads(file_io.read_file_to_string(
        os.path.join(args.analysis, feature_transforms.FEATURES_FILE)).decode())
    stats = json.loads(file_io.read_file_to_string(
        os.path.join(args.analysis, feature_transforms.STATS_FILE)).decode())

    column_names = [col['name'] for col in schema]

    if args.csv:
        # One labelled ReadFromText per input pattern, flattened into a
        # single collection of parsed csv rows.
        all_files = []
        for i, file_pattern in enumerate(args.csv):
            all_files.append(pipeline | ('ReadCSVFile%d' % i) >> beam.io.ReadFromText(file_pattern))
        raw_data = (
            all_files
            | 'MergeCSVFiles' >> beam.Flatten()
            | 'ParseCSVData' >> beam.Map(decode_csv, column_names))
    else:
        columns = ', '.join(column_names)
        query = 'SELECT {columns} FROM `{table}`'.format(columns=columns,
                                                         table=args.bigquery)
        raw_data = (
            pipeline
            | 'ReadBiqQueryData'
            >> beam.io.Read(beam.io.BigQuerySource(query=query,
                                                   use_standard_sql=True)))

    # Note that prepare_image_transforms does not make embeddings, it justs reads
    # the image files and converts them to byte stings. TransformFeaturesDoFn()
    # will make the image embeddings.
    image_columns = image_transform_columns(features)

    clean_csv_data = (
        raw_data
        | 'PreprocessTransferredLearningTransformations'
        >> beam.Map(prepare_image_transforms, image_columns)
        | 'BuildCSVString'
        >> beam.Map(encode_csv, column_names))

    if args.shuffle:
        clean_csv_data = clean_csv_data | 'ShuffleData' >> shuffle()

    transform_dofn = TransformFeaturesDoFn(args.analysis, features, schema, stats)

    # Run the TF transform graph over batches; bad rows go to the 'errors'
    # side output instead of failing the whole pipeline.
    (transformed_data, errors) = (
        clean_csv_data
        | 'Batch Input'
        >> beam.ParDo(EmitAsBatchDoFn(args.batch_size))
        | 'Run TF Graph on Batches'
        >> beam.ParDo(transform_dofn).with_outputs('errors', main='main'))

    _ = (transformed_data
         | 'SerializeExamples' >> beam.Map(serialize_example, feature_transforms.get_transformed_feature_info(features, schema))
         | 'WriteExamples'
         >> beam.io.WriteToTFRecord(
             os.path.join(args.output, args.prefix),
             file_name_suffix='.tfrecord.gz'))
    _ = (errors
         | 'WriteErrors'
         >> beam.io.WriteToText(
             os.path.join(args.output, 'errors_' + args.prefix),
             file_name_suffix='.txt'))
def observe_all(self, callback: Callable[[str, Any, Any], None]):
"""Subscribes to all keys changes"""
self._all_callbacks.append(callback) | Subscribes to all keys changes | Below is the the instruction that describes the task:
### Input:
Subscribes to all keys changes
### Response:
def observe_all(self, callback: Callable[[str, Any, Any], None]):
    """Register *callback* to be notified of changes to every key."""
    registered = self._all_callbacks
    registered.append(callback)
def _execute_expression(self, expression: Any):
"""
This does the bulk of the work of executing a logical form, recursively executing a single
expression. Basically, if the expression is a function we know about, we evaluate its
arguments then call the function. If it's a list, we evaluate all elements of the list.
If it's a constant (or a zero-argument function), we evaluate the constant.
"""
# pylint: disable=too-many-return-statements
if isinstance(expression, list):
if isinstance(expression[0], list):
function = self._execute_expression(expression[0])
elif expression[0] in self._functions:
function = self._functions[expression[0]]
else:
if isinstance(expression[0], str):
raise ExecutionError(f"Unrecognized function: {expression[0]}")
else:
raise ExecutionError(f"Unsupported expression type: {expression}")
arguments = [self._execute_expression(arg) for arg in expression[1:]]
try:
return function(*arguments)
except (TypeError, ValueError):
traceback.print_exc()
raise ExecutionError(f"Error executing expression {expression} (see stderr for stack trace)")
elif isinstance(expression, str):
if expression not in self._functions:
raise ExecutionError(f"Unrecognized constant: {expression}")
# This is a bit of a quirk in how we represent constants and zero-argument functions.
# For consistency, constants are wrapped in a zero-argument lambda. So both constants
# and zero-argument functions are callable in `self._functions`, and are `BasicTypes`
# in `self._function_types`. For these, we want to return
# `self._functions[expression]()` _calling_ the zero-argument function. If we get a
# `FunctionType` in here, that means we're referring to the function as a first-class
# object, instead of calling it (maybe as an argument to a higher-order function). In
# that case, we return the function _without_ calling it.
# Also, we just check the first function type here, because we assume you haven't
# registered the same function with both a constant type and a `FunctionType`.
if isinstance(self._function_types[expression][0], FunctionType):
return self._functions[expression]
else:
return self._functions[expression]()
return self._functions[expression]
else:
raise ExecutionError("Not sure how you got here. Please open a github issue with details.") | This does the bulk of the work of executing a logical form, recursively executing a single
expression. Basically, if the expression is a function we know about, we evaluate its
arguments then call the function. If it's a list, we evaluate all elements of the list.
If it's a constant (or a zero-argument function), we evaluate the constant. | Below is the instruction that describes the task:
### Input:
This does the bulk of the work of executing a logical form, recursively executing a single
expression. Basically, if the expression is a function we know about, we evaluate its
arguments then call the function. If it's a list, we evaluate all elements of the list.
If it's a constant (or a zero-argument function), we evaluate the constant.
### Response:
def _execute_expression(self, expression: Any):
    """
    This does the bulk of the work of executing a logical form, recursively executing a single
    expression.  Basically, if the expression is a function we know about, we evaluate its
    arguments then call the function.  If it's a list, we evaluate all elements of the list.
    If it's a constant (or a zero-argument function), we evaluate the constant.

    Raises ``ExecutionError`` for unrecognized functions/constants, unsupported expression
    types, or failures inside the called function.
    """
    # pylint: disable=too-many-return-statements
    if isinstance(expression, list):
        if isinstance(expression[0], list):
            # The head is itself an expression (e.g. a curried function application);
            # execute it first to obtain the callable.
            function = self._execute_expression(expression[0])
        elif expression[0] in self._functions:
            function = self._functions[expression[0]]
        else:
            if isinstance(expression[0], str):
                raise ExecutionError(f"Unrecognized function: {expression[0]}")
            else:
                raise ExecutionError(f"Unsupported expression type: {expression}")
        arguments = [self._execute_expression(arg) for arg in expression[1:]]
        try:
            return function(*arguments)
        except (TypeError, ValueError):
            traceback.print_exc()
            raise ExecutionError(f"Error executing expression {expression} (see stderr for stack trace)")
    elif isinstance(expression, str):
        if expression not in self._functions:
            raise ExecutionError(f"Unrecognized constant: {expression}")
        # This is a bit of a quirk in how we represent constants and zero-argument functions.
        # For consistency, constants are wrapped in a zero-argument lambda. So both constants
        # and zero-argument functions are callable in `self._functions`, and are `BasicTypes`
        # in `self._function_types`. For these, we want to return
        # `self._functions[expression]()` _calling_ the zero-argument function. If we get a
        # `FunctionType` in here, that means we're referring to the function as a first-class
        # object, instead of calling it (maybe as an argument to a higher-order function). In
        # that case, we return the function _without_ calling it.
        # Also, we just check the first function type here, because we assume you haven't
        # registered the same function with both a constant type and a `FunctionType`.
        # NOTE: a duplicate `return self._functions[expression]` used to follow this if/else;
        # it was unreachable (both branches return) and has been removed.
        if isinstance(self._function_types[expression][0], FunctionType):
            return self._functions[expression]
        else:
            return self._functions[expression]()
    else:
        raise ExecutionError("Not sure how you got here. Please open a github issue with details.")
def cardInfo(self, resource_id):
"""Return card info.
:params resource_id: Resource id.
"""
# TODO: add referer to headers (futweb)
base_id = baseId(resource_id)
if base_id in self.players:
return self.players[base_id]
else: # not a player?
url = '{0}{1}.json'.format(card_info_url, base_id)
return requests.get(url, timeout=self.timeout).json() | Return card info.
:params resource_id: Resource id. | Below is the instruction that describes the task:
### Input:
Return card info.
:params resource_id: Resource id.
### Response:
def cardInfo(self, resource_id):
    """Return card info.

    :params resource_id: Resource id.
    """
    # TODO: add referer to headers (futweb)
    base_id = baseId(resource_id)
    if base_id not in self.players:
        # Not a known player -- fall back to the public card-info endpoint.
        url = '{0}{1}.json'.format(card_info_url, base_id)
        return requests.get(url, timeout=self.timeout).json()
    return self.players[base_id]
def _default_buffer_pos_changed(self, _):
""" When the cursor changes in the default buffer. Synchronize with
history buffer. """
# Only when this buffer has the focus.
if self.app.current_buffer == self.default_buffer:
try:
line_no = self.default_buffer.document.cursor_position_row - \
self.history_mapping.result_line_offset
if line_no < 0: # When the cursor is above the inserted region.
raise IndexError
history_lineno = sorted(self.history_mapping.selected_lines)[line_no]
except IndexError:
pass
else:
self.history_buffer.cursor_position = \
self.history_buffer.document.translate_row_col_to_index(history_lineno, 0) | When the cursor changes in the default buffer. Synchronize with
history buffer. | Below is the instruction that describes the task:
### Input:
When the cursor changes in the default buffer. Synchronize with
history buffer.
### Response:
def _default_buffer_pos_changed(self, _):
    """ When the cursor changes in the default buffer. Synchronize with
    history buffer. """
    # Only when this buffer has the focus.
    if self.app.current_buffer == self.default_buffer:
        try:
            # Translate the cursor row into an index into the selected
            # history lines, correcting for the offset of the inserted
            # region inside the default buffer.
            line_no = self.default_buffer.document.cursor_position_row - \
                self.history_mapping.result_line_offset

            if line_no < 0:  # When the cursor is above the inserted region.
                raise IndexError

            history_lineno = sorted(self.history_mapping.selected_lines)[line_no]
        except IndexError:
            # Cursor is outside the mapped region; leave the history
            # buffer's cursor untouched.
            pass
        else:
            # Move the history buffer cursor to column 0 of the
            # corresponding history line.
            self.history_buffer.cursor_position = \
                self.history_buffer.document.translate_row_col_to_index(history_lineno, 0)
def alignment_correcter(self, alignment_file_list, output_file_name,
filter_minimum=None):
'''
Remove lower case insertions in alignment outputs from HMM align. Give
a list of alignments, and an output file name, and each alignment will
be corrected, and written to a single file, ready to be placed together
using pplacer.
Parameters
----------
alignment_file_list : array
List of strings, each the path to different alignments from the
inputs provided to GraftM
output_file_name : str
The path and filename of the output file desired.
filter_minimum : int
minimum number of positions that must be aligned for each sequence
Returns
-------
True or False, depending if reads were written to file
'''
corrected_sequences = {}
for alignment_file in alignment_file_list:
insert_list = [] # Define list containing inserted positions to be removed (lower case characters)
sequence_list = list(SeqIO.parse(open(alignment_file, 'r'), 'fasta'))
for sequence in sequence_list: # For each sequence in the alignment
for idx, nt in enumerate(list(sequence.seq)): # For each nucleotide in the sequence
if nt.islower(): # Check for lower case character
insert_list.append(idx) # Add to the insert list if it is
insert_list = list(OrderedDict.fromkeys(sorted(insert_list, reverse=True))) # Reverse the list and remove duplicate positions
for sequence in sequence_list: # For each sequence in the alignment
new_seq = list(sequence.seq) # Define a list of sequences to be iterable list for writing
for position in insert_list: # For each position in the removal list
del new_seq[position] # Delete that inserted position in every sequence
corrected_sequences['>' + sequence.id + '\n'] = (''.join(new_seq) + '\n').replace('~', '-')
pre_filter_count=len(corrected_sequences)
if filter_minimum:
# Use '>' not '>=' here because the sequence is on a single line,
# but also includes a newline character at the end of the sequence
corrected_sequences={key:item for key, item in corrected_sequences.iteritems() if len(item.replace('-', '')) > filter_minimum}
post_filter_count=len(corrected_sequences)
logging.info("Filtered %i short sequences from the alignment" % \
(pre_filter_count-post_filter_count)
)
logging.info("%i sequences remaining" % post_filter_count)
if len(corrected_sequences) >= 1:
with open(output_file_name, 'w') as output_file: # Create an open file to write the new sequences to
for fasta_id, fasta_seq in corrected_sequences.iteritems():
output_file.write(fasta_id)
output_file.write(fasta_seq)
return True
else:
return False | Remove lower case insertions in alignment outputs from HMM align. Give
a list of alignments, and an output file name, and each alignment will
be corrected, and written to a single file, ready to be placed together
using pplacer.
Parameters
----------
alignment_file_list : array
List of strings, each the path to different alignments from the
inputs provided to GraftM
output_file_name : str
The path and filename of the output file desired.
filter_minimum : int
minimum number of positions that must be aligned for each sequence
Returns
-------
True or False, depending if reads were written to file | Below is the instruction that describes the task:
### Input:
Remove lower case insertions in alignment outputs from HMM align. Give
a list of alignments, and an output file name, and each alignment will
be corrected, and written to a single file, ready to be placed together
using pplacer.
Parameters
----------
alignment_file_list : array
List of strings, each the path to different alignments from the
inputs provided to GraftM
output_file_name : str
The path and filename of the output file desired.
filter_minimum : int
minimum number of positions that must be aligned for each sequence
Returns
-------
True or False, depending if reads were written to file
### Response:
def alignment_correcter(self, alignment_file_list, output_file_name,
                        filter_minimum=None):
    '''
    Remove lower case insertions in alignment outputs from HMM align. Give
    a list of alignments, and an output file name, and each alignment will
    be corrected, and written to a single file, ready to be placed together
    using pplacer.

    Parameters
    ----------
    alignment_file_list : array
        List of strings, each the path to different alignments from the
        inputs provided to GraftM
    output_file_name : str
        The path and filename of the output file desired.
    filter_minimum : int
        minimum number of positions that must be aligned for each sequence

    Returns
    -------
    True or False, depending if reads were written to file
    '''
    corrected_sequences = {}
    for alignment_file in alignment_file_list:
        # Positions of inserted (lower case) alignment columns to strip.
        insert_list = []
        # Close the file handle deterministically (the original leaked it).
        with open(alignment_file, 'r') as alignment_fh:
            sequence_list = list(SeqIO.parse(alignment_fh, 'fasta'))
        for sequence in sequence_list:  # For each sequence in the alignment
            for idx, nt in enumerate(list(sequence.seq)):  # For each nucleotide in the sequence
                if nt.islower():  # Check for lower case character
                    insert_list.append(idx)  # Add to the insert list if it is
        # Deduplicate, sorted descending so deletions don't shift later indices.
        insert_list = list(OrderedDict.fromkeys(sorted(insert_list, reverse=True)))
        for sequence in sequence_list:  # For each sequence in the alignment
            new_seq = list(sequence.seq)  # Mutable copy for column removal
            for position in insert_list:  # For each position in the removal list
                del new_seq[position]  # Delete that inserted position in every sequence
            corrected_sequences['>' + sequence.id + '\n'] = (''.join(new_seq) + '\n').replace('~', '-')
    pre_filter_count = len(corrected_sequences)
    if filter_minimum:
        # Use '>' not '>=' here because the sequence is on a single line,
        # but also includes a newline character at the end of the sequence.
        # .items() replaces the Python-2-only .iteritems() so this also runs
        # under Python 3; behaviour is unchanged.
        corrected_sequences = {key: item for key, item in corrected_sequences.items()
                               if len(item.replace('-', '')) > filter_minimum}
        post_filter_count = len(corrected_sequences)
        logging.info("Filtered %i short sequences from the alignment" % \
                     (pre_filter_count-post_filter_count)
                     )
        logging.info("%i sequences remaining" % post_filter_count)
    if len(corrected_sequences) >= 1:
        with open(output_file_name, 'w') as output_file:  # Write the corrected sequences
            for fasta_id, fasta_seq in corrected_sequences.items():
                output_file.write(fasta_id)
                output_file.write(fasta_seq)
        return True
    else:
        return False
def humanize_bytes(bytesize, precision=2):
"""
Humanize byte size figures
https://gist.github.com/moird/3684595
"""
abbrevs = (
(1 << 50, 'PB'),
(1 << 40, 'TB'),
(1 << 30, 'GB'),
(1 << 20, 'MB'),
(1 << 10, 'kB'),
(1, 'bytes')
)
if bytesize == 1:
return '1 byte'
for factor, suffix in abbrevs:
if bytesize >= factor:
break
if factor == 1:
precision = 0
return '%.*f %s' % (precision, bytesize / float(factor), suffix) | Humanize byte size figures
https://gist.github.com/moird/3684595 | Below is the instruction that describes the task:
### Input:
Humanize byte size figures
https://gist.github.com/moird/3684595
### Response:
def humanize_bytes(bytesize, precision=2):
    """
    Humanize byte size figures
    https://gist.github.com/moird/3684595
    """
    if bytesize == 1:
        return '1 byte'
    # Default to plain bytes, then upgrade to the largest unit that fits.
    scale, unit = 1, 'bytes'
    for exponent, name in ((50, 'PB'), (40, 'TB'), (30, 'GB'),
                           (20, 'MB'), (10, 'kB')):
        if bytesize >= (1 << exponent):
            scale, unit = 1 << exponent, name
            break
    # Whole numbers only when the value is expressed in raw bytes.
    digits = 0 if scale == 1 else precision
    return '%.*f %s' % (digits, bytesize / float(scale), unit)
def blastparser(self, report, sample, fieldnames):
"""
Parse the number of core genes present in the strain from the BLAST outputs
:param report: the name and path of the BLAST outputs
:param sample: the sample object
:param fieldnames: type LIST: List of fields used to in BLAST analyses
"""
try:
# Open the sequence profile file as a dictionary
blastdict = DictReader(open(report), fieldnames=self.fieldnames, dialect='excel-tab')
# Go through each BLAST result
for row in blastdict:
# Ignore the headers
if row['query_id'].startswith(fieldnames[0]):
pass
else:
# Calculate the percent identity and extract the bitscore from the row
# Percent identity is the (length of the alignment - number of mismatches) / total subject length
percentidentity = float('{:0.2f}'.format((float(row['positives']) - float(row['gaps'])) /
float(row['subject_length']) * 100))
# Split off any | and - from the sample name
target = row['subject_id'].split('|')[0].split('-')[0]
# If the hit passes the cutoff threshold, add it to the set of core genes present
if percentidentity >= self.cutoff:
sample[self.analysistype].coreset.add(target)
except FileNotFoundError:
pass | Parse the number of core genes present in the strain from the BLAST outputs
:param report: the name and path of the BLAST outputs
:param sample: the sample object
:param fieldnames: type LIST: List of fields used in BLAST analyses | Below is the instruction that describes the task:
### Input:
Parse the number of core genes present in the strain from the BLAST outputs
:param report: the name and path of the BLAST outputs
:param sample: the sample object
:param fieldnames: type LIST: List of fields used to in BLAST analyses
### Response:
def blastparser(self, report, sample, fieldnames):
    """
    Parse the number of core genes present in the strain from the BLAST outputs
    :param report: the name and path of the BLAST outputs
    :param sample: the sample object
    :param fieldnames: type LIST: List of fields used in BLAST analyses
    """
    try:
        # Open the sequence profile file as a dictionary
        # NOTE(review): DictReader is given self.fieldnames while the header
        # check below uses the `fieldnames` parameter -- presumably the two
        # are the same list; confirm with callers.
        blastdict = DictReader(open(report), fieldnames=self.fieldnames, dialect='excel-tab')
        # Go through each BLAST result
        for row in blastdict:
            # Ignore the headers
            if row['query_id'].startswith(fieldnames[0]):
                pass
            else:
                # Calculate the percent identity and extract the bitscore from the row
                # Percent identity is the (length of the alignment - number of mismatches) / total subject length
                percentidentity = float('{:0.2f}'.format((float(row['positives']) - float(row['gaps'])) /
                                                         float(row['subject_length']) * 100))
                # Split off any | and - from the sample name
                target = row['subject_id'].split('|')[0].split('-')[0]
                # If the hit passes the cutoff threshold, add it to the set of core genes present
                if percentidentity >= self.cutoff:
                    sample[self.analysistype].coreset.add(target)
    except FileNotFoundError:
        # Missing report: deliberately best-effort, leave the core set unchanged.
        pass
def find_stream(cls, fileobj, max_bytes):
"""Returns a possibly valid _ADTSStream or None.
Args:
max_bytes (int): maximum bytes to read
"""
r = BitReader(fileobj)
stream = cls(r)
if stream.sync(max_bytes):
stream.offset = (r.get_position() - 12) // 8
return stream | Returns a possibly valid _ADTSStream or None.
Args:
max_bytes (int): maximum bytes to read | Below is the the instruction that describes the task:
### Input:
Returns a possibly valid _ADTSStream or None.
Args:
max_bytes (int): maximum bytes to read
### Response:
def find_stream(cls, fileobj, max_bytes):
"""Returns a possibly valid _ADTSStream or None.
Args:
max_bytes (int): maximum bytes to read
"""
r = BitReader(fileobj)
stream = cls(r)
if stream.sync(max_bytes):
stream.offset = (r.get_position() - 12) // 8
return stream |
def by_identifier_secret(self, request):
"""
Authenticates a client by its identifier and secret (aka password).
:param request: The incoming request
:type request: oauth2.web.Request
:return: The identified client
:rtype: oauth2.datatype.Client
:raises OAuthInvalidError: If the client could not be found, is not
allowed to to use the current grant or
supplied invalid credentials
"""
client_id, client_secret = self.source(request=request)
try:
client = self.client_store.fetch_by_client_id(client_id)
except ClientNotFoundError:
raise OAuthInvalidError(error="invalid_client",
explanation="No client could be found")
grant_type = request.post_param("grant_type")
if client.grant_type_supported(grant_type) is False:
raise OAuthInvalidError(error="unauthorized_client",
explanation="The client is not allowed "
"to use this grant type")
if client.secret != client_secret:
raise OAuthInvalidError(error="invalid_client",
explanation="Invalid client credentials")
return client | Authenticates a client by its identifier and secret (aka password).
:param request: The incoming request
:type request: oauth2.web.Request
:return: The identified client
:rtype: oauth2.datatype.Client
:raises OAuthInvalidError: If the client could not be found, is not
allowed to to use the current grant or
supplied invalid credentials | Below is the the instruction that describes the task:
### Input:
Authenticates a client by its identifier and secret (aka password).
:param request: The incoming request
:type request: oauth2.web.Request
:return: The identified client
:rtype: oauth2.datatype.Client
:raises OAuthInvalidError: If the client could not be found, is not
allowed to to use the current grant or
supplied invalid credentials
### Response:
def by_identifier_secret(self, request):
"""
Authenticates a client by its identifier and secret (aka password).
:param request: The incoming request
:type request: oauth2.web.Request
:return: The identified client
:rtype: oauth2.datatype.Client
:raises OAuthInvalidError: If the client could not be found, is not
allowed to to use the current grant or
supplied invalid credentials
"""
client_id, client_secret = self.source(request=request)
try:
client = self.client_store.fetch_by_client_id(client_id)
except ClientNotFoundError:
raise OAuthInvalidError(error="invalid_client",
explanation="No client could be found")
grant_type = request.post_param("grant_type")
if client.grant_type_supported(grant_type) is False:
raise OAuthInvalidError(error="unauthorized_client",
explanation="The client is not allowed "
"to use this grant type")
if client.secret != client_secret:
raise OAuthInvalidError(error="invalid_client",
explanation="Invalid client credentials")
return client |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.