code stringlengths 75 104k | docstring stringlengths 1 46.9k | text stringlengths 164 112k |
|---|---|---|
def copy_course_content(self, course_id, exclude=None, only=None, source_course=None):
    """
    Copy course content from one course into another.

    DEPRECATED: Please use the {api:ContentMigrationsController#create Content Migrations API}

    By default all course content is copied.  Specific content types can be
    selected with either `exclude` (sent to the API as its 'except'
    parameter) or `only`.  The response is the same as the course copy
    status endpoint.

    :param course_id: (required) ID of the destination course.
    :param exclude: optional list of content types to skip; all types not
        listed are copied.
    :param only: optional list of content types to copy; all types not
        listed are skipped.
    :param source_course: optional ID or SIS-ID of the course to copy from.
    """
    path = {}
    data = {}
    params = {}

    # Content types accepted by both the 'except' and 'only' parameters —
    # defined once instead of repeating the literal list per branch.
    content_types = [
        "course_settings", "assignments", "external_tools", "files",
        "topics", "calendar_events", "quizzes", "wiki_pages",
        "modules", "outcomes",
    ]

    # REQUIRED - PATH - course_id
    path["course_id"] = course_id

    # OPTIONAL - source_course: ID or SIS-ID of the course to copy the content from.
    if source_course is not None:
        data["source_course"] = source_course

    # OPTIONAL - except (named 'exclude' locally because 'except' is a Python keyword).
    if exclude is not None:
        self._validate_enum(exclude, content_types)
        data["except"] = exclude

    # OPTIONAL - only
    if only is not None:
        self._validate_enum(only, content_types)
        data["only"] = only

    self.logger.debug("POST /api/v1/courses/{course_id}/course_copy with query params: {params} and form data: {data}".format(params=params, data=data, **path))
    return self.generic_request("POST", "/api/v1/courses/{course_id}/course_copy".format(**path), data=data, params=params, no_data=True)
DEPRECATED: Please use the {api:ContentMigrationsController#create Content Migrations API}
Copies content from one course into another. The default is to copy all course
content. You can control specific types to copy by using either the 'except' option
or the 'only' option.
The response is the same as the course copy status endpoint | Below is the the instruction that describes the task:
### Input:
Copy course content.
DEPRECATED: Please use the {api:ContentMigrationsController#create Content Migrations API}
Copies content from one course into another. The default is to copy all course
content. You can control specific types to copy by using either the 'except' option
or the 'only' option.
The response is the same as the course copy status endpoint
### Response:
def copy_course_content(self, course_id, exclude=None, only=None, source_course=None):
"""
Copy course content.
DEPRECATED: Please use the {api:ContentMigrationsController#create Content Migrations API}
Copies content from one course into another. The default is to copy all course
content. You can control specific types to copy by using either the 'except' option
or the 'only' option.
The response is the same as the course copy status endpoint
"""
path = {}
data = {}
params = {}
# REQUIRED - PATH - course_id
"""ID"""
path["course_id"] = course_id
# OPTIONAL - source_course
"""ID or SIS-ID of the course to copy the content from"""
if source_course is not None:
data["source_course"] = source_course
# OPTIONAL - except
"""A list of the course content types to exclude, all areas not listed will
be copied."""
if exclude is not None:
self._validate_enum(exclude, ["course_settings", "assignments", "external_tools", "files", "topics", "calendar_events", "quizzes", "wiki_pages", "modules", "outcomes"])
data["except"] = exclude
# OPTIONAL - only
"""A list of the course content types to copy, all areas not listed will not
be copied."""
if only is not None:
self._validate_enum(only, ["course_settings", "assignments", "external_tools", "files", "topics", "calendar_events", "quizzes", "wiki_pages", "modules", "outcomes"])
data["only"] = only
self.logger.debug("POST /api/v1/courses/{course_id}/course_copy with query params: {params} and form data: {data}".format(params=params, data=data, **path))
return self.generic_request("POST", "/api/v1/courses/{course_id}/course_copy".format(**path), data=data, params=params, no_data=True) |
def _normalize_numpy_indices(i):
"""Normalize the index in case it is a numpy integer or boolean
array."""
if isinstance(i, np.ndarray):
if i.dtype == bool:
i = tuple(j.tolist() for j in i.nonzero())
elif i.dtype == int:
i = i.tolist()
return i | Normalize the index in case it is a numpy integer or boolean
array. | Below is the the instruction that describes the task:
### Input:
Normalize the index in case it is a numpy integer or boolean
array.
### Response:
def _normalize_numpy_indices(i):
"""Normalize the index in case it is a numpy integer or boolean
array."""
if isinstance(i, np.ndarray):
if i.dtype == bool:
i = tuple(j.tolist() for j in i.nonzero())
elif i.dtype == int:
i = i.tolist()
return i |
def _reset_model(self, response):
    """Update the fields value with the received information."""
    # pylint: disable=no-member
    self._provision_done = False   # unlock the model for updates
    self._changes.clear()          # drop any pending local changes
    # Parse the raw payload of the update response and apply it to the
    # current model representation.
    parsed_fields = self.process_raw_data(response)
    self._set_fields(parsed_fields)
    # Lock the model again now that it mirrors the server state.
    self._provision_done = True
### Input:
Update the fields value with the received information.
### Response:
def _reset_model(self, response):
"""Update the fields value with the received information."""
# pylint: disable=no-member
# Reset the model to the initial state
self._provision_done = False # Set back the provision flag
self._changes.clear() # Clear the changes
# Process the raw data from the update response
fields = self.process_raw_data(response)
# Update the current model representation
self._set_fields(fields)
# Lock the current model
self._provision_done = True |
def init_config(self, stype="idp"):
    """ Remaining init of the server configuration

    Sets up the identity (subject) database backend (``self.ident``) and,
    when configured, the eduPersonTargetedID store (``self.eptid``).

    :param stype: The type of Server ("idp"/"aa")
    """
    if stype == "aa":
        # Attribute authorities keep no subject database; nothing to do.
        return
    # subject information is stored in a database
    # default database is in memory which is OK in some setups
    dbspec = self.config.getattr("subject_data", "idp")
    idb = None
    typ = ""
    if not dbspec:
        # No backend configured: fall back to an in-memory dict.
        idb = {}
    elif isinstance(dbspec, six.string_types):
        # A bare string is treated as a shelve file path.
        idb = _shelve_compat(dbspec, writeback=True, protocol=2)
    else:  # database spec is a a 2-tuple (type, address)
        # print(>> sys.stderr, "DBSPEC: %s" % (dbspec,))
        (typ, addr) = dbspec
        if typ == "shelve":
            idb = _shelve_compat(addr, writeback=True, protocol=2)
        elif typ == "memcached":
            # Imported lazily so memcache is only required when used.
            import memcache
            idb = memcache.Client(addr)
        elif typ == "dict":  # in-memory dictionary
            idb = {}
        elif typ == "mongodb":
            from saml2.mongo_store import IdentMDB
            self.ident = IdentMDB(database=addr, collection="ident")
        elif typ == "identdb":
            # Dotted path "module.Class" naming a custom ident backend.
            mod, clas = addr.rsplit('.', 1)
            mod = importlib.import_module(mod)
            self.ident = getattr(mod, clas)()
    if typ == "mongodb" or typ == "identdb":
        # self.ident was already constructed above for these backends.
        pass
    elif idb is not None:
        self.ident = IdentDB(idb)
    elif dbspec:
        raise Exception("Couldn't open identity database: %s" %
                        (dbspec,))
    try:
        _domain = self.config.getattr("domain", "idp")
        if _domain:
            self.ident.domain = _domain
        self.ident.name_qualifier = self.config.entityid
        # dbspec is reused here for the eptid configuration; presumably a
        # (type, address, secret) triple — TODO confirm against config docs.
        dbspec = self.config.getattr("edu_person_targeted_id", "idp")
        if not dbspec:
            pass
        else:
            typ = dbspec[0]
            addr = dbspec[1]
            secret = dbspec[2]
            if typ == "shelve":
                self.eptid = EptidShelve(secret, addr)
            elif typ == "mongodb":
                from saml2.mongo_store import EptidMDB
                self.eptid = EptidMDB(secret, database=addr,
                                      collection="eptid")
            else:
                self.eptid = Eptid(secret)
    except Exception:
        # Release the identity database before propagating the error.
        self.ident.close()
        raise
:param stype: The type of Server ("idp"/"aa") | Below is the the instruction that describes the task:
### Input:
Remaining init of the server configuration
:param stype: The type of Server ("idp"/"aa")
### Response:
def init_config(self, stype="idp"):
""" Remaining init of the server configuration
:param stype: The type of Server ("idp"/"aa")
"""
if stype == "aa":
return
# subject information is stored in a database
# default database is in memory which is OK in some setups
dbspec = self.config.getattr("subject_data", "idp")
idb = None
typ = ""
if not dbspec:
idb = {}
elif isinstance(dbspec, six.string_types):
idb = _shelve_compat(dbspec, writeback=True, protocol=2)
else: # database spec is a a 2-tuple (type, address)
# print(>> sys.stderr, "DBSPEC: %s" % (dbspec,))
(typ, addr) = dbspec
if typ == "shelve":
idb = _shelve_compat(addr, writeback=True, protocol=2)
elif typ == "memcached":
import memcache
idb = memcache.Client(addr)
elif typ == "dict": # in-memory dictionary
idb = {}
elif typ == "mongodb":
from saml2.mongo_store import IdentMDB
self.ident = IdentMDB(database=addr, collection="ident")
elif typ == "identdb":
mod, clas = addr.rsplit('.', 1)
mod = importlib.import_module(mod)
self.ident = getattr(mod, clas)()
if typ == "mongodb" or typ == "identdb":
pass
elif idb is not None:
self.ident = IdentDB(idb)
elif dbspec:
raise Exception("Couldn't open identity database: %s" %
(dbspec,))
try:
_domain = self.config.getattr("domain", "idp")
if _domain:
self.ident.domain = _domain
self.ident.name_qualifier = self.config.entityid
dbspec = self.config.getattr("edu_person_targeted_id", "idp")
if not dbspec:
pass
else:
typ = dbspec[0]
addr = dbspec[1]
secret = dbspec[2]
if typ == "shelve":
self.eptid = EptidShelve(secret, addr)
elif typ == "mongodb":
from saml2.mongo_store import EptidMDB
self.eptid = EptidMDB(secret, database=addr,
collection="eptid")
else:
self.eptid = Eptid(secret)
except Exception:
self.ident.close()
raise |
def median(arr):
    """median of the values, must have more than 0 entries.

    :param arr: list of numbers
    :type arr: number[] a number array
    :return: median (the middle value, or the mean of the two middle
        values for an even-length input)
    :rtype: float
    """
    if len(arr) == 0:
        # Preserves the original best-effort behavior: report and exit.
        sys.stderr.write("ERROR: no content in array to take average\n")
        sys.exit()
    if len(arr) == 1:
        return arr[0]
    # Floor division: plain ``/`` yields a float under Python 3 and would
    # raise a TypeError when used as a list index.
    quot = len(arr) // 2
    rem = len(arr) % 2
    ordered = sorted(arr)  # sort once instead of once per branch
    if rem != 0:
        return ordered[quot]
    return float(sum(ordered[quot - 1:quot + 1])) / float(2)
:param arr: list of numbers
:type arr: number[] a number array
:return: median
:rtype: float | Below is the the instruction that describes the task:
### Input:
median of the values, must have more than 0 entries.
:param arr: list of numbers
:type arr: number[] a number array
:return: median
:rtype: float
### Response:
def median(arr):
"""median of the values, must have more than 0 entries.
:param arr: list of numbers
:type arr: number[] a number array
:return: median
:rtype: float
"""
if len(arr) == 0:
sys.stderr.write("ERROR: no content in array to take average\n")
sys.exit()
if len(arr) == 1: return arr[0]
quot = len(arr)/2
rem = len(arr)%2
if rem != 0:
return sorted(arr)[quot]
return float(sum(sorted(arr)[quot-1:quot+1]))/float(2) |
def default_links_pagination_factory(page, urlkwargs):
    """Factory for record links generation."""
    endpoint = '.communities_list'

    def _page_url(num):
        # Absolute URL for the list endpoint at the given page number.
        return url_for(endpoint, page=num, _external=True, **urlkwargs)

    links = {'self': _page_url(page.page)}
    if page.has_prev:
        links['prev'] = _page_url(page.prev_num)
    if page.has_next:
        links['next'] = _page_url(page.next_num)
    return links
### Input:
Factory for record links generation.
### Response:
def default_links_pagination_factory(page, urlkwargs):
"""Factory for record links generation."""
endpoint = '.communities_list'
links = {
'self': url_for(endpoint, page=page.page, _external=True, **urlkwargs),
}
if page.has_prev:
links['prev'] = url_for(endpoint, page=page.prev_num, _external=True,
**urlkwargs)
if page.has_next:
links['next'] = url_for(endpoint, page=page.next_num, _external=True,
**urlkwargs)
return links |
def delete_index_item(item, model_name, refresh=True):
    '''
    Deletes an item from the index.
    :param item: must be a serializable object.
    :param model_name: doctype, which must also be the model name.
    :param refresh: a boolean that determines whether to refresh the index, making all operations performed since the last refresh
    immediately available for search, instead of needing to wait for the scheduled Elasticsearch execution. Defaults to True.
    '''
    searcher = Bungiesearch()
    logger.info('Getting index for model {}.'.format(model_name))
    for index_name in searcher.get_index(model_name):
        # Resolve the model index and the item's Elasticsearch id.
        model_index = searcher.get_model_index(model_name)
        es_id = model_index.fields['_id'].value(item)
        try:
            searcher.get_es_instance().delete(index_name, model_name, es_id)
        except NotFoundError as e:
            # Item already absent — log and carry on with the next index.
            logger.warning('NotFoundError: could not delete {}.{} from index {}: {}.'.format(model_name, es_id, index_name, str(e)))
        if refresh:
            searcher.get_es_instance().indices.refresh(index=index_name)
:param item: must be a serializable object.
:param model_name: doctype, which must also be the model name.
:param refresh: a boolean that determines whether to refresh the index, making all operations performed since the last refresh
immediately available for search, instead of needing to wait for the scheduled Elasticsearch execution. Defaults to True. | Below is the the instruction that describes the task:
### Input:
Deletes an item from the index.
:param item: must be a serializable object.
:param model_name: doctype, which must also be the model name.
:param refresh: a boolean that determines whether to refresh the index, making all operations performed since the last refresh
immediately available for search, instead of needing to wait for the scheduled Elasticsearch execution. Defaults to True.
### Response:
def delete_index_item(item, model_name, refresh=True):
'''
Deletes an item from the index.
:param item: must be a serializable object.
:param model_name: doctype, which must also be the model name.
:param refresh: a boolean that determines whether to refresh the index, making all operations performed since the last refresh
immediately available for search, instead of needing to wait for the scheduled Elasticsearch execution. Defaults to True.
'''
src = Bungiesearch()
logger.info('Getting index for model {}.'.format(model_name))
for index_name in src.get_index(model_name):
index_instance = src.get_model_index(model_name)
item_es_id = index_instance.fields['_id'].value(item)
try:
src.get_es_instance().delete(index_name, model_name, item_es_id)
except NotFoundError as e:
logger.warning('NotFoundError: could not delete {}.{} from index {}: {}.'.format(model_name, item_es_id, index_name, str(e)))
if refresh:
src.get_es_instance().indices.refresh(index=index_name) |
def redistribute_duplicates(data):
    """Given a dictionary of photo sets, will look at lat/lon between
    sets, if they match, randomly move them around so the google map
    markeres do not overlap
    """
    # Snapshot every (lat, lon) pair so duplicates can be counted below.
    coordinate_list = [(s['latitude'], s['longitude']) for s in data['sets']]
    for myset in data['sets']:
        coords = (myset['latitude'], myset['longitude'])
        if coordinate_list.count(coords) > 1:
            print("moving %s"%(myset['set_title']))
            # One random offset per set, applied to both axes (a diagonal
            # nudge scaled by POS_MOVE_DEG); values stay strings as in data.
            offset = random.random() * POS_MOVE_DEG
            myset['latitude'] = str(offset + float(myset['latitude']))
            myset['longitude'] = str(offset + float(myset['longitude']))
sets, if they match, randomly move them around so the google map
markeres do not overlap | Below is the the instruction that describes the task:
### Input:
Given a dictionary of photo sets, will look at lat/lon between
sets, if they match, randomly move them around so the google map
markeres do not overlap
### Response:
def redistribute_duplicates(data):
"""Given a dictionary of photo sets, will look at lat/lon between
sets, if they match, randomly move them around so the google map
markeres do not overlap
"""
coordinate_list=[]
# Build a list of coordinates
for myset in data['sets']:
coordinate_list.append((myset['latitude'],myset['longitude']))
for myset in data['sets']:
lat=myset['latitude']
lon=myset['longitude']
item=(lat,lon)
if coordinate_list.count(item) > 1:
print("moving %s"%(myset['set_title']))
random_number=random.random()
myset['latitude']=str(random_number*POS_MOVE_DEG\
+float(myset['latitude']))
myset['longitude']=str(random_number*POS_MOVE_DEG\
+float(myset['longitude'])) |
def init(self, input_dim=0, input_dims=None, no_prepare=False):
    """
    Initialize the layer.

    :param input_dim: dimension of the (single) input; ignored when
        ``input_dims`` is given.
    :param input_dims: optional list of input dimensions for layers with
        several inputs; the first entry becomes ``input_dim``.
    :param no_prepare: avoid calling preparation function
    :return: ``self`` on first call; ``None`` if already initialized.
    """
    if self.initialized:
        return
    # configure input dimensions
    if input_dims:
        self.input_dims = input_dims
        self.input_dim = input_dims[0]
    else:
        self.input_dim = input_dim
        # BUGFIX: previously stored ``[input_dims]`` — i.e. ``[None]`` in
        # this branch — instead of wrapping the actual scalar dimension,
        # leaving input_dims inconsistent with input_dim.
        self.input_dims = [input_dim]
    # set default output dimension
    if self.output_dim == 0:
        self.output_dim = self.input_dim
    self.initialized = True
    # call prepare
    if not no_prepare:
        self.prepare()
    return self
:param no_prepare: avoid calling preparation function | Below is the the instruction that describes the task:
### Input:
Initialize the layer.
:param no_prepare: avoid calling preparation function
### Response:
def init(self, input_dim=0, input_dims=None, no_prepare=False):
"""
Initialize the layer.
:param no_prepare: avoid calling preparation function
"""
if self.initialized:
return
# configure input dimensions
if input_dims:
self.input_dims = input_dims
self.input_dim = input_dims[0]
else:
self.input_dim = input_dim
self.input_dims = [input_dims]
# set default output dimension
if self.output_dim == 0:
self.output_dim = self.input_dim
self.initialized = True
# call prepare
if not no_prepare:
self.prepare()
return self |
def get_authorized_tokens(self, oauth_verifier):
    """Returns a dict of authorized tokens after they go through the
    :class:`get_authentication_tokens` phase.

    :param oauth_verifier: (required) The oauth_verifier (or a.k.a PIN
    for non web apps) retrieved from the callback url querystring
    :rtype: dict
    """
    # BUGFIX: the old code used backslash line-continuations *inside*
    # string literals, which embedded long runs of indentation whitespace
    # into the error messages and — worse — into the Content-Type header
    # value ('application/                    json').  Strings are now
    # assembled with implicit concatenation instead.
    if self.oauth_version != 1:
        raise TwythonError('This method can only be called when your '
                           'OAuth version is 1.0.')
    response = self.client.get(self.access_token_url,
                               params={'oauth_verifier': oauth_verifier},
                               headers={'Content-Type': 'application/json'})
    if response.status_code == 401:
        try:
            try:
                # try to get json
                content = response.json()
            except AttributeError:  # pragma: no cover
                # if unicode detected
                content = json.loads(response.content)
        except ValueError:
            content = {}
        raise TwythonError(content.get('error', 'Invalid / expired Token'),
                           error_code=response.status_code)
    authorized_tokens = dict(parse_qsl(response.content.decode('utf-8')))
    if not authorized_tokens:
        raise TwythonError('Unable to decode authorized tokens.')
    return authorized_tokens
:class:`get_authentication_tokens` phase.
:param oauth_verifier: (required) The oauth_verifier (or a.k.a PIN
for non web apps) retrieved from the callback url querystring
:rtype: dict | Below is the the instruction that describes the task:
### Input:
Returns a dict of authorized tokens after they go through the
:class:`get_authentication_tokens` phase.
:param oauth_verifier: (required) The oauth_verifier (or a.k.a PIN
for non web apps) retrieved from the callback url querystring
:rtype: dict
### Response:
def get_authorized_tokens(self, oauth_verifier):
"""Returns a dict of authorized tokens after they go through the
:class:`get_authentication_tokens` phase.
:param oauth_verifier: (required) The oauth_verifier (or a.k.a PIN
for non web apps) retrieved from the callback url querystring
:rtype: dict
"""
if self.oauth_version != 1:
raise TwythonError('This method can only be called when your \
OAuth version is 1.0.')
response = self.client.get(self.access_token_url,
params={'oauth_verifier': oauth_verifier},
headers={'Content-Type': 'application/\
json'})
if response.status_code == 401:
try:
try:
# try to get json
content = response.json()
except AttributeError: # pragma: no cover
# if unicode detected
content = json.loads(response.content)
except ValueError:
content = {}
raise TwythonError(content.get('error', 'Invalid / expired To \
ken'), error_code=response.status_code)
authorized_tokens = dict(parse_qsl(response.content.decode('utf-8')))
if not authorized_tokens:
raise TwythonError('Unable to decode authorized tokens.')
return authorized_tokens |
def exists(name=None, group=None, release=None, except_release=None, verbose=1):
    """
    Determines if a virtual machine instance exists.
    """
    verbose = int(verbose)
    matches = list_instances(
        name=name,
        group=group,
        release=release,
        except_release=except_release,
        verbose=verbose,
        show=verbose)
    if verbose:
        label = 'DOES' if bool(matches) else 'does NOT'
        print('\ninstance %s exist' % label)
    # Returns the matching instances themselves (truthy iff any exist),
    # rather than a plain boolean.
    #return ret
    return matches
### Input:
Determines if a virtual machine instance exists.
### Response:
def exists(name=None, group=None, release=None, except_release=None, verbose=1):
"""
Determines if a virtual machine instance exists.
"""
verbose = int(verbose)
instances = list_instances(
name=name,
group=group,
release=release,
except_release=except_release,
verbose=verbose,
show=verbose)
ret = bool(instances)
if verbose:
print('\ninstance %s exist' % ('DOES' if ret else 'does NOT'))
#return ret
return instances |
def _prepare_init_params_from_job_description(cls, job_details, model_channel_name=None):
"""Convert the job description to init params that can be handled by the class constructor
Args:
job_details: the returned job details from a describe_training_job API call.
model_channel_name (str): Name of the channel where pre-trained model data will be downloaded.
Returns:
dictionary: The transformed init_params
"""
init_params = dict()
init_params['role'] = job_details['RoleArn']
init_params['train_instance_count'] = job_details['ResourceConfig']['InstanceCount']
init_params['train_instance_type'] = job_details['ResourceConfig']['InstanceType']
init_params['train_volume_size'] = job_details['ResourceConfig']['VolumeSizeInGB']
init_params['train_max_run'] = job_details['StoppingCondition']['MaxRuntimeInSeconds']
init_params['input_mode'] = job_details['AlgorithmSpecification']['TrainingInputMode']
init_params['base_job_name'] = job_details['TrainingJobName']
init_params['output_path'] = job_details['OutputDataConfig']['S3OutputPath']
init_params['output_kms_key'] = job_details['OutputDataConfig']['KmsKeyId']
has_hps = 'HyperParameters' in job_details
init_params['hyperparameters'] = job_details['HyperParameters'] if has_hps else {}
if 'TrainingImage' in job_details['AlgorithmSpecification']:
init_params['image'] = job_details['AlgorithmSpecification']['TrainingImage']
elif 'AlgorithmName' in job_details['AlgorithmSpecification']:
init_params['algorithm_arn'] = job_details['AlgorithmSpecification']['AlgorithmName']
else:
raise RuntimeError('Invalid AlgorithmSpecification. Either TrainingImage or '
'AlgorithmName is expected. None was found.')
if 'MetricDefinitons' in job_details['AlgorithmSpecification']:
init_params['metric_definitions'] = job_details['AlgorithmSpecification']['MetricsDefinition']
if 'EnableInterContainerTrafficEncryption' in job_details:
init_params['encrypt_inter_container_traffic'] = \
job_details['EnableInterContainerTrafficEncryption']
subnets, security_group_ids = vpc_utils.from_dict(job_details.get(vpc_utils.VPC_CONFIG_KEY))
if subnets:
init_params['subnets'] = subnets
if security_group_ids:
init_params['security_group_ids'] = security_group_ids
if 'InputDataConfig' in job_details and model_channel_name:
for channel in job_details['InputDataConfig']:
if channel['ChannelName'] == model_channel_name:
init_params['model_channel_name'] = model_channel_name
init_params['model_uri'] = channel['DataSource']['S3DataSource']['S3Uri']
break
return init_params | Convert the job description to init params that can be handled by the class constructor
Args:
job_details: the returned job details from a describe_training_job API call.
model_channel_name (str): Name of the channel where pre-trained model data will be downloaded.
Returns:
dictionary: The transformed init_params | Below is the the instruction that describes the task:
### Input:
Convert the job description to init params that can be handled by the class constructor
Args:
job_details: the returned job details from a describe_training_job API call.
model_channel_name (str): Name of the channel where pre-trained model data will be downloaded.
Returns:
dictionary: The transformed init_params
### Response:
def _prepare_init_params_from_job_description(cls, job_details, model_channel_name=None):
"""Convert the job description to init params that can be handled by the class constructor
Args:
job_details: the returned job details from a describe_training_job API call.
model_channel_name (str): Name of the channel where pre-trained model data will be downloaded.
Returns:
dictionary: The transformed init_params
"""
init_params = dict()
init_params['role'] = job_details['RoleArn']
init_params['train_instance_count'] = job_details['ResourceConfig']['InstanceCount']
init_params['train_instance_type'] = job_details['ResourceConfig']['InstanceType']
init_params['train_volume_size'] = job_details['ResourceConfig']['VolumeSizeInGB']
init_params['train_max_run'] = job_details['StoppingCondition']['MaxRuntimeInSeconds']
init_params['input_mode'] = job_details['AlgorithmSpecification']['TrainingInputMode']
init_params['base_job_name'] = job_details['TrainingJobName']
init_params['output_path'] = job_details['OutputDataConfig']['S3OutputPath']
init_params['output_kms_key'] = job_details['OutputDataConfig']['KmsKeyId']
has_hps = 'HyperParameters' in job_details
init_params['hyperparameters'] = job_details['HyperParameters'] if has_hps else {}
if 'TrainingImage' in job_details['AlgorithmSpecification']:
init_params['image'] = job_details['AlgorithmSpecification']['TrainingImage']
elif 'AlgorithmName' in job_details['AlgorithmSpecification']:
init_params['algorithm_arn'] = job_details['AlgorithmSpecification']['AlgorithmName']
else:
raise RuntimeError('Invalid AlgorithmSpecification. Either TrainingImage or '
'AlgorithmName is expected. None was found.')
if 'MetricDefinitons' in job_details['AlgorithmSpecification']:
init_params['metric_definitions'] = job_details['AlgorithmSpecification']['MetricsDefinition']
if 'EnableInterContainerTrafficEncryption' in job_details:
init_params['encrypt_inter_container_traffic'] = \
job_details['EnableInterContainerTrafficEncryption']
subnets, security_group_ids = vpc_utils.from_dict(job_details.get(vpc_utils.VPC_CONFIG_KEY))
if subnets:
init_params['subnets'] = subnets
if security_group_ids:
init_params['security_group_ids'] = security_group_ids
if 'InputDataConfig' in job_details and model_channel_name:
for channel in job_details['InputDataConfig']:
if channel['ChannelName'] == model_channel_name:
init_params['model_channel_name'] = model_channel_name
init_params['model_uri'] = channel['DataSource']['S3DataSource']['S3Uri']
break
return init_params |
def filter_excluded_tags(self, all_tags):
    """
    Filter tags according exclude_tags and exclude_tags_regex option.
    :param list(dict) all_tags: Pre-filtered tags.
    :rtype: list(dict)
    :return: Filtered tags.
    """
    # Work on a deep copy so the caller's tag list is never mutated.
    remaining = copy.deepcopy(all_tags)
    if self.options.exclude_tags:
        remaining = self.apply_exclude_tags(remaining)
    if self.options.exclude_tags_regex:
        remaining = self.apply_exclude_tags_regex(remaining)
    return remaining
:param list(dict) all_tags: Pre-filtered tags.
:rtype: list(dict)
:return: Filtered tags. | Below is the the instruction that describes the task:
### Input:
Filter tags according exclude_tags and exclude_tags_regex option.
:param list(dict) all_tags: Pre-filtered tags.
:rtype: list(dict)
:return: Filtered tags.
### Response:
def filter_excluded_tags(self, all_tags):
"""
Filter tags according exclude_tags and exclude_tags_regex option.
:param list(dict) all_tags: Pre-filtered tags.
:rtype: list(dict)
:return: Filtered tags.
"""
filtered_tags = copy.deepcopy(all_tags)
if self.options.exclude_tags:
filtered_tags = self.apply_exclude_tags(filtered_tags)
if self.options.exclude_tags_regex:
filtered_tags = self.apply_exclude_tags_regex(filtered_tags)
return filtered_tags |
def save(self, UserSave=True, *args, **kwargs):
    """Override of ``save`` that also manages the creation of a ``User``
    when one has not been created and linked beforehand.

    :param UserSave: when True, also persist any changes made to the
        linked auth ``User``.

    NOTE(review): auto-creation relies on transient ``username``/
    ``password`` (and optional name/email) attributes being set on the
    instance before saving — confirm callers follow that protocol.
    (Error messages are user-facing and intentionally left in Spanish.)
    """
    if hasattr(self, 'user') and self.user is not None:
        # Case: a specific User was assigned to this instance.
        if hasattr(self.user, 'usuario') and self.user.usuario is not None:
            # That User is already linked to some Usuario.
            if self.user.usuario.pk != self.pk:
                # ...and to a *different* one: reject the assignment.
                raise IntegrityError("Debe ingresar User que no tenga relación con ningún Usuario existente.")
            else:
                # Linked to this very instance; the User may still need updating.
                pass
    else:
        # Case: no User assigned beforehand — build one automatically.
        if not hasattr(self, 'password') or not hasattr(self, 'username'):
            # Cannot auto-create a User without username and password.
            raise IntegrityError("Debe ingresar los campos password y username si desea automatizar la creación de un User.")
        else:
            # Fill in optional fields with empty defaults.
            if not hasattr(self, 'first_name'):
                self.first_name = ""
            if not hasattr(self, 'last_name'):
                self.last_name = ""
            if not hasattr(self, 'email'):
                self.email = ""
            user = User(
                username=self.username,
                first_name=self.first_name,
                last_name=self.last_name,
                email=self.email
            )
            # Store the password securely (hashed, not plain text).
            user.set_password(self.password)
            user.save()
            self.user = user
            # Remove the transient attributes so they do not linger on the instance.
            for name in ['username', 'password', 'first_name', 'last_name', 'email']:
                delattr(self, name)
    # Save this model, and the User too in case it was modified.
    super().save(*args, **kwargs)
    if UserSave:
        self.user.save()
User en caso que no se haya creado y enlazado uno antes.
Se puede elegir si se desea guardar los cambios efectuados en user. | Below is the the instruction that describes the task:
### Input:
Se sobreescribe el método salvar para poder gestionar la creación de un
User en caso que no se haya creado y enlazado uno antes.
Se puede elegir si se desea guardar los cambios efectuados en user.
### Response:
def save(self, UserSave=True, *args, **kwargs):
""" Se sobreescribe el método salvar para poder gestionar la creación de un
User en caso que no se haya creado y enlazado uno antes.
Se puede elegir si se desea guardar los cambios efectuados en user.
"""
if hasattr(self, 'user') and self.user is not None:
# caso que se le asigna un usuario determinado
if hasattr(self.user, 'usuario') and self.user.usuario is not None:
# caso que se le haya asignado un usuario
if self.user.usuario.pk != self.pk:
# caso el usuario asignado ya tiene a otro
raise IntegrityError("Debe ingresar User que no tenga relación con ningún Usuario existente.")
else:
# en este caso se debe revisar si hay que modificar User
pass
else:
# caso que No tenga un usuario asignado de antemano
if not hasattr(self, 'password') or not hasattr(self, 'username'):
# caso que no tenga los atributos password y/o username
raise IntegrityError("Debe ingresar los campos password y username si desea automatizar la creación de un User.")
else:
if not hasattr(self, 'first_name'):
self.first_name = ""
if not hasattr(self, 'last_name'):
self.last_name = ""
if not hasattr(self, 'email'):
self.email = ""
user = User(
username=self.username,
first_name=self.first_name,
last_name=self.last_name,
email=self.email
)
# almacenar password de forma segura
user.set_password(self.password)
user.save()
self.user = user
# eliminar los atributos que no se debiesen estar en la instancia
for name in ['username', 'password', 'first_name', 'last_name', 'email']:
delattr(self, name)
# se guarda Usuario y User en caso que se haya modificado
super().save(*args, **kwargs)
if UserSave:
self.user.save() |
def shallow_copy(obj, attribute_names,
ignore_missing_attributes=True):
"""
Return a shallow copy of the given object, including only the
specified attributes of this object.
@param obj: an object to copy.
@param attribute_names: a list of names of the attributes to copy.
@param ignore_missing_attributes: ``False`` indicates that the function
can ignore attributes that have been specified but that are not
defined in the given object; ``True`` indicates that the function
MUST raise a ``KeyError`` exception if some specified attributes
are not defined in the given object.
@return: a shallow copy of the given object with the specified
attributes only.
@raise KeyError: if the argument ``ignore_missing_attributes`` equals
``False`` and if some specified attributes are not defined in the
the given object.
"""
shallow_object = copy.copy(obj)
shallow_object.__dict__ = {}
for attribute_name in attribute_names:
try:
setattr(shallow_object, attribute_name, getattr(obj, attribute_name))
except KeyError, error:
if not ignore_missing_attributes:
raise error
return shallow_object | Return a shallow copy of the given object, including only the
specified attributes of this object.
@param obj: an object to copy.
@param attribute_names: a list of names of the attributes to copy.
@param ignore_missing_attributes: ``False`` indicates that the function
can ignore attributes that have been specified but that are not
defined in the given object; ``True`` indicates that the function
MUST raise a ``KeyError`` exception if some specified attributes
are not defined in the given object.
@return: a shallow copy of the given object with the specified
attributes only.
@raise KeyError: if the argument ``ignore_missing_attributes`` equals
``False`` and if some specified attributes are not defined in the
the given object. | Below is the the instruction that describes the task:
### Input:
Return a shallow copy of the given object, including only the
specified attributes of this object.
@param obj: an object to copy.
@param attribute_names: a list of names of the attributes to copy.
@param ignore_missing_attributes: ``False`` indicates that the function
can ignore attributes that have been specified but that are not
defined in the given object; ``True`` indicates that the function
MUST raise a ``KeyError`` exception if some specified attributes
are not defined in the given object.
@return: a shallow copy of the given object with the specified
attributes only.
@raise KeyError: if the argument ``ignore_missing_attributes`` equals
``False`` and if some specified attributes are not defined in the
the given object.
### Response:
def shallow_copy(obj, attribute_names,
                 ignore_missing_attributes=True):
    """
    Return a shallow copy of the given object, including only the
    specified attributes of this object.
    @param obj: an object to copy.
    @param attribute_names: a list of names of the attributes to copy.
    @param ignore_missing_attributes: ``True`` indicates that the function
        silently skips attributes that have been specified but that are
        not defined in the given object; ``False`` indicates that the
        function MUST raise an ``AttributeError`` exception if some
        specified attributes are not defined in the given object.
    @return: a shallow copy of the given object with the specified
        attributes only.
    @raise AttributeError: if the argument ``ignore_missing_attributes``
        equals ``False`` and if some specified attributes are not defined
        in the given object.
    """
    # Duplicate the object's type information, then wipe its attribute
    # dictionary so only the requested attributes are carried over.
    shallow_object = copy.copy(obj)
    shallow_object.__dict__ = {}
    for attribute_name in attribute_names:
        try:
            setattr(shallow_object, attribute_name, getattr(obj, attribute_name))
        except AttributeError:
            # BUG FIX: ``getattr`` raises ``AttributeError`` (not
            # ``KeyError``) for a missing attribute, so the previous
            # ``except KeyError`` clause was dead code and the
            # ``ignore_missing_attributes`` flag never had any effect.
            # The old ``except KeyError, error`` form was also
            # Python 2-only syntax.
            if not ignore_missing_attributes:
                raise
    return shallow_object
def _constrain_glob(glob, paths, limit=5):
"""
Tweaks glob into a list of more specific globs that together still cover paths and not too much extra.
Saves us minutes long listings for long dataset histories.
Specifically, in this implementation the leftmost occurrences of "[0-9]"
give rise to a few separate globs that each specialize the expression to
digits that actually occur in paths.
"""
def digit_set_wildcard(chars):
"""
Makes a wildcard expression for the set, a bit readable, e.g. [1-5].
"""
chars = sorted(chars)
if len(chars) > 1 and ord(chars[-1]) - ord(chars[0]) == len(chars) - 1:
return '[%s-%s]' % (chars[0], chars[-1])
else:
return '[%s]' % ''.join(chars)
current = {glob: paths}
while True:
pos = list(current.keys())[0].find('[0-9]')
if pos == -1:
# no wildcard expressions left to specialize in the glob
return list(current.keys())
char_sets = {}
for g, p in six.iteritems(current):
char_sets[g] = sorted({path[pos] for path in p})
if sum(len(s) for s in char_sets.values()) > limit:
return [g.replace('[0-9]', digit_set_wildcard(char_sets[g]), 1) for g in current]
for g, s in six.iteritems(char_sets):
for c in s:
new_glob = g.replace('[0-9]', c, 1)
new_paths = list(filter(lambda p: p[pos] == c, current[g]))
current[new_glob] = new_paths
del current[g] | Tweaks glob into a list of more specific globs that together still cover paths and not too much extra.
Saves us minutes long listings for long dataset histories.
Specifically, in this implementation the leftmost occurrences of "[0-9]"
give rise to a few separate globs that each specialize the expression to
digits that actually occur in paths. | Below is the the instruction that describes the task:
### Input:
Tweaks glob into a list of more specific globs that together still cover paths and not too much extra.
Saves us minutes long listings for long dataset histories.
Specifically, in this implementation the leftmost occurrences of "[0-9]"
give rise to a few separate globs that each specialize the expression to
digits that actually occur in paths.
### Response:
def _constrain_glob(glob, paths, limit=5):
"""
Tweaks glob into a list of more specific globs that together still cover paths and not too much extra.
Saves us minutes long listings for long dataset histories.
Specifically, in this implementation the leftmost occurrences of "[0-9]"
give rise to a few separate globs that each specialize the expression to
digits that actually occur in paths.
"""
def digit_set_wildcard(chars):
"""
Makes a wildcard expression for the set, a bit readable, e.g. [1-5].
"""
chars = sorted(chars)
if len(chars) > 1 and ord(chars[-1]) - ord(chars[0]) == len(chars) - 1:
return '[%s-%s]' % (chars[0], chars[-1])
else:
return '[%s]' % ''.join(chars)
current = {glob: paths}
while True:
pos = list(current.keys())[0].find('[0-9]')
if pos == -1:
# no wildcard expressions left to specialize in the glob
return list(current.keys())
char_sets = {}
for g, p in six.iteritems(current):
char_sets[g] = sorted({path[pos] for path in p})
if sum(len(s) for s in char_sets.values()) > limit:
return [g.replace('[0-9]', digit_set_wildcard(char_sets[g]), 1) for g in current]
for g, s in six.iteritems(char_sets):
for c in s:
new_glob = g.replace('[0-9]', c, 1)
new_paths = list(filter(lambda p: p[pos] == c, current[g]))
current[new_glob] = new_paths
del current[g] |
def create_pipeline(name, unique_id, description='', region=None, key=None, keyid=None,
profile=None):
'''
Create a new, empty pipeline. This function is idempotent.
CLI example:
.. code-block:: bash
salt myminion boto_datapipeline.create_pipeline my_name my_unique_id
'''
client = _get_client(region, key, keyid, profile)
r = {}
try:
response = client.create_pipeline(
name=name,
uniqueId=unique_id,
description=description,
)
r['result'] = response['pipelineId']
except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
r['error'] = six.text_type(e)
return r | Create a new, empty pipeline. This function is idempotent.
CLI example:
.. code-block:: bash
salt myminion boto_datapipeline.create_pipeline my_name my_unique_id | Below is the the instruction that describes the task:
### Input:
Create a new, empty pipeline. This function is idempotent.
CLI example:
.. code-block:: bash
salt myminion boto_datapipeline.create_pipeline my_name my_unique_id
### Response:
def create_pipeline(name, unique_id, description='', region=None, key=None, keyid=None,
                    profile=None):
    '''
    Create a new, empty pipeline. This function is idempotent.
    CLI example:
    .. code-block:: bash
        salt myminion boto_datapipeline.create_pipeline my_name my_unique_id
    '''
    result = {}
    client = _get_client(region, key, keyid, profile)
    try:
        created = client.create_pipeline(
            name=name,
            uniqueId=unique_id,
            description=description,
        )
    except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as exc:
        # Report the boto failure to the caller instead of raising.
        result['error'] = six.text_type(exc)
    else:
        result['result'] = created['pipelineId']
    return result
def download_and_parse_mnist_file(fname, target_dir=None, force=False):
"""Download the IDX file named fname from the URL specified in dataset_url
and return it as a numpy array.
Parameters
----------
fname : str
File name to download and parse
target_dir : str
Directory where to store the file
force : bool
Force downloading the file, if it already exists
Returns
-------
data : numpy.ndarray
Numpy array with the dimensions and the data in the IDX file
"""
fname = download_file(fname, target_dir=target_dir, force=force)
fopen = gzip.open if os.path.splitext(fname)[1] == '.gz' else open
with fopen(fname, 'rb') as fd:
return parse_idx(fd) | Download the IDX file named fname from the URL specified in dataset_url
and return it as a numpy array.
Parameters
----------
fname : str
File name to download and parse
target_dir : str
Directory where to store the file
force : bool
Force downloading the file, if it already exists
Returns
-------
data : numpy.ndarray
Numpy array with the dimensions and the data in the IDX file | Below is the the instruction that describes the task:
### Input:
Download the IDX file named fname from the URL specified in dataset_url
and return it as a numpy array.
Parameters
----------
fname : str
File name to download and parse
target_dir : str
Directory where to store the file
force : bool
Force downloading the file, if it already exists
Returns
-------
data : numpy.ndarray
Numpy array with the dimensions and the data in the IDX file
### Response:
def download_and_parse_mnist_file(fname, target_dir=None, force=False):
    """Fetch the IDX file ``fname`` from the configured dataset URL and
    parse it into a numpy array.
    Parameters
    ----------
    fname : str
        File name to download and parse
    target_dir : str
        Directory where to store the file
    force : bool
        Force downloading the file, if it already exists
    Returns
    -------
    data : numpy.ndarray
        Numpy array with the dimensions and the data in the IDX file
    """
    local_path = download_file(fname, target_dir=target_dir, force=force)
    # Gzipped downloads are decompressed transparently while parsing.
    if os.path.splitext(local_path)[1] == '.gz':
        opener = gzip.open
    else:
        opener = open
    with opener(local_path, 'rb') as stream:
        return parse_idx(stream)
def create_required_directories(self):
"""Creates any directories required for Engineer to function if they don't already exist."""
required = (self.CACHE_DIR,
self.LOG_DIR,
self.OUTPUT_DIR,
self.ENGINEER.JINJA_CACHE_DIR,)
for folder in required:
ensure_exists(folder, assume_dirs=True) | Creates any directories required for Engineer to function if they don't already exist. | Below is the the instruction that describes the task:
### Input:
Creates any directories required for Engineer to function if they don't already exist.
### Response:
def create_required_directories(self):
    """Make sure every directory Engineer needs exists, creating any that
    are missing."""
    for folder in (self.CACHE_DIR,
                   self.LOG_DIR,
                   self.OUTPUT_DIR,
                   self.ENGINEER.JINJA_CACHE_DIR):
        ensure_exists(folder, assume_dirs=True)
def show(self, commits=None, encoding='utf-8'):
"""Show the data of a set of commits.
The method returns the output of Git show command for a
set of commits using the following options:
git show --raw --numstat --pretty=fuller --decorate=full
--parents -M -C -c [<commit>...<commit>]
When the list of commits is empty, the command will return
data about the last commit, like the default behaviour of
`git show`.
:param commits: list of commits to show data
:param encoding: encode the output using this format
:returns: a generator where each item is a line from the show output
:raises EmptyRepositoryError: when the repository is empty and
the action cannot be performed
:raises RepositoryError: when an error occurs fetching the show output
"""
if self.is_empty():
logger.warning("Git %s repository is empty; unable to run show",
self.uri)
raise EmptyRepositoryError(repository=self.uri)
if commits is None:
commits = []
cmd_show = ['git', 'show']
cmd_show.extend(self.GIT_PRETTY_OUTPUT_OPTS)
cmd_show.extend(commits)
for line in self._exec_nb(cmd_show, cwd=self.dirpath, env=self.gitenv):
yield line
logger.debug("Git show fetched from %s repository (%s)",
self.uri, self.dirpath) | Show the data of a set of commits.
The method returns the output of Git show command for a
set of commits using the following options:
git show --raw --numstat --pretty=fuller --decorate=full
--parents -M -C -c [<commit>...<commit>]
When the list of commits is empty, the command will return
data about the last commit, like the default behaviour of
`git show`.
:param commits: list of commits to show data
:param encoding: encode the output using this format
:returns: a generator where each item is a line from the show output
:raises EmptyRepositoryError: when the repository is empty and
the action cannot be performed
:raises RepositoryError: when an error occurs fetching the show output | Below is the the instruction that describes the task:
### Input:
Show the data of a set of commits.
The method returns the output of Git show command for a
set of commits using the following options:
git show --raw --numstat --pretty=fuller --decorate=full
--parents -M -C -c [<commit>...<commit>]
When the list of commits is empty, the command will return
data about the last commit, like the default behaviour of
`git show`.
:param commits: list of commits to show data
:param encoding: encode the output using this format
:returns: a generator where each item is a line from the show output
:raises EmptyRepositoryError: when the repository is empty and
the action cannot be performed
:raises RepositoryError: when an error occurs fetching the show output
### Response:
def show(self, commits=None, encoding='utf-8'):
    """Show the data of a set of commits.
    Runs ``git show`` with the options
    ``--raw --numstat --pretty=fuller --decorate=full --parents -M -C -c``
    over the given commits and yields its output one line at a time.
    With an empty commit list, data about the last commit is produced,
    mirroring the default behaviour of ``git show``.
    :param commits: list of commits to show data
    :param encoding: encode the output using this format
    :returns: a generator where each item is a line from the show output
    :raises EmptyRepositoryError: when the repository is empty and
        the action cannot be performed
    :raises RepositoryError: when an error occurs fetching the show output
    """
    if self.is_empty():
        logger.warning("Git %s repository is empty; unable to run show",
                       self.uri)
        raise EmptyRepositoryError(repository=self.uri)
    cmd = ['git', 'show']
    cmd.extend(self.GIT_PRETTY_OUTPUT_OPTS)
    cmd.extend(commits or [])
    yield from self._exec_nb(cmd, cwd=self.dirpath, env=self.gitenv)
    logger.debug("Git show fetched from %s repository (%s)",
                 self.uri, self.dirpath)
def cli_run():
"""docstring for argparse"""
parser = argparse.ArgumentParser(description='Stupidly simple code answers from StackOverflow')
parser.add_argument('query', help="What's the problem ?", type=str, nargs='+')
parser.add_argument('-t','--tags', help='semicolon separated tags -> python;lambda')
args = parser.parse_args()
main(args) | docstring for argparse | Below is the the instruction that describes the task:
### Input:
docstring for argparse
### Response:
def cli_run():
    """Parse the command-line arguments and hand them off to ``main``."""
    parser = argparse.ArgumentParser(
        description='Stupidly simple code answers from StackOverflow')
    parser.add_argument('query', help="What's the problem ?", type=str, nargs='+')
    parser.add_argument('-t', '--tags',
                        help='semicolon separated tags -> python;lambda')
    main(parser.parse_args())
def execute_command(self, parts, dry_run):
"""
Execute a command.
Parameters
----------
parts : list
Sequence of strings constituting a command.
dry_run : bool
Whether to just log the command instead of executing it.
Returns
-------
status : int
Status code of the executed command or 0 if `dry_run` is `True`.
"""
if dry_run:
self.logger.info("dry-run command '%s'", " ".join(map(str, parts)))
return 0
else: # pragma: no cover
self.logger.debug("executing command '%s'", " ".join(map(str, parts)))
status_code = os.spawnvpe(os.P_WAIT, parts[0], parts, os.environ)
if status_code:
self.logger.warning("command '%s' returned status code %d",
" ".join(map(str, parts)), status_code)
return status_code | Execute a command.
Parameters
----------
parts : list
Sequence of strings constituting a command.
dry_run : bool
Whether to just log the command instead of executing it.
Returns
-------
status : int
Status code of the executed command or 0 if `dry_run` is `True`. | Below is the the instruction that describes the task:
### Input:
Execute a command.
Parameters
----------
parts : list
Sequence of strings constituting a command.
dry_run : bool
Whether to just log the command instead of executing it.
Returns
-------
status : int
Status code of the executed command or 0 if `dry_run` is `True`.
### Response:
def execute_command(self, parts, dry_run):
    """
    Execute a command.
    Parameters
    ----------
    parts : list
        Sequence of strings constituting a command.
    dry_run : bool
        Whether to just log the command instead of executing it.
    Returns
    -------
    status : int
        Status code of the executed command or 0 if `dry_run` is `True`.
    """
    rendered = " ".join(map(str, parts))
    if dry_run:
        self.logger.info("dry-run command '%s'", rendered)
        return 0
    # Real execution path.  # pragma: no cover
    self.logger.debug("executing command '%s'", rendered)
    status_code = os.spawnvpe(os.P_WAIT, parts[0], parts, os.environ)
    if status_code:
        self.logger.warning("command '%s' returned status code %d",
                            rendered, status_code)
    return status_code
def split_words(sentence, modify=True, keep_shorts=False):
"""
Extract and yield the keywords from the sentence:
- Drop keywords that are too short (keep_shorts=False)
- Drop the accents (modify=True)
- Make everything lower case (modify=True)
- Try to separate the words as much as possible (using 2 list of
separators, one being more complete than the others)
"""
if (sentence == "*"):
yield sentence
return
# TODO: i18n
if modify:
sentence = sentence.lower()
sentence = strip_accents(sentence)
words = FORCED_SPLIT_KEYWORDS_REGEX.split(sentence)
if keep_shorts:
word_iter = words
else:
word_iter = __cleanup_word_array(words)
for word in word_iter:
can_split = True
can_yield = False
subwords = WISHED_SPLIT_KEYWORDS_REGEX.split(word)
for subword in subwords:
if subword == "":
continue
can_yield = True
if not keep_shorts and len(subword) < MIN_KEYWORD_LEN:
can_split = False
break
if can_split:
for subword in subwords:
if subword == "":
continue
if subword[0] == '"':
subword = subword[1:]
if subword[-1] == '"':
subword = subword[:-1]
yield subword
elif can_yield:
if word[0] == '"':
word = word[1:]
if word[-1] == '"':
word = word[:-1]
yield word | Extract and yield the keywords from the sentence:
- Drop keywords that are too short (keep_shorts=False)
- Drop the accents (modify=True)
- Make everything lower case (modify=True)
- Try to separate the words as much as possible (using 2 list of
separators, one being more complete than the others) | Below is the the instruction that describes the task:
### Input:
Extract and yield the keywords from the sentence:
- Drop keywords that are too short (keep_shorts=False)
- Drop the accents (modify=True)
- Make everything lower case (modify=True)
- Try to separate the words as much as possible (using 2 list of
separators, one being more complete than the others)
### Response:
def split_words(sentence, modify=True, keep_shorts=False):
    """
    Extract and yield the keywords from the sentence:
    - Drop keywords that are too short (keep_shorts=False)
    - Drop the accents (modify=True)
    - Make everything lower case (modify=True)
    - Try to separate the words as much as possible (using 2 list of
      separators, one being more complete than the others)

    :param sentence: text to split; the literal ``"*"`` is passed through
        untouched (wildcard query).
    :param modify: lowercase the sentence and strip accents before splitting.
    :param keep_shorts: keep words shorter than ``MIN_KEYWORD_LEN`` and
        skip the cleanup pass over the forced split.
    """
    if (sentence == "*"):
        # Wildcard query: yield it as-is.
        yield sentence
        return
    # TODO: i18n
    if modify:
        sentence = sentence.lower()
        sentence = strip_accents(sentence)
    # First pass: split on separators that must always break words.
    words = FORCED_SPLIT_KEYWORDS_REGEX.split(sentence)
    if keep_shorts:
        word_iter = words
    else:
        word_iter = __cleanup_word_array(words)
    for word in word_iter:
        can_split = True
        can_yield = False
        # Second pass: split on "wished" separators, but only commit to
        # the split if no resulting fragment would be too short.
        subwords = WISHED_SPLIT_KEYWORDS_REGEX.split(word)
        for subword in subwords:
            if subword == "":
                continue
            can_yield = True
            if not keep_shorts and len(subword) < MIN_KEYWORD_LEN:
                # A fragment would be too short: keep the word whole.
                can_split = False
                break
        if can_split:
            for subword in subwords:
                if subword == "":
                    continue
                # Trim surrounding double quotes, if any.
                if subword[0] == '"':
                    subword = subword[1:]
                if subword[-1] == '"':
                    subword = subword[:-1]
                yield subword
        elif can_yield:
            # Could not split safely: yield the whole word, trimmed of
            # surrounding double quotes.
            if word[0] == '"':
                word = word[1:]
            if word[-1] == '"':
                word = word[:-1]
            yield word
def subgroup(self, t, i):
"""Handle parenthesis."""
current = []
# (?flags)
flags = self.get_flags(i)
if flags:
self.flags(flags[2:-1])
return [flags]
# (?#comment)
comments = self.get_comments(i)
if comments:
return [comments]
verbose = self.verbose
unicode_flag = self.unicode
# (?flags:pattern)
flags = self.get_flags(i, True)
if flags: # pragma: no cover
t = flags
self.flags(flags[2:-1], scoped=True)
current = []
try:
while t != ')':
if not current:
current.append(t)
else:
current.extend(self.normal(t, i))
t = next(i)
except StopIteration:
pass
# Restore flags after group
self.verbose = verbose
self.unicode = unicode_flag
if t == ")":
current.append(t)
return current | Handle parenthesis. | Below is the the instruction that describes the task:
### Input:
Handle parenthesis.
### Response:
def subgroup(self, t, i):
    """Handle parenthesis.

    :param t: the ``(`` token that opened this group.
    :param i: the token iterator; consumed up to and including the
        closing ``)`` when one is present.
    :returns: list of tokens forming the complete group.
    """
    current = []
    # (?flags)
    flags = self.get_flags(i)
    if flags:
        # Global inline flags: apply them and return the token verbatim.
        self.flags(flags[2:-1])
        return [flags]
    # (?#comment)
    comments = self.get_comments(i)
    if comments:
        return [comments]
    # Remember the flag state so scoped flags don't leak past the group.
    verbose = self.verbose
    unicode_flag = self.unicode
    # (?flags:pattern)
    flags = self.get_flags(i, True)
    if flags: # pragma: no cover
        t = flags
        self.flags(flags[2:-1], scoped=True)
    current = []
    try:
        # Collect tokens until the matching closing paren (or end of input).
        while t != ')':
            if not current:
                current.append(t)
            else:
                current.extend(self.normal(t, i))
            t = next(i)
    except StopIteration:
        # Unterminated group: return what was gathered so far.
        pass
    # Restore flags after group
    self.verbose = verbose
    self.unicode = unicode_flag
    if t == ")":
        current.append(t)
    return current
def noisy(pattern, noiseLevel, totalNumCells):
"""
Generate a noisy copy of a pattern.
Given number of active bits w = len(pattern),
deactivate noiseLevel*w cells, and activate noiseLevel*w other cells.
@param pattern (set)
A set of active indices
@param noiseLevel (float)
The percentage of the bits to shuffle
@param totalNumCells (int)
The number of cells in the SDR, active and inactive
@return (set)
A noisy set of active indices
"""
n = int(noiseLevel * len(pattern))
noised = set(pattern)
noised.difference_update(random.sample(noised, n))
for _ in xrange(n):
while True:
v = random.randint(0, totalNumCells - 1)
if v not in pattern and v not in noised:
noised.add(v)
break
return noised | Generate a noisy copy of a pattern.
Given number of active bits w = len(pattern),
deactivate noiseLevel*w cells, and activate noiseLevel*w other cells.
@param pattern (set)
A set of active indices
@param noiseLevel (float)
The percentage of the bits to shuffle
@param totalNumCells (int)
The number of cells in the SDR, active and inactive
@return (set)
A noisy set of active indices | Below is the the instruction that describes the task:
### Input:
Generate a noisy copy of a pattern.
Given number of active bits w = len(pattern),
deactivate noiseLevel*w cells, and activate noiseLevel*w other cells.
@param pattern (set)
A set of active indices
@param noiseLevel (float)
The percentage of the bits to shuffle
@param totalNumCells (int)
The number of cells in the SDR, active and inactive
@return (set)
A noisy set of active indices
### Response:
def noisy(pattern, noiseLevel, totalNumCells):
    """
    Generate a noisy copy of a pattern.
    Given number of active bits w = len(pattern),
    deactivate noiseLevel*w cells, and activate noiseLevel*w other cells.
    @param pattern (set)
    A set of active indices
    @param noiseLevel (float)
    The percentage of the bits to shuffle
    @param totalNumCells (int)
    The number of cells in the SDR, active and inactive
    @return (set)
    A noisy set of active indices
    """
    n = int(noiseLevel * len(pattern))
    noised = set(pattern)
    # Deactivate n of the currently active bits.
    # BUG FIX: ``random.sample`` rejects sets on Python 3.11+, and
    # ``xrange`` does not exist on Python 3; sample from a sorted list
    # (sorting keeps the draw deterministic for a given seed) and use
    # ``range`` below.
    noised.difference_update(random.sample(sorted(noised), n))
    # Activate n bits that are active neither in the original pattern
    # nor in the noised copy built so far.
    for _ in range(n):
        while True:
            v = random.randint(0, totalNumCells - 1)
            if v not in pattern and v not in noised:
                noised.add(v)
                break
    return noised
def connect_widget(self):
"""Perform the initial connection of the widget
the default implementation will connect to the widgets signal
based on self.signal_name
"""
if self.signal_name is not None:
# None for read only widgets
sid = self.widget.connect(self.signal_name, self.widget_changed)
self.connections.append(sid) | Perform the initial connection of the widget
the default implementation will connect to the widgets signal
based on self.signal_name | Below is the the instruction that describes the task:
### Input:
Perform the initial connection of the widget
the default implementation will connect to the widgets signal
based on self.signal_name
### Response:
def connect_widget(self):
    """Hook up the widget for the first time.

    By default this connects to the widget signal named by
    ``self.signal_name``; read-only widgets set it to ``None`` and no
    connection is made.
    """
    if self.signal_name is None:
        # None for read only widgets
        return
    handler_id = self.widget.connect(self.signal_name, self.widget_changed)
    self.connections.append(handler_id)
def animate_panel(panel, keys=None, columns=None, interval=1000, blit=False, titles='', path='animate_panel', xlabel='Time', ylabel='Value', ext='gif',
replot=False, linewidth=3, close=False, fontsize=24, background_color='white', alpha=1, figsize=(12,8), xlabel_rotation=-25, plot_kwargs=(('rotation', 30),),
verbosity=1, **video_kwargs):
"""Animate a pandas.Panel by flipping through plots of the data in each dataframe
Arguments:
panel (pandas.Panel): Pandas Panel of DataFrames to animate (each DataFrame is an animation video frame)
keys (list of str): ordered list of panel keys (pages) to animate
columns (list of str): ordered list of data series names to include in plot for eath video frame
interval (int): number of milliseconds between video frames
titles (str or list of str): titles to place in plot on each data frame.
default = `keys` so that titles changes with each frame
path (str): path and base file name to save *.mp4 animation video ('' to not save)
kwargs (dict): pass-through kwargs for `animation.FuncAnimation(...).save(path, **kwargs)`
(Not used if `not path`)
TODO:
- Work with other 3-D data formats:
- dict (sorted by key) or OrderedDict
- list of 2-D arrays/lists
- 3-D arrays/lists
- generators of 2-D arrays/lists
- generators of generators of lists/arrays?
- Write json and html5 files for d3 SVG line plots with transitions!
>>> x = np.arange(0, 2*np.pi, 0.05)
>>> panel = pd.Panel(dict((i, pd.DataFrame({
... 'T=10': np.sin(x + i/10.),
... 'T=7': np.sin(x + i/7.),
... 'beat': np.sin(x + i/10.) + np.sin(x + i/7.),
... }, index=x)
... ) for i in range(50)))
>>> animate_panel(panel, interval=200, path='animate_panel_test') # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
Drawing frames for a ".gif" animation...
Saving video to animate_panel_test.gif...
T=10 T=7 beat
0.00 0.000000 0.000000 0.000000
0.05 0.049979 0.049979 0.099958
...
[126 rows x 3 columns]
"""
plot_kwargs = plot_kwargs or {}
plot_kwargs = dict(plot_kwargs)
ext_kwargs = {
'mp4': {'writer': 'ffmpeg', 'codec': 'mpeg4', 'dpi': 100, 'bitrate': 2000},
'gif': {'writer': 'imagemagick', 'dpi': 100, 'bitrate': 2000},
'imagemagic.gif': {'writer': 'imagemagick_gif', 'dpi': 100, 'bitrate': 2000},
}
ext = str(ext).lower().strip() or 'gif'
default_kwargs = ext_kwargs.get(ext, {})
keys = keys or list(panel.keys())
if titles:
titles = listify(titles)
if len(titles) == 1:
titles *= len(keys)
else:
titles = keys
titles = dict((k, title) for k, title in zip(keys, titles))
columns = columns or list(panel[keys[0]].columns)
fig, ax = plt.subplots(figsize=figsize)
fig.patch.set_facecolor(background_color)
fig.patch.set_alpha(alpha)
i = 0
df = panel[keys[i]]
x = df.index.values
y = df[columns].values
lines = ax.plot(x, y)
ax.grid('on')
ax.patch.set_facecolor(background_color)
ax.patch.set_alpha(alpha)
ax.title.set_text(titles[keys[0]])
ax.title.set_fontsize(fontsize)
ax.title.set_fontweight('bold')
ax.xaxis.label.set_text(xlabel)
plt.setp(ax.get_xticklabels(), rotation=xlabel_rotation)
ax.yaxis.label.set_text(ylabel)
ax.legend(columns)
def animate(k):
df = panel[k]
x = df.index.values
y = df[columns].values.T
if replot:
# plt.cla()
# fig, ax = plt.subplots(figsize=figsize)
fig = ax.figure
fig.patch.set_facecolor(background_color)
fig.patch.set_alpha(alpha)
lines = ax.plot(x, y.T, linewidth=linewidth)
ax.grid('on')
ax.patch.set_facecolor(background_color)
ax.patch.set_alpha(alpha)
ax.title.set_text(titles[k])
ax.title.set_fontsize(fontsize)
ax.title.set_fontweight('bold')
ax.xaxis.label.set_text(xlabel)
plt.setp(ax.get_xticklabels(), rotation=xlabel_rotation)
ax.yaxis.label.set_text(ylabel)
ax.legend(columns)
else:
lines = ax.lines
fig = ax.figure
for i in range(len(lines)):
lines[i].set_xdata(x) # all lines have to share the same x-data
lines[i].set_ydata(y[i]) # update the data, don't replot a new line
lines[i].set_linewidth(linewidth)
lines[i].figure.set_facecolor(background_color)
lines[i].figure.set_alpha(alpha)
lines[i].axes.patch.set_facecolor(background_color)
lines[i].axes.patch.set_alpha(alpha)
ax.patch.set_facecolor(background_color)
ax.figure.patch.set_alpha(alpha)
ax.title.set_text(titles[k])
ax.title.set_fontsize(fontsize)
ax.title.set_fontweight('bold')
if blit:
return lines
# FIXME: doesn't work with ext=mp4
# init_func to mask out pixels to be redrawn/cleared which speeds redrawing of plot
def mask_lines():
if verbosity > 0:
print('Initialing mask_lines. . .')
df = panel[0]
x = df.index.values
y = df[columns].values.T
for i in range(len(lines)):
# FIXME: why are x-values used to set the y-data coordinates of the mask?
lines[i].set_xdata(np.ma.array(x, mask=True))
lines[i].set_ydata(np.ma.array(y[i], mask=True))
return lines
if verbosity > 0:
print('Drawing frames for a ".{0}" animation{1}. . .'.format(ext, ' with blitting' if blit else ''))
animate(keys[0])
ani = animation.FuncAnimation(fig, animate, keys, interval=interval, blit=blit) #, init_func=mask_lines, blit=True)
kwargs = dict(default_kwargs)
for k, v in six.iteritems(default_kwargs):
kwargs[k] = video_kwargs.get(k, v)
# if 'bitrate' in kwargs:
# kwargs['bitrate'] = min(kwargs['bitrate'], int(8e5 / interval)) # low information rate (long interval) might make it impossible to achieve a higher bitrate ight not
if path and isinstance(path, basestring):
path += '.{0}'.format(ext)
if verbosity > 0:
print('Saving video to {0}. . .'.format(path))
ani.save(path, **kwargs)
if close:
plt.close(fig)
return df | Animate a pandas.Panel by flipping through plots of the data in each dataframe
Arguments:
panel (pandas.Panel): Pandas Panel of DataFrames to animate (each DataFrame is an animation video frame)
keys (list of str): ordered list of panel keys (pages) to animate
columns (list of str): ordered list of data series names to include in plot for eath video frame
interval (int): number of milliseconds between video frames
titles (str or list of str): titles to place in plot on each data frame.
default = `keys` so that titles changes with each frame
path (str): path and base file name to save *.mp4 animation video ('' to not save)
kwargs (dict): pass-through kwargs for `animation.FuncAnimation(...).save(path, **kwargs)`
(Not used if `not path`)
TODO:
- Work with other 3-D data formats:
- dict (sorted by key) or OrderedDict
- list of 2-D arrays/lists
- 3-D arrays/lists
- generators of 2-D arrays/lists
- generators of generators of lists/arrays?
- Write json and html5 files for d3 SVG line plots with transitions!
>>> x = np.arange(0, 2*np.pi, 0.05)
>>> panel = pd.Panel(dict((i, pd.DataFrame({
... 'T=10': np.sin(x + i/10.),
... 'T=7': np.sin(x + i/7.),
... 'beat': np.sin(x + i/10.) + np.sin(x + i/7.),
... }, index=x)
... ) for i in range(50)))
>>> animate_panel(panel, interval=200, path='animate_panel_test') # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
Drawing frames for a ".gif" animation...
Saving video to animate_panel_test.gif...
T=10 T=7 beat
0.00 0.000000 0.000000 0.000000
0.05 0.049979 0.049979 0.099958
...
[126 rows x 3 columns] | Below is the instruction that describes the task:
### Input:
Animate a pandas.Panel by flipping through plots of the data in each dataframe
Arguments:
panel (pandas.Panel): Pandas Panel of DataFrames to animate (each DataFrame is an animation video frame)
keys (list of str): ordered list of panel keys (pages) to animate
columns (list of str): ordered list of data series names to include in plot for eath video frame
interval (int): number of milliseconds between video frames
titles (str or list of str): titles to place in plot on each data frame.
default = `keys` so that titles changes with each frame
path (str): path and base file name to save *.mp4 animation video ('' to not save)
kwargs (dict): pass-through kwargs for `animation.FuncAnimation(...).save(path, **kwargs)`
(Not used if `not path`)
TODO:
- Work with other 3-D data formats:
- dict (sorted by key) or OrderedDict
- list of 2-D arrays/lists
- 3-D arrays/lists
- generators of 2-D arrays/lists
- generators of generators of lists/arrays?
- Write json and html5 files for d3 SVG line plots with transitions!
>>> x = np.arange(0, 2*np.pi, 0.05)
>>> panel = pd.Panel(dict((i, pd.DataFrame({
... 'T=10': np.sin(x + i/10.),
... 'T=7': np.sin(x + i/7.),
... 'beat': np.sin(x + i/10.) + np.sin(x + i/7.),
... }, index=x)
... ) for i in range(50)))
>>> animate_panel(panel, interval=200, path='animate_panel_test') # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
Drawing frames for a ".gif" animation...
Saving video to animate_panel_test.gif...
T=10 T=7 beat
0.00 0.000000 0.000000 0.000000
0.05 0.049979 0.049979 0.099958
...
[126 rows x 3 columns]
### Response:
def animate_panel(panel, keys=None, columns=None, interval=1000, blit=False, titles='',
                  path='animate_panel', xlabel='Time', ylabel='Value', ext='gif',
                  replot=False, linewidth=3, close=False, fontsize=24,
                  background_color='white', alpha=1, figsize=(12, 8),
                  xlabel_rotation=-25, plot_kwargs=(('rotation', 30),),
                  verbosity=1, **video_kwargs):
    """Animate a pandas.Panel by flipping through plots of the data in each dataframe

    Arguments:
      panel (pandas.Panel): Pandas Panel of DataFrames to animate (each DataFrame is an animation video frame)
      keys (list of str): ordered list of panel keys (pages) to animate
      columns (list of str): ordered list of data series names to include in plot for each video frame
      interval (int): number of milliseconds between video frames
      titles (str or list of str): titles to place in plot on each data frame.
        default = `keys` so that titles changes with each frame
      path (str): path and base file name to save *.mp4 animation video ('' to not save)
      video_kwargs (dict): pass-through kwargs for `animation.FuncAnimation(...).save(path, **kwargs)`
        (Not used if `not path`)

    TODO:
      - Work with other 3-D data formats:
        - dict (sorted by key) or OrderedDict
        - list of 2-D arrays/lists
        - 3-D arrays/lists
        - generators of 2-D arrays/lists
        - generators of generators of lists/arrays?
      - Write json and html5 files for d3 SVG line plots with transitions!

    >>> x = np.arange(0, 2*np.pi, 0.05)
    >>> panel = pd.Panel(dict((i, pd.DataFrame({
    ...     'T=10': np.sin(x + i/10.),
    ...     'T=7':  np.sin(x + i/7.),
    ...     'beat': np.sin(x + i/10.) + np.sin(x + i/7.),
    ...     }, index=x)
    ...     ) for i in range(50)))
    >>> animate_panel(panel, interval=200, path='animate_panel_test')  # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
    Drawing frames for a ".gif" animation...
    Saving video to animate_panel_test.gif...
              T=10       T=7      beat
    0.00  0.000000  0.000000  0.000000
    0.05  0.049979  0.049979  0.099958
    ...
    [126 rows x 3 columns]
    """
    plot_kwargs = plot_kwargs or {}
    plot_kwargs = dict(plot_kwargs)
    # Per-extension defaults for FuncAnimation.save (writer/codec/dpi/bitrate).
    ext_kwargs = {
        'mp4': {'writer': 'ffmpeg', 'codec': 'mpeg4', 'dpi': 100, 'bitrate': 2000},
        'gif': {'writer': 'imagemagick', 'dpi': 100, 'bitrate': 2000},
        'imagemagic.gif': {'writer': 'imagemagick_gif', 'dpi': 100, 'bitrate': 2000},
    }
    ext = str(ext).lower().strip() or 'gif'
    default_kwargs = ext_kwargs.get(ext, {})
    keys = keys or list(panel.keys())
    if titles:
        titles = listify(titles)
        if len(titles) == 1:
            # A single title is repeated so every frame gets the same one.
            titles *= len(keys)
    else:
        titles = keys
    titles = dict((k, title) for k, title in zip(keys, titles))
    columns = columns or list(panel[keys[0]].columns)
    # Draw the first frame up front so the axes, legend and labels all exist
    # before FuncAnimation starts flipping frames.
    fig, ax = plt.subplots(figsize=figsize)
    fig.patch.set_facecolor(background_color)
    fig.patch.set_alpha(alpha)
    i = 0
    df = panel[keys[i]]
    x = df.index.values
    y = df[columns].values
    lines = ax.plot(x, y)
    ax.grid('on')
    ax.patch.set_facecolor(background_color)
    ax.patch.set_alpha(alpha)
    ax.title.set_text(titles[keys[0]])
    ax.title.set_fontsize(fontsize)
    ax.title.set_fontweight('bold')
    ax.xaxis.label.set_text(xlabel)
    plt.setp(ax.get_xticklabels(), rotation=xlabel_rotation)
    ax.yaxis.label.set_text(ylabel)
    ax.legend(columns)

    def animate(k):
        # Render panel page `k` either by replotting or by mutating line data.
        df = panel[k]
        x = df.index.values
        y = df[columns].values.T
        if replot:
            # plt.cla()
            # fig, ax = plt.subplots(figsize=figsize)
            fig = ax.figure
            fig.patch.set_facecolor(background_color)
            fig.patch.set_alpha(alpha)
            lines = ax.plot(x, y.T, linewidth=linewidth)
            ax.grid('on')
            ax.patch.set_facecolor(background_color)
            ax.patch.set_alpha(alpha)
            ax.title.set_text(titles[k])
            ax.title.set_fontsize(fontsize)
            ax.title.set_fontweight('bold')
            ax.xaxis.label.set_text(xlabel)
            plt.setp(ax.get_xticklabels(), rotation=xlabel_rotation)
            ax.yaxis.label.set_text(ylabel)
            ax.legend(columns)
        else:
            lines = ax.lines
            fig = ax.figure
            for i in range(len(lines)):
                lines[i].set_xdata(x)  # all lines have to share the same x-data
                lines[i].set_ydata(y[i])  # update the data, don't replot a new line
                lines[i].set_linewidth(linewidth)
                lines[i].figure.set_facecolor(background_color)
                lines[i].figure.set_alpha(alpha)
                lines[i].axes.patch.set_facecolor(background_color)
                lines[i].axes.patch.set_alpha(alpha)
            ax.patch.set_facecolor(background_color)
            ax.figure.patch.set_alpha(alpha)
            ax.title.set_text(titles[k])
            ax.title.set_fontsize(fontsize)
            ax.title.set_fontweight('bold')
        if blit:
            return lines

    # FIXME: doesn't work with ext=mp4
    # init_func to mask out pixels to be redrawn/cleared which speeds redrawing of plot
    def mask_lines():
        if verbosity > 0:
            print('Initialing mask_lines. . .')
        df = panel[0]
        x = df.index.values
        y = df[columns].values.T
        for i in range(len(lines)):
            # FIXME: why are x-values used to set the y-data coordinates of the mask?
            lines[i].set_xdata(np.ma.array(x, mask=True))
            lines[i].set_ydata(np.ma.array(y[i], mask=True))
        return lines

    if verbosity > 0:
        print('Drawing frames for a ".{0}" animation{1}. . .'.format(ext, ' with blitting' if blit else ''))
    animate(keys[0])
    ani = animation.FuncAnimation(fig, animate, keys, interval=interval, blit=blit)  # , init_func=mask_lines, blit=True)
    # Merge caller-supplied video kwargs over the per-extension defaults.
    kwargs = dict(default_kwargs)
    for k, v in six.iteritems(default_kwargs):
        kwargs[k] = video_kwargs.get(k, v)
    # if 'bitrate' in kwargs:
    #     kwargs['bitrate'] = min(kwargs['bitrate'], int(8e5 / interval))  # low information rate (long interval) might make it impossible to achieve a higher bitrate ight not
    # BUGFIX: ``basestring`` only exists on Python 2 and raised NameError on
    # Python 3; six.string_types matches the six.iteritems usage above.
    if path and isinstance(path, six.string_types):
        path += '.{0}'.format(ext)
        if verbosity > 0:
            print('Saving video to {0}. . .'.format(path))
        ani.save(path, **kwargs)
    if close:
        plt.close(fig)
    return df
def _import_config(config_file):
"""returns a configuration object
:param string config_file: path to config file
"""
# get config file path
jocker_lgr.debug('config file is: {0}'.format(config_file))
# append to path for importing
try:
jocker_lgr.debug('importing config...')
with open(config_file, 'r') as c:
return yaml.safe_load(c.read())
except IOError as ex:
jocker_lgr.error(str(ex))
raise RuntimeError('cannot access config file')
except yaml.parser.ParserError as ex:
jocker_lgr.error('invalid yaml file: {0}'.format(ex))
raise RuntimeError('invalid yaml file') | returns a configuration object
:param string config_file: path to config file | Below is the instruction that describes the task:
### Input:
returns a configuration object
:param string config_file: path to config file
### Response:
def _import_config(config_file):
    """Load and return the configuration parsed from a YAML file.

    :param string config_file: path to config file
    :raises RuntimeError: if the file cannot be read or is not valid YAML
    """
    # get config file path
    jocker_lgr.debug('config file is: {0}'.format(config_file))
    try:
        jocker_lgr.debug('importing config...')
        with open(config_file, 'r') as c:
            # safe_load accepts the open stream directly; no need to read() first.
            return yaml.safe_load(c)
    except IOError as ex:
        jocker_lgr.error(str(ex))
        raise RuntimeError('cannot access config file')
    # yaml.YAMLError is the common base of ParserError, ScannerError,
    # ComposerError, etc., so every malformed-YAML failure is reported
    # consistently instead of leaking parser-internal exception types
    # (the previous code only caught yaml.parser.ParserError).
    except yaml.YAMLError as ex:
        jocker_lgr.error('invalid yaml file: {0}'.format(ex))
        raise RuntimeError('invalid yaml file')
def put_annotation(self, key, value):
"""
Annotate segment or subsegment with a key-value pair.
Annotations will be indexed for later search query.
:param str key: annotation key
:param object value: annotation value. Any type other than
string/number/bool will be dropped
"""
self._check_ended()
if not isinstance(key, string_types):
log.warning("ignoring non string type annotation key with type %s.", type(key))
return
if not isinstance(value, annotation_value_types):
log.warning("ignoring unsupported annotation value type %s.", type(value))
return
if any(character not in _valid_annotation_key_characters for character in key):
log.warning("ignoring annnotation with unsupported characters in key: '%s'.", key)
return
self.annotations[key] = value | Annotate segment or subsegment with a key-value pair.
Annotations will be indexed for later search query.
:param str key: annotation key
:param object value: annotation value. Any type other than
string/number/bool will be dropped | Below is the instruction that describes the task:
### Input:
Annotate segment or subsegment with a key-value pair.
Annotations will be indexed for later search query.
:param str key: annotation key
:param object value: annotation value. Any type other than
string/number/bool will be dropped
### Response:
def put_annotation(self, key, value):
    """Attach a searchable key-value annotation to this (sub)segment.

    Invalid keys or values are logged and skipped rather than raising,
    so instrumentation never breaks the traced application.

    :param str key: annotation key
    :param object value: annotation value. Any type other than
        string/number/bool will be dropped
    """
    self._check_ended()
    # Guard 1: key must be a string.
    if not isinstance(key, string_types):
        log.warning("ignoring non string type annotation key with type %s.", type(key))
        return
    # Guard 2: value must be one of the supported scalar types.
    if not isinstance(value, annotation_value_types):
        log.warning("ignoring unsupported annotation value type %s.", type(value))
        return
    # Guard 3: every character of the key must be allowed.
    invalid_chars = (ch for ch in key if ch not in _valid_annotation_key_characters)
    if next(invalid_chars, None) is not None:
        log.warning("ignoring annnotation with unsupported characters in key: '%s'.", key)
        return
    self.annotations[key] = value
def standard_output(self, ds, limit, check_name, groups):
"""
Generates the Terminal Output for Standard cases
Returns the dataset needed for the verbose output, as well as the failure flags.
"""
score_list, points, out_of = self.get_points(groups, limit)
issue_count = out_of - points
# Let's add the version number to the check name if it's missing
check_name = self._get_check_versioned_name(check_name)
check_url = self._get_check_url(check_name)
width = 2 * self.col_width
print('\n')
print("-" * width)
print('{:^{width}}'.format("IOOS Compliance Checker Report", width=width))
print('{:^{width}}'.format(check_name, width=width))
print('{:^{width}}'.format(check_url, width=width))
print("-" * width)
if issue_count > 0:
print('{:^{width}}'.format("Corrective Actions", width=width))
plural = '' if issue_count == 1 else 's'
print("{} has {} potential issue{}".format(os.path.basename(ds), issue_count, plural))
return [groups, points, out_of] | Generates the Terminal Output for Standard cases
Returns the dataset needed for the verbose output, as well as the failure flags. | Below is the instruction that describes the task:
### Input:
Generates the Terminal Output for Standard cases
Returns the dataset needed for the verbose output, as well as the failure flags.
### Response:
def standard_output(self, ds, limit, check_name, groups):
    """
    Print the terminal report header for a standard (non-verbose) run.

    Returns the dataset info needed for the verbose output, as well as the
    failure flags: ``[groups, points, out_of]``.
    """
    score_list, points, out_of = self.get_points(groups, limit)
    issue_count = out_of - points
    # Make sure the check name carries its version number before display.
    versioned_name = self._get_check_versioned_name(check_name)
    check_url = self._get_check_url(versioned_name)
    report_width = 2 * self.col_width
    divider = "-" * report_width
    print('\n')
    print(divider)
    for heading in ("IOOS Compliance Checker Report", versioned_name, check_url):
        print('{:^{width}}'.format(heading, width=report_width))
    print(divider)
    if issue_count > 0:
        print('{:^{width}}'.format("Corrective Actions", width=report_width))
        plural = 's' if issue_count != 1 else ''
        print("{} has {} potential issue{}".format(os.path.basename(ds), issue_count, plural))
    return [groups, points, out_of]
def handle_import_error(caught_exc, name):
"""Allow or re-raise an import error.
This is to distinguish between expected and unexpected import errors.
If the module is not found, it simply means the Cython / Fortran speedups
were not built with the package. If the error message is different, e.g.
``... undefined symbol: __curve_intersection_MOD_all_intersections``, then
the import error **should** be raised.
Args:
caught_exc (ImportError): An exception caught when trying to import
a Cython module.
name (str): The name of the module. For example, for the module
``bezier._curve_speedup``, the name is ``"_curve_speedup"``.
Raises:
ImportError: If the error message is different than the basic
"missing module" error message.
"""
for template in TEMPLATES:
expected_msg = template.format(name)
if caught_exc.args == (expected_msg,):
return
raise caught_exc | Allow or re-raise an import error.
This is to distinguish between expected and unexpected import errors.
If the module is not found, it simply means the Cython / Fortran speedups
were not built with the package. If the error message is different, e.g.
``... undefined symbol: __curve_intersection_MOD_all_intersections``, then
the import error **should** be raised.
Args:
caught_exc (ImportError): An exception caught when trying to import
a Cython module.
name (str): The name of the module. For example, for the module
``bezier._curve_speedup``, the name is ``"_curve_speedup"``.
Raises:
ImportError: If the error message is different than the basic
"missing module" error message. | Below is the instruction that describes the task:
### Input:
Allow or re-raise an import error.
This is to distinguish between expected and unexpected import errors.
If the module is not found, it simply means the Cython / Fortran speedups
were not built with the package. If the error message is different, e.g.
``... undefined symbol: __curve_intersection_MOD_all_intersections``, then
the import error **should** be raised.
Args:
caught_exc (ImportError): An exception caught when trying to import
a Cython module.
name (str): The name of the module. For example, for the module
``bezier._curve_speedup``, the name is ``"_curve_speedup"``.
Raises:
ImportError: If the error message is different than the basic
"missing module" error message.
### Response:
def handle_import_error(caught_exc, name):
    """Allow or re-raise an import error.
    This is to distinguish between expected and unexpected import errors.
    If the module is not found, it simply means the Cython / Fortran speedups
    were not built with the package. If the error message is different, e.g.
    ``... undefined symbol: __curve_intersection_MOD_all_intersections``, then
    the import error **should** be raised.
    Args:
        caught_exc (ImportError): An exception caught when trying to import
            a Cython module.
        name (str): The name of the module. For example, for the module
            ``bezier._curve_speedup``, the name is ``"_curve_speedup"``.
    Raises:
        ImportError: If the error message is different than the basic
            "missing module" error message.
    """
    # Swallow the exception only when its message exactly matches one of
    # the known "missing module" templates; anything else is a real error.
    expected_messages = tuple(template.format(name) for template in TEMPLATES)
    if any(caught_exc.args == (msg,) for msg in expected_messages):
        return
    raise caught_exc
def scan_index(content_dir):
""" Scan all files in a content directory """
def scan_directory(root, files):
""" Helper function to scan a single directory """
try:
for file in files:
fullpath = os.path.join(root, file)
relpath = os.path.relpath(fullpath, content_dir)
fingerprint = utils.file_fingerprint(fullpath)
last_fingerprint = get_last_fingerprint(fullpath)
if fingerprint != last_fingerprint and SCHEDULED_FILES.add(fullpath):
scan_file(fullpath, relpath, False)
except: # pylint:disable=bare-except
logger.exception("Got error parsing directory %s", root)
for root, _, files in os.walk(content_dir, followlinks=True):
THREAD_POOL.submit(scan_directory, root, files)
for table in (model.Entry, model.Category, model.Image, model.FileFingerprint):
THREAD_POOL.submit(prune_missing, table) | Scan all files in a content directory | Below is the instruction that describes the task:
### Input:
Scan all files in a content directory
### Response:
def scan_index(content_dir):
    """ Scan all files in a content directory """
    def scan_directory(root, files):
        """ Helper function to scan a single directory """
        try:
            for fname in files:  # renamed from ``file`` to avoid shadowing the builtin
                fullpath = os.path.join(root, fname)
                relpath = os.path.relpath(fullpath, content_dir)
                fingerprint = utils.file_fingerprint(fullpath)
                last_fingerprint = get_last_fingerprint(fullpath)
                # Only reschedule files whose on-disk fingerprint changed,
                # and only if they are not already queued.
                if fingerprint != last_fingerprint and SCHEDULED_FILES.add(fullpath):
                    scan_file(fullpath, relpath, False)
        except Exception:  # pylint:disable=broad-except
            # A bare ``except:`` would also swallow SystemExit and
            # KeyboardInterrupt; catching Exception keeps the best-effort
            # behavior while letting those propagate.
            logger.exception("Got error parsing directory %s", root)
    for root, _, files in os.walk(content_dir, followlinks=True):
        THREAD_POOL.submit(scan_directory, root, files)
    for table in (model.Entry, model.Category, model.Image, model.FileFingerprint):
        THREAD_POOL.submit(prune_missing, table)
THREAD_POOL.submit(prune_missing, table) |
def _internal_add(self, pattern: Pattern, label, renaming) -> int:
"""Add a new pattern to the matcher.
Equivalent patterns are not added again. However, patterns that are structurally equivalent,
but have different constraints or different variable names are distinguished by the matcher.
Args:
pattern: The pattern to add.
Returns:
The internal id for the pattern. This is mainly used by the :class:`CommutativeMatcher`.
"""
pattern_index = len(self.patterns)
renamed_constraints = [c.with_renamed_vars(renaming) for c in pattern.local_constraints]
constraint_indices = [self._add_constraint(c, pattern_index) for c in renamed_constraints]
self.patterns.append((pattern, label, constraint_indices))
self.pattern_vars.append(renaming)
pattern = rename_variables(pattern.expression, renaming)
state = self.root
patterns_stack = [deque([pattern])]
self._process_pattern_stack(state, patterns_stack, renamed_constraints, pattern_index)
return pattern_index | Add a new pattern to the matcher.
Equivalent patterns are not added again. However, patterns that are structurally equivalent,
but have different constraints or different variable names are distinguished by the matcher.
Args:
pattern: The pattern to add.
Returns:
The internal id for the pattern. This is mainly used by the :class:`CommutativeMatcher`. | Below is the instruction that describes the task:
### Input:
Add a new pattern to the matcher.
Equivalent patterns are not added again. However, patterns that are structurally equivalent,
but have different constraints or different variable names are distinguished by the matcher.
Args:
pattern: The pattern to add.
Returns:
The internal id for the pattern. This is mainly used by the :class:`CommutativeMatcher`.
### Response:
def _internal_add(self, pattern: Pattern, label, renaming) -> int:
    """Add a new pattern to the matcher.
    Equivalent patterns are not added again. However, patterns that are structurally equivalent,
    but have different constraints or different variable names are distinguished by the matcher.
    Args:
        pattern: The pattern to add.
    Returns:
        The internal id for the pattern. This is mainly used by the :class:`CommutativeMatcher`.
    """
    # The new pattern's internal id is its position in self.patterns.
    pattern_index = len(self.patterns)
    # Rewrite the pattern's local constraints in terms of the canonical
    # (renamed) variable names before registering them.
    renamed_constraints = [c.with_renamed_vars(renaming) for c in pattern.local_constraints]
    constraint_indices = [self._add_constraint(c, pattern_index) for c in renamed_constraints]
    self.patterns.append((pattern, label, constraint_indices))
    self.pattern_vars.append(renaming)
    # Work with the renamed expression from here on; the original pattern
    # object was preserved in self.patterns above.
    pattern = rename_variables(pattern.expression, renaming)
    state = self.root
    # The matcher automaton is extended by walking a stack of subpattern
    # deques starting from the root state.
    patterns_stack = [deque([pattern])]
    self._process_pattern_stack(state, patterns_stack, renamed_constraints, pattern_index)
    return pattern_index
def indices_within_times(times, start, end):
"""
Return an index array into times that lie within the durations defined by start end arrays
Parameters
----------
times: numpy.ndarray
Array of times
start: numpy.ndarray
Array of duration start times
end: numpy.ndarray
Array of duration end times
Returns
-------
indices: numpy.ndarray
Array of indices into times
"""
# coalesce the start/end segments
start, end = segments_to_start_end(start_end_to_segments(start, end).coalesce())
tsort = times.argsort()
times_sorted = times[tsort]
left = numpy.searchsorted(times_sorted, start)
right = numpy.searchsorted(times_sorted, end)
if len(left) == 0:
return numpy.array([], dtype=numpy.uint32)
return tsort[numpy.hstack(numpy.r_[s:e] for s, e in zip(left, right))] | Return an index array into times that lie within the durations defined by start end arrays
Parameters
----------
times: numpy.ndarray
Array of times
start: numpy.ndarray
Array of duration start times
end: numpy.ndarray
Array of duration end times
Returns
-------
indices: numpy.ndarray
Array of indices into times | Below is the instruction that describes the task:
### Input:
Return an index array into times that lie within the durations defined by start end arrays
Parameters
----------
times: numpy.ndarray
Array of times
start: numpy.ndarray
Array of duration start times
end: numpy.ndarray
Array of duration end times
Returns
-------
indices: numpy.ndarray
Array of indices into times
### Response:
def indices_within_times(times, start, end):
    """
    Return an index array into times that lie within the durations defined by start end arrays

    Parameters
    ----------
    times: numpy.ndarray
        Array of times
    start: numpy.ndarray
        Array of duration start times
    end: numpy.ndarray
        Array of duration end times

    Returns
    -------
    indices: numpy.ndarray
        Array of indices into times
    """
    # coalesce the start/end segments
    start, end = segments_to_start_end(start_end_to_segments(start, end).coalesce())
    # Search against the sorted copy, then map positions back through tsort.
    tsort = times.argsort()
    times_sorted = times[tsort]
    left = numpy.searchsorted(times_sorted, start)
    right = numpy.searchsorted(times_sorted, end)
    if len(left) == 0:
        return numpy.array([], dtype=numpy.uint32)
    # BUGFIX: numpy.hstack requires a sequence of arrays; passing a generator
    # was deprecated in NumPy 1.16 and raises TypeError on current versions,
    # so materialize the per-segment index ranges as a list first.
    spans = [numpy.r_[s:e] for s, e in zip(left, right)]
    return tsort[numpy.hstack(spans)]
def toner_status(self, filter_supported: bool = True) -> Dict[str, Any]:
"""Return the state of all toners cartridges."""
toner_status = {}
for color in self.COLOR_NAMES:
try:
toner_stat = self.data.get(
'{}_{}'.format(SyncThru.TONER, color), {})
if filter_supported and toner_stat.get('opt', 0) == 0:
continue
else:
toner_status[color] = toner_stat
except (KeyError, AttributeError):
toner_status[color] = {}
return toner_status | Return the state of all toners cartridges. | Below is the instruction that describes the task:
### Input:
Return the state of all toners cartridges.
### Response:
def toner_status(self, filter_supported: bool = True) -> Dict[str, Any]:
    """Return the state of all toners cartridges."""
    status_by_color = {}
    for color in self.COLOR_NAMES:
        try:
            stat = self.data.get(
                '{}_{}'.format(SyncThru.TONER, color), {})
            # Cartridges the model does not support report opt == 0 and are
            # omitted when filtering is requested.
            if filter_supported and stat.get('opt', 0) == 0:
                continue
            status_by_color[color] = stat
        except (KeyError, AttributeError):
            # Malformed printer data: fall back to an empty record.
            status_by_color[color] = {}
    return status_by_color
def ids_from_seq_two_step(seq, n, max_iterations, app, core_threshold, \
extra_threshold, lower_threshold, second_db=None):
"""Returns ids that match a seq, using a 2-tiered strategy.
Optionally uses a second database for the second search.
"""
#first time through: reset 'h' and 'e' to core
#-h is the e-value threshold for including seqs in the score matrix model
app.Parameters['-h'].on(core_threshold)
#-e is the e-value threshold for the final blast
app.Parameters['-e'].on(core_threshold)
checkpoints = []
ids = []
last_num_ids = None
for i in range(max_iterations):
if checkpoints:
app.Parameters['-R'].on(checkpoints[-1])
curr_check = 'checkpoint_%s.chk' % i
app.Parameters['-C'].on(curr_check)
output = app(seq)
#if we didn't write a checkpoint, bail out
if not access(curr_check, F_OK):
break
#if we got here, we wrote a checkpoint file
checkpoints.append(curr_check)
result = list(output.get('BlastOut', output['StdOut']))
output.cleanUp()
if result:
ids = LastProteinIds9(result,keep_values=True,filter_identity=False)
num_ids = len(ids)
if num_ids >= n:
break
if num_ids == last_num_ids:
break
last_num_ids = num_ids
#if we didn't write any checkpoints, second run won't work, so return ids
if not checkpoints:
return ids
#if we got too many ids and don't have a second database, return the ids we got
if (not second_db) and num_ids >= n:
return ids
#second time through: reset 'h' and 'e' to get extra hits, and switch the
#database if appropriate
app.Parameters['-h'].on(extra_threshold)
app.Parameters['-e'].on(lower_threshold)
if second_db:
app.Parameters['-d'].on(second_db)
for i in range(max_iterations): #will always have last_check if we get here
app.Parameters['-R'].on(checkpoints[-1])
curr_check = 'checkpoint_b_%s.chk' % i
app.Parameters['-C'].on(curr_check)
output = app(seq)
#bail out if we couldn't write a checkpoint
if not access(curr_check, F_OK):
break
#if we got here, the checkpoint worked
checkpoints.append(curr_check)
result = list(output.get('BlastOut', output['StdOut']))
if result:
ids = LastProteinIds9(result,keep_values=True,filter_identity=False)
num_ids = len(ids)
if num_ids >= n:
break
if num_ids == last_num_ids:
break
last_num_ids = num_ids
#return the ids we got. may not be as many as we wanted.
for c in checkpoints:
remove(c)
return ids | Returns ids that match a seq, using a 2-tiered strategy.
Optionally uses a second database for the second search. | Below is the instruction that describes the task:
### Input:
Returns ids that match a seq, using a 2-tiered strategy.
Optionally uses a second database for the second search.
### Response:
def ids_from_seq_two_step(seq, n, max_iterations, app, core_threshold,
                          extra_threshold, lower_threshold, second_db=None):
    """Returns ids that match a seq, using a 2-tiered strategy.
    Optionally uses a second database for the second search.

    Args:
        seq: query sequence passed to the BLAST-style application controller.
        n: target number of ids; iteration stops once this many are found.
        max_iterations: cap on search rounds for each of the two passes.
        app: application controller; its ``Parameters`` dict is mutated here.
        core_threshold: strict e-value for the first (core) pass.
        extra_threshold: inclusion e-value for the second (extended) pass.
        lower_threshold: final-blast e-value for the second pass.
        second_db: optional database name to switch to for the second pass.

    Returns:
        list of matching ids; may contain fewer than ``n`` entries.
    """
    # first time through: reset 'h' and 'e' to core
    # -h is the e-value threshold for including seqs in the score matrix model
    app.Parameters['-h'].on(core_threshold)
    # -e is the e-value threshold for the final blast
    app.Parameters['-e'].on(core_threshold)
    checkpoints = []
    ids = []
    last_num_ids = None
    # BUGFIX: num_ids must exist even when every round returns an empty
    # result; otherwise the ``num_ids >= n`` guard below raised NameError.
    num_ids = 0
    for i in range(max_iterations):
        if checkpoints:
            app.Parameters['-R'].on(checkpoints[-1])
        curr_check = 'checkpoint_%s.chk' % i
        app.Parameters['-C'].on(curr_check)
        output = app(seq)
        # if we didn't write a checkpoint, bail out
        if not access(curr_check, F_OK):
            break
        # if we got here, we wrote a checkpoint file
        checkpoints.append(curr_check)
        result = list(output.get('BlastOut', output['StdOut']))
        output.cleanUp()
        if result:
            ids = LastProteinIds9(result, keep_values=True, filter_identity=False)
            num_ids = len(ids)
            if num_ids >= n:
                break
            # stop iterating once the hit count stops growing
            if num_ids == last_num_ids:
                break
            last_num_ids = num_ids
    # if we didn't write any checkpoints, second run won't work, so return ids
    if not checkpoints:
        return ids
    # if we got enough ids and don't have a second database, return the ids we got
    if (not second_db) and num_ids >= n:
        return ids
    # second time through: reset 'h' and 'e' to get extra hits, and switch the
    # database if appropriate
    app.Parameters['-h'].on(extra_threshold)
    app.Parameters['-e'].on(lower_threshold)
    if second_db:
        app.Parameters['-d'].on(second_db)
    for i in range(max_iterations):  # will always have a checkpoint if we get here
        app.Parameters['-R'].on(checkpoints[-1])
        curr_check = 'checkpoint_b_%s.chk' % i
        app.Parameters['-C'].on(curr_check)
        output = app(seq)
        # bail out if we couldn't write a checkpoint
        if not access(curr_check, F_OK):
            break
        # if we got here, the checkpoint worked
        checkpoints.append(curr_check)
        result = list(output.get('BlastOut', output['StdOut']))
        if result:
            ids = LastProteinIds9(result, keep_values=True, filter_identity=False)
            num_ids = len(ids)
            if num_ids >= n:
                break
            if num_ids == last_num_ids:
                break
            last_num_ids = num_ids
    # return the ids we got. may not be as many as we wanted.
    # clean up all checkpoint files written by either pass
    for c in checkpoints:
        remove(c)
    return ids
def at(self, instant):
"""Iterates (in chronological order) over all events that are occuring during `instant`.
Args:
instant (Arrow object)
"""
for event in self:
if event.begin <= instant <= event.end:
yield event | Iterates (in chronological order) over all events that are occuring during `instant`.
Args:
instant (Arrow object) | Below is the instruction that describes the task:
### Input:
Iterates (in chronological order) over all events that are occuring during `instant`.
Args:
instant (Arrow object)
### Response:
def at(self, instant):
    """Iterates (in chronological order) over all events that are occuring during `instant`.
    Args:
        instant (Arrow object)
    """
    for event in self:
        # Skip events whose [begin, end] span does not contain the instant.
        if event.begin > instant or event.end < instant:
            continue
        yield event
def _get_date_type(date):
"""
Returns the type of a date.
:param str|datetime.date date: The date.
:rtype: str
"""
if isinstance(date, str):
return 'str'
if isinstance(date, datetime.date):
return 'date'
if isinstance(date, int):
return 'int'
raise ValueError('Unexpected type {0!s}'.format(date.__class__)) | Returns the type of a date.
:param str|datetime.date date: The date.
    :rtype: str | Below is the instruction that describes the task:
### Input:
Returns the type of a date.
:param str|datetime.date date: The date.
:rtype: str
### Response:
def _get_date_type(date):
"""
Returns the type of a date.
:param str|datetime.date date: The date.
:rtype: str
"""
if isinstance(date, str):
return 'str'
if isinstance(date, datetime.date):
return 'date'
if isinstance(date, int):
return 'int'
raise ValueError('Unexpected type {0!s}'.format(date.__class__)) |
def update_m(data, old_M, old_W, selected_genes, disp=False, inner_max_iters=100, parallel=True, threads=4, write_progress_file=None, tol=0.0, regularization=0.0, **kwargs):
"""
This returns a new M matrix that contains all genes, given an M that was
created from running state estimation with a subset of genes.
Args:
data (sparse matrix or dense array): data matrix of shape (genes, cells), containing all genes
old_M (array): shape is (selected_genes, k)
old_W (array): shape is (k, cells)
selected_genes (list): list of selected gene indices
Rest of the args are as in poisson_estimate_state
Returns:
new_M: array of shape (all_genes, k)
"""
genes, cells = data.shape
k = old_M.shape[1]
non_selected_genes = [x for x in range(genes) if x not in set(selected_genes)]
# 1. initialize new M
new_M = np.zeros((genes, k))
new_M[selected_genes, :] = old_M
# TODO: how to initialize rest of genes?
# data*w?
if disp:
print('computing initial guess for M by data*W.T')
new_M_non_selected = data[non_selected_genes, :] * sparse.csc_matrix(old_W.T)
new_M[non_selected_genes, :] = new_M_non_selected.toarray()
X = data.astype(float)
XT = X.T
is_sparse = False
if sparse.issparse(X):
is_sparse = True
update_fn = sparse_nolips_update_w
# convert to csc
X = sparse.csc_matrix(X)
XT = sparse.csc_matrix(XT)
if parallel:
update_fn = parallel_sparse_nolips_update_w
Xsum = np.asarray(X.sum(0)).flatten()
Xsum_m = np.asarray(X.sum(1)).flatten()
# L-BFGS-B won't work right now for sparse matrices
method = 'NoLips'
objective_fn = _call_sparse_obj
else:
objective_fn = objective
update_fn = nolips_update_w
Xsum = X.sum(0)
Xsum_m = X.sum(1)
# If method is NoLips, converting to a sparse matrix
# will always improve the performance (?) and never lower accuracy...
# will almost always improve performance?
# if sparsity is below 40%?
if method == 'NoLips':
is_sparse = True
X = sparse.csc_matrix(X)
XT = sparse.csc_matrix(XT)
update_fn = sparse_nolips_update_w
if parallel:
update_fn = parallel_sparse_nolips_update_w
objective_fn = _call_sparse_obj
if disp:
print('starting estimating M')
new_M = _estimate_w(XT, new_M.T, old_W.T, Xsum_m, update_fn, objective_fn, is_sparse, parallel, threads, method, tol, disp, inner_max_iters, 'M', regularization)
if write_progress_file is not None:
progress = open(write_progress_file, 'w')
progress.write('0')
progress.close()
return new_M.T | This returns a new M matrix that contains all genes, given an M that was
created from running state estimation with a subset of genes.
Args:
data (sparse matrix or dense array): data matrix of shape (genes, cells), containing all genes
old_M (array): shape is (selected_genes, k)
old_W (array): shape is (k, cells)
selected_genes (list): list of selected gene indices
Rest of the args are as in poisson_estimate_state
Returns:
        new_M: array of shape (all_genes, k) | Below is the instruction that describes the task:
### Input:
This returns a new M matrix that contains all genes, given an M that was
created from running state estimation with a subset of genes.
Args:
data (sparse matrix or dense array): data matrix of shape (genes, cells), containing all genes
old_M (array): shape is (selected_genes, k)
old_W (array): shape is (k, cells)
selected_genes (list): list of selected gene indices
Rest of the args are as in poisson_estimate_state
Returns:
new_M: array of shape (all_genes, k)
### Response:
def update_m(data, old_M, old_W, selected_genes, disp=False, inner_max_iters=100, parallel=True, threads=4, write_progress_file=None, tol=0.0, regularization=0.0, **kwargs):
"""
This returns a new M matrix that contains all genes, given an M that was
created from running state estimation with a subset of genes.
Args:
data (sparse matrix or dense array): data matrix of shape (genes, cells), containing all genes
old_M (array): shape is (selected_genes, k)
old_W (array): shape is (k, cells)
selected_genes (list): list of selected gene indices
Rest of the args are as in poisson_estimate_state
Returns:
new_M: array of shape (all_genes, k)
"""
genes, cells = data.shape
k = old_M.shape[1]
non_selected_genes = [x for x in range(genes) if x not in set(selected_genes)]
# 1. initialize new M
new_M = np.zeros((genes, k))
new_M[selected_genes, :] = old_M
# TODO: how to initialize rest of genes?
# data*w?
if disp:
print('computing initial guess for M by data*W.T')
new_M_non_selected = data[non_selected_genes, :] * sparse.csc_matrix(old_W.T)
new_M[non_selected_genes, :] = new_M_non_selected.toarray()
X = data.astype(float)
XT = X.T
is_sparse = False
if sparse.issparse(X):
is_sparse = True
update_fn = sparse_nolips_update_w
# convert to csc
X = sparse.csc_matrix(X)
XT = sparse.csc_matrix(XT)
if parallel:
update_fn = parallel_sparse_nolips_update_w
Xsum = np.asarray(X.sum(0)).flatten()
Xsum_m = np.asarray(X.sum(1)).flatten()
# L-BFGS-B won't work right now for sparse matrices
method = 'NoLips'
objective_fn = _call_sparse_obj
else:
objective_fn = objective
update_fn = nolips_update_w
Xsum = X.sum(0)
Xsum_m = X.sum(1)
# If method is NoLips, converting to a sparse matrix
# will always improve the performance (?) and never lower accuracy...
# will almost always improve performance?
# if sparsity is below 40%?
if method == 'NoLips':
is_sparse = True
X = sparse.csc_matrix(X)
XT = sparse.csc_matrix(XT)
update_fn = sparse_nolips_update_w
if parallel:
update_fn = parallel_sparse_nolips_update_w
objective_fn = _call_sparse_obj
if disp:
print('starting estimating M')
new_M = _estimate_w(XT, new_M.T, old_W.T, Xsum_m, update_fn, objective_fn, is_sparse, parallel, threads, method, tol, disp, inner_max_iters, 'M', regularization)
if write_progress_file is not None:
progress = open(write_progress_file, 'w')
progress.write('0')
progress.close()
return new_M.T |
def contact_number(self):
"""
This method returns the contact phone number.
:return:
"""
try:
number = self._ad_page_content.find(
'button', {'class': 'phone-number'})
return (base64.b64decode(number.attrs['data-p'])).decode('ascii')
except Exception as e:
if self._debug:
logging.error(
"Error getting contact_number. Error message: " + e.args[0])
return 'N/A' | This method returns the contact phone number.
        :return: | Below is the instruction that describes the task:
### Input:
This method returns the contact phone number.
:return:
### Response:
def contact_number(self):
"""
This method returns the contact phone number.
:return:
"""
try:
number = self._ad_page_content.find(
'button', {'class': 'phone-number'})
return (base64.b64decode(number.attrs['data-p'])).decode('ascii')
except Exception as e:
if self._debug:
logging.error(
"Error getting contact_number. Error message: " + e.args[0])
return 'N/A' |
def flux_producers(F, rtol=1e-05, atol=1e-12):
r"""Return indexes of states that are net flux producers.
Parameters
----------
F : (n, n) ndarray
Matrix of flux values between pairs of states.
rtol : float
relative tolerance. fulfilled if max(outflux-influx, 0) / max(outflux,influx) < rtol
atol : float
absolute tolerance. fulfilled if max(outflux-influx, 0) < atol
Returns
-------
producers : (n) ndarray of int
indexes of states that are net flux producers. May include "dirty" producers, i.e.
states that have influx but still produce more outflux and thereby violate flux
conservation.
"""
n = F.shape[0]
influxes = np.array(np.sum(F, axis=0)).flatten() # all that flows in
outfluxes = np.array(np.sum(F, axis=1)).flatten() # all that flows out
# net out flux absolute
prod_abs = np.maximum(outfluxes - influxes, np.zeros(n))
# net out flux relative
prod_rel = prod_abs / (np.maximum(outfluxes, influxes))
# return all indexes that are produces in terms of absolute and relative tolerance
return list(np.where((prod_abs > atol) * (prod_rel > rtol))[0]) | r"""Return indexes of states that are net flux producers.
Parameters
----------
F : (n, n) ndarray
Matrix of flux values between pairs of states.
rtol : float
relative tolerance. fulfilled if max(outflux-influx, 0) / max(outflux,influx) < rtol
atol : float
absolute tolerance. fulfilled if max(outflux-influx, 0) < atol
Returns
-------
producers : (n) ndarray of int
indexes of states that are net flux producers. May include "dirty" producers, i.e.
states that have influx but still produce more outflux and thereby violate flux
        conservation. | Below is the instruction that describes the task:
### Input:
r"""Return indexes of states that are net flux producers.
Parameters
----------
F : (n, n) ndarray
Matrix of flux values between pairs of states.
rtol : float
relative tolerance. fulfilled if max(outflux-influx, 0) / max(outflux,influx) < rtol
atol : float
absolute tolerance. fulfilled if max(outflux-influx, 0) < atol
Returns
-------
producers : (n) ndarray of int
indexes of states that are net flux producers. May include "dirty" producers, i.e.
states that have influx but still produce more outflux and thereby violate flux
conservation.
### Response:
def flux_producers(F, rtol=1e-05, atol=1e-12):
r"""Return indexes of states that are net flux producers.
Parameters
----------
F : (n, n) ndarray
Matrix of flux values between pairs of states.
rtol : float
relative tolerance. fulfilled if max(outflux-influx, 0) / max(outflux,influx) < rtol
atol : float
absolute tolerance. fulfilled if max(outflux-influx, 0) < atol
Returns
-------
producers : (n) ndarray of int
indexes of states that are net flux producers. May include "dirty" producers, i.e.
states that have influx but still produce more outflux and thereby violate flux
conservation.
"""
n = F.shape[0]
influxes = np.array(np.sum(F, axis=0)).flatten() # all that flows in
outfluxes = np.array(np.sum(F, axis=1)).flatten() # all that flows out
# net out flux absolute
prod_abs = np.maximum(outfluxes - influxes, np.zeros(n))
# net out flux relative
prod_rel = prod_abs / (np.maximum(outfluxes, influxes))
# return all indexes that are produces in terms of absolute and relative tolerance
return list(np.where((prod_abs > atol) * (prod_rel > rtol))[0]) |
def dvds_current_releases(self, **kwargs):
"""Gets the upcoming movies from the API.
Args:
page_limit (optional): number of movies to show per page, default=16
page (optional): results page number, default=1
country (optional): localized data for selected country, default="us"
Returns:
A dict respresentation of the JSON returned from the API.
"""
path = self._get_path('dvds_current_releases')
response = self._GET(path, kwargs)
self._set_attrs_to_values(response)
return response | Gets the upcoming movies from the API.
Args:
page_limit (optional): number of movies to show per page, default=16
page (optional): results page number, default=1
country (optional): localized data for selected country, default="us"
Returns:
        A dict respresentation of the JSON returned from the API. | Below is the instruction that describes the task:
### Input:
Gets the upcoming movies from the API.
Args:
page_limit (optional): number of movies to show per page, default=16
page (optional): results page number, default=1
country (optional): localized data for selected country, default="us"
Returns:
A dict respresentation of the JSON returned from the API.
### Response:
def dvds_current_releases(self, **kwargs):
"""Gets the upcoming movies from the API.
Args:
page_limit (optional): number of movies to show per page, default=16
page (optional): results page number, default=1
country (optional): localized data for selected country, default="us"
Returns:
A dict respresentation of the JSON returned from the API.
"""
path = self._get_path('dvds_current_releases')
response = self._GET(path, kwargs)
self._set_attrs_to_values(response)
return response |
def parquet_to_df(filename, use_threads=1):
"""parquet_to_df: Reads a Parquet file into a Pandas DataFrame
Args:
filename (string): The full path to the filename for the Parquet file
ntreads (int): The number of threads to use (defaults to 1)
"""
try:
return pq.read_table(filename, use_threads=use_threads).to_pandas()
except pa.lib.ArrowIOError:
print('Could not read parquet file {:s}'.format(filename))
return None | parquet_to_df: Reads a Parquet file into a Pandas DataFrame
Args:
filename (string): The full path to the filename for the Parquet file
        ntreads (int): The number of threads to use (defaults to 1) | Below is the instruction that describes the task:
### Input:
parquet_to_df: Reads a Parquet file into a Pandas DataFrame
Args:
filename (string): The full path to the filename for the Parquet file
ntreads (int): The number of threads to use (defaults to 1)
### Response:
def parquet_to_df(filename, use_threads=1):
"""parquet_to_df: Reads a Parquet file into a Pandas DataFrame
Args:
filename (string): The full path to the filename for the Parquet file
ntreads (int): The number of threads to use (defaults to 1)
"""
try:
return pq.read_table(filename, use_threads=use_threads).to_pandas()
except pa.lib.ArrowIOError:
print('Could not read parquet file {:s}'.format(filename))
return None |
def export_image(self, filename='refcycle.png', format=None,
dot_executable='dot'):
"""
Export graph as an image.
This requires that Graphviz is installed and that the ``dot``
executable is in your path.
The *filename* argument specifies the output filename.
The *format* argument lets you specify the output format. It may be
any format that ``dot`` understands, including extended format
specifications like ``png:cairo``. If omitted, the filename extension
will be used; if no filename extension is present, ``png`` will be
used.
The *dot_executable* argument lets you provide a full path to the
``dot`` executable if necessary.
"""
# Figure out what output format to use.
if format is None:
_, extension = os.path.splitext(filename)
if extension.startswith('.') and len(extension) > 1:
format = extension[1:]
else:
format = 'png'
# Convert to 'dot' format.
dot_graph = self.to_dot()
# We'll send the graph directly to the process stdin.
cmd = [
dot_executable,
'-T{}'.format(format),
'-o{}'.format(filename),
]
dot = subprocess.Popen(cmd, stdin=subprocess.PIPE)
dot.communicate(dot_graph.encode('utf-8')) | Export graph as an image.
This requires that Graphviz is installed and that the ``dot``
executable is in your path.
The *filename* argument specifies the output filename.
The *format* argument lets you specify the output format. It may be
any format that ``dot`` understands, including extended format
specifications like ``png:cairo``. If omitted, the filename extension
will be used; if no filename extension is present, ``png`` will be
used.
The *dot_executable* argument lets you provide a full path to the
    ``dot`` executable if necessary. | Below is the instruction that describes the task:
### Input:
Export graph as an image.
This requires that Graphviz is installed and that the ``dot``
executable is in your path.
The *filename* argument specifies the output filename.
The *format* argument lets you specify the output format. It may be
any format that ``dot`` understands, including extended format
specifications like ``png:cairo``. If omitted, the filename extension
will be used; if no filename extension is present, ``png`` will be
used.
The *dot_executable* argument lets you provide a full path to the
``dot`` executable if necessary.
### Response:
def export_image(self, filename='refcycle.png', format=None,
dot_executable='dot'):
"""
Export graph as an image.
This requires that Graphviz is installed and that the ``dot``
executable is in your path.
The *filename* argument specifies the output filename.
The *format* argument lets you specify the output format. It may be
any format that ``dot`` understands, including extended format
specifications like ``png:cairo``. If omitted, the filename extension
will be used; if no filename extension is present, ``png`` will be
used.
The *dot_executable* argument lets you provide a full path to the
``dot`` executable if necessary.
"""
# Figure out what output format to use.
if format is None:
_, extension = os.path.splitext(filename)
if extension.startswith('.') and len(extension) > 1:
format = extension[1:]
else:
format = 'png'
# Convert to 'dot' format.
dot_graph = self.to_dot()
# We'll send the graph directly to the process stdin.
cmd = [
dot_executable,
'-T{}'.format(format),
'-o{}'.format(filename),
]
dot = subprocess.Popen(cmd, stdin=subprocess.PIPE)
dot.communicate(dot_graph.encode('utf-8')) |
def to_array_list(df, length=None, by_id=True):
"""Converts a dataframe to a list of arrays, with one array for every unique index entry.
Index is assumed to be 0-based contiguous. If there is a missing index entry, an empty
numpy array is returned for it.
Elements in the arrays are sorted by their id.
:param df:
:param length:
:return:
"""
if by_id:
assert 'id' in df.columns
# if `id` is the only column, don't sort it (and don't remove it)
if len(df.columns) == 1:
by_id = False
idx = df.index.unique()
if length is None:
length = max(idx) + 1
l = [np.empty(0) for _ in xrange(length)]
for i in idx:
a = df.loc[i]
if by_id:
if isinstance(a, pd.Series):
a = a[1:]
else:
a = a.copy().set_index('id').sort_index()
l[i] = a.values.reshape((-1, a.shape[-1]))
return np.asarray(l) | Converts a dataframe to a list of arrays, with one array for every unique index entry.
Index is assumed to be 0-based contiguous. If there is a missing index entry, an empty
numpy array is returned for it.
Elements in the arrays are sorted by their id.
:param df:
:param length:
    :return: | Below is the instruction that describes the task:
### Input:
Converts a dataframe to a list of arrays, with one array for every unique index entry.
Index is assumed to be 0-based contiguous. If there is a missing index entry, an empty
numpy array is returned for it.
Elements in the arrays are sorted by their id.
:param df:
:param length:
:return:
### Response:
def to_array_list(df, length=None, by_id=True):
"""Converts a dataframe to a list of arrays, with one array for every unique index entry.
Index is assumed to be 0-based contiguous. If there is a missing index entry, an empty
numpy array is returned for it.
Elements in the arrays are sorted by their id.
:param df:
:param length:
:return:
"""
if by_id:
assert 'id' in df.columns
# if `id` is the only column, don't sort it (and don't remove it)
if len(df.columns) == 1:
by_id = False
idx = df.index.unique()
if length is None:
length = max(idx) + 1
l = [np.empty(0) for _ in xrange(length)]
for i in idx:
a = df.loc[i]
if by_id:
if isinstance(a, pd.Series):
a = a[1:]
else:
a = a.copy().set_index('id').sort_index()
l[i] = a.values.reshape((-1, a.shape[-1]))
return np.asarray(l) |
def averaging(grid, numGrid, numPix):
"""
resize 2d pixel grid with numGrid to numPix and averages over the pixels
:param grid: higher resolution pixel grid
:param numGrid: number of pixels per axis in the high resolution input image
:param numPix: lower number of pixels per axis in the output image (numGrid/numPix is integer number)
:return:
"""
Nbig = numGrid
Nsmall = numPix
small = grid.reshape([int(Nsmall), int(Nbig/Nsmall), int(Nsmall), int(Nbig/Nsmall)]).mean(3).mean(1)
return small | resize 2d pixel grid with numGrid to numPix and averages over the pixels
:param grid: higher resolution pixel grid
:param numGrid: number of pixels per axis in the high resolution input image
:param numPix: lower number of pixels per axis in the output image (numGrid/numPix is integer number)
    :return: | Below is the instruction that describes the task:
### Input:
resize 2d pixel grid with numGrid to numPix and averages over the pixels
:param grid: higher resolution pixel grid
:param numGrid: number of pixels per axis in the high resolution input image
:param numPix: lower number of pixels per axis in the output image (numGrid/numPix is integer number)
:return:
### Response:
def averaging(grid, numGrid, numPix):
"""
resize 2d pixel grid with numGrid to numPix and averages over the pixels
:param grid: higher resolution pixel grid
:param numGrid: number of pixels per axis in the high resolution input image
:param numPix: lower number of pixels per axis in the output image (numGrid/numPix is integer number)
:return:
"""
Nbig = numGrid
Nsmall = numPix
small = grid.reshape([int(Nsmall), int(Nbig/Nsmall), int(Nsmall), int(Nbig/Nsmall)]).mean(3).mean(1)
return small |
def pref(preference, field=None, verbose_name=None, help_text='', static=True, readonly=False):
"""Marks a preference.
:param preference: Preference variable.
:param Field field: Django model field to represent this preference.
:param str|unicode verbose_name: Field verbose name.
:param str|unicode help_text: Field help text.
:param bool static: Leave this preference static (do not store in DB).
:param bool readonly: Make this field read only.
:rtype: PrefProxy|None
"""
try:
bound = bind_proxy(
(preference,),
field=field,
verbose_name=verbose_name,
help_text=help_text,
static=static,
readonly=readonly,
)
return bound[0]
except IndexError:
return | Marks a preference.
:param preference: Preference variable.
:param Field field: Django model field to represent this preference.
:param str|unicode verbose_name: Field verbose name.
:param str|unicode help_text: Field help text.
:param bool static: Leave this preference static (do not store in DB).
:param bool readonly: Make this field read only.
    :rtype: PrefProxy|None | Below is the instruction that describes the task:
### Input:
Marks a preference.
:param preference: Preference variable.
:param Field field: Django model field to represent this preference.
:param str|unicode verbose_name: Field verbose name.
:param str|unicode help_text: Field help text.
:param bool static: Leave this preference static (do not store in DB).
:param bool readonly: Make this field read only.
:rtype: PrefProxy|None
### Response:
def pref(preference, field=None, verbose_name=None, help_text='', static=True, readonly=False):
"""Marks a preference.
:param preference: Preference variable.
:param Field field: Django model field to represent this preference.
:param str|unicode verbose_name: Field verbose name.
:param str|unicode help_text: Field help text.
:param bool static: Leave this preference static (do not store in DB).
:param bool readonly: Make this field read only.
:rtype: PrefProxy|None
"""
try:
bound = bind_proxy(
(preference,),
field=field,
verbose_name=verbose_name,
help_text=help_text,
static=static,
readonly=readonly,
)
return bound[0]
except IndexError:
return |
def adopt_module_key_flags(module, flag_values=_flagvalues.FLAGS):
"""Declares that all flags key to a module are key to the current module.
Args:
module: module, the module object from which all key flags will be declared
as key flags to the current module.
flag_values: FlagValues, the FlagValues instance in which the flags will
be declared as key flags. This should almost never need to be
overridden.
Raises:
Error: Raised when given an argument that is a module name (a string),
instead of a module object.
"""
if not isinstance(module, types.ModuleType):
raise _exceptions.Error('Expected a module object, not %r.' % (module,))
_internal_declare_key_flags(
[f.name for f in flag_values.get_key_flags_for_module(module.__name__)],
flag_values=flag_values)
# If module is this flag module, take _helpers.SPECIAL_FLAGS into account.
if module == _helpers.FLAGS_MODULE:
_internal_declare_key_flags(
# As we associate flags with get_calling_module_object_and_name(), the
# special flags defined in this module are incorrectly registered with
# a different module. So, we can't use get_key_flags_for_module.
# Instead, we take all flags from _helpers.SPECIAL_FLAGS (a private
# FlagValues, where no other module should register flags).
[_helpers.SPECIAL_FLAGS[name].name for name in _helpers.SPECIAL_FLAGS],
flag_values=_helpers.SPECIAL_FLAGS,
key_flag_values=flag_values) | Declares that all flags key to a module are key to the current module.
Args:
module: module, the module object from which all key flags will be declared
as key flags to the current module.
flag_values: FlagValues, the FlagValues instance in which the flags will
be declared as key flags. This should almost never need to be
overridden.
Raises:
Error: Raised when given an argument that is a module name (a string),
    instead of a module object. | Below is the instruction that describes the task:
### Input:
Declares that all flags key to a module are key to the current module.
Args:
module: module, the module object from which all key flags will be declared
as key flags to the current module.
flag_values: FlagValues, the FlagValues instance in which the flags will
be declared as key flags. This should almost never need to be
overridden.
Raises:
Error: Raised when given an argument that is a module name (a string),
instead of a module object.
### Response:
def adopt_module_key_flags(module, flag_values=_flagvalues.FLAGS):
"""Declares that all flags key to a module are key to the current module.
Args:
module: module, the module object from which all key flags will be declared
as key flags to the current module.
flag_values: FlagValues, the FlagValues instance in which the flags will
be declared as key flags. This should almost never need to be
overridden.
Raises:
Error: Raised when given an argument that is a module name (a string),
instead of a module object.
"""
if not isinstance(module, types.ModuleType):
raise _exceptions.Error('Expected a module object, not %r.' % (module,))
_internal_declare_key_flags(
[f.name for f in flag_values.get_key_flags_for_module(module.__name__)],
flag_values=flag_values)
# If module is this flag module, take _helpers.SPECIAL_FLAGS into account.
if module == _helpers.FLAGS_MODULE:
_internal_declare_key_flags(
# As we associate flags with get_calling_module_object_and_name(), the
# special flags defined in this module are incorrectly registered with
# a different module. So, we can't use get_key_flags_for_module.
# Instead, we take all flags from _helpers.SPECIAL_FLAGS (a private
# FlagValues, where no other module should register flags).
[_helpers.SPECIAL_FLAGS[name].name for name in _helpers.SPECIAL_FLAGS],
flag_values=_helpers.SPECIAL_FLAGS,
key_flag_values=flag_values) |
def get_edge_mark(ttree):
""" makes a simple Graph Mark object"""
## tree style
if ttree._kwargs["tree_style"] in ["c", "cladogram"]:
a=ttree.edges
vcoordinates=ttree.verts
else:
a=ttree._lines
vcoordinates=ttree._coords
## fixed args
along='x'
vmarker='o'
vcolor=None
vlshow=False
vsize=0.
estyle=ttree._kwargs["edge_style"]
## get axes
layout = toyplot.layout.graph(a, vcoordinates=vcoordinates)
along = toyplot.require.value_in(along, ["x", "y"])
if along == "x":
coordinate_axes = ["x", "y"]
elif along == "y":
coordinate_axes = ["y", "x"]
## broadcast args along axes
vlabel = layout.vids
vmarker = toyplot.broadcast.pyobject(vmarker, layout.vcount)
vsize = toyplot.broadcast.scalar(vsize, layout.vcount)
estyle = toyplot.style.require(estyle, allowed=toyplot.style.allowed.line)
## fixed args
vcolor = toyplot.color.broadcast(colors=None, shape=layout.vcount, default=toyplot.color.black)
vopacity = toyplot.broadcast.scalar(1.0, layout.vcount)
vtitle = toyplot.broadcast.pyobject(None, layout.vcount)
vstyle = None
vlstyle = None
## this could be modified in the future to allow diff color edges
ecolor = toyplot.color.broadcast(colors=None, shape=layout.ecount, default=toyplot.color.black)
ewidth = toyplot.broadcast.scalar(1.0, layout.ecount)
eopacity = toyplot.broadcast.scalar(1.0, layout.ecount)
hmarker = toyplot.broadcast.pyobject(None, layout.ecount)
mmarker = toyplot.broadcast.pyobject(None, layout.ecount)
mposition = toyplot.broadcast.scalar(0.5, layout.ecount)
tmarker = toyplot.broadcast.pyobject(None, layout.ecount)
## tables are required if I don't want to edit the class
vtable = toyplot.data.Table()
vtable["id"] = layout.vids
for axis, coordinates in zip(coordinate_axes, layout.vcoordinates.T):
vtable[axis] = coordinates
#_mark_exportable(vtable, axis)
vtable["label"] = vlabel
vtable["marker"] = vmarker
vtable["size"] = vsize
vtable["color"] = vcolor
vtable["opacity"] = vopacity
vtable["title"] = vtitle
etable = toyplot.data.Table()
etable["source"] = layout.edges.T[0]
#_mark_exportable(etable, "source")
etable["target"] = layout.edges.T[1]
#_mark_exportable(etable, "target")
etable["shape"] = layout.eshapes
etable["color"] = ecolor
etable["width"] = ewidth
etable["opacity"] = eopacity
etable["hmarker"] = hmarker
etable["mmarker"] = mmarker
etable["mposition"] = mposition
etable["tmarker"] = tmarker
edge_mark = toyplot.mark.Graph(
coordinate_axes=['x', 'y'],
ecolor=["color"],
ecoordinates=layout.ecoordinates,
efilename=None,
eopacity=["opacity"],
eshape=["shape"],
esource=["source"],
estyle=estyle,
etable=etable,
etarget=["target"],
ewidth=["width"],
hmarker=["hmarker"],
mmarker=["mmarker"],
mposition=["mposition"],
tmarker=["tmarker"],
vcolor=["color"],
vcoordinates=['x', 'y'],
vfilename=None,
vid=["id"],
vlabel=["label"],
vlshow=False,
vlstyle=None,
vmarker=["marker"],
vopacity=["opacity"],
vsize=["size"],
vstyle=None,
vtable=vtable,
vtitle=["title"],
)
    return edge_mark | makes a simple Graph Mark object | Below is the instruction that describes the task:
### Input:
makes a simple Graph Mark object
### Response:
def get_edge_mark(ttree):
""" makes a simple Graph Mark object"""
## tree style
if ttree._kwargs["tree_style"] in ["c", "cladogram"]:
a=ttree.edges
vcoordinates=ttree.verts
else:
a=ttree._lines
vcoordinates=ttree._coords
## fixed args
along='x'
vmarker='o'
vcolor=None
vlshow=False
vsize=0.
estyle=ttree._kwargs["edge_style"]
## get axes
layout = toyplot.layout.graph(a, vcoordinates=vcoordinates)
along = toyplot.require.value_in(along, ["x", "y"])
if along == "x":
coordinate_axes = ["x", "y"]
elif along == "y":
coordinate_axes = ["y", "x"]
## broadcast args along axes
vlabel = layout.vids
vmarker = toyplot.broadcast.pyobject(vmarker, layout.vcount)
vsize = toyplot.broadcast.scalar(vsize, layout.vcount)
estyle = toyplot.style.require(estyle, allowed=toyplot.style.allowed.line)
## fixed args
vcolor = toyplot.color.broadcast(colors=None, shape=layout.vcount, default=toyplot.color.black)
vopacity = toyplot.broadcast.scalar(1.0, layout.vcount)
vtitle = toyplot.broadcast.pyobject(None, layout.vcount)
vstyle = None
vlstyle = None
## this could be modified in the future to allow diff color edges
ecolor = toyplot.color.broadcast(colors=None, shape=layout.ecount, default=toyplot.color.black)
ewidth = toyplot.broadcast.scalar(1.0, layout.ecount)
eopacity = toyplot.broadcast.scalar(1.0, layout.ecount)
hmarker = toyplot.broadcast.pyobject(None, layout.ecount)
mmarker = toyplot.broadcast.pyobject(None, layout.ecount)
mposition = toyplot.broadcast.scalar(0.5, layout.ecount)
tmarker = toyplot.broadcast.pyobject(None, layout.ecount)
## tables are required if I don't want to edit the class
vtable = toyplot.data.Table()
vtable["id"] = layout.vids
for axis, coordinates in zip(coordinate_axes, layout.vcoordinates.T):
vtable[axis] = coordinates
#_mark_exportable(vtable, axis)
vtable["label"] = vlabel
vtable["marker"] = vmarker
vtable["size"] = vsize
vtable["color"] = vcolor
vtable["opacity"] = vopacity
vtable["title"] = vtitle
etable = toyplot.data.Table()
etable["source"] = layout.edges.T[0]
#_mark_exportable(etable, "source")
etable["target"] = layout.edges.T[1]
#_mark_exportable(etable, "target")
etable["shape"] = layout.eshapes
etable["color"] = ecolor
etable["width"] = ewidth
etable["opacity"] = eopacity
etable["hmarker"] = hmarker
etable["mmarker"] = mmarker
etable["mposition"] = mposition
etable["tmarker"] = tmarker
edge_mark = toyplot.mark.Graph(
coordinate_axes=['x', 'y'],
ecolor=["color"],
ecoordinates=layout.ecoordinates,
efilename=None,
eopacity=["opacity"],
eshape=["shape"],
esource=["source"],
estyle=estyle,
etable=etable,
etarget=["target"],
ewidth=["width"],
hmarker=["hmarker"],
mmarker=["mmarker"],
mposition=["mposition"],
tmarker=["tmarker"],
vcolor=["color"],
vcoordinates=['x', 'y'],
vfilename=None,
vid=["id"],
vlabel=["label"],
vlshow=False,
vlstyle=None,
vmarker=["marker"],
vopacity=["opacity"],
vsize=["size"],
vstyle=None,
vtable=vtable,
vtitle=["title"],
)
return edge_mark |
def create_logger(name: str) -> Logger:
"""
Creates a logger with the given name.
:param name: name of the logger (gets prefixed with the package name)
:return: the created logger
"""
logger = logging.getLogger(f"{PACKAGE_NAME}.{name}")
logger.addHandler(StreamHandler())
return logger | Creates a logger with the given name.
:param name: name of the logger (gets prefixed with the package name)
:return: the created logger | Below is the instruction that describes the task:
### Input:
Creates a logger with the given name.
:param name: name of the logger (gets prefixed with the package name)
:return: the created logger
### Response:
def create_logger(name: str) -> Logger:
"""
Creates a logger with the given name.
:param name: name of the logger (gets prefixed with the package name)
:return: the created logger
"""
logger = logging.getLogger(f"{PACKAGE_NAME}.{name}")
logger.addHandler(StreamHandler())
return logger |
def _prepare_servers(self):
"""
Prepare the variables that are exposed to the servers.
Most attributes in the server config are used directly. However, due
to variations in how cloud providers treat regions and availability
zones, this method allows either the ``availability_zone`` or the
``region_name`` to be used as the target availability zone for a
server. If both are specified, then ``availability_zone`` is used. If
``availability_zone`` is not specified in the server config, then the
``region_name`` value is used as the target availability zone.
"""
stack = {
A.NAME: self[A.NAME],
A.VERSION: self[A.VERSION],
}
for server in self.get(R.SERVERS, []):
# default cloud values
if A.PROVIDER in server:
if A.server.LAUNCH_TIMEOUT not in server:
server[A.server.LAUNCH_TIMEOUT] = DEFAULT_LAUNCH_TIMEOUT_S
if A.server.POST_DELAY not in server:
server[A.server.POST_DELAY] = DEFAULT_LAUNCH_TIMEOUT_S
if A.server.AZ not in server:
server[A.server.AZ] = server[A.server.REGION]
# distribute the config scope attributes
svars = {
A.STACK: stack,
A.SERVER_CLASS: server[A.NAME],
}
for scope in server.get(A.server.SCOPES, []):
# allow scopes to be defined inline
if isinstance(scope, collections.Mapping):
svars.update(scope)
else:
svars[scope] = self[scope]
# make all of the launch-time attributes (e.g. disk_image_id,
# launch_timeout_s, ssh_key_name, etc...) available as facts in
# case you need them in a playbook.
sattrs = server.copy()
sattrs.pop(A.server.SCOPES, None)
svars[A.server.BANG_ATTRS] = sattrs
server[A.server.VARS] = svars | Prepare the variables that are exposed to the servers.
Most attributes in the server config are used directly. However, due
to variations in how cloud providers treat regions and availability
zones, this method allows either the ``availability_zone`` or the
``region_name`` to be used as the target availability zone for a
server. If both are specified, then ``availability_zone`` is used. If
``availability_zone`` is not specified in the server config, then the
``region_name`` value is used as the target availability zone. | Below is the instruction that describes the task:
### Input:
Prepare the variables that are exposed to the servers.
Most attributes in the server config are used directly. However, due
to variations in how cloud providers treat regions and availability
zones, this method allows either the ``availability_zone`` or the
``region_name`` to be used as the target availability zone for a
server. If both are specified, then ``availability_zone`` is used. If
``availability_zone`` is not specified in the server config, then the
``region_name`` value is used as the target availability zone.
### Response:
def _prepare_servers(self):
"""
Prepare the variables that are exposed to the servers.
Most attributes in the server config are used directly. However, due
to variations in how cloud providers treat regions and availability
zones, this method allows either the ``availability_zone`` or the
``region_name`` to be used as the target availability zone for a
server. If both are specified, then ``availability_zone`` is used. If
``availability_zone`` is not specified in the server config, then the
``region_name`` value is used as the target availability zone.
"""
stack = {
A.NAME: self[A.NAME],
A.VERSION: self[A.VERSION],
}
for server in self.get(R.SERVERS, []):
# default cloud values
if A.PROVIDER in server:
if A.server.LAUNCH_TIMEOUT not in server:
server[A.server.LAUNCH_TIMEOUT] = DEFAULT_LAUNCH_TIMEOUT_S
if A.server.POST_DELAY not in server:
server[A.server.POST_DELAY] = DEFAULT_LAUNCH_TIMEOUT_S
if A.server.AZ not in server:
server[A.server.AZ] = server[A.server.REGION]
# distribute the config scope attributes
svars = {
A.STACK: stack,
A.SERVER_CLASS: server[A.NAME],
}
for scope in server.get(A.server.SCOPES, []):
# allow scopes to be defined inline
if isinstance(scope, collections.Mapping):
svars.update(scope)
else:
svars[scope] = self[scope]
# make all of the launch-time attributes (e.g. disk_image_id,
# launch_timeout_s, ssh_key_name, etc...) available as facts in
# case you need them in a playbook.
sattrs = server.copy()
sattrs.pop(A.server.SCOPES, None)
svars[A.server.BANG_ATTRS] = sattrs
server[A.server.VARS] = svars |
def root_path():
"""Get the absolute path to the root of the demosys package"""
module_dir = os.path.dirname(globals()['__file__'])
return os.path.dirname(os.path.dirname(module_dir)) | Get the absolute path to the root of the demosys package | Below is the instruction that describes the task:
### Input:
Get the absolute path to the root of the demosys package
### Response:
def root_path():
"""Get the absolute path to the root of the demosys package"""
module_dir = os.path.dirname(globals()['__file__'])
return os.path.dirname(os.path.dirname(module_dir)) |
def _set_system_max(self, v, load=False):
"""
Setter method for system_max, mapped from YANG variable /rbridge_id/system_max (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_system_max is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_system_max() directly.
YANG Description: Configure system-wide maximum values (reload required)'.
This support is obsoleted.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=system_max.system_max, is_container='container', presence=False, yang_name="system-max", rest_name="system-max", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Configure system-wide maximum values', u'hidden': u'full', u'callpoint': u'ArpConfigCallpoint'}}, namespace='urn:brocade.com:mgmt:brocade-arp', defining_module='brocade-arp', yang_type='container', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """system_max must be of a type compatible with container""",
'defined-type': "container",
'generated-type': """YANGDynClass(base=system_max.system_max, is_container='container', presence=False, yang_name="system-max", rest_name="system-max", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Configure system-wide maximum values', u'hidden': u'full', u'callpoint': u'ArpConfigCallpoint'}}, namespace='urn:brocade.com:mgmt:brocade-arp', defining_module='brocade-arp', yang_type='container', is_config=True)""",
})
self.__system_max = t
if hasattr(self, '_set'):
self._set() | Setter method for system_max, mapped from YANG variable /rbridge_id/system_max (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_system_max is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_system_max() directly.
YANG Description: Configure system-wide maximum values (reload required)'.
This support is obsoleted. | Below is the instruction that describes the task:
### Input:
Setter method for system_max, mapped from YANG variable /rbridge_id/system_max (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_system_max is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_system_max() directly.
YANG Description: Configure system-wide maximum values (reload required)'.
This support is obsoleted.
### Response:
def _set_system_max(self, v, load=False):
"""
Setter method for system_max, mapped from YANG variable /rbridge_id/system_max (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_system_max is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_system_max() directly.
YANG Description: Configure system-wide maximum values (reload required)'.
This support is obsoleted.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=system_max.system_max, is_container='container', presence=False, yang_name="system-max", rest_name="system-max", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Configure system-wide maximum values', u'hidden': u'full', u'callpoint': u'ArpConfigCallpoint'}}, namespace='urn:brocade.com:mgmt:brocade-arp', defining_module='brocade-arp', yang_type='container', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """system_max must be of a type compatible with container""",
'defined-type': "container",
'generated-type': """YANGDynClass(base=system_max.system_max, is_container='container', presence=False, yang_name="system-max", rest_name="system-max", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Configure system-wide maximum values', u'hidden': u'full', u'callpoint': u'ArpConfigCallpoint'}}, namespace='urn:brocade.com:mgmt:brocade-arp', defining_module='brocade-arp', yang_type='container', is_config=True)""",
})
self.__system_max = t
if hasattr(self, '_set'):
self._set() |
def bar(self,xdata,ydata,disp=True,**kwargs):
'''Displays a bar graph.
xdata: list of bar graph categories/bins. Can optionally include a header, see testGraph_barAndHist.py in https://github.com/Dfenestrator/GooPyCharts for an example.
ydata: list of values associated with categories in xdata. If xdata includes a header, include a header list on ydata as well.
disp: for displaying plots immediately. Set to True by default. Set to False for other operations, then use show() to display the plot.
**kwargs: Access to other Google Charts API options. The key is the option name, the value is the option's full JS code.
'''
#combine data into proper format
data = combineData(xdata,ydata,self.xlabel)
#Include other options, supplied by **kwargs
other = ''
for option in kwargs:
other += option + ': ' + kwargs[option] + ',\n'
#input argument format to template is in dictionary format (see template for where variables are inserted)
argDict = { 'data':str(data),
'title':self.title,
'functionName':slugify(self.title),
'height':self.height,
'width':self.width,
'logScaleFlag':'false',
'ylabel':self.ylabel,
'plotType':'BarChart',
'numFig':self.numFig,
'other':other}
self.javascript = templateType(xdata) % argDict
if disp:
self.dispFile() | Displays a bar graph.
xdata: list of bar graph categories/bins. Can optionally include a header, see testGraph_barAndHist.py in https://github.com/Dfenestrator/GooPyCharts for an example.
ydata: list of values associated with categories in xdata. If xdata includes a header, include a header list on ydata as well.
disp: for displaying plots immediately. Set to True by default. Set to False for other operations, then use show() to display the plot.
**kwargs: Access to other Google Charts API options. The key is the option name, the value is the option's full JS code. | Below is the instruction that describes the task:
### Input:
Displays a bar graph.
xdata: list of bar graph categories/bins. Can optionally include a header, see testGraph_barAndHist.py in https://github.com/Dfenestrator/GooPyCharts for an example.
ydata: list of values associated with categories in xdata. If xdata includes a header, include a header list on ydata as well.
disp: for displaying plots immediately. Set to True by default. Set to False for other operations, then use show() to display the plot.
**kwargs: Access to other Google Charts API options. The key is the option name, the value is the option's full JS code.
### Response:
def bar(self,xdata,ydata,disp=True,**kwargs):
'''Displays a bar graph.
xdata: list of bar graph categories/bins. Can optionally include a header, see testGraph_barAndHist.py in https://github.com/Dfenestrator/GooPyCharts for an example.
ydata: list of values associated with categories in xdata. If xdata includes a header, include a header list on ydata as well.
disp: for displaying plots immediately. Set to True by default. Set to False for other operations, then use show() to display the plot.
**kwargs: Access to other Google Charts API options. The key is the option name, the value is the option's full JS code.
'''
#combine data into proper format
data = combineData(xdata,ydata,self.xlabel)
#Include other options, supplied by **kwargs
other = ''
for option in kwargs:
other += option + ': ' + kwargs[option] + ',\n'
#input argument format to template is in dictionary format (see template for where variables are inserted)
argDict = { 'data':str(data),
'title':self.title,
'functionName':slugify(self.title),
'height':self.height,
'width':self.width,
'logScaleFlag':'false',
'ylabel':self.ylabel,
'plotType':'BarChart',
'numFig':self.numFig,
'other':other}
self.javascript = templateType(xdata) % argDict
if disp:
self.dispFile() |
def create_space(deployment_name,
space_name,
security_policy='public',
events_retention_days=0,
metrics_retention_days=0,
token_manager=None,
app_url=defaults.APP_URL):
"""
create a space within the deployment specified and with the various
rentention values set
"""
deployment_id = get_deployment_id(deployment_name,
token_manager=token_manager,
app_url=app_url)
payload = {
'name': space_name,
'security_policy': security_policy,
'events_retention_days': events_retention_days,
'metrics_retention_days': metrics_retention_days,
}
headers = token_manager.get_access_token_headers()
deployment_url = environment.get_deployment_url(app_url=app_url)
response = requests.post('%s/api/v1/deployments/%s/spaces' %
(deployment_url, deployment_id),
data=json.dumps(payload),
headers=headers)
if response.status_code == 201:
return response.json()
else:
raise JutException('Error %s: %s' % (response.status_code, response.text)) | create a space within the deployment specified and with the various
rentention values set | Below is the instruction that describes the task:
### Input:
create a space within the deployment specified and with the various
rentention values set
### Response:
def create_space(deployment_name,
space_name,
security_policy='public',
events_retention_days=0,
metrics_retention_days=0,
token_manager=None,
app_url=defaults.APP_URL):
"""
create a space within the deployment specified and with the various
rentention values set
"""
deployment_id = get_deployment_id(deployment_name,
token_manager=token_manager,
app_url=app_url)
payload = {
'name': space_name,
'security_policy': security_policy,
'events_retention_days': events_retention_days,
'metrics_retention_days': metrics_retention_days,
}
headers = token_manager.get_access_token_headers()
deployment_url = environment.get_deployment_url(app_url=app_url)
response = requests.post('%s/api/v1/deployments/%s/spaces' %
(deployment_url, deployment_id),
data=json.dumps(payload),
headers=headers)
if response.status_code == 201:
return response.json()
else:
raise JutException('Error %s: %s' % (response.status_code, response.text)) |
def assist(self, text_query):
"""Send a text request to the Assistant and playback the response.
"""
def iter_assist_requests():
config = embedded_assistant_pb2.AssistConfig(
audio_out_config=embedded_assistant_pb2.AudioOutConfig(
encoding='LINEAR16',
sample_rate_hertz=16000,
volume_percentage=0,
),
dialog_state_in=embedded_assistant_pb2.DialogStateIn(
language_code=self.language_code,
conversation_state=self.conversation_state,
is_new_conversation=self.is_new_conversation,
),
device_config=embedded_assistant_pb2.DeviceConfig(
device_id=self.device_id,
device_model_id=self.device_model_id,
),
text_query=text_query,
)
# Continue current conversation with later requests.
self.is_new_conversation = False
if self.display:
config.screen_out_config.screen_mode = PLAYING
req = embedded_assistant_pb2.AssistRequest(config=config)
assistant_helpers.log_assist_request_without_audio(req)
yield req
text_response = None
html_response = None
for resp in self.assistant.Assist(iter_assist_requests(),
self.deadline):
assistant_helpers.log_assist_response_without_audio(resp)
if resp.screen_out.data:
html_response = resp.screen_out.data
if resp.dialog_state_out.conversation_state:
conversation_state = resp.dialog_state_out.conversation_state
self.conversation_state = conversation_state
if resp.dialog_state_out.supplemental_display_text:
text_response = resp.dialog_state_out.supplemental_display_text
return text_response, html_response | Send a text request to the Assistant and playback the response. | Below is the instruction that describes the task:
### Input:
Send a text request to the Assistant and playback the response.
### Response:
def assist(self, text_query):
"""Send a text request to the Assistant and playback the response.
"""
def iter_assist_requests():
config = embedded_assistant_pb2.AssistConfig(
audio_out_config=embedded_assistant_pb2.AudioOutConfig(
encoding='LINEAR16',
sample_rate_hertz=16000,
volume_percentage=0,
),
dialog_state_in=embedded_assistant_pb2.DialogStateIn(
language_code=self.language_code,
conversation_state=self.conversation_state,
is_new_conversation=self.is_new_conversation,
),
device_config=embedded_assistant_pb2.DeviceConfig(
device_id=self.device_id,
device_model_id=self.device_model_id,
),
text_query=text_query,
)
# Continue current conversation with later requests.
self.is_new_conversation = False
if self.display:
config.screen_out_config.screen_mode = PLAYING
req = embedded_assistant_pb2.AssistRequest(config=config)
assistant_helpers.log_assist_request_without_audio(req)
yield req
text_response = None
html_response = None
for resp in self.assistant.Assist(iter_assist_requests(),
self.deadline):
assistant_helpers.log_assist_response_without_audio(resp)
if resp.screen_out.data:
html_response = resp.screen_out.data
if resp.dialog_state_out.conversation_state:
conversation_state = resp.dialog_state_out.conversation_state
self.conversation_state = conversation_state
if resp.dialog_state_out.supplemental_display_text:
text_response = resp.dialog_state_out.supplemental_display_text
return text_response, html_response |
def add_tokens_for_group(self, with_pass=False):
"""Add the tokens for the group signature"""
kls = self.groups.super_kls
name = self.groups.kls_name
# Reset indentation to beginning and add signature
self.reset_indentation('')
self.result.extend(self.tokens.make_describe(kls, name))
# Add pass if necessary
if with_pass:
self.add_tokens_for_pass()
self.groups.finish_signature() | Add the tokens for the group signature | Below is the instruction that describes the task:
### Input:
Add the tokens for the group signature
### Response:
def add_tokens_for_group(self, with_pass=False):
"""Add the tokens for the group signature"""
kls = self.groups.super_kls
name = self.groups.kls_name
# Reset indentation to beginning and add signature
self.reset_indentation('')
self.result.extend(self.tokens.make_describe(kls, name))
# Add pass if necessary
if with_pass:
self.add_tokens_for_pass()
self.groups.finish_signature() |
def infer_list(values: List[GenericAny]) -> Array:
"""Infer the :class:`~ibis.expr.datatypes.Array` type of `values`."""
if not values:
return Array(null)
return Array(highest_precedence(map(infer, values))) | Infer the :class:`~ibis.expr.datatypes.Array` type of `values`. | Below is the instruction that describes the task:
### Input:
Infer the :class:`~ibis.expr.datatypes.Array` type of `values`.
### Response:
def infer_list(values: List[GenericAny]) -> Array:
"""Infer the :class:`~ibis.expr.datatypes.Array` type of `values`."""
if not values:
return Array(null)
return Array(highest_precedence(map(infer, values))) |
def make_delete_request(url, params, headers, connection):
"""
Helper function that makes an HTTP DELETE request to the given firebase
endpoint. Timeout is 60 seconds.
`url`: The full URL of the firebase endpoint (DSN appended.)
`params`: Python dict that is appended to the URL like a querystring.
`headers`: Python dict. HTTP request headers.
`connection`: Predefined HTTP connection instance. If not given, it
is supplied by the `decorators.http_connection` function.
The returning value is NULL. However, if the status code is not 2x or 403,
an requests.HTTPError is raised.
connection = connection_pool.get_available_connection()
response = make_put_request('http://firebase.localhost/users/1',
{'X_FIREBASE_SOMETHING': 'Hi'}, connection)
response => NULL or {'error': 'Permission denied.'}
"""
timeout = getattr(connection, 'timeout')
response = connection.delete(url, params=params, headers=headers, timeout=timeout)
if response.ok or response.status_code == 403:
return response.json() if response.content else None
else:
response.raise_for_status() | Helper function that makes an HTTP DELETE request to the given firebase
endpoint. Timeout is 60 seconds.
`url`: The full URL of the firebase endpoint (DSN appended.)
`params`: Python dict that is appended to the URL like a querystring.
`headers`: Python dict. HTTP request headers.
`connection`: Predefined HTTP connection instance. If not given, it
is supplied by the `decorators.http_connection` function.
The returning value is NULL. However, if the status code is not 2x or 403,
an requests.HTTPError is raised.
connection = connection_pool.get_available_connection()
response = make_put_request('http://firebase.localhost/users/1',
{'X_FIREBASE_SOMETHING': 'Hi'}, connection)
response => NULL or {'error': 'Permission denied.'} | Below is the instruction that describes the task:
### Input:
Helper function that makes an HTTP DELETE request to the given firebase
endpoint. Timeout is 60 seconds.
`url`: The full URL of the firebase endpoint (DSN appended.)
`params`: Python dict that is appended to the URL like a querystring.
`headers`: Python dict. HTTP request headers.
`connection`: Predefined HTTP connection instance. If not given, it
is supplied by the `decorators.http_connection` function.
The returning value is NULL. However, if the status code is not 2x or 403,
an requests.HTTPError is raised.
connection = connection_pool.get_available_connection()
response = make_put_request('http://firebase.localhost/users/1',
{'X_FIREBASE_SOMETHING': 'Hi'}, connection)
response => NULL or {'error': 'Permission denied.'}
### Response:
def make_delete_request(url, params, headers, connection):
"""
Helper function that makes an HTTP DELETE request to the given firebase
endpoint. Timeout is 60 seconds.
`url`: The full URL of the firebase endpoint (DSN appended.)
`params`: Python dict that is appended to the URL like a querystring.
`headers`: Python dict. HTTP request headers.
`connection`: Predefined HTTP connection instance. If not given, it
is supplied by the `decorators.http_connection` function.
The returning value is NULL. However, if the status code is not 2x or 403,
an requests.HTTPError is raised.
connection = connection_pool.get_available_connection()
response = make_put_request('http://firebase.localhost/users/1',
{'X_FIREBASE_SOMETHING': 'Hi'}, connection)
response => NULL or {'error': 'Permission denied.'}
"""
timeout = getattr(connection, 'timeout')
response = connection.delete(url, params=params, headers=headers, timeout=timeout)
if response.ok or response.status_code == 403:
return response.json() if response.content else None
else:
response.raise_for_status() |
def tokenize(self, config):
"""
Break the config into a series of tokens
"""
tokens = []
reg_ex = re.compile(self.TOKENS[0], re.M | re.I)
for token in re.finditer(reg_ex, config):
value = token.group(0)
if token.group("operator"):
t_type = "operator"
elif token.group("literal"):
t_type = "literal"
elif token.group("newline"):
t_type = "newline"
elif token.group("function"):
t_type = "function"
elif token.group("unknown"):
t_type = "unknown"
else:
continue
tokens.append(
{"type": t_type, "value": value, "match": token, "start": token.start()}
)
self.tokens = tokens | Break the config into a series of tokens | Below is the instruction that describes the task:
### Input:
Break the config into a series of tokens
### Response:
def tokenize(self, config):
"""
Break the config into a series of tokens
"""
tokens = []
reg_ex = re.compile(self.TOKENS[0], re.M | re.I)
for token in re.finditer(reg_ex, config):
value = token.group(0)
if token.group("operator"):
t_type = "operator"
elif token.group("literal"):
t_type = "literal"
elif token.group("newline"):
t_type = "newline"
elif token.group("function"):
t_type = "function"
elif token.group("unknown"):
t_type = "unknown"
else:
continue
tokens.append(
{"type": t_type, "value": value, "match": token, "start": token.start()}
)
self.tokens = tokens |
def _query(
self,
sql,
url,
fmt,
log
):
"""* query*
"""
self.log.info('starting the ``_query`` method')
try:
response = requests.get(
url=url,
params={
"cmd": self._filtercomment(sql),
"format": fmt,
},
headers={
"Cookie": "ASP.NET_SessionId=d0fiwrodvk4rdf21gh3jzr3t; SERVERID=dsa003",
},
)
# print('Response HTTP Status Code: {status_code}'.format(
# status_code=response.status_code))
# print('Response HTTP Response Body: {content}'.format(
# content=response.content))
except requests.exceptions.RequestException:
print('HTTP Request failed')
self.log.info('completed the ``_query`` method')
return response.content | * query* | Below is the instruction that describes the task:
### Input:
* query*
### Response:
def _query(
self,
sql,
url,
fmt,
log
):
"""* query*
"""
self.log.info('starting the ``_query`` method')
try:
response = requests.get(
url=url,
params={
"cmd": self._filtercomment(sql),
"format": fmt,
},
headers={
"Cookie": "ASP.NET_SessionId=d0fiwrodvk4rdf21gh3jzr3t; SERVERID=dsa003",
},
)
# print('Response HTTP Status Code: {status_code}'.format(
# status_code=response.status_code))
# print('Response HTTP Response Body: {content}'.format(
# content=response.content))
except requests.exceptions.RequestException:
print('HTTP Request failed')
self.log.info('completed the ``_query`` method')
return response.content |
def match(cls, field, query, operator=None):
'''
A family of match queries that accept text/numerics/dates, analyzes it, and constructs a query out of it. For example:
{
"match" : {
"message" : "this is a test"
}
}
Note, message is the name of a field, you can subsitute the name of any field (including _all) instead.
'''
instance = cls(match={field: {'query': query}})
if operator is not None:
instance['match'][field]['operator'] = operator
return instance | A family of match queries that accept text/numerics/dates, analyzes it, and constructs a query out of it. For example:
{
"match" : {
"message" : "this is a test"
}
}
Note, message is the name of a field, you can subsitute the name of any field (including _all) instead. | Below is the instruction that describes the task:
### Input:
A family of match queries that accept text/numerics/dates, analyzes it, and constructs a query out of it. For example:
{
"match" : {
"message" : "this is a test"
}
}
Note, message is the name of a field, you can subsitute the name of any field (including _all) instead.
### Response:
def match(cls, field, query, operator=None):
'''
A family of match queries that accept text/numerics/dates, analyzes it, and constructs a query out of it. For example:
{
"match" : {
"message" : "this is a test"
}
}
Note, message is the name of a field, you can subsitute the name of any field (including _all) instead.
'''
instance = cls(match={field: {'query': query}})
if operator is not None:
instance['match'][field]['operator'] = operator
return instance |
def permission_required(*actions, obj=None, raise_exception=False):
"""Permission checking decorator -- works like the
``permission_required`` decorator in the default Django
authentication system, except that it takes a sequence of actions
to check, an object must be supplied, and the user must have
permission to perform all of the actions on the given object for
the permissions test to pass. *Not actually sure how useful this
is going to be: in any case where obj is not None, it's going to
be tricky to get the object into the decorator. Class-based views
are definitely best here...*
"""
def checker(user):
ok = False
if user.is_authenticated() and check_perms(user, actions, [obj]):
ok = True
if raise_exception and not ok:
raise PermissionDenied
else:
return ok
def decorator(view_func):
@wraps(view_func, assigned=available_attrs(view_func))
def _wrapped_view(request, *args, **kwargs):
if checker(request.user):
return view_func(request, *args, **kwargs)
return _wrapped_view
return decorator | Permission checking decorator -- works like the
``permission_required`` decorator in the default Django
authentication system, except that it takes a sequence of actions
to check, an object must be supplied, and the user must have
permission to perform all of the actions on the given object for
the permissions test to pass. *Not actually sure how useful this
is going to be: in any case where obj is not None, it's going to
be tricky to get the object into the decorator. Class-based views
are definitely best here...* | Below is the instruction that describes the task:
### Input:
Permission checking decorator -- works like the
``permission_required`` decorator in the default Django
authentication system, except that it takes a sequence of actions
to check, an object must be supplied, and the user must have
permission to perform all of the actions on the given object for
the permissions test to pass. *Not actually sure how useful this
is going to be: in any case where obj is not None, it's going to
be tricky to get the object into the decorator. Class-based views
are definitely best here...*
### Response:
def permission_required(*actions, obj=None, raise_exception=False):
    """Object-level permission checking decorator.

    Works like the ``permission_required`` decorator in the default
    Django authentication system, except that it takes a sequence of
    actions, an object must be supplied, and the user must have
    permission to perform all of the actions on the given object for
    the check to pass.

    If ``raise_exception`` is true, a failed check raises
    ``PermissionDenied`` instead of silently returning ``None`` from
    the wrapped view.
    """
    def has_access(user):
        # NOTE(review): user.is_authenticated is called — assumes the
        # pre-1.10 Django API where it is a method; confirm version.
        allowed = bool(user.is_authenticated() and
                       check_perms(user, actions, [obj]))
        if raise_exception and not allowed:
            raise PermissionDenied
        return allowed
    def decorator(view_func):
        @wraps(view_func, assigned=available_attrs(view_func))
        def guarded(request, *args, **kwargs):
            # On failure (and raise_exception=False) the view is simply
            # skipped and None is returned, matching the original.
            if has_access(request.user):
                return view_func(request, *args, **kwargs)
        return guarded
    return decorator
def merge_lists(src, new):
"""Update a value list with a list of new or updated values."""
l_min, l_max = (src, new) if len(src) < len(new) else (new, src)
l_min.extend(None for i in range(len(l_min), len(l_max)))
for i, val in enumerate(new):
if isinstance(val, dict) and isinstance(src[i], dict):
new[i] = merge_dicts(src[i], val)
elif isinstance(val, list) and isinstance(src[i], list):
new[i] = merge_lists(src[i], val)
elif val is not None:
new[i] = val
else:
new[i] = src[i]
return new | Update a value list with a list of new or updated values. | Below is the the instruction that describes the task:
### Input:
Update a value list with a list of new or updated values.
### Response:
def merge_lists(src, new):
    """Update a value list with a list of new or updated values.

    Both lists are padded in place to a common length with ``None``.
    Dict/list elements present in both are merged recursively; a
    ``None`` in ``new`` keeps the corresponding ``src`` value.
    Returns ``new`` (mutated in place).
    """
    shorter, longer = (src, new) if len(src) < len(new) else (new, src)
    shorter.extend(None for _ in range(len(shorter), len(longer)))
    for idx, item in enumerate(new):
        if isinstance(item, dict) and isinstance(src[idx], dict):
            new[idx] = merge_dicts(src[idx], item)
        elif isinstance(item, list) and isinstance(src[idx], list):
            new[idx] = merge_lists(src[idx], item)
        else:
            # None means "no update" — fall back to the source value.
            new[idx] = item if item is not None else src[idx]
    return new
def DateStringToDateObject(date_string):
"""Return a date object for a string "YYYYMMDD"."""
# If this becomes a bottleneck date objects could be cached
if re.match('^\d{8}$', date_string) == None:
return None
try:
return datetime.date(int(date_string[0:4]), int(date_string[4:6]),
int(date_string[6:8]))
except ValueError:
return None | Return a date object for a string "YYYYMMDD". | Below is the the instruction that describes the task:
### Input:
Return a date object for a string "YYYYMMDD".
### Response:
def DateStringToDateObject(date_string):
    """Return a datetime.date for a string "YYYYMMDD", or None if invalid.

    Returns None both for strings that are not exactly eight digits and
    for eight-digit strings that do not form a valid calendar date.
    """
    # If this becomes a bottleneck date objects could be cached.
    # fullmatch with a raw pattern: re.match('^\d{8}$') accepted a
    # trailing newline ('$' matches before it) and the non-raw '\d'
    # is a deprecated escape in modern Python.
    if re.fullmatch(r'\d{8}', date_string) is None:
        return None
    try:
        return datetime.date(int(date_string[0:4]), int(date_string[4:6]),
                             int(date_string[6:8]))
    except ValueError:
        # Eight digits but not a real date (e.g. month 13).
        return None
def inflate_plugin_list(plugin_list, inflate_plugin):
"""
Inflate a list of strings/dictionaries to a list of plugin instances.
Args:
plugin_list (list): a list of str/dict.
inflate_plugin (method): the method to inflate the plugin.
Returns:
list: a plugin instances list.
Raises:
ValueError: when a dictionary item contains more than one key.
"""
plugins = []
for plugin_def in plugin_list:
if isinstance(plugin_def, str):
try:
plugins.append(inflate_plugin(plugin_def))
except PluginNotFoundError as e:
logger.error('Could not import plugin identified by %s. '
'Exception: %s.', plugin_def, e)
elif isinstance(plugin_def, dict):
if len(plugin_def) > 1:
raise ValueError(
'When using a plugin list, each dictionary item '
'must contain only one key.')
identifier = list(plugin_def.keys())[0]
definition = plugin_def[identifier]
try:
plugins.append(inflate_plugin(identifier, definition))
except PluginNotFoundError as e:
logger.error('Could not import plugin identified by %s. '
'Inflate method: %s. Exception: %s.',
identifier, inflate_plugin, e)
return plugins | Inflate a list of strings/dictionaries to a list of plugin instances.
Args:
plugin_list (list): a list of str/dict.
inflate_plugin (method): the method to inflate the plugin.
Returns:
list: a plugin instances list.
Raises:
ValueError: when a dictionary item contains more than one key. | Below is the instruction that describes the task:
### Input:
Inflate a list of strings/dictionaries to a list of plugin instances.
Args:
plugin_list (list): a list of str/dict.
inflate_plugin (method): the method to inflate the plugin.
Returns:
list: a plugin instances list.
Raises:
ValueError: when a dictionary item contains more than one key.
### Response:
def inflate_plugin_list(plugin_list, inflate_plugin):
    """
    Inflate a list of strings/dictionaries to a list of plugin instances.
    Args:
        plugin_list (list): a list of str/dict.
        inflate_plugin (method): the method to inflate the plugin.
    Returns:
        list: a plugin instances list.
    Raises:
        ValueError: when a dictionary item contains more than one key.
    """
    inflated = []
    for entry in plugin_list:
        if isinstance(entry, str):
            try:
                inflated.append(inflate_plugin(entry))
            except PluginNotFoundError as error:
                logger.error('Could not import plugin identified by %s. '
                             'Exception: %s.', entry, error)
        elif isinstance(entry, dict):
            if len(entry) > 1:
                raise ValueError(
                    'When using a plugin list, each dictionary item '
                    'must contain only one key.')
            # Single-key mapping: identifier -> plugin definition.
            identifier, definition = next(iter(entry.items()))
            try:
                inflated.append(inflate_plugin(identifier, definition))
            except PluginNotFoundError as error:
                logger.error('Could not import plugin identified by %s. '
                             'Inflate method: %s. Exception: %s.',
                             identifier, inflate_plugin, error)
        # Entries of any other type are silently ignored (as before).
    return inflated
def blockChildrenSignals(self, block):
""" If block equals True, the signals of the combo boxes and spin boxes are blocked
Returns the old blocking state.
"""
logger.debug("Blocking collector signals")
for spinBox in self._spinBoxes:
spinBox.blockSignals(block)
for comboBox in self._comboBoxes:
comboBox.blockSignals(block)
result = self._signalsBlocked
self._signalsBlocked = block
return result | If block equals True, the signals of the combo boxes and spin boxes are blocked
Returns the old blocking state. | Below is the instruction that describes the task:
### Input:
If block equals True, the signals of the combo boxes and spin boxes are blocked
Returns the old blocking state.
### Response:
def blockChildrenSignals(self, block):
    """ Block or unblock the signals of the child widgets.

        If block equals True, the signals of the combo boxes and spin
        boxes are blocked; if False, they are unblocked.
        Returns the old blocking state.
    """
    logger.debug("Blocking collector signals")
    for widget in self._spinBoxes:
        widget.blockSignals(block)
    for widget in self._comboBoxes:
        widget.blockSignals(block)
    previous = self._signalsBlocked
    self._signalsBlocked = block
    return previous
def get_chempot_correction(element, temp, pres):
"""
Get the normalized correction term Δμ for chemical potential of a gas
phase consisting of element at given temperature and pressure,
referenced to that in the standard state (T_std = 298.15 K,
T_std = 1 bar). The gas phase is limited to be one of O2, N2, Cl2,
F2, H2. Calculation formula can be found in the documentation of
Materials Project website.
Args:
element (string): The string representing the element.
temp (float): The temperature of the gas phase.
pres (float): The pressure of the gas phase.
Returns:
The correction of chemical potential in eV/atom of the gas
phase at given temperature and pressure.
"""
if element not in ["O", "N", "Cl", "F", "H"]:
return 0
std_temp = 298.15
std_pres = 1E5
ideal_gas_const = 8.3144598
# Cp and S at standard state in J/(K.mol). Data from
# https://janaf.nist.gov/tables/O-029.html
# https://janaf.nist.gov/tables/N-023.html
# https://janaf.nist.gov/tables/Cl-073.html
# https://janaf.nist.gov/tables/F-054.html
# https://janaf.nist.gov/tables/H-050.html
Cp_dict = {"O": 29.376,
"N": 29.124,
"Cl": 33.949,
"F": 31.302,
"H": 28.836}
S_dict = {"O": 205.147,
"N": 191.609,
"Cl": 223.079,
"F": 202.789,
"H": 130.680}
Cp_std = Cp_dict[element]
S_std = S_dict[element]
PV_correction = ideal_gas_const * temp * np.log(pres / std_pres)
TS_correction = - Cp_std * (temp * np.log(temp)
- std_temp * np.log(std_temp)) \
+ Cp_std * (temp - std_temp) \
* (1 + np.log(std_temp)) \
- S_std * (temp - std_temp)
dG = PV_correction + TS_correction
# Convert to eV/molecule unit.
dG /= 1000 * InterfacialReactivity.EV_TO_KJ_PER_MOL
# Normalize by number of atoms in the gas molecule. For elements
# considered, the gas molecules are all diatomic.
dG /= 2
return dG | Get the normalized correction term Δμ for chemical potential of a gas
phase consisting of element at given temperature and pressure,
referenced to that in the standard state (T_std = 298.15 K,
T_std = 1 bar). The gas phase is limited to be one of O2, N2, Cl2,
F2, H2. Calculation formula can be found in the documentation of
Materials Project website.
Args:
element (string): The string representing the element.
temp (float): The temperature of the gas phase.
pres (float): The pressure of the gas phase.
Returns:
The correction of chemical potential in eV/atom of the gas
phase at given temperature and pressure. | Below is the the instruction that describes the task:
### Input:
Get the normalized correction term Δμ for chemical potential of a gas
phase consisting of element at given temperature and pressure,
referenced to that in the standard state (T_std = 298.15 K,
T_std = 1 bar). The gas phase is limited to be one of O2, N2, Cl2,
F2, H2. Calculation formula can be found in the documentation of
Materials Project website.
Args:
element (string): The string representing the element.
temp (float): The temperature of the gas phase.
pres (float): The pressure of the gas phase.
Returns:
The correction of chemical potential in eV/atom of the gas
phase at given temperature and pressure.
### Response:
def get_chempot_correction(element, temp, pres):
    """
    Get the normalized correction term Δμ for chemical potential of a gas
    phase consisting of element at given temperature and pressure,
    referenced to that in the standard state (T_std = 298.15 K,
    P_std = 1 bar). The gas phase is limited to be one of O2, N2, Cl2,
    F2, H2. Calculation formula can be found in the documentation of
    Materials Project website.
    Args:
        element (string): The string representing the element.
        temp (float): The temperature of the gas phase.
        pres (float): The pressure of the gas phase.
    Returns:
        The correction of chemical potential in eV/atom of the gas
        phase at given temperature and pressure (0 for any element
        that is not one of O, N, Cl, F, H).
    """
    # (Cp, S) at standard state in J/(K.mol). Data from
    # https://janaf.nist.gov/tables/O-029.html
    # https://janaf.nist.gov/tables/N-023.html
    # https://janaf.nist.gov/tables/Cl-073.html
    # https://janaf.nist.gov/tables/F-054.html
    # https://janaf.nist.gov/tables/H-050.html
    thermo_data = {"O": (29.376, 205.147),
                   "N": (29.124, 191.609),
                   "Cl": (33.949, 223.079),
                   "F": (31.302, 202.789),
                   "H": (28.836, 130.680)}
    if element not in thermo_data:
        return 0
    std_temp = 298.15
    std_pres = 1E5
    ideal_gas_const = 8.3144598
    cp_std, s_std = thermo_data[element]
    # Pressure term: RT ln(P / P_std).
    pv_term = ideal_gas_const * temp * np.log(pres / std_pres)
    # Temperature term from Cp and S (same grouping/order as before to
    # keep floating-point results identical).
    ts_term = - cp_std * (temp * np.log(temp)
                          - std_temp * np.log(std_temp)) \
              + cp_std * (temp - std_temp) \
              * (1 + np.log(std_temp)) \
              - s_std * (temp - std_temp)
    dg = pv_term + ts_term
    # Convert from J/mol to eV/molecule.
    dg /= 1000 * InterfacialReactivity.EV_TO_KJ_PER_MOL
    # Normalize by number of atoms in the gas molecule. For elements
    # considered, the gas molecules are all diatomic.
    dg /= 2
    return dg
def get_gsim_lt(oqparam, trts=['*']):
"""
:param oqparam:
an :class:`openquake.commonlib.oqvalidation.OqParam` instance
:param trts:
a sequence of tectonic region types as strings; trts=['*']
means that there is no filtering
:returns:
a GsimLogicTree instance obtained by filtering on the provided
tectonic region types.
"""
if 'gsim_logic_tree' not in oqparam.inputs:
return logictree.GsimLogicTree.from_(oqparam.gsim)
gsim_file = os.path.join(
oqparam.base_path, oqparam.inputs['gsim_logic_tree'])
gsim_lt = logictree.GsimLogicTree(gsim_file, trts)
gmfcorr = oqparam.correl_model
for trt, gsims in gsim_lt.values.items():
for gsim in gsims:
if gmfcorr and (gsim.DEFINED_FOR_STANDARD_DEVIATION_TYPES ==
{StdDev.TOTAL}):
raise CorrelationButNoInterIntraStdDevs(gmfcorr, gsim)
trts = set(oqparam.minimum_magnitude) - {'default'}
expected_trts = set(gsim_lt.values)
assert trts <= expected_trts, (trts, expected_trts)
imt_dep_w = any(len(branch.weight.dic) > 1 for branch in gsim_lt.branches)
if oqparam.number_of_logic_tree_samples and imt_dep_w:
raise NotImplementedError('IMT-dependent weights in the logic tree '
'do not work with sampling!')
return gsim_lt | :param oqparam:
an :class:`openquake.commonlib.oqvalidation.OqParam` instance
:param trts:
a sequence of tectonic region types as strings; trts=['*']
means that there is no filtering
:returns:
a GsimLogicTree instance obtained by filtering on the provided
tectonic region types. | Below is the the instruction that describes the task:
### Input:
:param oqparam:
an :class:`openquake.commonlib.oqvalidation.OqParam` instance
:param trts:
a sequence of tectonic region types as strings; trts=['*']
means that there is no filtering
:returns:
a GsimLogicTree instance obtained by filtering on the provided
tectonic region types.
### Response:
def get_gsim_lt(oqparam, trts=['*']):
    """
    :param oqparam:
        an :class:`openquake.commonlib.oqvalidation.OqParam` instance
    :param trts:
        a sequence of tectonic region types as strings; trts=['*']
        means that there is no filtering
    :returns:
        a GsimLogicTree instance obtained by filtering on the provided
        tectonic region types.
    """
    # No logic tree file: build a trivial tree from the single GSIM.
    if 'gsim_logic_tree' not in oqparam.inputs:
        return logictree.GsimLogicTree.from_(oqparam.gsim)
    fname = os.path.join(
        oqparam.base_path, oqparam.inputs['gsim_logic_tree'])
    gsim_lt = logictree.GsimLogicTree(fname, trts)
    correl = oqparam.correl_model
    # A correlation model needs inter/intra event stddevs, not just TOTAL.
    for gsims in gsim_lt.values.values():
        for gsim in gsims:
            if correl and (gsim.DEFINED_FOR_STANDARD_DEVIATION_TYPES ==
                           {StdDev.TOTAL}):
                raise CorrelationButNoInterIntraStdDevs(correl, gsim)
    # Every TRT with a minimum magnitude must be known to the tree.
    mag_trts = set(oqparam.minimum_magnitude) - {'default'}
    known_trts = set(gsim_lt.values)
    assert mag_trts <= known_trts, (mag_trts, known_trts)
    imt_dep_w = any(len(br.weight.dic) > 1 for br in gsim_lt.branches)
    if oqparam.number_of_logic_tree_samples and imt_dep_w:
        raise NotImplementedError('IMT-dependent weights in the logic tree '
                                  'do not work with sampling!')
    return gsim_lt
def hierarchy_flatten(annotation):
'''Flatten a multi_segment annotation into mir_eval style.
Parameters
----------
annotation : jams.Annotation
An annotation in the `multi_segment` namespace
Returns
-------
hier_intervalss : list
A list of lists of intervals, ordered by increasing specificity.
hier_labels : list
A list of lists of labels, ordered by increasing specificity.
'''
intervals, values = annotation.to_interval_values()
ordering = dict()
for interval, value in zip(intervals, values):
level = value['level']
if level not in ordering:
ordering[level] = dict(intervals=list(), labels=list())
ordering[level]['intervals'].append(interval)
ordering[level]['labels'].append(value['label'])
levels = sorted(list(ordering.keys()))
hier_intervals = [ordering[level]['intervals'] for level in levels]
hier_labels = [ordering[level]['labels'] for level in levels]
return hier_intervals, hier_labels | Flatten a multi_segment annotation into mir_eval style.
Parameters
----------
annotation : jams.Annotation
An annotation in the `multi_segment` namespace
Returns
-------
hier_intervalss : list
A list of lists of intervals, ordered by increasing specificity.
hier_labels : list
A list of lists of labels, ordered by increasing specificity. | Below is the the instruction that describes the task:
### Input:
Flatten a multi_segment annotation into mir_eval style.
Parameters
----------
annotation : jams.Annotation
An annotation in the `multi_segment` namespace
Returns
-------
hier_intervalss : list
A list of lists of intervals, ordered by increasing specificity.
hier_labels : list
A list of lists of labels, ordered by increasing specificity.
### Response:
def hierarchy_flatten(annotation):
    '''Flatten a multi_segment annotation into mir_eval style.
    Parameters
    ----------
    annotation : jams.Annotation
        An annotation in the `multi_segment` namespace
    Returns
    -------
    hier_intervals : list
        A list of lists of intervals, ordered by increasing specificity.
    hier_labels : list
        A list of lists of labels, ordered by increasing specificity.
    '''
    intervals, values = annotation.to_interval_values()
    # Bucket intervals/labels by their 'level' field, preserving order
    # of appearance within each level.
    by_level = dict()
    for interval, value in zip(intervals, values):
        bucket = by_level.setdefault(value['level'],
                                     dict(intervals=list(), labels=list()))
        bucket['intervals'].append(interval)
        bucket['labels'].append(value['label'])
    # Levels sorted ascending == increasing specificity.
    levels = sorted(by_level)
    hier_intervals = [by_level[level]['intervals'] for level in levels]
    hier_labels = [by_level[level]['labels'] for level in levels]
    return hier_intervals, hier_labels
def get_methods(*objs):
""" Return the names of all callable attributes of an object"""
return set(
attr
for obj in objs
for attr in dir(obj)
if not attr.startswith('_') and callable(getattr(obj, attr))
) | Return the names of all callable attributes of an object | Below is the the instruction that describes the task:
### Input:
Return the names of all callable attributes of an object
### Response:
def get_methods(*objs):
    """Return the set of public callable attribute names across *objs*."""
    names = set()
    for obj in objs:
        for attr in dir(obj):
            # Skip private/dunder names; keep only callables.
            if attr.startswith('_'):
                continue
            if callable(getattr(obj, attr)):
                names.add(attr)
    return names
def monitor(self, field, callback, poll_interval=None):
""" Monitor `field` for change
Will monitor ``field`` for change and execute ``callback`` when
change is detected.
Example usage::
def handle(resource, field, previous, current):
print "Change from {} to {}".format(previous, current)
switch = TapSwitch.objects.get(id=3)
# Note that we monitor the entire state of the Hue Tap
# switch rather than a specific field
switch.monitor(lambda sw: sw.state.as_dict(), handle, poll_interval=0.2)
# Execution will stop here and the API client will begin polling for changes
hue_api.start_monitor_loop()
Args:
field (string): The name of the field to be monitored. This may also
be a callable which will be called with the resource
instance as its single argument and must return a
value which can be compared to previous values.
callback (callable): The callable to be called when a change is
detected. It will be called with parameters
as follows:
* resource instance
* field name,
* previous value
* current value.
poll_interval (float): Interval between polling in seconds.
Defaults to the API's `poll_interval` value (which defaults
to 0.1 second.
Returns:
Monitor:
"""
poll_interval = poll_interval or self.api.poll_interval
monitor = self.monitor_class(
resource=self,
field=field,
callback=callback,
poll_interval=poll_interval,
event_queue=self.api.event_queue,
poll_pool=self.api.poll_pool,
)
monitor.start()
return monitor | Monitor `field` for change
Will monitor ``field`` for change and execute ``callback`` when
change is detected.
Example usage::
def handle(resource, field, previous, current):
print "Change from {} to {}".format(previous, current)
switch = TapSwitch.objects.get(id=3)
# Note that we monitor the entire state of the Hue Tap
# switch rather than a specific field
switch.monitor(lambda sw: sw.state.as_dict(), handle, poll_interval=0.2)
# Execution will stop here and the API client will begin polling for changes
hue_api.start_monitor_loop()
Args:
field (string): The name of the field to be monitored. This may also
be a callable which will be called with the resource
instance as its single argument and must return a
value which can be compared to previous values.
callback (callable): The callable to be called when a change is
detected. It will be called with parameters
as follows:
* resource instance
* field name,
* previous value
* current value.
poll_interval (float): Interval between polling in seconds.
Defaults to the API's `poll_interval` value (which defaults
to 0.1 second.
Returns:
Monitor: | Below is the the instruction that describes the task:
### Input:
Monitor `field` for change
Will monitor ``field`` for change and execute ``callback`` when
change is detected.
Example usage::
def handle(resource, field, previous, current):
print "Change from {} to {}".format(previous, current)
switch = TapSwitch.objects.get(id=3)
# Note that we monitor the entire state of the Hue Tap
# switch rather than a specific field
switch.monitor(lambda sw: sw.state.as_dict(), handle, poll_interval=0.2)
# Execution will stop here and the API client will begin polling for changes
hue_api.start_monitor_loop()
Args:
field (string): The name of the field to be monitored. This may also
be a callable which will be called with the resource
instance as its single argument and must return a
value which can be compared to previous values.
callback (callable): The callable to be called when a change is
detected. It will be called with parameters
as follows:
* resource instance
* field name,
* previous value
* current value.
poll_interval (float): Interval between polling in seconds.
Defaults to the API's `poll_interval` value (which defaults
to 0.1 second.
Returns:
Monitor:
### Response:
def monitor(self, field, callback, poll_interval=None):
    """ Monitor `field` for change.

    Polls ``field`` and invokes ``callback`` whenever a change is
    detected. Polling itself is driven by the API's monitor loop
    (``hue_api.start_monitor_loop()``).

    Args:
        field (string): Name of the field to monitor. May also be a
            callable taking the resource instance as its single
            argument and returning a comparable value.
        callback (callable): Called on change with
            ``(resource, field, previous_value, current_value)``.
        poll_interval (float): Seconds between polls. Any falsy value
            (None, 0) falls back to the API's ``poll_interval``.

    Returns:
        Monitor: the started monitor instance.
    """
    # Falsy interval -> API default (note: deliberately `or`, so 0 also
    # falls back, matching historical behaviour).
    mon = self.monitor_class(
        resource=self,
        field=field,
        callback=callback,
        poll_interval=poll_interval or self.api.poll_interval,
        event_queue=self.api.event_queue,
        poll_pool=self.api.poll_pool,
    )
    mon.start()
    return mon
def eigb(A, y0, eps, rmax=150, nswp=20, max_full_size=1000, verb=1):
""" Approximate computation of minimal eigenvalues in tensor train format
This function uses alternating least-squares algorithm for the computation of several
minimal eigenvalues. If you want maximal eigenvalues, just send -A to the function.
:Reference:
S. V. Dolgov, B. N. Khoromskij, I. V. Oseledets, and D. V. Savostyanov.
Computation of extreme eigenvalues in higher dimensions using block tensor train format. Computer Phys. Comm.,
185(4):1207-1216, 2014. http://dx.doi.org/10.1016/j.cpc.2013.12.017
:param A: Matrix in the TT-format
:type A: matrix
:param y0: Initial guess in the block TT-format, r(d+1) is the number of eigenvalues sought
:type y0: tensor
:param eps: Accuracy required
:type eps: float
:param rmax: Maximal rank
:type rmax: int
:param kickrank: Addition rank, the larger the more robus the method,
:type kickrank: int
:rtype: A tuple (ev, tensor), where ev is a list of eigenvalues, tensor is an approximation to eigenvectors.
:Example:
>>> import tt
>>> import tt.eigb
>>> d = 8; f = 3
>>> r = [8] * (d * f + 1); r[d * f] = 8; r[0] = 1
>>> x = tt.rand(n, d * f, r)
>>> a = tt.qlaplace_dd([8, 8, 8])
>>> sol, ev = tt.eigb.eigb(a, x, 1e-6, verb=0)
Solving a block eigenvalue problem
Looking for 8 eigenvalues with accuracy 1E-06
swp: 1 er = 35.93 rmax:19
swp: 2 er = 4.51015E-04 rmax:18
swp: 3 er = 1.87584E-12 rmax:17
Total number of matvecs: 0
>>> print ev
[ 0.00044828 0.00089654 0.00089654 0.00089654 0.0013448 0.0013448
0.0013448 0.00164356]
"""
ry = y0.r.copy()
lam = tt_eigb.tt_block_eig.tt_eigb(y0.d, A.n, A.m, A.tt.r, A.tt.core, y0.core, ry, eps,
rmax, ry[y0.d], 0, nswp, max_full_size, verb)
y = tensor()
y.d = y0.d
y.n = A.n.copy()
y.r = ry
y.core = tt_eigb.tt_block_eig.result_core.copy()
tt_eigb.tt_block_eig.deallocate_result()
y.get_ps()
return y, lam | Approximate computation of minimal eigenvalues in tensor train format
This function uses alternating least-squares algorithm for the computation of several
minimal eigenvalues. If you want maximal eigenvalues, just send -A to the function.
:Reference:
S. V. Dolgov, B. N. Khoromskij, I. V. Oseledets, and D. V. Savostyanov.
Computation of extreme eigenvalues in higher dimensions using block tensor train format. Computer Phys. Comm.,
185(4):1207-1216, 2014. http://dx.doi.org/10.1016/j.cpc.2013.12.017
:param A: Matrix in the TT-format
:type A: matrix
:param y0: Initial guess in the block TT-format, r(d+1) is the number of eigenvalues sought
:type y0: tensor
:param eps: Accuracy required
:type eps: float
:param rmax: Maximal rank
:type rmax: int
:param kickrank: Addition rank, the larger the more robus the method,
:type kickrank: int
:rtype: A tuple (ev, tensor), where ev is a list of eigenvalues, tensor is an approximation to eigenvectors.
:Example:
>>> import tt
>>> import tt.eigb
>>> d = 8; f = 3
>>> r = [8] * (d * f + 1); r[d * f] = 8; r[0] = 1
>>> x = tt.rand(n, d * f, r)
>>> a = tt.qlaplace_dd([8, 8, 8])
>>> sol, ev = tt.eigb.eigb(a, x, 1e-6, verb=0)
Solving a block eigenvalue problem
Looking for 8 eigenvalues with accuracy 1E-06
swp: 1 er = 35.93 rmax:19
swp: 2 er = 4.51015E-04 rmax:18
swp: 3 er = 1.87584E-12 rmax:17
Total number of matvecs: 0
>>> print ev
[ 0.00044828 0.00089654 0.00089654 0.00089654 0.0013448 0.0013448
0.0013448 0.00164356] | Below is the the instruction that describes the task:
### Input:
Approximate computation of minimal eigenvalues in tensor train format
This function uses alternating least-squares algorithm for the computation of several
minimal eigenvalues. If you want maximal eigenvalues, just send -A to the function.
:Reference:
S. V. Dolgov, B. N. Khoromskij, I. V. Oseledets, and D. V. Savostyanov.
Computation of extreme eigenvalues in higher dimensions using block tensor train format. Computer Phys. Comm.,
185(4):1207-1216, 2014. http://dx.doi.org/10.1016/j.cpc.2013.12.017
:param A: Matrix in the TT-format
:type A: matrix
:param y0: Initial guess in the block TT-format, r(d+1) is the number of eigenvalues sought
:type y0: tensor
:param eps: Accuracy required
:type eps: float
:param rmax: Maximal rank
:type rmax: int
:param kickrank: Addition rank, the larger the more robus the method,
:type kickrank: int
:rtype: A tuple (ev, tensor), where ev is a list of eigenvalues, tensor is an approximation to eigenvectors.
:Example:
>>> import tt
>>> import tt.eigb
>>> d = 8; f = 3
>>> r = [8] * (d * f + 1); r[d * f] = 8; r[0] = 1
>>> x = tt.rand(n, d * f, r)
>>> a = tt.qlaplace_dd([8, 8, 8])
>>> sol, ev = tt.eigb.eigb(a, x, 1e-6, verb=0)
Solving a block eigenvalue problem
Looking for 8 eigenvalues with accuracy 1E-06
swp: 1 er = 35.93 rmax:19
swp: 2 er = 4.51015E-04 rmax:18
swp: 3 er = 1.87584E-12 rmax:17
Total number of matvecs: 0
>>> print ev
[ 0.00044828 0.00089654 0.00089654 0.00089654 0.0013448 0.0013448
0.0013448 0.00164356]
### Response:
def eigb(A, y0, eps, rmax=150, nswp=20, max_full_size=1000, verb=1):
    """ Approximate computation of minimal eigenvalues in tensor train format
    This function uses alternating least-squares algorithm for the computation of several
    minimal eigenvalues. If you want maximal eigenvalues, just send -A to the function.
    :Reference:
        S. V. Dolgov, B. N. Khoromskij, I. V. Oseledets, and D. V. Savostyanov.
        Computation of extreme eigenvalues in higher dimensions using block tensor train format. Computer Phys. Comm.,
        185(4):1207-1216, 2014. http://dx.doi.org/10.1016/j.cpc.2013.12.017
    :param A: Matrix in the TT-format
    :type A: matrix
    :param y0: Initial guess in the block TT-format, r(d+1) is the number of eigenvalues sought
    :type y0: tensor
    :param eps: Accuracy required
    :type eps: float
    :param rmax: Maximal rank
    :type rmax: int
    :param nswp: Maximal number of ALS sweeps
    :type nswp: int
    :param max_full_size: Size threshold for the local problem — presumably local problems
        up to this size are solved by a dense solver; confirm against the tt_eigb backend
    :type max_full_size: int
    :param verb: Verbosity level (0 for silent)
    :type verb: int
    :rtype: A tuple (tensor, ev), where tensor is an approximation to eigenvectors in the
        block TT-format and ev is a list of eigenvalues.
    :Example:
        >>> import tt
        >>> import tt.eigb
        >>> d = 8; f = 3
        >>> r = [8] * (d * f + 1); r[d * f] = 8; r[0] = 1
        >>> x = tt.rand(n, d * f, r)
        >>> a = tt.qlaplace_dd([8, 8, 8])
        >>> sol, ev = tt.eigb.eigb(a, x, 1e-6, verb=0)
        Solving a block eigenvalue problem
        Looking for 8 eigenvalues with accuracy 1E-06
        swp: 1 er = 35.93 rmax:19
        swp: 2 er = 4.51015E-04 rmax:18
        swp: 3 er = 1.87584E-12 rmax:17
        Total number of matvecs: 0
        >>> print ev
        [ 0.00044828 0.00089654 0.00089654 0.00089654 0.0013448 0.0013448
        0.0013448 0.00164356]
    """
    # TT ranks of the guess; per the docstring, ry[y0.d] is the number of
    # eigenvalues sought. NOTE(review): the Fortran call below appears to
    # update ry in place with the final ranks — confirm.
    ry = y0.r.copy()
    # Call into the Fortran core. Argument order must match the compiled
    # tt_eigb signature exactly; the solution cores are left in the
    # module-level buffer result_core rather than returned.
    lam = tt_eigb.tt_block_eig.tt_eigb(y0.d, A.n, A.m, A.tt.r, A.tt.core, y0.core, ry, eps,
                                       rmax, ry[y0.d], 0, nswp, max_full_size, verb)
    # Assemble the result tensor by hand from the backend's output.
    y = tensor()
    y.d = y0.d
    y.n = A.n.copy()
    y.r = ry
    # Copy the cores out of the Fortran module buffer, then free it.
    y.core = tt_eigb.tt_block_eig.result_core.copy()
    tt_eigb.tt_block_eig.deallocate_result()
    # Recompute the core position offsets for the new ranks.
    y.get_ps()
    return y, lam
def process_quotes(self, text):
"""Process quotes."""
escaped = False
in_quotes = False
current = []
quoted = []
i = _util.StringIter(text)
iter(i)
for t in i:
if not escaped and t == "\\":
escaped = True
elif escaped:
escaped = False
if t == "E":
if in_quotes:
current.append(_re.escape("".join(quoted)))
quoted = []
in_quotes = False
elif t == "Q" and not in_quotes:
in_quotes = True
elif in_quotes:
quoted.extend(["\\", t])
else:
current.extend(["\\", t])
elif in_quotes:
quoted.extend(t)
else:
current.append(t)
if in_quotes and escaped:
quoted.append("\\")
elif escaped:
current.append("\\")
if quoted:
current.append(_re.escape("".join(quoted)))
return "".join(current) | Process quotes. | Below is the the instruction that describes the task:
### Input:
Process quotes.
### Response:
def process_quotes(self, text):
    """Expand ``\\Q...\\E`` quote spans in `text`.

    Everything between ``\\Q`` and ``\\E`` is run through ``re.escape``;
    other backslash escapes pass through unchanged. A ``\\E`` with no
    open quote span is discarded, and an unterminated ``\\Q`` span is
    escaped up to the end of the string.
    """
    pending_escape = False
    quoting = False
    out = []
    literal = []
    stream = _util.StringIter(text)
    iter(stream)
    for ch in stream:
        if pending_escape:
            pending_escape = False
            if ch == "E":
                # Close an open quote span; a stray \E emits nothing.
                if quoting:
                    out.append(_re.escape("".join(literal)))
                    literal = []
                    quoting = False
            elif ch == "Q" and not quoting:
                quoting = True
            elif quoting:
                literal.extend(["\\", ch])
            else:
                out.extend(["\\", ch])
        elif ch == "\\":
            pending_escape = True
        elif quoting:
            literal.extend(ch)
        else:
            out.append(ch)
    # A trailing lone backslash is kept verbatim.
    if pending_escape:
        (literal if quoting else out).append("\\")
    # Unterminated \Q span: escape what was collected.
    if literal:
        out.append(_re.escape("".join(literal)))
    return "".join(out)
def _dump(f, obj, flip_faces=False, ungroup=False, comments=None, split_normals=False, write_mtl=True): # pylint: disable=redefined-outer-name
'''
write_mtl: When True and mesh has a texture, includes a mtllib
reference in the .obj and writes a .mtl alongside.
'''
import os
import numpy as np
from baiji import s3
ff = -1 if flip_faces else 1
def write_face_to_obj_file(obj, faces, face_index, obj_file):
vertex_indices = faces[face_index][::ff] + 1
write_normals = obj.fn is not None or (obj.vn is not None and obj.vn.shape == obj.v.shape)
write_texture = obj.ft is not None and obj.vt is not None
if write_normals and obj.fn is not None:
normal_indices = obj.fn[face_index][::ff] + 1
assert len(normal_indices) == len(vertex_indices)
elif write_normals: # unspecified fn but per-vertex normals, assume ordering is same as for v
normal_indices = faces[face_index][::ff] + 1
if write_texture:
texture_indices = obj.ft[face_index][::ff] + 1
assert len(texture_indices) == len(vertex_indices)
# Valid obj face lines are: v, v/vt, v//vn, v/vt/vn
if write_normals and write_texture:
pattern = '%d/%d/%d'
value = tuple(np.array([vertex_indices, texture_indices, normal_indices]).T.flatten())
elif write_normals:
pattern = '%d//%d'
value = tuple(np.array([vertex_indices, normal_indices]).T.flatten())
elif write_texture:
pattern = '%d/%d'
value = tuple(np.array([vertex_indices, texture_indices]).T.flatten())
else:
pattern = '%d'
value = tuple(vertex_indices)
obj_file.write(('f ' + ' '.join([pattern]*len(vertex_indices)) + '\n') % value)
if comments != None:
if isinstance(comments, basestring):
comments = [comments]
for comment in comments:
for line in comment.split("\n"):
f.write("# %s\n" % line)
if write_mtl and hasattr(obj, 'texture_filepath') and obj.texture_filepath is not None:
save_to = s3.path.dirname(f.name)
mtl_name = os.path.splitext(s3.path.basename(f.name))[0]
mtl_filename = mtl_name + '.mtl'
f.write('mtllib %s\n' % mtl_filename)
f.write('usemtl %s\n' % mtl_name)
texture_filename = mtl_name + os.path.splitext(obj.texture_filepath)[1]
if not s3.exists(s3.path.join(save_to, texture_filename)):
s3.cp(obj.texture_filepath, s3.path.join(save_to, texture_filename))
obj.write_mtl(s3.path.join(save_to, mtl_filename), mtl_name, texture_filename)
if obj.vc is not None:
for r, c in zip(obj.v, obj.vc):
f.write('v %f %f %f %f %f %f\n' % (r[0], r[1], r[2], c[0], c[1], c[2]))
elif obj.v is not None:
for r in obj.v:
f.write('v %f %f %f\n' % (r[0], r[1], r[2]))
if obj.vn is not None:
if split_normals:
for vn_idx in obj.fn:
r = obj.vn[vn_idx[0]]
f.write('vn %f %f %f\n' % (r[0], r[1], r[2]))
r = obj.vn[vn_idx[1]]
f.write('vn %f %f %f\n' % (r[0], r[1], r[2]))
r = obj.vn[vn_idx[2]]
f.write('vn %f %f %f\n' % (r[0], r[1], r[2]))
else:
for r in obj.vn:
f.write('vn %f %f %f\n' % (r[0], r[1], r[2]))
if obj.ft is not None and obj.vt is not None:
for r in obj.vt:
if len(r) == 3:
f.write('vt %f %f %f\n' % (r[0], r[1], r[2]))
else:
f.write('vt %f %f\n' % (r[0], r[1]))
if obj.f4 is not None:
faces = obj.f4
elif obj.f is not None:
faces = obj.f
else:
faces = None
if obj.segm is not None and not ungroup:
if faces is not None:
# An array of strings.
group_names = np.array(obj.segm.keys())
# A 2d array of booleans indicating which face is in which group.
group_mask = np.zeros((len(group_names), len(faces)), dtype=bool)
for i, segm_faces in enumerate(obj.segm.itervalues()):
group_mask[i][segm_faces] = True
# In an OBJ file, "g" changes the current state. This is a slice of
# group_mask that represents the current state.
current_group_mask = np.zeros((len(group_names),), dtype=bool)
for face_index in range(len(faces)):
# If the group has changed from the previous face, write the
# group entry.
this_group_mask = group_mask[:, face_index]
if any(current_group_mask != this_group_mask):
current_group_mask = this_group_mask
f.write('g %s\n' % ' '.join(group_names[current_group_mask]))
write_face_to_obj_file(obj, faces, face_index, f)
else:
if faces is not None:
for face_index in range(len(faces)):
write_face_to_obj_file(obj, faces, face_index, f) | write_mtl: When True and mesh has a texture, includes a mtllib
reference in the .obj and writes a .mtl alongside. | Below is the instruction that describes the task:
### Input:
write_mtl: When True and mesh has a texture, includes a mtllib
reference in the .obj and writes a .mtl alongside.
### Response:
def _dump(f, obj, flip_faces=False, ungroup=False, comments=None, split_normals=False, write_mtl=True): # pylint: disable=redefined-outer-name
    '''
    Write a mesh-like object to an open file in Wavefront OBJ format.

    f: Open, writable file-like object; ``f.name`` is used to derive the
        .mtl and texture filenames when a material is written.
    obj: Mesh-like object. The attributes v, vc, vn, vt, f, f4, fn, ft and
        segm are read from it; any of them may be None.
    flip_faces: When True, reverses the vertex order of every face.
    ungroup: When True, suppresses 'g' group lines even when obj.segm is set.
    comments: A string or list of strings written as '#' header lines.
    split_normals: When True, writes one 'vn' entry per face corner (looked
        up through obj.fn) rather than one per vertex.
    write_mtl: When True and mesh has a texture, includes a mtllib
        reference in the .obj and writes a .mtl alongside.
    '''
    import os
    import numpy as np
    from baiji import s3
    # Slice step that reverses each face's index order when flipping winding.
    ff = -1 if flip_faces else 1
    def write_face_to_obj_file(obj, faces, face_index, obj_file):
        # Emit a single 'f' record for faces[face_index]; OBJ indices are 1-based.
        vertex_indices = faces[face_index][::ff] + 1
        # Normals are emitted when explicit per-face normal indices exist, or
        # when there is exactly one vertex normal per vertex.
        write_normals = obj.fn is not None or (obj.vn is not None and obj.vn.shape == obj.v.shape)
        write_texture = obj.ft is not None and obj.vt is not None
        if write_normals and obj.fn is not None:
            normal_indices = obj.fn[face_index][::ff] + 1
            assert len(normal_indices) == len(vertex_indices)
        elif write_normals: # unspecified fn but per-vertex normals, assume ordering is same as for v
            normal_indices = faces[face_index][::ff] + 1
        if write_texture:
            texture_indices = obj.ft[face_index][::ff] + 1
            assert len(texture_indices) == len(vertex_indices)
        # Valid obj face lines are: v, v/vt, v//vn, v/vt/vn
        if write_normals and write_texture:
            pattern = '%d/%d/%d'
            value = tuple(np.array([vertex_indices, texture_indices, normal_indices]).T.flatten())
        elif write_normals:
            pattern = '%d//%d'
            value = tuple(np.array([vertex_indices, normal_indices]).T.flatten())
        elif write_texture:
            pattern = '%d/%d'
            value = tuple(np.array([vertex_indices, texture_indices]).T.flatten())
        else:
            pattern = '%d'
            value = tuple(vertex_indices)
        obj_file.write(('f ' + ' '.join([pattern]*len(vertex_indices)) + '\n') % value)
    # Header comments: one '# ' line per input line.
    # NOTE(review): 'comments is not None' would be the idiomatic test, and
    # basestring below is Python 2 only — this module presumably targets py2.
    if comments != None:
        if isinstance(comments, basestring):
            comments = [comments]
        for comment in comments:
            for line in comment.split("\n"):
                f.write("# %s\n" % line)
    # Material block: reference a .mtl named after the .obj, copy the texture
    # next to it if not already present, and let the mesh write the .mtl.
    if write_mtl and hasattr(obj, 'texture_filepath') and obj.texture_filepath is not None:
        save_to = s3.path.dirname(f.name)
        mtl_name = os.path.splitext(s3.path.basename(f.name))[0]
        mtl_filename = mtl_name + '.mtl'
        f.write('mtllib %s\n' % mtl_filename)
        f.write('usemtl %s\n' % mtl_name)
        texture_filename = mtl_name + os.path.splitext(obj.texture_filepath)[1]
        if not s3.exists(s3.path.join(save_to, texture_filename)):
            s3.cp(obj.texture_filepath, s3.path.join(save_to, texture_filename))
        obj.write_mtl(s3.path.join(save_to, mtl_filename), mtl_name, texture_filename)
    # Vertices: 'v x y z [r g b]' — per-vertex colors are appended when present.
    if obj.vc is not None:
        for r, c in zip(obj.v, obj.vc):
            f.write('v %f %f %f %f %f %f\n' % (r[0], r[1], r[2], c[0], c[1], c[2]))
    elif obj.v is not None:
        for r in obj.v:
            f.write('v %f %f %f\n' % (r[0], r[1], r[2]))
    # Normals: either one per face corner (split_normals; indexes three corners
    # per obj.fn row, so faces are expected to be triangles here) or one per row.
    if obj.vn is not None:
        if split_normals:
            for vn_idx in obj.fn:
                r = obj.vn[vn_idx[0]]
                f.write('vn %f %f %f\n' % (r[0], r[1], r[2]))
                r = obj.vn[vn_idx[1]]
                f.write('vn %f %f %f\n' % (r[0], r[1], r[2]))
                r = obj.vn[vn_idx[2]]
                f.write('vn %f %f %f\n' % (r[0], r[1], r[2]))
        else:
            for r in obj.vn:
                f.write('vn %f %f %f\n' % (r[0], r[1], r[2]))
    # Texture coordinates: 2- or 3-component 'vt' records.
    if obj.ft is not None and obj.vt is not None:
        for r in obj.vt:
            if len(r) == 3:
                f.write('vt %f %f %f\n' % (r[0], r[1], r[2]))
            else:
                f.write('vt %f %f\n' % (r[0], r[1]))
    # Prefer obj.f4 over obj.f when both exist (presumably quads vs. triangles
    # — TODO confirm against the mesh class).
    if obj.f4 is not None:
        faces = obj.f4
    elif obj.f is not None:
        faces = obj.f
    else:
        faces = None
    if obj.segm is not None and not ungroup:
        if faces is not None:
            # An array of strings.
            # NOTE(review): dict.keys()/itervalues() used as sequences — Python 2 only.
            group_names = np.array(obj.segm.keys())
            # A 2d array of booleans indicating which face is in which group.
            group_mask = np.zeros((len(group_names), len(faces)), dtype=bool)
            for i, segm_faces in enumerate(obj.segm.itervalues()):
                group_mask[i][segm_faces] = True
            # In an OBJ file, "g" changes the current state. This is a slice of
            # group_mask that represents the current state.
            current_group_mask = np.zeros((len(group_names),), dtype=bool)
            for face_index in range(len(faces)):
                # If the group has changed from the previous face, write the
                # group entry.
                this_group_mask = group_mask[:, face_index]
                if any(current_group_mask != this_group_mask):
                    current_group_mask = this_group_mask
                    f.write('g %s\n' % ' '.join(group_names[current_group_mask]))
                write_face_to_obj_file(obj, faces, face_index, f)
    else:
        if faces is not None:
            for face_index in range(len(faces)):
                write_face_to_obj_file(obj, faces, face_index, f)
def set_stack(self, stack_dump, stack_top):
"""
Stack dump is a dump of the stack from gdb, i.e. the result of the following gdb command :
``dump binary memory [stack_dump] [begin_addr] [end_addr]``
We set the stack to the same addresses as the gdb session to avoid pointers corruption.
:param stack_dump: The dump file.
:param stack_top: The address of the top of the stack in the gdb session.
"""
data = self._read_data(stack_dump)
self.real_stack_top = stack_top
addr = stack_top - len(data) # Address of the bottom of the stack
l.info("Setting stack from 0x%x up to %#x", addr, stack_top)
#FIXME: we should probably make we don't overwrite other stuff loaded there
self._write(addr, data) | Stack dump is a dump of the stack from gdb, i.e. the result of the following gdb command :
``dump binary memory [stack_dump] [begin_addr] [end_addr]``
We set the stack to the same addresses as the gdb session to avoid pointers corruption.
:param stack_dump: The dump file.
:param stack_top: The address of the top of the stack in the gdb session. | Below is the instruction that describes the task:
### Input:
Stack dump is a dump of the stack from gdb, i.e. the result of the following gdb command :
``dump binary memory [stack_dump] [begin_addr] [end_addr]``
We set the stack to the same addresses as the gdb session to avoid pointers corruption.
:param stack_dump: The dump file.
:param stack_top: The address of the top of the stack in the gdb session.
### Response:
def set_stack(self, stack_dump, stack_top):
    """
    Load a gdb stack dump back into memory at its original addresses.

    The dump is the output of gdb's
    ``dump binary memory [stack_dump] [begin_addr] [end_addr]`` command.
    Writing it at the same addresses as the gdb session keeps any pointers
    stored on the stack valid.

    :param stack_dump: The dump file.
    :param stack_top: The address of the top of the stack in the gdb session.
    """
    stack_bytes = self._read_data(stack_dump)
    self.real_stack_top = stack_top
    # The dump ends at stack_top, so its first byte sits at the stack bottom.
    bottom_addr = stack_top - len(stack_bytes)
    l.info("Setting stack from 0x%x up to %#x", bottom_addr, stack_top)
    # FIXME: we should probably make sure we don't overwrite other stuff loaded there
    self._write(bottom_addr, stack_bytes)
def is_all_field_none(self):
"""
:rtype: bool
"""
if self._id_ is not None:
return False
if self._country is not None:
return False
if self._expiry_time is not None:
return False
    return True | :rtype: bool | Below is the instruction that describes the task:
### Input:
:rtype: bool
### Response:
def is_all_field_none(self):
    """
    :rtype: bool
    """
    # True only when every tracked field is unset.
    for field in (self._id_, self._country, self._expiry_time):
        if field is not None:
            return False
    return True
def _set_authentication_key(self, v, load=False):
"""
Setter method for authentication_key, mapped from YANG variable /ntp/authentication_key (list)
If this variable is read-only (config: false) in the
source YANG file, then _set_authentication_key is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_authentication_key() directly.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=YANGListType("keyid",authentication_key.authentication_key, yang_name="authentication-key", rest_name="authentication-key", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='keyid', extensions={u'tailf-common': {u'cli-suppress-key-sort': None, u'info': u'authentication key', u'cli-no-key-completion': None, u'cli-suppress-mode': None, u'sort-priority': u'28', u'cli-suppress-list-no': None, u'cli-full-no': None, u'cli-compact-syntax': None, u'cli-suppress-key-abbreviation': None, u'cli-incomplete-command': None, u'callpoint': u'ntp-key'}}), is_container='list', yang_name="authentication-key", rest_name="authentication-key", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-suppress-key-sort': None, u'info': u'authentication key', u'cli-no-key-completion': None, u'cli-suppress-mode': None, u'sort-priority': u'28', u'cli-suppress-list-no': None, u'cli-full-no': None, u'cli-compact-syntax': None, u'cli-suppress-key-abbreviation': None, u'cli-incomplete-command': None, u'callpoint': u'ntp-key'}}, namespace='urn:brocade.com:mgmt:brocade-ntp', defining_module='brocade-ntp', yang_type='list', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """authentication_key must be of a type compatible with list""",
'defined-type': "list",
'generated-type': """YANGDynClass(base=YANGListType("keyid",authentication_key.authentication_key, yang_name="authentication-key", rest_name="authentication-key", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='keyid', extensions={u'tailf-common': {u'cli-suppress-key-sort': None, u'info': u'authentication key', u'cli-no-key-completion': None, u'cli-suppress-mode': None, u'sort-priority': u'28', u'cli-suppress-list-no': None, u'cli-full-no': None, u'cli-compact-syntax': None, u'cli-suppress-key-abbreviation': None, u'cli-incomplete-command': None, u'callpoint': u'ntp-key'}}), is_container='list', yang_name="authentication-key", rest_name="authentication-key", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-suppress-key-sort': None, u'info': u'authentication key', u'cli-no-key-completion': None, u'cli-suppress-mode': None, u'sort-priority': u'28', u'cli-suppress-list-no': None, u'cli-full-no': None, u'cli-compact-syntax': None, u'cli-suppress-key-abbreviation': None, u'cli-incomplete-command': None, u'callpoint': u'ntp-key'}}, namespace='urn:brocade.com:mgmt:brocade-ntp', defining_module='brocade-ntp', yang_type='list', is_config=True)""",
})
self.__authentication_key = t
if hasattr(self, '_set'):
self._set() | Setter method for authentication_key, mapped from YANG variable /ntp/authentication_key (list)
If this variable is read-only (config: false) in the
source YANG file, then _set_authentication_key is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_authentication_key() directly. | Below is the instruction that describes the task:
### Input:
Setter method for authentication_key, mapped from YANG variable /ntp/authentication_key (list)
If this variable is read-only (config: false) in the
source YANG file, then _set_authentication_key is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_authentication_key() directly.
### Response:
def _set_authentication_key(self, v, load=False):
  """
  Setter method for authentication_key, mapped from YANG variable /ntp/authentication_key (list)
  If this variable is read-only (config: false) in the
  source YANG file, then _set_authentication_key is considered as a private
  method. Backends looking to populate this variable should
  do so via calling thisObj._set_authentication_key() directly.
  """
  # Coerce wrapped values back through their underlying YANG type first.
  if hasattr(v, "_utype"):
    v = v._utype(v)
  try:
    # Validate/coerce v into the generated YANG list type; incompatible
    # values raise TypeError/ValueError from the constructor below.
    t = YANGDynClass(v,base=YANGListType("keyid",authentication_key.authentication_key, yang_name="authentication-key", rest_name="authentication-key", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='keyid', extensions={u'tailf-common': {u'cli-suppress-key-sort': None, u'info': u'authentication key', u'cli-no-key-completion': None, u'cli-suppress-mode': None, u'sort-priority': u'28', u'cli-suppress-list-no': None, u'cli-full-no': None, u'cli-compact-syntax': None, u'cli-suppress-key-abbreviation': None, u'cli-incomplete-command': None, u'callpoint': u'ntp-key'}}), is_container='list', yang_name="authentication-key", rest_name="authentication-key", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-suppress-key-sort': None, u'info': u'authentication key', u'cli-no-key-completion': None, u'cli-suppress-mode': None, u'sort-priority': u'28', u'cli-suppress-list-no': None, u'cli-full-no': None, u'cli-compact-syntax': None, u'cli-suppress-key-abbreviation': None, u'cli-incomplete-command': None, u'callpoint': u'ntp-key'}}, namespace='urn:brocade.com:mgmt:brocade-ntp', defining_module='brocade-ntp', yang_type='list', is_config=True)
  except (TypeError, ValueError):
    # pyangbind convention: re-raise with a structured error description.
    raise ValueError({
      'error-string': """authentication_key must be of a type compatible with list""",
      'defined-type': "list",
      'generated-type': """YANGDynClass(base=YANGListType("keyid",authentication_key.authentication_key, yang_name="authentication-key", rest_name="authentication-key", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='keyid', extensions={u'tailf-common': {u'cli-suppress-key-sort': None, u'info': u'authentication key', u'cli-no-key-completion': None, u'cli-suppress-mode': None, u'sort-priority': u'28', u'cli-suppress-list-no': None, u'cli-full-no': None, u'cli-compact-syntax': None, u'cli-suppress-key-abbreviation': None, u'cli-incomplete-command': None, u'callpoint': u'ntp-key'}}), is_container='list', yang_name="authentication-key", rest_name="authentication-key", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-suppress-key-sort': None, u'info': u'authentication key', u'cli-no-key-completion': None, u'cli-suppress-mode': None, u'sort-priority': u'28', u'cli-suppress-list-no': None, u'cli-full-no': None, u'cli-compact-syntax': None, u'cli-suppress-key-abbreviation': None, u'cli-incomplete-command': None, u'callpoint': u'ntp-key'}}, namespace='urn:brocade.com:mgmt:brocade-ntp', defining_module='brocade-ntp', yang_type='list', is_config=True)""",
    })
  self.__authentication_key = t
  # Notify the containing object of the change, when it tracks one.
  if hasattr(self, '_set'):
    self._set()
def from_pyvalue(name, value, **kwargs):
"""
Convenience wrapper for new_param() that constructs a Param element
from an instance of a Python builtin type. See new_param() for a
description of the valid keyword arguments.
"""
return new_param(name, ligolwtypes.FromPyType[type(value)], value, **kwargs) | Convenience wrapper for new_param() that constructs a Param element
from an instance of a Python builtin type. See new_param() for a
description of the valid keyword arguments. | Below is the instruction that describes the task:
### Input:
Convenience wrapper for new_param() that constructs a Param element
from an instance of a Python builtin type. See new_param() for a
description of the valid keyword arguments.
### Response:
def from_pyvalue(name, value, **kwargs):
    """
    Build a Param element from an instance of a Python builtin type.

    Convenience wrapper around new_param(): the param's type string is
    looked up from the Python type of *value*. See new_param() for a
    description of the valid keyword arguments.
    """
    type_name = ligolwtypes.FromPyType[type(value)]
    return new_param(name, type_name, value, **kwargs)
def make_attributes(s, gff3=True, keep_attr_order=True):
"""
In GFF3, the last column is typically:
ID=cds00002;Parent=mRNA00002;
In GFF2, the last column is typically:
Gene 22240.t000374; Note "Carbonic anhydrase"
"""
if gff3:
"""
hack: temporarily replace the '+' sign in the attributes column
with the string 'PlusSign' to prevent urlparse.parse_qsl() from
replacing the '+' sign with a space
"""
s = s.replace('+', 'PlusSign')
d = parse_qs(s, keep_attr_order=keep_attr_order)
for key in d:
d[key][0] = unquote(d[key][0].replace('PlusSign', '+').replace('"', ''))
else:
attributes = s.split(";")
d = DefaultOrderedDict(list) if keep_attr_order else defaultdict(list)
for a in attributes:
a = a.strip()
if ' ' not in a:
continue
key, val = a.split(' ', 1)
val = unquote(val.replace('"', '').replace('=', ' ').strip())
d[key].append(val)
for key, val in d.items():
d[key] = list(flatten([v.split(",") for v in val]))
return d | In GFF3, the last column is typically:
ID=cds00002;Parent=mRNA00002;
In GFF2, the last column is typically:
Gene 22240.t000374; Note "Carbonic anhydrase" | Below is the the instruction that describes the task:
### Input:
In GFF3, the last column is typically:
ID=cds00002;Parent=mRNA00002;
In GFF2, the last column is typically:
Gene 22240.t000374; Note "Carbonic anhydrase"
### Response:
def make_attributes(s, gff3=True, keep_attr_order=True):
    """
    Parse the attributes (last) column of a GFF line into a dict mapping
    each attribute key to a list of values (comma-separated values split).

    In GFF3, the last column is typically:
    ID=cds00002;Parent=mRNA00002;
    In GFF2, the last column is typically:
    Gene 22240.t000374; Note "Carbonic anhydrase"
    """
    if gff3:
        """
        hack: temporarily replace the '+' sign in the attributes column
        with the string 'PlusSign' to prevent urlparse.parse_qsl() from
        replacing the '+' sign with a space
        """
        s = s.replace('+', 'PlusSign')
        d = parse_qs(s, keep_attr_order=keep_attr_order)
        # Undo the '+' placeholder, strip quotes and percent-decoding.
        # NOTE(review): only d[key][0] is rewritten — additional values of a
        # repeated key keep the 'PlusSign' placeholder; confirm intended.
        for key in d:
            d[key][0] = unquote(d[key][0].replace('PlusSign', '+').replace('"', ''))
    else:
        # GFF2: semicolon-separated 'Key value' pairs.
        attributes = s.split(";")
        d = DefaultOrderedDict(list) if keep_attr_order else defaultdict(list)
        for a in attributes:
            a = a.strip()
            if ' ' not in a:
                # No separating space means no value — skip the entry.
                continue
            key, val = a.split(' ', 1)
            val = unquote(val.replace('"', '').replace('=', ' ').strip())
            d[key].append(val)
    # Split comma-separated values into individual list entries.
    for key, val in d.items():
        d[key] = list(flatten([v.split(",") for v in val]))
    return d
def postprocess(self, images, augmenter, parents):
"""
A function to be called after the augmentation of images was
performed.
Returns
-------
(N,H,W,C) ndarray or (N,H,W) ndarray or list of (H,W,C) ndarray or list of (H,W) ndarray
The input images, optionally modified.
"""
if self.postprocessor is None:
return images
else:
return self.postprocessor(images, augmenter, parents) | A function to be called after the augmentation of images was
performed.
Returns
-------
(N,H,W,C) ndarray or (N,H,W) ndarray or list of (H,W,C) ndarray or list of (H,W) ndarray
    The input images, optionally modified. | Below is the instruction that describes the task:
### Input:
A function to be called after the augmentation of images was
performed.
Returns
-------
(N,H,W,C) ndarray or (N,H,W) ndarray or list of (H,W,C) ndarray or list of (H,W) ndarray
The input images, optionally modified.
### Response:
def postprocess(self, images, augmenter, parents):
    """
    Hook invoked after image augmentation has been performed.

    Returns
    -------
    (N,H,W,C) ndarray or (N,H,W) ndarray or list of (H,W,C) ndarray or list of (H,W) ndarray
        The input images, passed through ``self.postprocessor`` when one is
        configured, otherwise returned unchanged.
    """
    # Without a configured postprocessor the images pass through untouched.
    if self.postprocessor is not None:
        return self.postprocessor(images, augmenter, parents)
    return images
def _set_hundredgigabitethernet(self, v, load=False):
"""
Setter method for hundredgigabitethernet, mapped from YANG variable /interface/hundredgigabitethernet (list)
If this variable is read-only (config: false) in the
source YANG file, then _set_hundredgigabitethernet is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_hundredgigabitethernet() directly.
YANG Description: The list of HundredGigabitEthernet interfaces in the
managed device. Each row represents a HundredGigabitEthernet
interface. The list provides a way to discover all the
100G physical interfaces in a managed device.
In case of logical-switch (VCS cluster), this list
comprises of all the 100G physical interfaces across
all the rbridges in the cluster.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=YANGListType("name",hundredgigabitethernet.hundredgigabitethernet, yang_name="hundredgigabitethernet", rest_name="HundredGigabitEthernet", parent=self, is_container='list', user_ordered=True, path_helper=self._path_helper, yang_keys='name', extensions={u'tailf-common': {u'info': u'The list of HundredGigabitEthernet interfaces.', u'cli-no-key-completion': None, u'alt-name': u'HundredGigabitEthernet', u'sort-priority': u'RUNNCFG_LEVEL_INTERFACE_TYPE_PHYSICAL', u'cli-suppress-no': None, u'cli-suppress-show-path': None, u'cli-custom-range-actionpoint': u'NsmRangeCliActionpoint', u'cli-custom-range-enumerator': u'NsmRangeCliActionpoint', u'cli-no-match-completion': None, u'cli-full-command': None, u'callpoint': u'interface_hundredgigabit', u'cli-mode-name': u'conf-if-hu-$(name)'}}), is_container='list', yang_name="hundredgigabitethernet", rest_name="HundredGigabitEthernet", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'The list of HundredGigabitEthernet interfaces.', u'cli-no-key-completion': None, u'alt-name': u'HundredGigabitEthernet', u'sort-priority': u'RUNNCFG_LEVEL_INTERFACE_TYPE_PHYSICAL', u'cli-suppress-no': None, u'cli-suppress-show-path': None, u'cli-custom-range-actionpoint': u'NsmRangeCliActionpoint', u'cli-custom-range-enumerator': u'NsmRangeCliActionpoint', u'cli-no-match-completion': None, u'cli-full-command': None, u'callpoint': u'interface_hundredgigabit', u'cli-mode-name': u'conf-if-hu-$(name)'}}, namespace='urn:brocade.com:mgmt:brocade-interface', defining_module='brocade-interface', yang_type='list', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """hundredgigabitethernet must be of a type compatible with list""",
'defined-type': "list",
'generated-type': """YANGDynClass(base=YANGListType("name",hundredgigabitethernet.hundredgigabitethernet, yang_name="hundredgigabitethernet", rest_name="HundredGigabitEthernet", parent=self, is_container='list', user_ordered=True, path_helper=self._path_helper, yang_keys='name', extensions={u'tailf-common': {u'info': u'The list of HundredGigabitEthernet interfaces.', u'cli-no-key-completion': None, u'alt-name': u'HundredGigabitEthernet', u'sort-priority': u'RUNNCFG_LEVEL_INTERFACE_TYPE_PHYSICAL', u'cli-suppress-no': None, u'cli-suppress-show-path': None, u'cli-custom-range-actionpoint': u'NsmRangeCliActionpoint', u'cli-custom-range-enumerator': u'NsmRangeCliActionpoint', u'cli-no-match-completion': None, u'cli-full-command': None, u'callpoint': u'interface_hundredgigabit', u'cli-mode-name': u'conf-if-hu-$(name)'}}), is_container='list', yang_name="hundredgigabitethernet", rest_name="HundredGigabitEthernet", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'The list of HundredGigabitEthernet interfaces.', u'cli-no-key-completion': None, u'alt-name': u'HundredGigabitEthernet', u'sort-priority': u'RUNNCFG_LEVEL_INTERFACE_TYPE_PHYSICAL', u'cli-suppress-no': None, u'cli-suppress-show-path': None, u'cli-custom-range-actionpoint': u'NsmRangeCliActionpoint', u'cli-custom-range-enumerator': u'NsmRangeCliActionpoint', u'cli-no-match-completion': None, u'cli-full-command': None, u'callpoint': u'interface_hundredgigabit', u'cli-mode-name': u'conf-if-hu-$(name)'}}, namespace='urn:brocade.com:mgmt:brocade-interface', defining_module='brocade-interface', yang_type='list', is_config=True)""",
})
self.__hundredgigabitethernet = t
if hasattr(self, '_set'):
self._set() | Setter method for hundredgigabitethernet, mapped from YANG variable /interface/hundredgigabitethernet (list)
If this variable is read-only (config: false) in the
source YANG file, then _set_hundredgigabitethernet is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_hundredgigabitethernet() directly.
YANG Description: The list of HundredGigabitEthernet interfaces in the
managed device. Each row represents a HundredGigabitEthernet
interface. The list provides a way to discover all the
100G physical interfaces in a managed device.
In case of logical-switch (VCS cluster), this list
comprises of all the 100G physical interfaces across
all the rbridges in the cluster. | Below is the instruction that describes the task:
### Input:
Setter method for hundredgigabitethernet, mapped from YANG variable /interface/hundredgigabitethernet (list)
If this variable is read-only (config: false) in the
source YANG file, then _set_hundredgigabitethernet is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_hundredgigabitethernet() directly.
YANG Description: The list of HundredGigabitEthernet interfaces in the
managed device. Each row represents a HundredGigabitEthernet
interface. The list provides a way to discover all the
100G physical interfaces in a managed device.
In case of logical-switch (VCS cluster), this list
comprises of all the 100G physical interfaces across
all the rbridges in the cluster.
### Response:
def _set_hundredgigabitethernet(self, v, load=False):
  """
  Setter method for hundredgigabitethernet, mapped from YANG variable /interface/hundredgigabitethernet (list)
  If this variable is read-only (config: false) in the
  source YANG file, then _set_hundredgigabitethernet is considered as a private
  method. Backends looking to populate this variable should
  do so via calling thisObj._set_hundredgigabitethernet() directly.

  YANG Description: The list of HundredGigabitEthernet interfaces in the
  managed device. Each row represents a HundredGigabitEthernet
  interface. The list provides a way to discover all the
  100G physical interfaces in a managed device.
  In case of logical-switch (VCS cluster), this list
  comprises of all the 100G physical interfaces across
  all the rbridges in the cluster.
  """
  # Coerce wrapped values back through their underlying YANG type first.
  if hasattr(v, "_utype"):
    v = v._utype(v)
  try:
    # Validate/coerce v into the generated YANG list type; incompatible
    # values raise TypeError/ValueError from the constructor below.
    t = YANGDynClass(v,base=YANGListType("name",hundredgigabitethernet.hundredgigabitethernet, yang_name="hundredgigabitethernet", rest_name="HundredGigabitEthernet", parent=self, is_container='list', user_ordered=True, path_helper=self._path_helper, yang_keys='name', extensions={u'tailf-common': {u'info': u'The list of HundredGigabitEthernet interfaces.', u'cli-no-key-completion': None, u'alt-name': u'HundredGigabitEthernet', u'sort-priority': u'RUNNCFG_LEVEL_INTERFACE_TYPE_PHYSICAL', u'cli-suppress-no': None, u'cli-suppress-show-path': None, u'cli-custom-range-actionpoint': u'NsmRangeCliActionpoint', u'cli-custom-range-enumerator': u'NsmRangeCliActionpoint', u'cli-no-match-completion': None, u'cli-full-command': None, u'callpoint': u'interface_hundredgigabit', u'cli-mode-name': u'conf-if-hu-$(name)'}}), is_container='list', yang_name="hundredgigabitethernet", rest_name="HundredGigabitEthernet", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'The list of HundredGigabitEthernet interfaces.', u'cli-no-key-completion': None, u'alt-name': u'HundredGigabitEthernet', u'sort-priority': u'RUNNCFG_LEVEL_INTERFACE_TYPE_PHYSICAL', u'cli-suppress-no': None, u'cli-suppress-show-path': None, u'cli-custom-range-actionpoint': u'NsmRangeCliActionpoint', u'cli-custom-range-enumerator': u'NsmRangeCliActionpoint', u'cli-no-match-completion': None, u'cli-full-command': None, u'callpoint': u'interface_hundredgigabit', u'cli-mode-name': u'conf-if-hu-$(name)'}}, namespace='urn:brocade.com:mgmt:brocade-interface', defining_module='brocade-interface', yang_type='list', is_config=True)
  except (TypeError, ValueError):
    # pyangbind convention: re-raise with a structured error description.
    raise ValueError({
      'error-string': """hundredgigabitethernet must be of a type compatible with list""",
      'defined-type': "list",
      'generated-type': """YANGDynClass(base=YANGListType("name",hundredgigabitethernet.hundredgigabitethernet, yang_name="hundredgigabitethernet", rest_name="HundredGigabitEthernet", parent=self, is_container='list', user_ordered=True, path_helper=self._path_helper, yang_keys='name', extensions={u'tailf-common': {u'info': u'The list of HundredGigabitEthernet interfaces.', u'cli-no-key-completion': None, u'alt-name': u'HundredGigabitEthernet', u'sort-priority': u'RUNNCFG_LEVEL_INTERFACE_TYPE_PHYSICAL', u'cli-suppress-no': None, u'cli-suppress-show-path': None, u'cli-custom-range-actionpoint': u'NsmRangeCliActionpoint', u'cli-custom-range-enumerator': u'NsmRangeCliActionpoint', u'cli-no-match-completion': None, u'cli-full-command': None, u'callpoint': u'interface_hundredgigabit', u'cli-mode-name': u'conf-if-hu-$(name)'}}), is_container='list', yang_name="hundredgigabitethernet", rest_name="HundredGigabitEthernet", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'The list of HundredGigabitEthernet interfaces.', u'cli-no-key-completion': None, u'alt-name': u'HundredGigabitEthernet', u'sort-priority': u'RUNNCFG_LEVEL_INTERFACE_TYPE_PHYSICAL', u'cli-suppress-no': None, u'cli-suppress-show-path': None, u'cli-custom-range-actionpoint': u'NsmRangeCliActionpoint', u'cli-custom-range-enumerator': u'NsmRangeCliActionpoint', u'cli-no-match-completion': None, u'cli-full-command': None, u'callpoint': u'interface_hundredgigabit', u'cli-mode-name': u'conf-if-hu-$(name)'}}, namespace='urn:brocade.com:mgmt:brocade-interface', defining_module='brocade-interface', yang_type='list', is_config=True)""",
    })
  self.__hundredgigabitethernet = t
  # Notify the containing object of the change, when it tracks one.
  if hasattr(self, '_set'):
    self._set()
def build_mismatched_common_meta_report(common_meta_df_shapes, sources, all_meta_df, all_meta_df_with_dups):
    """
    Generate a report (dataframe) that indicates for the common metadata that does not match across the common metadata
    which source file had which of the different mismatch values
    Args:
        common_meta_df_shapes: list of tuples that are the shapes of the common meta dataframes
        sources: list of the source files that the dataframes were loaded from
        all_meta_df: produced from build_common_all_meta_df
        all_meta_df_with_dups: produced from build_common_all_meta_df
    Returns:
        all_report_df: dataframe indicating the mismatched row metadata values and the corresponding source file
    """
    # Repeat each source file name once per row of its dataframe so that
    # expanded_sources lines up positionally with the rows of
    # all_meta_df_with_dups (assumes the dataframes were concatenated in the
    # same order as `sources` -- TODO confirm against build_common_all_meta_df).
    expanded_sources = []
    for (i, shape) in enumerate(common_meta_df_shapes):
        src = sources[i]
        expanded_sources.extend([src for i in range(shape[0])])
    expanded_sources = numpy.array(expanded_sources)
    logger.debug("len(expanded_sources): {}".format(len(expanded_sources)))
    # Index values occurring more than once are the ids whose metadata
    # disagreed between sources (keep=False marks every occurrence).
    duplicate_ids = all_meta_df.index[all_meta_df.index.duplicated(keep=False)]
    unique_duplicate_ids = duplicate_ids.unique()
    logger.debug("unique_duplicate_ids: {}".format(unique_duplicate_ids))
    duplicate_ids_meta_df = all_meta_df.loc[unique_duplicate_ids]
    report_df_list = []
    for unique_dup_id in unique_duplicate_ids:
        rows = duplicate_ids_meta_df.loc[unique_dup_id]
        # Boolean mask over all_meta_df_with_dups: a row is selected when it
        # equals (across every column) one of the conflicting rows for this id.
        matching_row_locs = numpy.array([False for i in range(all_meta_df_with_dups.shape[0])])
        for i in range(rows.shape[0]):
            r = rows.iloc[i]
            row_comparison = r == all_meta_df_with_dups
            matching_row_locs = matching_row_locs | row_comparison.all(axis=1).values
        report_df = all_meta_df_with_dups.loc[matching_row_locs].copy()
        # Attach the originating file for each mismatched row.
        report_df["source_file"] = expanded_sources[matching_row_locs]
        logger.debug("report_df.shape: {}".format(report_df.shape))
        report_df_list.append(report_df)
    all_report_df = pd.concat(report_df_list, axis=0)
    # Preserve the original row id, then renumber so the report index is unique.
    all_report_df["orig_rid"] = all_report_df.index
    all_report_df.index = pd.Index(range(all_report_df.shape[0]), name="index")
    logger.debug("all_report_df.shape: {}".format(all_report_df.shape))
    logger.debug("all_report_df.index: {}".format(all_report_df.index))
    logger.debug("all_report_df.columns: {}".format(all_report_df.columns))
    return all_report_df | Generate a report (dataframe) that indicates for the common metadata that does not match across the common metadata
which source file had which of the different mismatch values
Args:
common_meta_df_shapes: list of tuples that are the shapes of the common meta dataframes
sources: list of the source files that the dataframes were loaded from
all_meta_df: produced from build_common_all_meta_df
all_meta_df_with_dups: produced from build_common_all_meta_df
Returns:
        all_report_df: dataframe indicating the mismatched row metadata values and the corresponding source file | Below is the instruction that describes the task:
### Input:
Generate a report (dataframe) that indicates for the common metadata that does not match across the common metadata
which source file had which of the different mismatch values
Args:
common_meta_df_shapes: list of tuples that are the shapes of the common meta dataframes
sources: list of the source files that the dataframes were loaded from
all_meta_df: produced from build_common_all_meta_df
all_meta_df_with_dups: produced from build_common_all_meta_df
Returns:
all_report_df: dataframe indicating the mismatched row metadata values and the corresponding source file
### Response:
def build_mismatched_common_meta_report(common_meta_df_shapes, sources, all_meta_df, all_meta_df_with_dups):
"""
Generate a report (dataframe) that indicates for the common metadata that does not match across the common metadata
which source file had which of the different mismatch values
Args:
common_meta_df_shapes: list of tuples that are the shapes of the common meta dataframes
sources: list of the source files that the dataframes were loaded from
all_meta_df: produced from build_common_all_meta_df
all_meta_df_with_dups: produced from build_common_all_meta_df
Returns:
all_report_df: dataframe indicating the mismatched row metadata values and the corresponding source file
"""
expanded_sources = []
for (i, shape) in enumerate(common_meta_df_shapes):
src = sources[i]
expanded_sources.extend([src for i in range(shape[0])])
expanded_sources = numpy.array(expanded_sources)
logger.debug("len(expanded_sources): {}".format(len(expanded_sources)))
duplicate_ids = all_meta_df.index[all_meta_df.index.duplicated(keep=False)]
unique_duplicate_ids = duplicate_ids.unique()
logger.debug("unique_duplicate_ids: {}".format(unique_duplicate_ids))
duplicate_ids_meta_df = all_meta_df.loc[unique_duplicate_ids]
report_df_list = []
for unique_dup_id in unique_duplicate_ids:
rows = duplicate_ids_meta_df.loc[unique_dup_id]
matching_row_locs = numpy.array([False for i in range(all_meta_df_with_dups.shape[0])])
for i in range(rows.shape[0]):
r = rows.iloc[i]
row_comparison = r == all_meta_df_with_dups
matching_row_locs = matching_row_locs | row_comparison.all(axis=1).values
report_df = all_meta_df_with_dups.loc[matching_row_locs].copy()
report_df["source_file"] = expanded_sources[matching_row_locs]
logger.debug("report_df.shape: {}".format(report_df.shape))
report_df_list.append(report_df)
all_report_df = pd.concat(report_df_list, axis=0)
all_report_df["orig_rid"] = all_report_df.index
all_report_df.index = pd.Index(range(all_report_df.shape[0]), name="index")
logger.debug("all_report_df.shape: {}".format(all_report_df.shape))
logger.debug("all_report_df.index: {}".format(all_report_df.index))
logger.debug("all_report_df.columns: {}".format(all_report_df.columns))
return all_report_df |
def renegotiate_keys(self):
        """
        Force this session to switch to new keys. Normally this is done
        automatically after the session hits a certain number of packets or
        bytes sent or received, but this method gives you the option of forcing
        new keys whenever you want. Negotiating new keys causes a pause in
        traffic both ways as the two sides swap keys and do computations. This
        method returns when the session has switched to new keys.
        @raise SSHException: if the key renegotiation failed (which causes the
            session to end)
        """
        self.completion_event = threading.Event()
        self._send_kex_init()
        # Poll with a short timeout instead of blocking forever so we can
        # notice the transport dying (self.active going False) mid-exchange.
        while True:
            self.completion_event.wait(0.1)
            if not self.active:
                # Session ended during negotiation: surface the stored
                # exception if there is one, otherwise a generic failure.
                e = self.get_exception()
                if e is not None:
                    raise e
                raise SSHException('Negotiation failed.')
            # is_set() is the standard spelling; the camelCase isSet() alias
            # is deprecated since Python 3.10 (is_set exists since 2.6).
            if self.completion_event.is_set():
                break
        return
return | Force this session to switch to new keys. Normally this is done
automatically after the session hits a certain number of packets or
bytes sent or received, but this method gives you the option of forcing
new keys whenever you want. Negotiating new keys causes a pause in
traffic both ways as the two sides swap keys and do computations. This
method returns when the session has switched to new keys.
@raise SSHException: if the key renegotiation failed (which causes the
        session to end) | Below is the instruction that describes the task:
### Input:
Force this session to switch to new keys. Normally this is done
automatically after the session hits a certain number of packets or
bytes sent or received, but this method gives you the option of forcing
new keys whenever you want. Negotiating new keys causes a pause in
traffic both ways as the two sides swap keys and do computations. This
method returns when the session has switched to new keys.
@raise SSHException: if the key renegotiation failed (which causes the
session to end)
### Response:
def renegotiate_keys(self):
"""
Force this session to switch to new keys. Normally this is done
automatically after the session hits a certain number of packets or
bytes sent or received, but this method gives you the option of forcing
new keys whenever you want. Negotiating new keys causes a pause in
traffic both ways as the two sides swap keys and do computations. This
method returns when the session has switched to new keys.
@raise SSHException: if the key renegotiation failed (which causes the
session to end)
"""
self.completion_event = threading.Event()
self._send_kex_init()
while True:
self.completion_event.wait(0.1)
if not self.active:
e = self.get_exception()
if e is not None:
raise e
raise SSHException('Negotiation failed.')
if self.completion_event.isSet():
break
return |
def get_t(self, var, coords=None):
        """Return the time coordinate of *var*.

        Resolution order: (1) a coordinate carrying an ``'axis'`` attribute of
        ``'T'``, (2) a name shared by the :attr:`t` attribute, the variable's
        dimensions and *coords*, (3) the name reported by :meth:`get_tname`.

        Possible types
        --------------
        var: xarray.Variable
            The variable to get the time coordinate for
        coords: dict
            Coordinates to use. If None, the coordinates of the dataset in the
            :attr:`ds` attribute are used.
        Returns
        -------
        xarray.Coordinate or None
            The time coordinate or None if no time coordinate could be found"""
        if not coords:
            coords = self.ds.coords
        # 1. A coordinate explicitly flagged with axis 'T' wins outright.
        axis_coord = self.get_variable_by_axis(var, 't', coords)
        if axis_coord is not None:
            return axis_coord
        # 2. Fall back to names common to self.t, the variable's dims
        #    and the available coordinates.
        candidates = list(self.t.intersection(var.dims).intersection(coords))
        if candidates:
            if len(candidates) > 1:
                warn("Found multiple matches for time coordinate in the "
                     "variable: %s. I use %s" % (
                         ', '.join(candidates), candidates[0]),
                     PsyPlotRuntimeWarning)
            return coords[candidates[0]]
        # 3. Last resort: whatever get_tname reports, if anything.
        tname = self.get_tname(var)
        return coords.get(tname) if tname is not None else None
return None | Get the time coordinate of a variable
This method searches for the time coordinate in the :attr:`ds`. It
first checks whether there is one dimension that holds an ``'axis'``
attribute with 'T', otherwise it looks whether there is an intersection
between the :attr:`t` attribute and the variables dimensions, otherwise
it returns the coordinate corresponding to the first dimension of `var`
Possible types
--------------
var: xarray.Variable
The variable to get the time coordinate for
coords: dict
Coordinates to use. If None, the coordinates of the dataset in the
:attr:`ds` attribute are used.
Returns
-------
xarray.Coordinate or None
        The time coordinate or None if no time coordinate could be found | Below is the instruction that describes the task:
### Input:
Get the time coordinate of a variable
This method searches for the time coordinate in the :attr:`ds`. It
first checks whether there is one dimension that holds an ``'axis'``
attribute with 'T', otherwise it looks whether there is an intersection
between the :attr:`t` attribute and the variables dimensions, otherwise
it returns the coordinate corresponding to the first dimension of `var`
Possible types
--------------
var: xarray.Variable
The variable to get the time coordinate for
coords: dict
Coordinates to use. If None, the coordinates of the dataset in the
:attr:`ds` attribute are used.
Returns
-------
xarray.Coordinate or None
The time coordinate or None if no time coordinate could be found
### Response:
def get_t(self, var, coords=None):
"""
Get the time coordinate of a variable
This method searches for the time coordinate in the :attr:`ds`. It
first checks whether there is one dimension that holds an ``'axis'``
attribute with 'T', otherwise it looks whether there is an intersection
between the :attr:`t` attribute and the variables dimensions, otherwise
it returns the coordinate corresponding to the first dimension of `var`
Possible types
--------------
var: xarray.Variable
The variable to get the time coordinate for
coords: dict
Coordinates to use. If None, the coordinates of the dataset in the
:attr:`ds` attribute are used.
Returns
-------
xarray.Coordinate or None
The time coordinate or None if no time coordinate could be found"""
coords = coords or self.ds.coords
coord = self.get_variable_by_axis(var, 't', coords)
if coord is not None:
return coord
dimlist = list(self.t.intersection(var.dims).intersection(coords))
if dimlist:
if len(dimlist) > 1:
warn("Found multiple matches for time coordinate in the "
"variable: %s. I use %s" % (
', '.join(dimlist), dimlist[0]),
PsyPlotRuntimeWarning)
return coords[dimlist[0]]
tname = self.get_tname(var)
if tname is not None:
return coords.get(tname)
return None |
def get_display_names_metadata(self):
        """Gets the metadata for all display_names.
        return: (osid.Metadata) - metadata for the display_names
        *compliance: mandatory -- This method must be implemented.*
        """
        # Copy the template metadata so the shared dict is never mutated.
        metadata = dict(self._display_names_metadata)
        # Expose the text of every displayName already on the form so callers
        # can see the existing values alongside the metadata description.
        metadata.update({'existing_string_values': [t['text'] for t in self.my_osid_object_form._my_map['displayNames']]})
        return Metadata(**metadata) | Gets the metadata for all display_names.
return: (osid.Metadata) - metadata for the display_names
    *compliance: mandatory -- This method must be implemented.* | Below is the instruction that describes the task:
### Input:
Gets the metadata for all display_names.
return: (osid.Metadata) - metadata for the display_names
*compliance: mandatory -- This method must be implemented.*
### Response:
def get_display_names_metadata(self):
"""Gets the metadata for all display_names.
return: (osid.Metadata) - metadata for the display_names
*compliance: mandatory -- This method must be implemented.*
"""
metadata = dict(self._display_names_metadata)
metadata.update({'existing_string_values': [t['text'] for t in self.my_osid_object_form._my_map['displayNames']]})
return Metadata(**metadata) |
def ifft(invec, outvec):
    """ Inverse fourier transform from invec to outvec.
    Perform an inverse fourier transform. The type of transform is determined
    by the dtype of invec and outvec.
    Parameters
    ----------
    invec : TimeSeries or FrequencySeries
        The input vector.
    outvec : TimeSeries or FrequencySeries
        The output.
    """
    prec, itype, otype = _check_fft_args(invec, outvec)
    _check_inv_args(invec, itype, outvec, otype, 1, None)
    # The following line is where all the work is done:
    backend = get_backend()
    backend.ifft(invec, outvec, prec, itype, otype)
    # For an inverse FFT, the length of the *output* vector is the length
    # we should divide by, whether C2C or HC2R transform
    if isinstance(invec, _TimeSeries):
        outvec._epoch = invec._epoch
        # Output is frequency-domain: bin spacing follows from the input's
        # sample interval and the output length.
        outvec._delta_f = 1.0/(invec._delta_t * len(outvec))
        # Scale by the input sample interval so the discrete transform
        # approximates the continuous integral.
        outvec *= invec._delta_t
    elif isinstance(invec,_FrequencySeries):
        outvec._epoch = invec._epoch
        # Output is time-domain: sample spacing follows from the input's
        # frequency resolution and the output length.
        outvec._delta_t = 1.0/(invec._delta_f * len(outvec))
        outvec *= invec._delta_f | Inverse fourier transform from invec to outvec.
Perform an inverse fourier transform. The type of transform is determined
by the dtype of invec and outvec.
Parameters
----------
invec : TimeSeries or FrequencySeries
The input vector.
outvec : TimeSeries or FrequencySeries
        The output. | Below is the instruction that describes the task:
### Input:
Inverse fourier transform from invec to outvec.
Perform an inverse fourier transform. The type of transform is determined
by the dtype of invec and outvec.
Parameters
----------
invec : TimeSeries or FrequencySeries
The input vector.
outvec : TimeSeries or FrequencySeries
The output.
### Response:
def ifft(invec, outvec):
""" Inverse fourier transform from invec to outvec.
Perform an inverse fourier transform. The type of transform is determined
by the dtype of invec and outvec.
Parameters
----------
invec : TimeSeries or FrequencySeries
The input vector.
outvec : TimeSeries or FrequencySeries
The output.
"""
prec, itype, otype = _check_fft_args(invec, outvec)
_check_inv_args(invec, itype, outvec, otype, 1, None)
# The following line is where all the work is done:
backend = get_backend()
backend.ifft(invec, outvec, prec, itype, otype)
# For an inverse FFT, the length of the *output* vector is the length
# we should divide by, whether C2C or HC2R transform
if isinstance(invec, _TimeSeries):
outvec._epoch = invec._epoch
outvec._delta_f = 1.0/(invec._delta_t * len(outvec))
outvec *= invec._delta_t
elif isinstance(invec,_FrequencySeries):
outvec._epoch = invec._epoch
outvec._delta_t = 1.0/(invec._delta_f * len(outvec))
outvec *= invec._delta_f |
def parse_arguments(argv):
    """Setup argument parser for command line arguments.

    Args:
        argv: command line arguments to parse (without the program name),
            typically ``sys.argv[1:]``.

    Returns:
        dict: parsed options keyed by their ``dest`` name.
    """
    parser = argparse.ArgumentParser(description=__doc__.format(__version__))
    parser.add_argument('--application', '-a',
                        dest='application',
                        choices=scraper.APPLICATIONS,
                        default='firefox',
                        metavar='APPLICATION',
                        help='The name of the application to download, default: "%(default)s"')
    parser.add_argument('--base_url',
                        dest='base_url',
                        default=scraper.BASE_URL,
                        metavar='BASE_URL',
                        help='The base url to be used, default: "%(default)s"')
    parser.add_argument('--build-number',
                        dest='build_number',
                        type=int,
                        metavar='BUILD_NUMBER',
                        help='Number of the build (for candidate, daily, and tinderbox builds)')
    parser.add_argument('--debug-build',
                        dest='debug_build',
                        action='store_true',
                        help='Download a debug build (for tinderbox, and try builds)')
    parser.add_argument('--destination', '-d',
                        dest='destination',
                        default=os.getcwd(),
                        metavar='DESTINATION',
                        help='Directory or file name to download the '
                             'file to, default: current working directory')
    parser.add_argument('--extension',
                        dest='extension',
                        metavar='EXTENSION',
                        help='File extension of the build (e.g. "zip"), default: '
                             'the standard build extension on the platform.')
    parser.add_argument('--locale', '-l',
                        dest='locale',
                        metavar='LOCALE',
                        help='Locale of the application, default: "en-US" or "multi"')
    parser.add_argument('--log-level',
                        action='store',
                        dest='log_level',
                        default=logging.INFO,
                        metavar='LOG_LEVEL',
                        # Fixed: the help text was missing its closing parenthesis.
                        help='Threshold for log output (default: INFO)')
    parser.add_argument('--password',
                        dest='password',
                        metavar='PASSWORD',
                        help='Password for basic HTTP authentication.')
    parser.add_argument('--platform', '-p',
                        dest='platform',
                        choices=scraper.PLATFORM_FRAGMENTS.keys(),
                        metavar='PLATFORM',
                        help='Platform of the application')
    parser.add_argument('--print-url',
                        dest='print_url',
                        action='store_true',
                        help='Print final URL instead of downloading the file.')
    parser.add_argument('--retry-attempts',
                        dest='retry_attempts',
                        default=0,
                        type=int,
                        metavar='RETRY_ATTEMPTS',
                        help='Number of times the download will be attempted in '
                             'the event of a failure, default: %(default)s')
    parser.add_argument('--retry-delay',
                        dest='retry_delay',
                        default=10.,
                        type=float,
                        metavar='RETRY_DELAY',
                        help='Amount of time (in seconds) to wait between retry '
                             'attempts, default: %(default)s')
    parser.add_argument('--revision',
                        dest='revision',
                        help='Revision of the build (for daily, tinderbox, and try builds)')
    parser.add_argument('--stub',
                        dest='is_stub_installer',
                        action='store_true',
                        help='Stub installer (Only applicable to Windows builds).')
    parser.add_argument('--timeout',
                        dest='timeout',
                        type=float,
                        metavar='TIMEOUT',
                        help='Amount of time (in seconds) until a download times out.')
    parser.add_argument('--type', '-t',
                        dest='scraper_type',
                        choices=factory.scraper_types.keys(),
                        default='release',
                        metavar='SCRAPER_TYPE',
                        help='Type of build to download, default: "%(default)s"')
    parser.add_argument('--url',
                        dest='url',
                        metavar='URL',
                        help='URL to download. Note: Reserved characters (such '
                             'as &) must be escaped or put in quotes otherwise '
                             'CLI output may be abnormal.')
    parser.add_argument('--username',
                        dest='username',
                        metavar='USERNAME',
                        help='Username for basic HTTP authentication.')
    parser.add_argument('--version', '-v',
                        dest='version',
                        metavar='VERSION',
                        help='Version of the application to be downloaded for release '
                             'and candidate builds (special values: %s)' % ', '.join(
                                 scraper.RELEASE_AND_CANDIDATE_LATEST_VERSIONS.keys()))
    # Group for daily builds
    group = parser.add_argument_group('Daily builds', 'Extra options for daily builds.')
    group.add_argument('--branch',
                       dest='branch',
                       default='mozilla-central',
                       metavar='BRANCH',
                       help='Name of the branch, default: "%(default)s"')
    group.add_argument('--build-id',
                       dest='build_id',
                       metavar='BUILD_ID',
                       help='ID of the build to download.')
    group.add_argument('--date',
                       dest='date',
                       metavar='DATE',
                       help='Date of the build, default: latest build')
    return vars(parser.parse_args(argv))
    return vars(parser.parse_args(argv)) | Setup argument parser for command line arguments. | Below is the instruction that describes the task:
### Input:
Setup argument parser for command line arguments.
### Response:
def parse_arguments(argv):
"""Setup argument parser for command line arguments."""
parser = argparse.ArgumentParser(description=__doc__.format(__version__))
parser.add_argument('--application', '-a',
dest='application',
choices=scraper.APPLICATIONS,
default='firefox',
metavar='APPLICATION',
help='The name of the application to download, default: "%(default)s"')
parser.add_argument('--base_url',
dest='base_url',
default=scraper.BASE_URL,
metavar='BASE_URL',
help='The base url to be used, default: "%(default)s"')
parser.add_argument('--build-number',
dest='build_number',
type=int,
metavar='BUILD_NUMBER',
help='Number of the build (for candidate, daily, and tinderbox builds)')
parser.add_argument('--debug-build',
dest='debug_build',
action='store_true',
help='Download a debug build (for tinderbox, and try builds)')
parser.add_argument('--destination', '-d',
dest='destination',
default=os.getcwd(),
metavar='DESTINATION',
help='Directory or file name to download the '
'file to, default: current working directory')
parser.add_argument('--extension',
dest='extension',
metavar='EXTENSION',
help='File extension of the build (e.g. "zip"), default: '
'the standard build extension on the platform.')
parser.add_argument('--locale', '-l',
dest='locale',
metavar='LOCALE',
help='Locale of the application, default: "en-US" or "multi"')
parser.add_argument('--log-level',
action='store',
dest='log_level',
default=logging.INFO,
metavar='LOG_LEVEL',
help='Threshold for log output (default: INFO')
parser.add_argument('--password',
dest='password',
metavar='PASSWORD',
help='Password for basic HTTP authentication.')
parser.add_argument('--platform', '-p',
dest='platform',
choices=scraper.PLATFORM_FRAGMENTS.keys(),
metavar='PLATFORM',
help='Platform of the application')
parser.add_argument('--print-url',
dest='print_url',
action='store_true',
help='Print final URL instead of downloading the file.')
parser.add_argument('--retry-attempts',
dest='retry_attempts',
default=0,
type=int,
metavar='RETRY_ATTEMPTS',
help='Number of times the download will be attempted in '
'the event of a failure, default: %(default)s')
parser.add_argument('--retry-delay',
dest='retry_delay',
default=10.,
type=float,
metavar='RETRY_DELAY',
help='Amount of time (in seconds) to wait between retry '
'attempts, default: %(default)s')
parser.add_argument('--revision',
dest='revision',
help='Revision of the build (for daily, tinderbox, and try builds)')
parser.add_argument('--stub',
dest='is_stub_installer',
action='store_true',
help='Stub installer (Only applicable to Windows builds).')
parser.add_argument('--timeout',
dest='timeout',
type=float,
metavar='TIMEOUT',
help='Amount of time (in seconds) until a download times out.')
parser.add_argument('--type', '-t',
dest='scraper_type',
choices=factory.scraper_types.keys(),
default='release',
metavar='SCRAPER_TYPE',
help='Type of build to download, default: "%(default)s"')
parser.add_argument('--url',
dest='url',
metavar='URL',
help='URL to download. Note: Reserved characters (such '
'as &) must be escaped or put in quotes otherwise '
'CLI output may be abnormal.')
parser.add_argument('--username',
dest='username',
metavar='USERNAME',
help='Username for basic HTTP authentication.')
parser.add_argument('--version', '-v',
dest='version',
metavar='VERSION',
help='Version of the application to be downloaded for release '
'and candidate builds (special values: %s)' % ', '.join(
scraper.RELEASE_AND_CANDIDATE_LATEST_VERSIONS.keys()))
# Group for daily builds
group = parser.add_argument_group('Daily builds', 'Extra options for daily builds.')
group.add_argument('--branch',
dest='branch',
default='mozilla-central',
metavar='BRANCH',
help='Name of the branch, default: "%(default)s"')
group.add_argument('--build-id',
dest='build_id',
metavar='BUILD_ID',
help='ID of the build to download.')
group.add_argument('--date',
dest='date',
metavar='DATE',
help='Date of the build, default: latest build')
return vars(parser.parse_args(argv)) |
def order_by_line_nos(objs, line_nos):
    """Return *objs* reordered into ascending order of their `line_nos`.

    The sort is an argsort over the indices, so the objects themselves
    never need to be comparable; ties keep their original relative order.
    """
    order = sorted(range(len(line_nos)), key=lambda idx: line_nos[idx])
    return [objs[idx] for idx in order]
    return [objs[i] for i in ordering] | Orders the set of `objs` by `line_nos` | Below is the instruction that describes the task:
### Input:
Orders the set of `objs` by `line_nos`
### Response:
def order_by_line_nos(objs, line_nos):
"""Orders the set of `objs` by `line_nos`
"""
ordering = sorted(range(len(line_nos)), key=line_nos.__getitem__)
return [objs[i] for i in ordering] |
def has_header_encryption(self):
        """Return True when the archive's headers are encrypted."""
        # Encrypted headers are signalled either by the parsed encrypted
        # main header object, or by the password bit in the main block flags.
        if self._hdrenc_main:
            return True
        main = self._main
        if main and (main.flags & RAR_MAIN_PASSWORD):
            return True
        return False
        return False | Returns True if headers are encrypted | Below is the instruction that describes the task:
### Input:
Returns True if headers are encrypted
### Response:
def has_header_encryption(self):
"""Returns True if headers are encrypted
"""
if self._hdrenc_main:
return True
if self._main:
if self._main.flags & RAR_MAIN_PASSWORD:
return True
return False |
def handleResponse(self, response):
        """Handle the response string received by KafkaProtocol.
        Ok, we've received the response from the broker. Find the requestId
        in the message, lookup & fire the deferred with the response.
        """
        requestId = KafkaCodec.get_response_correlation_id(response)
        # Protect against responses coming back we didn't expect
        tReq = self.requests.pop(requestId, None)
        if tReq is None:
            # This could happen if we've sent it, are waiting on the response
            # when it's cancelled, causing us to remove it from self.requests
            log.warning('Unexpected response with correlationId=%d: %r',
                        requestId, reprlib.repr(response))
        else:
            # Fire the deferred for this request so the waiting caller
            # receives the raw response payload.
            tReq.d.callback(response) | Handle the response string received by KafkaProtocol.
Ok, we've received the response from the broker. Find the requestId
        in the message, lookup & fire the deferred with the response. | Below is the instruction that describes the task:
### Input:
Handle the response string received by KafkaProtocol.
Ok, we've received the response from the broker. Find the requestId
in the message, lookup & fire the deferred with the response.
### Response:
def handleResponse(self, response):
"""Handle the response string received by KafkaProtocol.
Ok, we've received the response from the broker. Find the requestId
in the message, lookup & fire the deferred with the response.
"""
requestId = KafkaCodec.get_response_correlation_id(response)
# Protect against responses coming back we didn't expect
tReq = self.requests.pop(requestId, None)
if tReq is None:
# This could happen if we've sent it, are waiting on the response
# when it's cancelled, causing us to remove it from self.requests
log.warning('Unexpected response with correlationId=%d: %r',
requestId, reprlib.repr(response))
else:
tReq.d.callback(response) |
async def shutdown(self, timeout: Optional[float]=15.0) -> None:
        """Worker process is about to exit, we need cleanup everything and
        stop accepting requests. It is especially important for keep-alive
        connections."""
        # Refuse any further keep-alive reuse of this connection.
        self._force_close = True
        if self._keepalive_handle is not None:
            self._keepalive_handle.cancel()
        if self._waiter:
            self._waiter.cancel()
        # wait for handlers
        # Give in-flight handlers up to `timeout` seconds to complete;
        # CancelledError/TimeoutError are swallowed since we are shutting
        # down regardless of the outcome.
        with suppress(asyncio.CancelledError, asyncio.TimeoutError):
            with CeilTimeout(timeout, loop=self._loop):
                if (self._error_handler is not None and
                        not self._error_handler.done()):
                    await self._error_handler
                if (self._task_handler is not None and
                        not self._task_handler.done()):
                    await self._task_handler
        # force-close non-idle handler
        if self._task_handler is not None:
            self._task_handler.cancel()
        # Finally drop the transport so no more bytes flow either way.
        if self.transport is not None:
            self.transport.close()
            self.transport = None | Worker process is about to exit, we need cleanup everything and
stop accepting requests. It is especially important for keep-alive
        connections. | Below is the instruction that describes the task:
### Input:
Worker process is about to exit, we need cleanup everything and
stop accepting requests. It is especially important for keep-alive
connections.
### Response:
async def shutdown(self, timeout: Optional[float]=15.0) -> None:
"""Worker process is about to exit, we need cleanup everything and
stop accepting requests. It is especially important for keep-alive
connections."""
self._force_close = True
if self._keepalive_handle is not None:
self._keepalive_handle.cancel()
if self._waiter:
self._waiter.cancel()
# wait for handlers
with suppress(asyncio.CancelledError, asyncio.TimeoutError):
with CeilTimeout(timeout, loop=self._loop):
if (self._error_handler is not None and
not self._error_handler.done()):
await self._error_handler
if (self._task_handler is not None and
not self._task_handler.done()):
await self._task_handler
# force-close non-idle handler
if self._task_handler is not None:
self._task_handler.cancel()
if self.transport is not None:
self.transport.close()
self.transport = None |
def from_str(cls, input_string, fmt, primitive=False, sort=False,
                 merge_tol=0.0):
        """
        Reads a structure from a string.
        Args:
            input_string (str): String to parse.
            fmt (str): A format specification.
            primitive (bool): Whether to find a primitive cell. Defaults to
                False.
            sort (bool): Whether to sort the sites in accordance to the default
                ordering criteria, i.e., electronegativity.
            merge_tol (float): If this is some positive number, sites that
                are within merge_tol from each other will be merged. Usually
                0.01 should be enough to deal with common numerical issues.
        Returns:
            IStructure / Structure
        """
        # Imports are local to avoid circular dependencies between the io
        # modules and this core structure module.
        from pymatgen.io.cif import CifParser
        from pymatgen.io.vasp import Poscar
        from pymatgen.io.cssr import Cssr
        from pymatgen.io.xcrysden import XSF
        from pymatgen.io.atat import Mcsqs
        # Dispatch on the (case-insensitive) format name.
        fmt = fmt.lower()
        if fmt == "cif":
            parser = CifParser.from_string(input_string)
            s = parser.get_structures(primitive=primitive)[0]
        elif fmt == "poscar":
            s = Poscar.from_string(input_string, False,
                                   read_velocities=False).structure
        elif fmt == "cssr":
            cssr = Cssr.from_string(input_string)
            s = cssr.structure
        elif fmt == "json":
            d = json.loads(input_string)
            s = Structure.from_dict(d)
        elif fmt == "yaml":
            import ruamel.yaml as yaml
            d = yaml.safe_load(input_string)
            s = Structure.from_dict(d)
        elif fmt == "xsf":
            s = XSF.from_string(input_string).structure
        elif fmt == "mcsqs":
            s = Mcsqs.structure_from_string(input_string)
        else:
            raise ValueError("Unrecognized format `%s`!" % fmt)
        # Optional post-processing: canonical ordering and merging of
        # near-coincident sites.
        if sort:
            s = s.get_sorted_structure()
        if merge_tol:
            s.merge_sites(merge_tol)
        return cls.from_sites(s) | Reads a structure from a string.
Args:
input_string (str): String to parse.
fmt (str): A format specification.
primitive (bool): Whether to find a primitive cell. Defaults to
False.
sort (bool): Whether to sort the sites in accordance to the default
ordering criteria, i.e., electronegativity.
merge_tol (float): If this is some positive number, sites that
are within merge_tol from each other will be merged. Usually
0.01 should be enough to deal with common numerical issues.
Returns:
            IStructure / Structure | Below is the instruction that describes the task:
### Input:
Reads a structure from a string.
Args:
input_string (str): String to parse.
fmt (str): A format specification.
primitive (bool): Whether to find a primitive cell. Defaults to
False.
sort (bool): Whether to sort the sites in accordance to the default
ordering criteria, i.e., electronegativity.
merge_tol (float): If this is some positive number, sites that
are within merge_tol from each other will be merged. Usually
0.01 should be enough to deal with common numerical issues.
Returns:
IStructure / Structure
### Response:
def from_str(cls, input_string, fmt, primitive=False, sort=False,
             merge_tol=0.0):
    """
    Reads a structure from a string.

    Args:
        input_string (str): String to parse.
        fmt (str): A format specification.
        primitive (bool): Whether to find a primitive cell. Defaults to
            False.
        sort (bool): Whether to sort the sites in accordance to the default
            ordering criteria, i.e., electronegativity.
        merge_tol (float): If this is some positive number, sites that
            are within merge_tol from each other will be merged. Usually
            0.01 should be enough to deal with common numerical issues.

    Returns:
        IStructure / Structure
    """
    from pymatgen.io.cif import CifParser
    from pymatgen.io.vasp import Poscar
    from pymatgen.io.cssr import Cssr
    from pymatgen.io.xcrysden import XSF
    from pymatgen.io.atat import Mcsqs

    # Dispatch on the lowercased format name.
    fmt = fmt.lower()
    if fmt == "cif":
        struct = CifParser.from_string(input_string).get_structures(
            primitive=primitive)[0]
    elif fmt == "poscar":
        struct = Poscar.from_string(input_string, False,
                                    read_velocities=False).structure
    elif fmt == "cssr":
        struct = Cssr.from_string(input_string).structure
    elif fmt == "json":
        struct = Structure.from_dict(json.loads(input_string))
    elif fmt == "yaml":
        import ruamel.yaml as yaml
        struct = Structure.from_dict(yaml.safe_load(input_string))
    elif fmt == "xsf":
        struct = XSF.from_string(input_string).structure
    elif fmt == "mcsqs":
        struct = Mcsqs.structure_from_string(input_string)
    else:
        raise ValueError("Unrecognized format `%s`!" % fmt)

    # Optional post-processing before rebuilding as the calling class.
    if sort:
        struct = struct.get_sorted_structure()
    if merge_tol:
        struct.merge_sites(merge_tol)
    return cls.from_sites(struct)
async def presentProof(self, proofRequest: ProofRequest) -> FullProof:
    """
    Presents a proof to the verifier.

    :param proofRequest: description of a proof to be presented (revealed
    attributes, predicates, timestamps for non-revocation)
    :return: a proof (both primary and non-revocation) and revealed attributes (initial non-encoded values)
    """
    # Gather the claims matching the request, then assemble the proof
    # over them using the request's nonce.
    foundClaims, revealedAttrs = await self._findClaims(proofRequest)
    return await self._prepareProof(foundClaims, proofRequest.nonce,
                                    revealedAttrs)
:param proofRequest: description of a proof to be presented (revealed
attributes, predicates, timestamps for non-revocation)
:return: a proof (both primary and non-revocation) and revealed attributes (initial non-encoded values) | Below is the instruction that describes the task:
### Input:
Presents a proof to the verifier.
:param proofRequest: description of a proof to be presented (revealed
attributes, predicates, timestamps for non-revocation)
:return: a proof (both primary and non-revocation) and revealed attributes (initial non-encoded values)
### Response:
async def presentProof(self, proofRequest: ProofRequest) -> FullProof:
    """
    Presents a proof to the verifier.

    :param proofRequest: description of a proof to be presented (revealed
    attributes, predicates, timestamps for non-revocation)
    :return: a proof (both primary and non-revocation) and revealed attributes (initial non-encoded values)
    """
    # Gather the claims matching the request, then assemble the proof
    # over them using the request's nonce.
    foundClaims, revealedAttrs = await self._findClaims(proofRequest)
    return await self._prepareProof(foundClaims, proofRequest.nonce,
                                    revealedAttrs)
def collapse_to_consensus(seqrecords, strict=False, do_iron=True):
    """Opposite of realign_seqs.

    Input sequences should all be the same length.
    The first record must be the consensus.

    :param seqrecords: alignment records; the first is the consensus.
        May be a CMA-style container carrying `level`/`name` attributes
        and a `_records` list.
    :param strict: if True, raise ValueError when the consensus mixes
        '.' and '-' gap characters instead of just warning.
    :param do_iron: if True, run `iron` over each collapsed sequence.
    :return: a block dict (as built by `consensus2block`) with the
        collapsed sequences appended under 'sequences'.
    :raises ValueError: if any sequence length differs from the
        consensus, or on mixed gap characters when `strict` is True.
    """
    level = 0
    name = seqrecords[0].id
    # If this is a CMA alignment, extract additional info:
    if hasattr(seqrecords, '_records'):
        if hasattr(seqrecords, 'level'):
            level = seqrecords.level
        if hasattr(seqrecords, 'name'):
            name = seqrecords.name
        seqrecords = seqrecords._records
    consensus = seqrecords.pop(0)
    cons_length = len(consensus)
    for i, s in enumerate(seqrecords):
        if len(s) != cons_length:
            raise ValueError(
                "Sequence #%d has length %d, consensus is %d"
                % (i+2, len(s), cons_length))
    if '.' in str(consensus.seq):
        # Strict -- error if there's a '-'
        if '-' in str(consensus.seq):
            if strict:
                raise ValueError("Consensus contains '-' gap characters")
            # BUGFIX: logging.warn is a deprecated alias of warning.
            logging.warning("Consensus sequence contains both '.' and '-' gap "
                            "characters -- is it really the consensus?")
            aligned_cols = [(c not in '.-') for c in str(consensus.seq)]
        else:
            aligned_cols = [c != '.' for c in str(consensus.seq)]
    else:
        # A little more ambiguous...
        aligned_cols = [c != '-' for c in str(consensus.seq)]
    consensus.seq = replace_asterisks(consensus.seq, 'consensus')
    # Start a block with the consensus sequence
    block = consensus2block(consensus, level=level, name=name)
    qlen = block['query_length']
    # Collapse & add remaining sequences to the block.
    # BUGFIX: `xrange` does not exist in Python 3; enumerate from 2
    # (the consensus is sequence #1) is the equivalent numbering.
    for index, rec in enumerate(seqrecords, start=2):
        # Collapse rec.seq down to aligned size
        new_mol_seq = []
        is_beginning = True
        for aligned_col, char in zip(aligned_cols,
                                     replace_asterisks(rec.seq, index)):
            if aligned_col:
                is_beginning = False
                if char in '-.':
                    # deletion
                    new_mol_seq.append('-')
                else:
                    # aligned character
                    new_mol_seq.append(char.upper())
            else:
                # it's an insert or nothing
                # (also, skip any left-side inserts)
                if char not in '-.' and not is_beginning:
                    new_mol_seq.append(char.lower())
        rec.seq = ''.join(new_mol_seq)
        if do_iron:
            rec.seq = iron(rec.seq)
        block['sequences'].append(seqrecord2sequence(rec, qlen, index))
    return block
Input sequences should all be the same length.
The first record must be the consensus. | Below is the instruction that describes the task:
### Input:
Opposite of realign_seqs.
Input sequences should all be the same length.
The first record must be the consensus.
### Response:
def collapse_to_consensus(seqrecords, strict=False, do_iron=True):
    """Opposite of realign_seqs.

    Input sequences should all be the same length.
    The first record must be the consensus.

    :param seqrecords: alignment records; the first is the consensus.
        May be a CMA-style container carrying `level`/`name` attributes
        and a `_records` list.
    :param strict: if True, raise ValueError when the consensus mixes
        '.' and '-' gap characters instead of just warning.
    :param do_iron: if True, run `iron` over each collapsed sequence.
    :return: a block dict (as built by `consensus2block`) with the
        collapsed sequences appended under 'sequences'.
    :raises ValueError: if any sequence length differs from the
        consensus, or on mixed gap characters when `strict` is True.
    """
    level = 0
    name = seqrecords[0].id
    # If this is a CMA alignment, extract additional info:
    if hasattr(seqrecords, '_records'):
        if hasattr(seqrecords, 'level'):
            level = seqrecords.level
        if hasattr(seqrecords, 'name'):
            name = seqrecords.name
        seqrecords = seqrecords._records
    consensus = seqrecords.pop(0)
    cons_length = len(consensus)
    for i, s in enumerate(seqrecords):
        if len(s) != cons_length:
            raise ValueError(
                "Sequence #%d has length %d, consensus is %d"
                % (i+2, len(s), cons_length))
    if '.' in str(consensus.seq):
        # Strict -- error if there's a '-'
        if '-' in str(consensus.seq):
            if strict:
                raise ValueError("Consensus contains '-' gap characters")
            # BUGFIX: logging.warn is a deprecated alias of warning.
            logging.warning("Consensus sequence contains both '.' and '-' gap "
                            "characters -- is it really the consensus?")
            aligned_cols = [(c not in '.-') for c in str(consensus.seq)]
        else:
            aligned_cols = [c != '.' for c in str(consensus.seq)]
    else:
        # A little more ambiguous...
        aligned_cols = [c != '-' for c in str(consensus.seq)]
    consensus.seq = replace_asterisks(consensus.seq, 'consensus')
    # Start a block with the consensus sequence
    block = consensus2block(consensus, level=level, name=name)
    qlen = block['query_length']
    # Collapse & add remaining sequences to the block.
    # BUGFIX: `xrange` does not exist in Python 3; enumerate from 2
    # (the consensus is sequence #1) is the equivalent numbering.
    for index, rec in enumerate(seqrecords, start=2):
        # Collapse rec.seq down to aligned size
        new_mol_seq = []
        is_beginning = True
        for aligned_col, char in zip(aligned_cols,
                                     replace_asterisks(rec.seq, index)):
            if aligned_col:
                is_beginning = False
                if char in '-.':
                    # deletion
                    new_mol_seq.append('-')
                else:
                    # aligned character
                    new_mol_seq.append(char.upper())
            else:
                # it's an insert or nothing
                # (also, skip any left-side inserts)
                if char not in '-.' and not is_beginning:
                    new_mol_seq.append(char.lower())
        rec.seq = ''.join(new_mol_seq)
        if do_iron:
            rec.seq = iron(rec.seq)
        block['sequences'].append(seqrecord2sequence(rec, qlen, index))
    return block
def checkout(self, _hash: str) -> None:
    """
    Check out the repo at the specified commit.
    BE CAREFUL: this will change the state of the repo, hence it should
    *not* be used with more than 1 thread.
    :param _hash: commit hash to check out
    """
    # Serialize access: checkout mutates global repo state.
    with self.lock:
        # Drop any leftover temporary branch from a previous checkout.
        self._delete_tmp_branch()
        # Force-checkout the commit onto a fresh temporary branch '_PD'.
        self.git.checkout('-f', _hash, b='_PD')
BE CAREFUL: this will change the state of the repo, hence it should
*not* be used with more than 1 thread.
:param _hash: commit hash to checkout | Below is the instruction that describes the task:
### Input:
Checkout the repo at the speficied commit.
BE CAREFUL: this will change the state of the repo, hence it should
*not* be used with more than 1 thread.
:param _hash: commit hash to checkout
### Response:
def checkout(self, _hash: str) -> None:
    """
    Check out the repo at the specified commit.
    BE CAREFUL: this will change the state of the repo, hence it should
    *not* be used with more than 1 thread.
    :param _hash: commit hash to check out
    """
    # Serialize access: checkout mutates global repo state.
    with self.lock:
        # Drop any leftover temporary branch from a previous checkout.
        self._delete_tmp_branch()
        # Force-checkout the commit onto a fresh temporary branch '_PD'.
        self.git.checkout('-f', _hash, b='_PD')
def weingarten_image_curvature(image, sigma=1.0, opt='mean'):
    """
    Uses the weingarten map to estimate image mean or gaussian curvature

    ANTsR function: `weingartenImageCurvature`

    Arguments
    ---------
    image : ANTsImage
        image from which curvature is calculated
    sigma : scalar
        smoothing parameter
    opt : string
        mean by default, otherwise `gaussian` or `characterize`

    Returns
    -------
    ANTsImage

    Example
    -------
    >>> import ants
    >>> image = ants.image_read(ants.get_ants_data('mni')).resample_image((3,3,3))
    >>> imagecurv = ants.weingarten_image_curvature(image)
    """
    if image.dimension not in {2, 3}:
        raise ValueError('image must be 2D or 3D')

    if image.dimension == 2:
        # Promote the 2D image to a thin 3D volume (the backend is 3D):
        # stack copies of the slice into layers 1..6 of a 10-deep array.
        d = image.shape
        # Hoisted out of the loop: the slice does not change per layer.
        voxvals = image[:d[0], :d[1]]
        temp = np.zeros(list(d) + [10])
        for k in range(1, 7):
            temp[:d[0], :d[1], k] = voxvals
        temp = core.from_numpy(temp)
        # Give the synthetic third axis the smallest in-plane spacing.
        myspc = list(image.spacing)
        myspc = myspc + [min(myspc)]
        temp.set_spacing(myspc)
        temp = temp.clone('float')
    else:
        temp = image.clone('float')

    # Map the option name to the backend's numeric code
    # (0 = mean, 5 = characterize, 6 = gaussian).
    optnum = {'gaussian': 6, 'characterize': 5}.get(opt, 0)

    libfn = utils.get_lib_fn('weingartenImageCurvature')
    mykout = libfn(temp.pointer, sigma, optnum)
    mykout = iio.ANTsImage(pixeltype=image.pixeltype, dimension=3,
                           components=image.components, pointer=mykout)
    if image.dimension == 3:
        return mykout
    # 2D input: pull the central slice back out of the synthetic volume.
    subarr = core.from_numpy(mykout.numpy()[:, :, 4])
    return core.copy_image_info(image, subarr)
ANTsR function: `weingartenImageCurvature`
Arguments
---------
image : ANTsImage
image from which curvature is calculated
sigma : scalar
smoothing parameter
opt : string
mean by default, otherwise `gaussian` or `characterize`
Returns
-------
ANTsImage
Example
-------
>>> import ants
>>> image = ants.image_read(ants.get_ants_data('mni')).resample_image((3,3,3))
>>> imagecurv = ants.weingarten_image_curvature(image) | Below is the instruction that describes the task:
### Input:
Uses the weingarten map to estimate image mean or gaussian curvature
ANTsR function: `weingartenImageCurvature`
Arguments
---------
image : ANTsImage
image from which curvature is calculated
sigma : scalar
smoothing parameter
opt : string
mean by default, otherwise `gaussian` or `characterize`
Returns
-------
ANTsImage
Example
-------
>>> import ants
>>> image = ants.image_read(ants.get_ants_data('mni')).resample_image((3,3,3))
>>> imagecurv = ants.weingarten_image_curvature(image)
### Response:
def weingarten_image_curvature(image, sigma=1.0, opt='mean'):
    """
    Uses the weingarten map to estimate image mean or gaussian curvature

    ANTsR function: `weingartenImageCurvature`

    Arguments
    ---------
    image : ANTsImage
        image from which curvature is calculated
    sigma : scalar
        smoothing parameter
    opt : string
        mean by default, otherwise `gaussian` or `characterize`

    Returns
    -------
    ANTsImage

    Example
    -------
    >>> import ants
    >>> image = ants.image_read(ants.get_ants_data('mni')).resample_image((3,3,3))
    >>> imagecurv = ants.weingarten_image_curvature(image)
    """
    if image.dimension not in {2, 3}:
        raise ValueError('image must be 2D or 3D')

    if image.dimension == 2:
        # Promote the 2D image to a thin 3D volume (the backend is 3D):
        # stack copies of the slice into layers 1..6 of a 10-deep array.
        d = image.shape
        # Hoisted out of the loop: the slice does not change per layer.
        voxvals = image[:d[0], :d[1]]
        temp = np.zeros(list(d) + [10])
        for k in range(1, 7):
            temp[:d[0], :d[1], k] = voxvals
        temp = core.from_numpy(temp)
        # Give the synthetic third axis the smallest in-plane spacing.
        myspc = list(image.spacing)
        myspc = myspc + [min(myspc)]
        temp.set_spacing(myspc)
        temp = temp.clone('float')
    else:
        temp = image.clone('float')

    # Map the option name to the backend's numeric code
    # (0 = mean, 5 = characterize, 6 = gaussian).
    optnum = {'gaussian': 6, 'characterize': 5}.get(opt, 0)

    libfn = utils.get_lib_fn('weingartenImageCurvature')
    mykout = libfn(temp.pointer, sigma, optnum)
    mykout = iio.ANTsImage(pixeltype=image.pixeltype, dimension=3,
                           components=image.components, pointer=mykout)
    if image.dimension == 3:
        return mykout
    # 2D input: pull the central slice back out of the synthetic volume.
    subarr = core.from_numpy(mykout.numpy()[:, :, 4])
    return core.copy_image_info(image, subarr)
def cursor_before(self):
    """Return the cursor before the current item.

    You must pass a QueryOptions object with produce_cursors=True
    for this to work.

    If there is no cursor or no current item, raise BadArgumentError.
    Before next() has returned there is no cursor.  Once the loop is
    exhausted, this returns the cursor after the last item.
    """
    if self._exhausted:
        # Iteration finished: the "before" cursor is the trailing one.
        return self.cursor_after()
    cursor = self._cursor_before
    # A stored exception means the cursor was unavailable; re-raise it.
    if isinstance(cursor, BaseException):
        raise cursor
    return cursor
You must pass a QueryOptions object with produce_cursors=True
for this to work.
If there is no cursor or no current item, raise BadArgumentError.
Before next() has returned there is no cursor. Once the loop is
exhausted, this returns the cursor after the last item. | Below is the instruction that describes the task:
### Input:
Return the cursor before the current item.
You must pass a QueryOptions object with produce_cursors=True
for this to work.
If there is no cursor or no current item, raise BadArgumentError.
Before next() has returned there is no cursor. Once the loop is
exhausted, this returns the cursor after the last item.
### Response:
def cursor_before(self):
    """Return the cursor before the current item.

    You must pass a QueryOptions object with produce_cursors=True
    for this to work.

    If there is no cursor or no current item, raise BadArgumentError.
    Before next() has returned there is no cursor.  Once the loop is
    exhausted, this returns the cursor after the last item.
    """
    if self._exhausted:
        # Iteration finished: the "before" cursor is the trailing one.
        return self.cursor_after()
    cursor = self._cursor_before
    # A stored exception means the cursor was unavailable; re-raise it.
    if isinstance(cursor, BaseException):
        raise cursor
    return cursor
def marginal_distribution(self, variables, inplace=True):
    """
    Returns the marginal distribution over variables.

    Parameters
    ----------
    variables: string, list, tuple, set, dict
        Variable or list of variables over which marginal distribution needs
        to be calculated
    inplace: Boolean (default True)
        If False return a new instance of JointProbabilityDistribution

    Examples
    --------
    >>> import numpy as np
    >>> from pgmpy.factors.discrete import JointProbabilityDistribution
    >>> values = np.random.rand(12)
    >>> prob = JointProbabilityDistribution(['x1', 'x2', 'x3'], [2, 3, 2], values/np.sum(values))
    >>> prob.marginal_distribution(['x1', 'x2'])
    """
    # A bare variable name becomes a one-element list.
    if isinstance(variables, (list, set, dict, tuple)):
        wanted = variables
    else:
        wanted = [variables]
    # Marginalizing out every variable *not* requested leaves exactly
    # the marginal distribution over the requested ones.
    to_drop = list(set(list(self.variables)) - set(wanted))
    return self.marginalize(to_drop, inplace=inplace)
Parameters
----------
variables: string, list, tuple, set, dict
Variable or list of variables over which marginal distribution needs
to be calculated
inplace: Boolean (default True)
If False return a new instance of JointProbabilityDistribution
Examples
--------
>>> import numpy as np
>>> from pgmpy.factors.discrete import JointProbabilityDistribution
>>> values = np.random.rand(12)
>>> prob = JointProbabilityDistribution(['x1', 'x2', 'x3'], [2, 3, 2], values/np.sum(values))
>>> prob.marginal_distribution(['x1', 'x2'])
>>> print(prob)
x1 x2 P(x1,x2)
---- ---- ----------
x1_0 x2_0 0.1502
x1_0 x2_1 0.1626
x1_0 x2_2 0.1197
x1_1 x2_0 0.2339
x1_1 x2_1 0.1996
x1_1 x2_2 0.1340 | Below is the instruction that describes the task:
### Input:
Returns the marginal distribution over variables.
Parameters
----------
variables: string, list, tuple, set, dict
Variable or list of variables over which marginal distribution needs
to be calculated
inplace: Boolean (default True)
If False return a new instance of JointProbabilityDistribution
Examples
--------
>>> import numpy as np
>>> from pgmpy.factors.discrete import JointProbabilityDistribution
>>> values = np.random.rand(12)
>>> prob = JointProbabilityDistribution(['x1', 'x2', 'x3'], [2, 3, 2], values/np.sum(values))
>>> prob.marginal_distribution(['x1', 'x2'])
>>> print(prob)
x1 x2 P(x1,x2)
---- ---- ----------
x1_0 x2_0 0.1502
x1_0 x2_1 0.1626
x1_0 x2_2 0.1197
x1_1 x2_0 0.2339
x1_1 x2_1 0.1996
x1_1 x2_2 0.1340
### Response:
def marginal_distribution(self, variables, inplace=True):
    """
    Returns the marginal distribution over variables.

    Parameters
    ----------
    variables: string, list, tuple, set, dict
        Variable or list of variables over which marginal distribution needs
        to be calculated
    inplace: Boolean (default True)
        If False return a new instance of JointProbabilityDistribution

    Examples
    --------
    >>> import numpy as np
    >>> from pgmpy.factors.discrete import JointProbabilityDistribution
    >>> values = np.random.rand(12)
    >>> prob = JointProbabilityDistribution(['x1', 'x2', 'x3'], [2, 3, 2], values/np.sum(values))
    >>> prob.marginal_distribution(['x1', 'x2'])
    """
    # A bare variable name becomes a one-element list.
    if isinstance(variables, (list, set, dict, tuple)):
        wanted = variables
    else:
        wanted = [variables]
    # Marginalizing out every variable *not* requested leaves exactly
    # the marginal distribution over the requested ones.
    to_drop = list(set(list(self.variables)) - set(wanted))
    return self.marginalize(to_drop, inplace=inplace)
def _render_headers(self, sheet):
"""
Write the headers row
:param obj sheet: an odswriter Sheet object
"""
headers = getattr(self, 'headers', ())
labels = [header['label'] for header in headers]
extra_headers = getattr(self, "extra_headers", ())
labels.extend([header['label'] for header in extra_headers])
sheet.writerow(labels) | Write the headers row
:param obj sheet: an odswriter Sheet object | Below is the instruction that describes the task:
### Input:
Write the headers row
:param obj sheet: an odswriter Sheet object
### Response:
def _render_headers(self, sheet):
"""
Write the headers row
:param obj sheet: an odswriter Sheet object
"""
headers = getattr(self, 'headers', ())
labels = [header['label'] for header in headers]
extra_headers = getattr(self, "extra_headers", ())
labels.extend([header['label'] for header in extra_headers])
sheet.writerow(labels) |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.