code stringlengths 75 104k | docstring stringlengths 1 46.9k | text stringlengths 164 112k |
|---|---|---|
def request_param_update(self, complete_name):
"""
Request an update of the value for the supplied parameter.
"""
self.param_updater.request_param_update(
self.toc.get_element_id(complete_name)) | Request an update of the value for the supplied parameter. | Below is the instruction that describes the task:
### Input:
Request an update of the value for the supplied parameter.
### Response:
def request_param_update(self, complete_name):
"""
Request an update of the value for the supplied parameter.
"""
self.param_updater.request_param_update(
self.toc.get_element_id(complete_name)) |
def get_json(identifier, namespace='cid', domain='compound', operation=None, searchtype=None, **kwargs):
"""Request wrapper that automatically parses JSON response and supresses NotFoundError."""
try:
return json.loads(get(identifier, namespace, domain, operation, 'JSON', searchtype, **kwargs).decode())
except NotFoundError as e:
log.info(e)
return None | Request wrapper that automatically parses JSON response and supresses NotFoundError. | Below is the instruction that describes the task:
### Input:
Request wrapper that automatically parses JSON response and supresses NotFoundError.
### Response:
def get_json(identifier, namespace='cid', domain='compound', operation=None, searchtype=None, **kwargs):
"""Request wrapper that automatically parses JSON response and supresses NotFoundError."""
try:
return json.loads(get(identifier, namespace, domain, operation, 'JSON', searchtype, **kwargs).decode())
except NotFoundError as e:
log.info(e)
return None |
def build_tqdm_outer(self, desc, total):
"""
Extension point. Override to provide custom options to outer progress bars (Epoch loop)
:param desc: Description
:param total: Number of epochs
:return: new progress bar
"""
return self.tqdm(desc=desc, total=total, leave=self.leave_outer, initial=self.initial) | Extension point. Override to provide custom options to outer progress bars (Epoch loop)
:param desc: Description
:param total: Number of epochs
:return: new progress bar | Below is the instruction that describes the task:
### Input:
Extension point. Override to provide custom options to outer progress bars (Epoch loop)
:param desc: Description
:param total: Number of epochs
:return: new progress bar
### Response:
def build_tqdm_outer(self, desc, total):
"""
Extension point. Override to provide custom options to outer progress bars (Epoch loop)
:param desc: Description
:param total: Number of epochs
:return: new progress bar
"""
return self.tqdm(desc=desc, total=total, leave=self.leave_outer, initial=self.initial) |
def decimal_to_alpha(dec):
"""
expects: decimal between 0 and 100
returns: alpha value for rgba
"""
dec /= 100.0
alpha = hex(int(dec*65535))[2:]
while len(alpha) < 4:
alpha = '0' + alpha
return alpha | expects: decimal between 0 and 100
returns: alpha value for rgba | Below is the instruction that describes the task:
### Input:
expects: decimal between 0 and 100
returns: alpha value for rgba
### Response:
def decimal_to_alpha(dec):
"""
expects: decimal between 0 and 100
returns: alpha value for rgba
"""
dec /= 100.0
alpha = hex(int(dec*65535))[2:]
while len(alpha) < 4:
alpha = '0' + alpha
return alpha |
def load(self, *relations):
"""
Load a set of relationships onto the collection.
"""
if len(self._items) > 0:
query = self.first().new_query().with_(*relations)
self._items = query.eager_load_relations(self._items)
return self | Load a set of relationships onto the collection. | Below is the instruction that describes the task:
### Input:
Load a set of relationships onto the collection.
### Response:
def load(self, *relations):
"""
Load a set of relationships onto the collection.
"""
if len(self._items) > 0:
query = self.first().new_query().with_(*relations)
self._items = query.eager_load_relations(self._items)
return self |
def run_migrations_online():
"""Run migrations in 'online' mode.
In this scenario we need to create an Engine
and associate a connection with the context.
"""
# this callback is used to prevent an auto-migration from being generated
# when there are no changes to the schema
# reference: http://alembic.zzzcomputing.com/en/latest/cookbook.html
def process_revision_directives(context, revision, directives):
if getattr(config.cmd_opts, 'autogenerate', False):
script = directives[0]
if len(script.upgrade_ops_list) >= len(bind_names) + 1:
empty = True
for upgrade_ops in script.upgrade_ops_list:
if not upgrade_ops.is_empty():
empty = False
if empty:
directives[:] = []
logger.info('No changes in schema detected.')
# for the direct-to-DB use case, start a transaction on all
# engines, then run all migrations, then commit all transactions.
engines = {
'': {
'engine': engine_from_config(
config.get_section(config.config_ini_section),
prefix='sqlalchemy.',
poolclass=pool.NullPool,
)
}
}
for name in bind_names:
engines[name] = rec = {}
rec['engine'] = engine_from_config(
context.config.get_section(name),
prefix='sqlalchemy.',
poolclass=pool.NullPool)
for name, rec in engines.items():
engine = rec['engine']
rec['connection'] = conn = engine.connect()
if USE_TWOPHASE:
rec['transaction'] = conn.begin_twophase()
else:
rec['transaction'] = conn.begin()
try:
for name, rec in engines.items():
logger.info("Migrating database %s" % (name or '<default>'))
context.configure(
connection=rec['connection'],
upgrade_token="%s_upgrades" % name,
downgrade_token="%s_downgrades" % name,
target_metadata=get_metadata(name),
process_revision_directives=process_revision_directives,
**current_app.extensions['migrate'].configure_args
)
context.run_migrations(engine_name=name)
if USE_TWOPHASE:
for rec in engines.values():
rec['transaction'].prepare()
for rec in engines.values():
rec['transaction'].commit()
except:
for rec in engines.values():
rec['transaction'].rollback()
raise
finally:
for rec in engines.values():
rec['connection'].close() | Run migrations in 'online' mode.
In this scenario we need to create an Engine
and associate a connection with the context. | Below is the instruction that describes the task:
### Input:
Run migrations in 'online' mode.
In this scenario we need to create an Engine
and associate a connection with the context.
### Response:
def run_migrations_online():
"""Run migrations in 'online' mode.
In this scenario we need to create an Engine
and associate a connection with the context.
"""
# this callback is used to prevent an auto-migration from being generated
# when there are no changes to the schema
# reference: http://alembic.zzzcomputing.com/en/latest/cookbook.html
def process_revision_directives(context, revision, directives):
if getattr(config.cmd_opts, 'autogenerate', False):
script = directives[0]
if len(script.upgrade_ops_list) >= len(bind_names) + 1:
empty = True
for upgrade_ops in script.upgrade_ops_list:
if not upgrade_ops.is_empty():
empty = False
if empty:
directives[:] = []
logger.info('No changes in schema detected.')
# for the direct-to-DB use case, start a transaction on all
# engines, then run all migrations, then commit all transactions.
engines = {
'': {
'engine': engine_from_config(
config.get_section(config.config_ini_section),
prefix='sqlalchemy.',
poolclass=pool.NullPool,
)
}
}
for name in bind_names:
engines[name] = rec = {}
rec['engine'] = engine_from_config(
context.config.get_section(name),
prefix='sqlalchemy.',
poolclass=pool.NullPool)
for name, rec in engines.items():
engine = rec['engine']
rec['connection'] = conn = engine.connect()
if USE_TWOPHASE:
rec['transaction'] = conn.begin_twophase()
else:
rec['transaction'] = conn.begin()
try:
for name, rec in engines.items():
logger.info("Migrating database %s" % (name or '<default>'))
context.configure(
connection=rec['connection'],
upgrade_token="%s_upgrades" % name,
downgrade_token="%s_downgrades" % name,
target_metadata=get_metadata(name),
process_revision_directives=process_revision_directives,
**current_app.extensions['migrate'].configure_args
)
context.run_migrations(engine_name=name)
if USE_TWOPHASE:
for rec in engines.values():
rec['transaction'].prepare()
for rec in engines.values():
rec['transaction'].commit()
except:
for rec in engines.values():
rec['transaction'].rollback()
raise
finally:
for rec in engines.values():
rec['connection'].close() |
def register_flag_by_module(self, module_name, flag):
"""Records the module that defines a specific flag.
We keep track of which flag is defined by which module so that we
can later sort the flags by module.
Args:
module_name: str, the name of a Python module.
flag: Flag, the Flag instance that is key to the module.
"""
flags_by_module = self.flags_by_module_dict()
flags_by_module.setdefault(module_name, []).append(flag) | Records the module that defines a specific flag.
We keep track of which flag is defined by which module so that we
can later sort the flags by module.
Args:
module_name: str, the name of a Python module.
flag: Flag, the Flag instance that is key to the module. | Below is the instruction that describes the task:
### Input:
Records the module that defines a specific flag.
We keep track of which flag is defined by which module so that we
can later sort the flags by module.
Args:
module_name: str, the name of a Python module.
flag: Flag, the Flag instance that is key to the module.
### Response:
def register_flag_by_module(self, module_name, flag):
"""Records the module that defines a specific flag.
We keep track of which flag is defined by which module so that we
can later sort the flags by module.
Args:
module_name: str, the name of a Python module.
flag: Flag, the Flag instance that is key to the module.
"""
flags_by_module = self.flags_by_module_dict()
flags_by_module.setdefault(module_name, []).append(flag) |
def get_tables_for_query(query):
"""
Takes a Django 'query' object and returns all tables that will be used in
that query as a list. Note that where clauses can have their own
querysets with their own dependent queries, etc.
"""
from django.db.models.sql.where import WhereNode, SubqueryConstraint
from django.db.models.query import QuerySet
tables = set([v[0] for v in getattr(query,'alias_map',{}).values()])
def get_sub_query_tables(node):
query = node.query_object
if not hasattr(query, 'field_names'):
query = query.values(*node.targets)
else:
query = query._clone()
query = query.query
return set(v[0] for v in getattr(query, 'alias_map',{}).values())
def get_tables(node, tables):
if isinstance(node, SubqueryConstraint):
return get_sub_query_tables(node)
for child in node.children:
if isinstance(child, WhereNode): # and child.children:
tables |= set(get_tables(child, tables))
elif not hasattr(child, '__iter__'):
continue
else:
for item in (c for c in child if isinstance(c, QuerySet)):
tables |= get_tables_for_query(item.query)
return tables
if query.where and query.where.children:
where_nodes = [c for c in query.where.children if isinstance(c, (WhereNode, SubqueryConstraint))]
for node in where_nodes:
tables |= get_tables(node, tables)
return list(tables) | Takes a Django 'query' object and returns all tables that will be used in
that query as a list. Note that where clauses can have their own
querysets with their own dependent queries, etc. | Below is the instruction that describes the task:
### Input:
Takes a Django 'query' object and returns all tables that will be used in
that query as a list. Note that where clauses can have their own
querysets with their own dependent queries, etc.
### Response:
def get_tables_for_query(query):
"""
Takes a Django 'query' object and returns all tables that will be used in
that query as a list. Note that where clauses can have their own
querysets with their own dependent queries, etc.
"""
from django.db.models.sql.where import WhereNode, SubqueryConstraint
from django.db.models.query import QuerySet
tables = set([v[0] for v in getattr(query,'alias_map',{}).values()])
def get_sub_query_tables(node):
query = node.query_object
if not hasattr(query, 'field_names'):
query = query.values(*node.targets)
else:
query = query._clone()
query = query.query
return set(v[0] for v in getattr(query, 'alias_map',{}).values())
def get_tables(node, tables):
if isinstance(node, SubqueryConstraint):
return get_sub_query_tables(node)
for child in node.children:
if isinstance(child, WhereNode): # and child.children:
tables |= set(get_tables(child, tables))
elif not hasattr(child, '__iter__'):
continue
else:
for item in (c for c in child if isinstance(c, QuerySet)):
tables |= get_tables_for_query(item.query)
return tables
if query.where and query.where.children:
where_nodes = [c for c in query.where.children if isinstance(c, (WhereNode, SubqueryConstraint))]
for node in where_nodes:
tables |= get_tables(node, tables)
return list(tables) |
def _general_init(self, opts, out=None):
"""
Initializes a variety of variables depending on user input.
@return: a tuple containing a boolean value indicating whether
progressbars should be hidden, functionality and enabled
functionality.
"""
self.session = Session()
if out:
self.out = out
else:
self.out = self._output(opts)
is_cms_plugin = self._meta.label != "scan"
if is_cms_plugin:
self.vf = VersionsFile(self.versions_file)
# http://stackoverflow.com/questions/23632794/in-requests-library-how-can-i-avoid-httpconnectionpool-is-full-discarding-con
try:
a = requests.adapters.HTTPAdapter(pool_maxsize=5000)
self.session.mount('http://', a)
self.session.mount('https://', a)
self.session.cookies.set_policy(BlockAll())
except AttributeError:
old_req = """Running a very old version of requests! Please `pip
install -U requests`."""
self.out.warn(old_req)
self.session.verify = False
self.session.headers['User-Agent'] = self.DEFAULT_UA
debug_requests = opts['debug_requests']
if debug_requests:
hide_progressbar = True
opts['threads_identify'] = 1
opts['threads_scan'] = 1
opts['threads_enumerate'] = 1
self.session = RequestsLogger(self.session)
else:
if opts['hide_progressbar']:
hide_progressbar = True
else:
hide_progressbar = False
functionality = self._functionality(opts)
enabled_functionality = self._enabled_functionality(functionality, opts)
return (hide_progressbar, functionality, enabled_functionality) | Initializes a variety of variables depending on user input.
@return: a tuple containing a boolean value indicating whether
progressbars should be hidden, functionality and enabled
functionality. | Below is the instruction that describes the task:
### Input:
Initializes a variety of variables depending on user input.
@return: a tuple containing a boolean value indicating whether
progressbars should be hidden, functionality and enabled
functionality.
### Response:
def _general_init(self, opts, out=None):
"""
Initializes a variety of variables depending on user input.
@return: a tuple containing a boolean value indicating whether
progressbars should be hidden, functionality and enabled
functionality.
"""
self.session = Session()
if out:
self.out = out
else:
self.out = self._output(opts)
is_cms_plugin = self._meta.label != "scan"
if is_cms_plugin:
self.vf = VersionsFile(self.versions_file)
# http://stackoverflow.com/questions/23632794/in-requests-library-how-can-i-avoid-httpconnectionpool-is-full-discarding-con
try:
a = requests.adapters.HTTPAdapter(pool_maxsize=5000)
self.session.mount('http://', a)
self.session.mount('https://', a)
self.session.cookies.set_policy(BlockAll())
except AttributeError:
old_req = """Running a very old version of requests! Please `pip
install -U requests`."""
self.out.warn(old_req)
self.session.verify = False
self.session.headers['User-Agent'] = self.DEFAULT_UA
debug_requests = opts['debug_requests']
if debug_requests:
hide_progressbar = True
opts['threads_identify'] = 1
opts['threads_scan'] = 1
opts['threads_enumerate'] = 1
self.session = RequestsLogger(self.session)
else:
if opts['hide_progressbar']:
hide_progressbar = True
else:
hide_progressbar = False
functionality = self._functionality(opts)
enabled_functionality = self._enabled_functionality(functionality, opts)
return (hide_progressbar, functionality, enabled_functionality) |
def get_datasets(self):
# type: () -> List[hdx.data.dataset.Dataset]
"""Get any datasets in the showcase
Returns:
List[Dataset]: List of datasets
"""
assoc_result, datasets_dicts = self._read_from_hdx('showcase', self.data['id'], fieldname='showcase_id',
action=self.actions()['list_datasets'])
datasets = list()
if assoc_result:
for dataset_dict in datasets_dicts:
dataset = hdx.data.dataset.Dataset(dataset_dict, configuration=self.configuration)
datasets.append(dataset)
return datasets | Get any datasets in the showcase
Returns:
List[Dataset]: List of datasets | Below is the instruction that describes the task:
### Input:
Get any datasets in the showcase
Returns:
List[Dataset]: List of datasets
### Response:
def get_datasets(self):
# type: () -> List[hdx.data.dataset.Dataset]
"""Get any datasets in the showcase
Returns:
List[Dataset]: List of datasets
"""
assoc_result, datasets_dicts = self._read_from_hdx('showcase', self.data['id'], fieldname='showcase_id',
action=self.actions()['list_datasets'])
datasets = list()
if assoc_result:
for dataset_dict in datasets_dicts:
dataset = hdx.data.dataset.Dataset(dataset_dict, configuration=self.configuration)
datasets.append(dataset)
return datasets |
def articles(self):
''' Tries to scrape the correct articles for singular and plural from uitmuntend.nl. '''
result = [None, None]
element = self._first('NN')
if element:
element = element.split('\r\n')[0]
if ' | ' in element:
# This means there is a plural
singular, plural = element.split(' | ')
singular, plural = singular.strip(), plural.strip()
else:
# This means there is no plural
singular, plural = element.strip(), ''
result[1] = ''
if singular:
result[0] = singular.split(' ')[0].split('/')
if plural:
result[1] = plural.split(' ')[0].split('/')
return result | Tries to scrape the correct articles for singular and plural from uitmuntend.nl. | Below is the instruction that describes the task:
### Input:
Tries to scrape the correct articles for singular and plural from uitmuntend.nl.
### Response:
def articles(self):
''' Tries to scrape the correct articles for singular and plural from uitmuntend.nl. '''
result = [None, None]
element = self._first('NN')
if element:
element = element.split('\r\n')[0]
if ' | ' in element:
# This means there is a plural
singular, plural = element.split(' | ')
singular, plural = singular.strip(), plural.strip()
else:
# This means there is no plural
singular, plural = element.strip(), ''
result[1] = ''
if singular:
result[0] = singular.split(' ')[0].split('/')
if plural:
result[1] = plural.split(' ')[0].split('/')
return result |
def add_rel(self, rId, reltype, target, is_external=False):
"""
Add a child ``<Relationship>`` element with attributes set according
to parameter values.
"""
target_mode = RTM.EXTERNAL if is_external else RTM.INTERNAL
relationship = CT_Relationship.new(rId, reltype, target, target_mode)
self.append(relationship) | Add a child ``<Relationship>`` element with attributes set according
to parameter values. | Below is the instruction that describes the task:
### Input:
Add a child ``<Relationship>`` element with attributes set according
to parameter values.
### Response:
def add_rel(self, rId, reltype, target, is_external=False):
"""
Add a child ``<Relationship>`` element with attributes set according
to parameter values.
"""
target_mode = RTM.EXTERNAL if is_external else RTM.INTERNAL
relationship = CT_Relationship.new(rId, reltype, target, target_mode)
self.append(relationship) |
def get_public_agents_public_ip():
"""Provides a list public IPs for public agents in the cluster"""
public_ip_list = []
agents = get_public_agents()
for agent in agents:
status, public_ip = shakedown.run_command_on_agent(agent, "/opt/mesosphere/bin/detect_ip_public")
public_ip_list.append(public_ip)
return public_ip_list | Provides a list public IPs for public agents in the cluster | Below is the instruction that describes the task:
### Input:
Provides a list public IPs for public agents in the cluster
### Response:
def get_public_agents_public_ip():
"""Provides a list public IPs for public agents in the cluster"""
public_ip_list = []
agents = get_public_agents()
for agent in agents:
status, public_ip = shakedown.run_command_on_agent(agent, "/opt/mesosphere/bin/detect_ip_public")
public_ip_list.append(public_ip)
return public_ip_list |
def start_watching(self):
""" Begins watching etcd for changes. """
# Don't create a new watcher thread if we already have one running
if self.watcher and self.watcher.is_alive():
return
# Create a new watcher thread and start it
self.watcher = Watcher()
self.watcher.start() | Begins watching etcd for changes. | Below is the instruction that describes the task:
### Input:
Begins watching etcd for changes.
### Response:
def start_watching(self):
""" Begins watching etcd for changes. """
# Don't create a new watcher thread if we already have one running
if self.watcher and self.watcher.is_alive():
return
# Create a new watcher thread and start it
self.watcher = Watcher()
self.watcher.start() |
def _query_iterator(cursor, chunksize, columns, index_col=None,
coerce_float=True, parse_dates=None):
"""Return generator through chunked result set"""
while True:
data = cursor.fetchmany(chunksize)
if type(data) == tuple:
data = list(data)
if not data:
cursor.close()
break
else:
yield _wrap_result(data, columns, index_col=index_col,
coerce_float=coerce_float,
parse_dates=parse_dates) | Return generator through chunked result set | Below is the instruction that describes the task:
### Input:
Return generator through chunked result set
### Response:
def _query_iterator(cursor, chunksize, columns, index_col=None,
coerce_float=True, parse_dates=None):
"""Return generator through chunked result set"""
while True:
data = cursor.fetchmany(chunksize)
if type(data) == tuple:
data = list(data)
if not data:
cursor.close()
break
else:
yield _wrap_result(data, columns, index_col=index_col,
coerce_float=coerce_float,
parse_dates=parse_dates) |
def create(self, bucket, descriptor, force=False):
"""https://github.com/frictionlessdata/tableschema-pandas-py#storage
"""
# Make lists
buckets = bucket
if isinstance(bucket, six.string_types):
buckets = [bucket]
descriptors = descriptor
if isinstance(descriptor, dict):
descriptors = [descriptor]
# Check buckets for existence
for bucket in buckets:
if bucket in self.buckets:
if not force:
message = 'Bucket "%s" already exists' % bucket
raise tableschema.exceptions.StorageError(message)
self.delete(bucket)
# Define dataframes
for bucket, descriptor in zip(buckets, descriptors):
tableschema.validate(descriptor)
self.__descriptors[bucket] = descriptor
self.__dataframes[bucket] = pd.DataFrame() | https://github.com/frictionlessdata/tableschema-pandas-py#storage | Below is the instruction that describes the task:
### Input:
https://github.com/frictionlessdata/tableschema-pandas-py#storage
### Response:
def create(self, bucket, descriptor, force=False):
"""https://github.com/frictionlessdata/tableschema-pandas-py#storage
"""
# Make lists
buckets = bucket
if isinstance(bucket, six.string_types):
buckets = [bucket]
descriptors = descriptor
if isinstance(descriptor, dict):
descriptors = [descriptor]
# Check buckets for existence
for bucket in buckets:
if bucket in self.buckets:
if not force:
message = 'Bucket "%s" already exists' % bucket
raise tableschema.exceptions.StorageError(message)
self.delete(bucket)
# Define dataframes
for bucket, descriptor in zip(buckets, descriptors):
tableschema.validate(descriptor)
self.__descriptors[bucket] = descriptor
self.__dataframes[bucket] = pd.DataFrame() |
def find_objects(self, ObjectClass, **kwargs):
""" Retrieve all objects of type ``ObjectClass``,
matching the specified filters in ``**kwargs`` -- case sensitive.
"""
filter = None
for k, v in kwargs.items():
cond = ObjectClass.getattr(k) == v
filter = cond if filter is None else filter & cond
return ObjectClass.scan(filter) | Retrieve all objects of type ``ObjectClass``,
matching the specified filters in ``**kwargs`` -- case sensitive. | Below is the instruction that describes the task:
### Input:
Retrieve all objects of type ``ObjectClass``,
matching the specified filters in ``**kwargs`` -- case sensitive.
### Response:
def find_objects(self, ObjectClass, **kwargs):
""" Retrieve all objects of type ``ObjectClass``,
matching the specified filters in ``**kwargs`` -- case sensitive.
"""
filter = None
for k, v in kwargs.items():
cond = ObjectClass.getattr(k) == v
filter = cond if filter is None else filter & cond
return ObjectClass.scan(filter) |
def field_stats(self, indices=''):
"""
Retrieve the field data stats for one or more indices
(See :ref:'es-guide-reference-api-admin-cluster-nodes-stats')
:keyword indices: an index or a list of indices
"""
path = self.conn._make_path(indices, (), '_stats','fielddata')
return self.conn._send_request('GET', path) | Retrieve the field data stats for one or more indices
(See :ref:'es-guide-reference-api-admin-cluster-nodes-stats')
:keyword indices: an index or a list of indices | Below is the instruction that describes the task:
### Input:
Retrieve the field data stats for one or more indices
(See :ref:'es-guide-reference-api-admin-cluster-nodes-stats')
:keyword indices: an index or a list of indices
### Response:
def field_stats(self, indices=''):
"""
Retrieve the field data stats for one or more indices
(See :ref:'es-guide-reference-api-admin-cluster-nodes-stats')
:keyword indices: an index or a list of indices
"""
path = self.conn._make_path(indices, (), '_stats','fielddata')
return self.conn._send_request('GET', path) |
def UpsertUserDefinedFunction(self, collection_link, udf, options=None):
"""Upserts a user defined function in a collection.
:param str collection_link:
The link to the collection.
:param str udf:
:param dict options:
The request options for the request.
:return:
The upserted UDF.
:rtype:
dict
"""
if options is None:
options = {}
collection_id, path, udf = self._GetContainerIdWithPathForUDF(collection_link, udf)
return self.Upsert(udf,
path,
'udfs',
collection_id,
None,
options) | Upserts a user defined function in a collection.
:param str collection_link:
The link to the collection.
:param str udf:
:param dict options:
The request options for the request.
:return:
The upserted UDF.
:rtype:
dict | Below is the instruction that describes the task:
### Input:
Upserts a user defined function in a collection.
:param str collection_link:
The link to the collection.
:param str udf:
:param dict options:
The request options for the request.
:return:
The upserted UDF.
:rtype:
dict
### Response:
def UpsertUserDefinedFunction(self, collection_link, udf, options=None):
"""Upserts a user defined function in a collection.
:param str collection_link:
The link to the collection.
:param str udf:
:param dict options:
The request options for the request.
:return:
The upserted UDF.
:rtype:
dict
"""
if options is None:
options = {}
collection_id, path, udf = self._GetContainerIdWithPathForUDF(collection_link, udf)
return self.Upsert(udf,
path,
'udfs',
collection_id,
None,
options) |
def getShutdownArgs(self):
"""return command line arguments for shutting down the
server; this command line is built from the name server
startup arguments."""
shutdownArgs = []
if self.host:
shutdownArgs += ['-h', self.host]
if self.bcport:
shutdownArgs += ['-p', self.bcport]
if self.bcaddr:
shutdownArgs += ['-c', self.bcaddr]
if self.identification:
shutdownArgs += ['-i', self.identification]
return shutdownArgs | return command line arguments for shutting down the
server; this command line is built from the name server
startup arguments. | Below is the instruction that describes the task:
### Input:
return command line arguments for shutting down the
server; this command line is built from the name server
startup arguments.
### Response:
def getShutdownArgs(self):
"""return command line arguments for shutting down the
server; this command line is built from the name server
startup arguments."""
shutdownArgs = []
if self.host:
shutdownArgs += ['-h', self.host]
if self.bcport:
shutdownArgs += ['-p', self.bcport]
if self.bcaddr:
shutdownArgs += ['-c', self.bcaddr]
if self.identification:
shutdownArgs += ['-i', self.identification]
return shutdownArgs |
def ckinv(self,oo):
""" check the value is date or not
檢查是否為日期格式
"""
pattern = re.compile(r"[0-9]{2}/[0-9]{2}/[0-9]{2}")
b = re.search(pattern, oo[0])
try:
b.group()
return True
except:
return False | check the value is date or not
檢查是否為日期格式 | Below is the instruction that describes the task:
### Input:
check the value is date or not
檢查是否為日期格式
### Response:
def ckinv(self,oo):
""" check the value is date or not
檢查是否為日期格式
"""
pattern = re.compile(r"[0-9]{2}/[0-9]{2}/[0-9]{2}")
b = re.search(pattern, oo[0])
try:
b.group()
return True
except:
return False |
def rows_to_dicts(self, serialize_cell=None):
"""Generates a sequence of dictionaries of {header[i] => row[i]} for each row."""
if serialize_cell is None:
serialize_cell = self.get_cell_value
# keys = [serialize_cell(cell) for cell in self.rows[0]]
keys = self.headers(serialize_cell)
for row in self.rows[1:]:
yield dict(zip(keys, [serialize_cell(cell) for cell in row])) | Generates a sequence of dictionaries of {header[i] => row[i]} for each row. | Below is the instruction that describes the task:
### Input:
Generates a sequence of dictionaries of {header[i] => row[i]} for each row.
### Response:
def rows_to_dicts(self, serialize_cell=None):
"""Generates a sequence of dictionaries of {header[i] => row[i]} for each row."""
if serialize_cell is None:
serialize_cell = self.get_cell_value
# keys = [serialize_cell(cell) for cell in self.rows[0]]
keys = self.headers(serialize_cell)
for row in self.rows[1:]:
yield dict(zip(keys, [serialize_cell(cell) for cell in row])) |
def get_datatype(self, table: str, column: str) -> str:
"""Returns database SQL datatype for a column: e.g. VARCHAR."""
return self.flavour.get_datatype(self, table, column).upper() | Returns database SQL datatype for a column: e.g. VARCHAR. | Below is the instruction that describes the task:
### Input:
Returns database SQL datatype for a column: e.g. VARCHAR.
### Response:
def get_datatype(self, table: str, column: str) -> str:
"""Returns database SQL datatype for a column: e.g. VARCHAR."""
return self.flavour.get_datatype(self, table, column).upper() |
def vb_wait_for_session_state(xp_session, state='Unlocked', timeout=10, step=None):
'''
Waits until a session state has been reached, checking at regular intervals.
@param xp_session:
@type xp_session: ISession from the Virtualbox API
@param state: The constant descriptor according to the docs
@type state: str
@param timeout: in seconds
@type timeout: int | float
@param step: Intervals at which the value is checked
@type step: int | float
@return: Did we reach the state?
@rtype: bool
'''
args = (xp_session, state)
wait_for(_check_session_state, timeout=timeout, step=step, default=False, func_args=args) | Waits until a session state has been reached, checking at regular intervals.
@param xp_session:
@type xp_session: ISession from the Virtualbox API
@param state: The constant descriptor according to the docs
@type state: str
@param timeout: in seconds
@type timeout: int | float
@param step: Intervals at which the value is checked
@type step: int | float
@return: Did we reach the state?
@rtype: bool | Below is the instruction that describes the task:
### Input:
Waits until a session state has been reached, checking at regular intervals.
@param xp_session:
@type xp_session: ISession from the Virtualbox API
@param state: The constant descriptor according to the docs
@type state: str
@param timeout: in seconds
@type timeout: int | float
@param step: Intervals at which the value is checked
@type step: int | float
@return: Did we reach the state?
@rtype: bool
### Response:
def vb_wait_for_session_state(xp_session, state='Unlocked', timeout=10, step=None):
'''
Waits until a session state has been reached, checking at regular intervals.
@param xp_session:
@type xp_session: ISession from the Virtualbox API
@param state: The constant descriptor according to the docs
@type state: str
@param timeout: in seconds
@type timeout: int | float
@param step: Intervals at which the value is checked
@type step: int | float
@return: Did we reach the state?
@rtype: bool
'''
args = (xp_session, state)
wait_for(_check_session_state, timeout=timeout, step=step, default=False, func_args=args) |
def API520_SH(T1, P1):
r'''Calculates correction due to steam superheat for steam flow for use in
API 520 relief valve sizing. 2D interpolation among a table with 28
pressures and 10 temperatures is performed.
Parameters
----------
T1 : float
Temperature of the fluid entering the valve [K]
P1 : float
Upstream relieving pressure; the set pressure plus the allowable
overpressure, plus atmospheric pressure, [Pa]
Returns
-------
KSH : float
Correction due to steam superheat [-]
Notes
-----
For P above 20679 kPag, use the critical flow model.
Superheat cannot be above 649 degrees Celsius.
If T1 is above 149 degrees Celsius, returns 1.
Examples
--------
Custom example from table 9:
>>> API520_SH(593+273.15, 1066.325E3)
0.7201800000000002
References
----------
.. [1] API Standard 520, Part 1 - Sizing and Selection.
'''
if P1 > 20780325.0: # 20679E3+atm
raise Exception('For P above 20679 kPag, use the critical flow model')
if T1 > 922.15:
raise Exception('Superheat cannot be above 649 degrees Celcius')
if T1 < 422.15:
return 1. # No superheat under 15 psig
return float(bisplev(T1, P1, API520_KSH_tck)) | r'''Calculates correction due to steam superheat for steam flow for use in
API 520 relief valve sizing. 2D interpolation among a table with 28
pressures and 10 temperatures is performed.
Parameters
----------
T1 : float
Temperature of the fluid entering the valve [K]
P1 : float
Upstream relieving pressure; the set pressure plus the allowable
overpressure, plus atmospheric pressure, [Pa]
Returns
-------
KSH : float
Correction due to steam superheat [-]
Notes
-----
For P above 20679 kPag, use the critical flow model.
Superheat cannot be above 649 degrees Celsius.
If T1 is above 149 degrees Celsius, returns 1.
Examples
--------
Custom example from table 9:
>>> API520_SH(593+273.15, 1066.325E3)
0.7201800000000002
References
----------
.. [1] API Standard 520, Part 1 - Sizing and Selection. | Below is the the instruction that describes the task:
### Input:
r'''Calculates correction due to steam superheat for steam flow for use in
API 520 relief valve sizing. 2D interpolation among a table with 28
pressures and 10 temperatures is performed.
Parameters
----------
T1 : float
Temperature of the fluid entering the valve [K]
P1 : float
Upstream relieving pressure; the set pressure plus the allowable
overpressure, plus atmospheric pressure, [Pa]
Returns
-------
KSH : float
Correction due to steam superheat [-]
Notes
-----
For P above 20679 kPag, use the critical flow model.
Superheat cannot be above 649 degrees Celsius.
If T1 is above 149 degrees Celsius, returns 1.
Examples
--------
Custom example from table 9:
>>> API520_SH(593+273.15, 1066.325E3)
0.7201800000000002
References
----------
.. [1] API Standard 520, Part 1 - Sizing and Selection.
### Response:
def API520_SH(T1, P1):
r'''Calculates correction due to steam superheat for steam flow for use in
API 520 relief valve sizing. 2D interpolation among a table with 28
pressures and 10 temperatures is performed.
Parameters
----------
T1 : float
Temperature of the fluid entering the valve [K]
P1 : float
Upstream relieving pressure; the set pressure plus the allowable
overpressure, plus atmospheric pressure, [Pa]
Returns
-------
KSH : float
Correction due to steam superheat [-]
Notes
-----
For P above 20679 kPag, use the critical flow model.
Superheat cannot be above 649 degrees Celsius.
If T1 is above 149 degrees Celsius, returns 1.
Examples
--------
Custom example from table 9:
>>> API520_SH(593+273.15, 1066.325E3)
0.7201800000000002
References
----------
.. [1] API Standard 520, Part 1 - Sizing and Selection.
'''
if P1 > 20780325.0: # 20679E3+atm
raise Exception('For P above 20679 kPag, use the critical flow model')
if T1 > 922.15:
raise Exception('Superheat cannot be above 649 degrees Celcius')
if T1 < 422.15:
return 1. # No superheat under 15 psig
return float(bisplev(T1, P1, API520_KSH_tck)) |
def get_entity_meta(self, datastream):
"""
To add entity meta data to a datastream
:param datastream: string
:param options: dict
"""
url = '/datastream/' + str(datastream) + '/entityMeta'
response = self.http.get(url)
entityMetaList = []
for entityMeta in response:
entityMetaList.append(Schemas.EntityMeta(entityMeta=entityMeta))
return entityMetaList | To add entity meta data to a datastream
:param datastream: string
:param options: dict | Below is the the instruction that describes the task:
### Input:
To add entity meta data to a datastream
:param datastream: string
:param options: dict
### Response:
def get_entity_meta(self, datastream):
"""
To add entity meta data to a datastream
:param datastream: string
:param options: dict
"""
url = '/datastream/' + str(datastream) + '/entityMeta'
response = self.http.get(url)
entityMetaList = []
for entityMeta in response:
entityMetaList.append(Schemas.EntityMeta(entityMeta=entityMeta))
return entityMetaList |
def __cutline(self, oiraw):
'''对单行进行分词,这段函数包含前处理preprogress.py以及一系列后处理,将分词结果返回为一个map'''
oiraw = decode(oiraw)
vec = []
if(len(oiraw) < self.__maxLength):
vec.append(oiraw)
else:
vec = self.__cutRaw(oiraw, self.__maxLength)
ans = []
for oiraw in vec:
if(self.__useT2S):
traw, __poc_cands = self.__preprocesserpreprocesser.clean(oiraw)
raw = self.__preprocesserpreprocesser.T2S(traw)
else:
raw, __poc_cands = self.__preprocesserpreprocesser.clean(oiraw)
# raw = oiraw
if(len(raw) > 0):
if(self.__seg_only):
tmp, tagged = self.__cws_tagging_decoder.segmentTag(raw, __poc_cands)
segged = self.__cws_tagging_decoder.get_seg_result()
if(self.__userDict is not None):
self.__userDict.adjustSeg(segged)
if(self.__use_filter):
self.__myfilter.adjustSeg(segged)
self.__nsDict.adjustSeg(segged)
self.__idiomDict.adjustSeg(segged)
self.__timeword.adjustSeg(segged)
self.__punctuation.adjustSeg(segged)
ans.extend(segged)
# return list(map(lambda x: encode(x), segged))
else:
tmp, tagged = self.__tagging_decoder.segmentTag(raw, __poc_cands)
if(self.__userDict is not None):
self.__userDict.adjustTag(tagged)
if(self.__use_filter):
self.__myfilter.adjustTag(tagged)
self.__nsDict.adjustTag(tagged)
self.__idiomDict.adjustTag(tagged)
self.__timeword.adjustTag(tagged)
self.__punctuation.adjustTag(tagged)
ans.extend(tagged)
if(self.__seg_only):
return map(lambda x: encode(x), ans)
else:
return map(lambda x: (encode(x[0]), encode(x[1]), encode(x[2])), ans) | 对单行进行分词,这段函数包含前处理preprogress.py以及一系列后处理,将分词结果返回为一个map | Below is the the instruction that describes the task:
### Input:
对单行进行分词,这段函数包含前处理preprogress.py以及一系列后处理,将分词结果返回为一个map
### Response:
def __cutline(self, oiraw):
'''对单行进行分词,这段函数包含前处理preprogress.py以及一系列后处理,将分词结果返回为一个map'''
oiraw = decode(oiraw)
vec = []
if(len(oiraw) < self.__maxLength):
vec.append(oiraw)
else:
vec = self.__cutRaw(oiraw, self.__maxLength)
ans = []
for oiraw in vec:
if(self.__useT2S):
traw, __poc_cands = self.__preprocesserpreprocesser.clean(oiraw)
raw = self.__preprocesserpreprocesser.T2S(traw)
else:
raw, __poc_cands = self.__preprocesserpreprocesser.clean(oiraw)
# raw = oiraw
if(len(raw) > 0):
if(self.__seg_only):
tmp, tagged = self.__cws_tagging_decoder.segmentTag(raw, __poc_cands)
segged = self.__cws_tagging_decoder.get_seg_result()
if(self.__userDict is not None):
self.__userDict.adjustSeg(segged)
if(self.__use_filter):
self.__myfilter.adjustSeg(segged)
self.__nsDict.adjustSeg(segged)
self.__idiomDict.adjustSeg(segged)
self.__timeword.adjustSeg(segged)
self.__punctuation.adjustSeg(segged)
ans.extend(segged)
# return list(map(lambda x: encode(x), segged))
else:
tmp, tagged = self.__tagging_decoder.segmentTag(raw, __poc_cands)
if(self.__userDict is not None):
self.__userDict.adjustTag(tagged)
if(self.__use_filter):
self.__myfilter.adjustTag(tagged)
self.__nsDict.adjustTag(tagged)
self.__idiomDict.adjustTag(tagged)
self.__timeword.adjustTag(tagged)
self.__punctuation.adjustTag(tagged)
ans.extend(tagged)
if(self.__seg_only):
return map(lambda x: encode(x), ans)
else:
return map(lambda x: (encode(x[0]), encode(x[1]), encode(x[2])), ans) |
def addup_fluxes(self):
"""Add up the sum of the fluxes calculated so far.
>>> from hydpy.models.test_v1 import *
>>> parameterstep()
>>> fluxes.fastaccess._q_sum = 1.0
>>> fluxes.q(2.0)
>>> model.addup_fluxes()
>>> fluxes.fastaccess._q_sum
3.0
"""
fluxes = self.sequences.fluxes
for flux in fluxes.numerics:
sum_ = getattr(fluxes.fastaccess, '_%s_sum' % flux.name)
sum_ += flux
if flux.NDIM == 0:
setattr(fluxes.fastaccess, '_%s_sum' % flux.name, sum_) | Add up the sum of the fluxes calculated so far.
>>> from hydpy.models.test_v1 import *
>>> parameterstep()
>>> fluxes.fastaccess._q_sum = 1.0
>>> fluxes.q(2.0)
>>> model.addup_fluxes()
>>> fluxes.fastaccess._q_sum
3.0 | Below is the the instruction that describes the task:
### Input:
Add up the sum of the fluxes calculated so far.
>>> from hydpy.models.test_v1 import *
>>> parameterstep()
>>> fluxes.fastaccess._q_sum = 1.0
>>> fluxes.q(2.0)
>>> model.addup_fluxes()
>>> fluxes.fastaccess._q_sum
3.0
### Response:
def addup_fluxes(self):
"""Add up the sum of the fluxes calculated so far.
>>> from hydpy.models.test_v1 import *
>>> parameterstep()
>>> fluxes.fastaccess._q_sum = 1.0
>>> fluxes.q(2.0)
>>> model.addup_fluxes()
>>> fluxes.fastaccess._q_sum
3.0
"""
fluxes = self.sequences.fluxes
for flux in fluxes.numerics:
sum_ = getattr(fluxes.fastaccess, '_%s_sum' % flux.name)
sum_ += flux
if flux.NDIM == 0:
setattr(fluxes.fastaccess, '_%s_sum' % flux.name, sum_) |
def moveTo(self, newX=0, newY=0):
"""!
\~english
Move vertex of rectangles to new point (x,y)
@param newX: Coordinated X value
@param newY: Coordinated Y value
\~chinese
移动矩形到新坐标点 (x,y)
@param newX: 坐标 X
@param newY: 坐标 Y
"""
self.x = newX
self.y = newY | !
\~english
Move vertex of rectangles to new point (x,y)
@param newX: Coordinated X value
@param newY: Coordinated Y value
\~chinese
移动矩形到新坐标点 (x,y)
@param newX: 坐标 X
@param newY: 坐标 Y | Below is the the instruction that describes the task:
### Input:
!
\~english
Move vertex of rectangles to new point (x,y)
@param newX: Coordinated X value
@param newY: Coordinated Y value
\~chinese
移动矩形到新坐标点 (x,y)
@param newX: 坐标 X
@param newY: 坐标 Y
### Response:
def moveTo(self, newX=0, newY=0):
"""!
\~english
Move vertex of rectangles to new point (x,y)
@param newX: Coordinated X value
@param newY: Coordinated Y value
\~chinese
移动矩形到新坐标点 (x,y)
@param newX: 坐标 X
@param newY: 坐标 Y
"""
self.x = newX
self.y = newY |
def scalarcoords(self):
"""A dictionary of values that don't label any axes (point-like)."""
return {k: v.values for k, v in self.coords.items() if v.dims==()} | A dictionary of values that don't label any axes (point-like). | Below is the the instruction that describes the task:
### Input:
A dictionary of values that don't label any axes (point-like).
### Response:
def scalarcoords(self):
"""A dictionary of values that don't label any axes (point-like)."""
return {k: v.values for k, v in self.coords.items() if v.dims==()} |
def _run_single(self, thread_id, agent, environment, deterministic=False,
max_episode_timesteps=-1, episode_finished=None, testing=False, sleep=None):
"""
The target function for a thread, runs an agent and environment until signaled to stop.
Adds rewards to shared episode rewards list.
Args:
thread_id (int): The ID of the thread that's running this target function.
agent (Agent): The Agent object that this particular thread uses.
environment (Environment): The Environment object that this particular thread uses.
max_episode_timesteps (int): Max. number of timesteps per episode. Use -1 or 0 for non-limited episodes.
episode_finished (callable): Function called after each episode that takes an episode summary spec and
returns False, if this single run should terminate after this episode.
Can be used e.g. to set a particular mean reward threshold.
"""
# figure out whether we are using the deprecated way of "episode_finished" reporting
old_episode_finished = False
if episode_finished is not None and len(getargspec(episode_finished).args) == 1:
old_episode_finished = True
episode = 0
# Run this single worker (episode loop) as long as global count thresholds have not been reached.
while not self.should_stop:
state = environment.reset()
agent.reset()
self.global_timestep, self.global_episode = agent.timestep, agent.episode
episode_reward = 0
# Time step (within episode) loop
time_step = 0
time_start = time.time()
while True:
action, internals, states = agent.act(states=state, deterministic=deterministic, buffered=False)
reward = 0
for repeat in xrange(self.repeat_actions):
state, terminal, step_reward = environment.execute(action=action)
reward += step_reward
if terminal:
break
if not testing:
# agent.observe(reward=reward, terminal=terminal)
# Insert everything at once.
agent.atomic_observe(
states=state,
actions=action,
internals=internals,
reward=reward,
terminal=terminal
)
if sleep is not None:
time.sleep(sleep)
time_step += 1
episode_reward += reward
if terminal or time_step == max_episode_timesteps:
break
# Abort the episode (discard its results) when global says so.
if self.should_stop:
return
self.global_timestep += time_step
# Avoid race condition where order in episode_rewards won't match order in episode_timesteps.
self.episode_list_lock.acquire()
self.episode_rewards.append(episode_reward)
self.episode_timesteps.append(time_step)
self.episode_times.append(time.time() - time_start)
self.episode_list_lock.release()
if episode_finished is not None:
# old way of calling episode_finished
if old_episode_finished:
summary_data = {
"thread_id": thread_id,
"episode": episode,
"timestep": time_step,
"episode_reward": episode_reward
}
if not episode_finished(summary_data):
return
# New way with BasicRunner (self) and thread-id.
elif not episode_finished(self, thread_id):
return
episode += 1 | The target function for a thread, runs an agent and environment until signaled to stop.
Adds rewards to shared episode rewards list.
Args:
thread_id (int): The ID of the thread that's running this target function.
agent (Agent): The Agent object that this particular thread uses.
environment (Environment): The Environment object that this particular thread uses.
max_episode_timesteps (int): Max. number of timesteps per episode. Use -1 or 0 for non-limited episodes.
episode_finished (callable): Function called after each episode that takes an episode summary spec and
returns False, if this single run should terminate after this episode.
Can be used e.g. to set a particular mean reward threshold. | Below is the the instruction that describes the task:
### Input:
The target function for a thread, runs an agent and environment until signaled to stop.
Adds rewards to shared episode rewards list.
Args:
thread_id (int): The ID of the thread that's running this target function.
agent (Agent): The Agent object that this particular thread uses.
environment (Environment): The Environment object that this particular thread uses.
max_episode_timesteps (int): Max. number of timesteps per episode. Use -1 or 0 for non-limited episodes.
episode_finished (callable): Function called after each episode that takes an episode summary spec and
returns False, if this single run should terminate after this episode.
Can be used e.g. to set a particular mean reward threshold.
### Response:
def _run_single(self, thread_id, agent, environment, deterministic=False,
max_episode_timesteps=-1, episode_finished=None, testing=False, sleep=None):
"""
The target function for a thread, runs an agent and environment until signaled to stop.
Adds rewards to shared episode rewards list.
Args:
thread_id (int): The ID of the thread that's running this target function.
agent (Agent): The Agent object that this particular thread uses.
environment (Environment): The Environment object that this particular thread uses.
max_episode_timesteps (int): Max. number of timesteps per episode. Use -1 or 0 for non-limited episodes.
episode_finished (callable): Function called after each episode that takes an episode summary spec and
returns False, if this single run should terminate after this episode.
Can be used e.g. to set a particular mean reward threshold.
"""
# figure out whether we are using the deprecated way of "episode_finished" reporting
old_episode_finished = False
if episode_finished is not None and len(getargspec(episode_finished).args) == 1:
old_episode_finished = True
episode = 0
# Run this single worker (episode loop) as long as global count thresholds have not been reached.
while not self.should_stop:
state = environment.reset()
agent.reset()
self.global_timestep, self.global_episode = agent.timestep, agent.episode
episode_reward = 0
# Time step (within episode) loop
time_step = 0
time_start = time.time()
while True:
action, internals, states = agent.act(states=state, deterministic=deterministic, buffered=False)
reward = 0
for repeat in xrange(self.repeat_actions):
state, terminal, step_reward = environment.execute(action=action)
reward += step_reward
if terminal:
break
if not testing:
# agent.observe(reward=reward, terminal=terminal)
# Insert everything at once.
agent.atomic_observe(
states=state,
actions=action,
internals=internals,
reward=reward,
terminal=terminal
)
if sleep is not None:
time.sleep(sleep)
time_step += 1
episode_reward += reward
if terminal or time_step == max_episode_timesteps:
break
# Abort the episode (discard its results) when global says so.
if self.should_stop:
return
self.global_timestep += time_step
# Avoid race condition where order in episode_rewards won't match order in episode_timesteps.
self.episode_list_lock.acquire()
self.episode_rewards.append(episode_reward)
self.episode_timesteps.append(time_step)
self.episode_times.append(time.time() - time_start)
self.episode_list_lock.release()
if episode_finished is not None:
# old way of calling episode_finished
if old_episode_finished:
summary_data = {
"thread_id": thread_id,
"episode": episode,
"timestep": time_step,
"episode_reward": episode_reward
}
if not episode_finished(summary_data):
return
# New way with BasicRunner (self) and thread-id.
elif not episode_finished(self, thread_id):
return
episode += 1 |
def bsp_traverse_in_order(
node: tcod.bsp.BSP,
callback: Callable[[tcod.bsp.BSP, Any], None],
userData: Any = 0,
) -> None:
"""Traverse this nodes hierarchy with a callback.
.. deprecated:: 2.0
Use :any:`BSP.in_order` instead.
"""
_bsp_traverse(node.in_order(), callback, userData) | Traverse this nodes hierarchy with a callback.
.. deprecated:: 2.0
Use :any:`BSP.in_order` instead. | Below is the the instruction that describes the task:
### Input:
Traverse this nodes hierarchy with a callback.
.. deprecated:: 2.0
Use :any:`BSP.in_order` instead.
### Response:
def bsp_traverse_in_order(
node: tcod.bsp.BSP,
callback: Callable[[tcod.bsp.BSP, Any], None],
userData: Any = 0,
) -> None:
"""Traverse this nodes hierarchy with a callback.
.. deprecated:: 2.0
Use :any:`BSP.in_order` instead.
"""
_bsp_traverse(node.in_order(), callback, userData) |
def get_intra_edges(self, time_slice=0):
"""
Returns the intra slice edges present in the 2-TBN.
Parameter
---------
time_slice: int (whole number)
The time slice for which to get intra edges. The timeslice
should be a positive value or zero.
Examples
--------
>>> from pgmpy.models import DynamicBayesianNetwork as DBN
>>> dbn = DBN()
>>> dbn.add_nodes_from(['D', 'G', 'I', 'S', 'L'])
>>> dbn.add_edges_from([(('D', 0), ('G', 0)), (('I', 0), ('G', 0)),
... (('G', 0), ('L', 0)), (('D', 0), ('D', 1)),
... (('I', 0), ('I', 1)), (('G', 0), ('G', 1)),
... (('G', 0), ('L', 1)), (('L', 0), ('L', 1))])
>>> dbn.get_intra_edges()
[(('D', 0), ('G', 0)), (('G', 0), ('L', 0)), (('I', 0), ('G', 0))
"""
if not isinstance(time_slice, int) or time_slice < 0:
raise ValueError("The timeslice should be a positive value greater than or equal to zero")
return [tuple((x[0], time_slice) for x in edge) for edge in self.edges() if edge[0][1] == edge[1][1] == 0] | Returns the intra slice edges present in the 2-TBN.
Parameter
---------
time_slice: int (whole number)
The time slice for which to get intra edges. The timeslice
should be a positive value or zero.
Examples
--------
>>> from pgmpy.models import DynamicBayesianNetwork as DBN
>>> dbn = DBN()
>>> dbn.add_nodes_from(['D', 'G', 'I', 'S', 'L'])
>>> dbn.add_edges_from([(('D', 0), ('G', 0)), (('I', 0), ('G', 0)),
... (('G', 0), ('L', 0)), (('D', 0), ('D', 1)),
... (('I', 0), ('I', 1)), (('G', 0), ('G', 1)),
... (('G', 0), ('L', 1)), (('L', 0), ('L', 1))])
>>> dbn.get_intra_edges()
[(('D', 0), ('G', 0)), (('G', 0), ('L', 0)), (('I', 0), ('G', 0)) | Below is the the instruction that describes the task:
### Input:
Returns the intra slice edges present in the 2-TBN.
Parameter
---------
time_slice: int (whole number)
The time slice for which to get intra edges. The timeslice
should be a positive value or zero.
Examples
--------
>>> from pgmpy.models import DynamicBayesianNetwork as DBN
>>> dbn = DBN()
>>> dbn.add_nodes_from(['D', 'G', 'I', 'S', 'L'])
>>> dbn.add_edges_from([(('D', 0), ('G', 0)), (('I', 0), ('G', 0)),
... (('G', 0), ('L', 0)), (('D', 0), ('D', 1)),
... (('I', 0), ('I', 1)), (('G', 0), ('G', 1)),
... (('G', 0), ('L', 1)), (('L', 0), ('L', 1))])
>>> dbn.get_intra_edges()
[(('D', 0), ('G', 0)), (('G', 0), ('L', 0)), (('I', 0), ('G', 0))
### Response:
def get_intra_edges(self, time_slice=0):
"""
Returns the intra slice edges present in the 2-TBN.
Parameter
---------
time_slice: int (whole number)
The time slice for which to get intra edges. The timeslice
should be a positive value or zero.
Examples
--------
>>> from pgmpy.models import DynamicBayesianNetwork as DBN
>>> dbn = DBN()
>>> dbn.add_nodes_from(['D', 'G', 'I', 'S', 'L'])
>>> dbn.add_edges_from([(('D', 0), ('G', 0)), (('I', 0), ('G', 0)),
... (('G', 0), ('L', 0)), (('D', 0), ('D', 1)),
... (('I', 0), ('I', 1)), (('G', 0), ('G', 1)),
... (('G', 0), ('L', 1)), (('L', 0), ('L', 1))])
>>> dbn.get_intra_edges()
[(('D', 0), ('G', 0)), (('G', 0), ('L', 0)), (('I', 0), ('G', 0))
"""
if not isinstance(time_slice, int) or time_slice < 0:
raise ValueError("The timeslice should be a positive value greater than or equal to zero")
return [tuple((x[0], time_slice) for x in edge) for edge in self.edges() if edge[0][1] == edge[1][1] == 0] |
def del_register_user(self, register_user):
"""
Deletes registration object from database
:param register_user: RegisterUser object to delete
"""
try:
self.get_session.delete(register_user)
self.get_session.commit()
return True
except Exception as e:
log.error(c.LOGMSG_ERR_SEC_DEL_REGISTER_USER.format(str(e)))
self.get_session.rollback()
return False | Deletes registration object from database
:param register_user: RegisterUser object to delete | Below is the the instruction that describes the task:
### Input:
Deletes registration object from database
:param register_user: RegisterUser object to delete
### Response:
def del_register_user(self, register_user):
"""
Deletes registration object from database
:param register_user: RegisterUser object to delete
"""
try:
self.get_session.delete(register_user)
self.get_session.commit()
return True
except Exception as e:
log.error(c.LOGMSG_ERR_SEC_DEL_REGISTER_USER.format(str(e)))
self.get_session.rollback()
return False |
def _write_flush(self, fd, payload=None):
"""Write a payload to a given fd (if provided) and flush the fd."""
try:
if payload:
fd.write(ensure_binary(payload))
fd.flush()
except (IOError, OSError) as e:
# If a `Broken Pipe` is encountered during a stdio fd write, we're headless - bail.
if e.errno == errno.EPIPE and self._exit_on_broken_pipe:
sys.exit()
# Otherwise, re-raise.
raise | Write a payload to a given fd (if provided) and flush the fd. | Below is the the instruction that describes the task:
### Input:
Write a payload to a given fd (if provided) and flush the fd.
### Response:
def _write_flush(self, fd, payload=None):
"""Write a payload to a given fd (if provided) and flush the fd."""
try:
if payload:
fd.write(ensure_binary(payload))
fd.flush()
except (IOError, OSError) as e:
# If a `Broken Pipe` is encountered during a stdio fd write, we're headless - bail.
if e.errno == errno.EPIPE and self._exit_on_broken_pipe:
sys.exit()
# Otherwise, re-raise.
raise |
def convert(self, args, handler=None):
"""Prepare filters."""
name = args
field = attr = None
opts = ()
if isinstance(args, (list, tuple)):
name, *opts = args
if opts:
attr = opts.pop()
if opts:
field = opts.pop()
if not field and handler and handler.Schema:
field = handler.Schema._declared_fields.get(attr or name) or \
self.FILTER_CLASS.field_cls()
field.attribute = field.attribute or attr or name
return self.FILTER_CLASS(name, attr=attr, field=field, *opts) | Prepare filters. | Below is the the instruction that describes the task:
### Input:
Prepare filters.
### Response:
def convert(self, args, handler=None):
"""Prepare filters."""
name = args
field = attr = None
opts = ()
if isinstance(args, (list, tuple)):
name, *opts = args
if opts:
attr = opts.pop()
if opts:
field = opts.pop()
if not field and handler and handler.Schema:
field = handler.Schema._declared_fields.get(attr or name) or \
self.FILTER_CLASS.field_cls()
field.attribute = field.attribute or attr or name
return self.FILTER_CLASS(name, attr=attr, field=field, *opts) |
def submit(self, map, method, postfix):
'''Realiza um requisição HTTP para a networkAPI.
:param map: Dicionário com os dados para gerar o XML enviado no corpo da requisição HTTP.
:param method: Método da requisição HTTP ('GET', 'POST', 'PUT' ou 'DELETE').
:param postfix: Posfixo a ser colocado na URL básica de acesso à networkAPI. Ex: /ambiente
:return: Tupla com o código e o corpo da resposta HTTP:
(< codigo>, < descricao>)
:raise NetworkAPIClientError: Erro durante a chamada HTTP para acesso à networkAPI.
'''
try:
rest_request = RestRequest(
self.get_url(postfix),
method,
self.user,
self.password,
self.user_ldap)
return rest_request.submit(map)
except RestError as e:
raise ErrorHandler.handle(None, str(e)) | Realiza um requisição HTTP para a networkAPI.
:param map: Dicionário com os dados para gerar o XML enviado no corpo da requisição HTTP.
:param method: Método da requisição HTTP ('GET', 'POST', 'PUT' ou 'DELETE').
:param postfix: Posfixo a ser colocado na URL básica de acesso à networkAPI. Ex: /ambiente
:return: Tupla com o código e o corpo da resposta HTTP:
(< codigo>, < descricao>)
:raise NetworkAPIClientError: Erro durante a chamada HTTP para acesso à networkAPI. | Below is the the instruction that describes the task:
### Input:
Realiza um requisição HTTP para a networkAPI.
:param map: Dicionário com os dados para gerar o XML enviado no corpo da requisição HTTP.
:param method: Método da requisição HTTP ('GET', 'POST', 'PUT' ou 'DELETE').
:param postfix: Posfixo a ser colocado na URL básica de acesso à networkAPI. Ex: /ambiente
:return: Tupla com o código e o corpo da resposta HTTP:
(< codigo>, < descricao>)
:raise NetworkAPIClientError: Erro durante a chamada HTTP para acesso à networkAPI.
### Response:
def submit(self, map, method, postfix):
'''Realiza um requisição HTTP para a networkAPI.
:param map: Dicionário com os dados para gerar o XML enviado no corpo da requisição HTTP.
:param method: Método da requisição HTTP ('GET', 'POST', 'PUT' ou 'DELETE').
:param postfix: Posfixo a ser colocado na URL básica de acesso à networkAPI. Ex: /ambiente
:return: Tupla com o código e o corpo da resposta HTTP:
(< codigo>, < descricao>)
:raise NetworkAPIClientError: Erro durante a chamada HTTP para acesso à networkAPI.
'''
try:
rest_request = RestRequest(
self.get_url(postfix),
method,
self.user,
self.password,
self.user_ldap)
return rest_request.submit(map)
except RestError as e:
raise ErrorHandler.handle(None, str(e)) |
def jieba_tokenize(text, external_wordlist=False):
"""
Tokenize the given text into tokens whose word frequencies can probably
be looked up. This uses Jieba, a word-frequency-based tokenizer.
If `external_wordlist` is False, we tell Jieba to default to using
wordfreq's own Chinese wordlist, and not to infer unknown words using a
hidden Markov model. This ensures that the multi-character tokens that it
outputs will be ones whose word frequencies we can look up.
If `external_wordlist` is True, this will use the largest version of
Jieba's original dictionary, with HMM enabled, so its results will be
independent of the data in wordfreq. These results will be better optimized
for purposes that aren't looking up word frequencies, such as general-
purpose tokenization, or collecting word frequencies in the first place.
"""
global jieba_tokenizer, jieba_orig_tokenizer
if external_wordlist:
if jieba_orig_tokenizer is None:
jieba_orig_tokenizer = jieba.Tokenizer(dictionary=ORIG_DICT_FILENAME)
return jieba_orig_tokenizer.lcut(text)
else:
if jieba_tokenizer is None:
jieba_tokenizer = jieba.Tokenizer(dictionary=DICT_FILENAME)
# Tokenize the Simplified Chinese version of the text, but return
# those spans from the original text, even if it's in Traditional
# Chinese
tokens = []
for _token, start, end in jieba_tokenizer.tokenize(simplify_chinese(text), HMM=False):
tokens.append(text[start:end])
return tokens | Tokenize the given text into tokens whose word frequencies can probably
be looked up. This uses Jieba, a word-frequency-based tokenizer.
If `external_wordlist` is False, we tell Jieba to default to using
wordfreq's own Chinese wordlist, and not to infer unknown words using a
hidden Markov model. This ensures that the multi-character tokens that it
outputs will be ones whose word frequencies we can look up.
If `external_wordlist` is True, this will use the largest version of
Jieba's original dictionary, with HMM enabled, so its results will be
independent of the data in wordfreq. These results will be better optimized
for purposes that aren't looking up word frequencies, such as general-
purpose tokenization, or collecting word frequencies in the first place. | Below is the the instruction that describes the task:
### Input:
Tokenize the given text into tokens whose word frequencies can probably
be looked up. This uses Jieba, a word-frequency-based tokenizer.
If `external_wordlist` is False, we tell Jieba to default to using
wordfreq's own Chinese wordlist, and not to infer unknown words using a
hidden Markov model. This ensures that the multi-character tokens that it
outputs will be ones whose word frequencies we can look up.
If `external_wordlist` is True, this will use the largest version of
Jieba's original dictionary, with HMM enabled, so its results will be
independent of the data in wordfreq. These results will be better optimized
for purposes that aren't looking up word frequencies, such as general-
purpose tokenization, or collecting word frequencies in the first place.
### Response:
def jieba_tokenize(text, external_wordlist=False):
"""
Tokenize the given text into tokens whose word frequencies can probably
be looked up. This uses Jieba, a word-frequency-based tokenizer.
If `external_wordlist` is False, we tell Jieba to default to using
wordfreq's own Chinese wordlist, and not to infer unknown words using a
hidden Markov model. This ensures that the multi-character tokens that it
outputs will be ones whose word frequencies we can look up.
If `external_wordlist` is True, this will use the largest version of
Jieba's original dictionary, with HMM enabled, so its results will be
independent of the data in wordfreq. These results will be better optimized
for purposes that aren't looking up word frequencies, such as general-
purpose tokenization, or collecting word frequencies in the first place.
"""
global jieba_tokenizer, jieba_orig_tokenizer
if external_wordlist:
if jieba_orig_tokenizer is None:
jieba_orig_tokenizer = jieba.Tokenizer(dictionary=ORIG_DICT_FILENAME)
return jieba_orig_tokenizer.lcut(text)
else:
if jieba_tokenizer is None:
jieba_tokenizer = jieba.Tokenizer(dictionary=DICT_FILENAME)
# Tokenize the Simplified Chinese version of the text, but return
# those spans from the original text, even if it's in Traditional
# Chinese
tokens = []
for _token, start, end in jieba_tokenizer.tokenize(simplify_chinese(text), HMM=False):
tokens.append(text[start:end])
return tokens |
def get_size(self, value=None):
"""Return struct size.
Returns:
int: Returns the struct size based on inner attributes.
"""
if isinstance(value, type(self)):
return value.get_size()
return 2 + self.length | Return struct size.
Returns:
int: Returns the struct size based on inner attributes. | Below is the the instruction that describes the task:
### Input:
Return struct size.
Returns:
int: Returns the struct size based on inner attributes.
### Response:
def get_size(self, value=None):
"""Return struct size.
Returns:
int: Returns the struct size based on inner attributes.
"""
if isinstance(value, type(self)):
return value.get_size()
return 2 + self.length |
def new(self):
# type: () -> None
'''
A method to create a new UDF Logical Volume Implementation Use.
Parameters:
None.
Returns:
Nothing.
'''
if self._initialized:
raise pycdlibexception.PyCdlibInternalError('UDF Logical Volume Implementation Use already initialized')
self.impl_id = UDFEntityID()
self.impl_id.new(0, b'*pycdlib')
self.num_files = 0
self.num_dirs = 1
self.min_udf_read_revision = 258
self.min_udf_write_revision = 258
self.max_udf_write_revision = 258
self.impl_use = b'\x00' * 378 # FIXME: let the user set this
self._initialized = True | A method to create a new UDF Logical Volume Implementation Use.
Parameters:
None.
Returns:
Nothing. | Below is the the instruction that describes the task:
### Input:
A method to create a new UDF Logical Volume Implementation Use.
Parameters:
None.
Returns:
Nothing.
### Response:
def new(self):
# type: () -> None
'''
A method to create a new UDF Logical Volume Implementation Use.
Parameters:
None.
Returns:
Nothing.
'''
if self._initialized:
raise pycdlibexception.PyCdlibInternalError('UDF Logical Volume Implementation Use already initialized')
self.impl_id = UDFEntityID()
self.impl_id.new(0, b'*pycdlib')
self.num_files = 0
self.num_dirs = 1
self.min_udf_read_revision = 258
self.min_udf_write_revision = 258
self.max_udf_write_revision = 258
self.impl_use = b'\x00' * 378 # FIXME: let the user set this
self._initialized = True |
def set_interface_altsetting(self, interface = None, alternate_setting = None):
r"""Set the alternate setting for an interface.
When you want to use an interface and it has more than one alternate
setting, you should call this method to select the appropriate
alternate setting. If you call the method without one or the two
parameters, it will be selected the first one found in the Device in
the same way of the set_configuration method.
Commonly, an interface has only one alternate setting and this call is
not necessary. For most devices, either it has more than one
alternate setting or not, it is not harmful to make a call to this
method with no arguments, as devices will silently ignore the request
when there is only one alternate setting, though the USB Spec allows
devices with no additional alternate setting return an error to the
Host in response to a SET_INTERFACE request.
If you are in doubt, you may want to call it with no arguments wrapped
by a try/except clause:
>>> try:
>>> dev.set_interface_altsetting()
>>> except usb.core.USBError:
>>> pass
"""
self._ctx.managed_set_interface(self, interface, alternate_setting) | r"""Set the alternate setting for an interface.
When you want to use an interface and it has more than one alternate
setting, you should call this method to select the appropriate
alternate setting. If you call the method without one or the two
parameters, it will be selected the first one found in the Device in
the same way of the set_configuration method.
Commonly, an interface has only one alternate setting and this call is
not necessary. For most devices, either it has more than one
alternate setting or not, it is not harmful to make a call to this
method with no arguments, as devices will silently ignore the request
when there is only one alternate setting, though the USB Spec allows
devices with no additional alternate setting return an error to the
Host in response to a SET_INTERFACE request.
If you are in doubt, you may want to call it with no arguments wrapped
by a try/except clause:
>>> try:
>>> dev.set_interface_altsetting()
>>> except usb.core.USBError:
>>> pass | Below is the the instruction that describes the task:
### Input:
r"""Set the alternate setting for an interface.
When you want to use an interface and it has more than one alternate
setting, you should call this method to select the appropriate
alternate setting. If you call the method without one or the two
parameters, it will be selected the first one found in the Device in
the same way of the set_configuration method.
Commonly, an interface has only one alternate setting and this call is
not necessary. For most devices, either it has more than one
alternate setting or not, it is not harmful to make a call to this
method with no arguments, as devices will silently ignore the request
when there is only one alternate setting, though the USB Spec allows
devices with no additional alternate setting return an error to the
Host in response to a SET_INTERFACE request.
If you are in doubt, you may want to call it with no arguments wrapped
by a try/except clause:
>>> try:
>>> dev.set_interface_altsetting()
>>> except usb.core.USBError:
>>> pass
### Response:
def set_interface_altsetting(self, interface = None, alternate_setting = None):
r"""Set the alternate setting for an interface.
When you want to use an interface and it has more than one alternate
setting, you should call this method to select the appropriate
alternate setting. If you call the method without one or the two
parameters, it will be selected the first one found in the Device in
the same way of the set_configuration method.
Commonly, an interface has only one alternate setting and this call is
not necessary. For most devices, either it has more than one
alternate setting or not, it is not harmful to make a call to this
method with no arguments, as devices will silently ignore the request
when there is only one alternate setting, though the USB Spec allows
devices with no additional alternate setting return an error to the
Host in response to a SET_INTERFACE request.
If you are in doubt, you may want to call it with no arguments wrapped
by a try/except clause:
>>> try:
>>> dev.set_interface_altsetting()
>>> except usb.core.USBError:
>>> pass
"""
self._ctx.managed_set_interface(self, interface, alternate_setting) |
async def forget_ticket(self, request):
"""Called to forget the ticket data a request
Args:
request: aiohttp Request object.
"""
session = await get_session(request)
session.pop(self.cookie_name, '') | Called to forget the ticket data a request
Args:
request: aiohttp Request object. | Below is the the instruction that describes the task:
### Input:
Called to forget the ticket data a request
Args:
request: aiohttp Request object.
### Response:
async def forget_ticket(self, request):
"""Called to forget the ticket data a request
Args:
request: aiohttp Request object.
"""
session = await get_session(request)
session.pop(self.cookie_name, '') |
def check_attr(self, repo_abspath, attrs):
"""
Generator that returns attributes for given paths relative to repo_abspath.
>>> g = GitArchiver.check_attr('repo_path', ['export-ignore'])
>>> next(g)
>>> attrs = g.send('relative_path')
>>> print(attrs['export-ignore'])
@param repo_abspath: Absolute path to a git repository.
@type repo_abspath: str
@param attrs: Attributes to check.
@type attrs: [str]
@rtype: generator
"""
def make_process():
env = dict(environ, GIT_FLUSH='1')
cmd = 'git check-attr --stdin -z {0}'.format(' '.join(attrs))
return Popen(cmd, shell=True, stdin=PIPE, stdout=PIPE, cwd=repo_abspath, env=env)
def read_attrs(process, repo_file_path):
process.stdin.write(repo_file_path.encode('utf-8') + b'\0')
process.stdin.flush()
# For every attribute check-attr will output: <path> NUL <attribute> NUL <info> NUL
path, attr, info = b'', b'', b''
nuls_count = 0
nuls_expected = 3 * len(attrs)
while nuls_count != nuls_expected:
b = process.stdout.read(1)
if b == b'' and process.poll() is not None:
raise RuntimeError("check-attr exited prematurely")
elif b == b'\0':
nuls_count += 1
if nuls_count % 3 == 0:
yield map(self.decode_git_output, (path, attr, info))
path, attr, info = b'', b'', b''
elif nuls_count % 3 == 0:
path += b
elif nuls_count % 3 == 1:
attr += b
elif nuls_count % 3 == 2:
info += b
def read_attrs_old(process, repo_file_path):
"""
Compatibility with versions 1.8.5 and below that do not recognize -z for output.
"""
process.stdin.write(repo_file_path.encode('utf-8') + b'\0')
process.stdin.flush()
# For every attribute check-attr will output: <path>: <attribute>: <info>\n
# where <path> is c-quoted
path, attr, info = b'', b'', b''
lines_count = 0
lines_expected = len(attrs)
while lines_count != lines_expected:
line = process.stdout.readline()
info_start = line.rfind(b': ')
if info_start == -1:
raise RuntimeError("unexpected output of check-attr: {0}".format(line))
attr_start = line.rfind(b': ', 0, info_start)
if attr_start == -1:
raise RuntimeError("unexpected output of check-attr: {0}".format(line))
info = line[info_start + 2:len(line) - 1] # trim leading ": " and trailing \n
attr = line[attr_start + 2:info_start] # trim leading ": "
path = line[:attr_start]
yield map(self.decode_git_output, (path, attr, info))
lines_count += 1
if not attrs:
return
process = make_process()
try:
while True:
repo_file_path = yield
repo_file_attrs = {}
if self.git_version is None or self.git_version > (1, 8, 5):
reader = read_attrs
else:
reader = read_attrs_old
for path, attr, value in reader(process, repo_file_path):
repo_file_attrs[attr] = value
yield repo_file_attrs
finally:
process.stdin.close()
process.wait() | Generator that returns attributes for given paths relative to repo_abspath.
>>> g = GitArchiver.check_attr('repo_path', ['export-ignore'])
>>> next(g)
>>> attrs = g.send('relative_path')
>>> print(attrs['export-ignore'])
@param repo_abspath: Absolute path to a git repository.
@type repo_abspath: str
@param attrs: Attributes to check.
@type attrs: [str]
@rtype: generator | Below is the the instruction that describes the task:
### Input:
Generator that returns attributes for given paths relative to repo_abspath.
>>> g = GitArchiver.check_attr('repo_path', ['export-ignore'])
>>> next(g)
>>> attrs = g.send('relative_path')
>>> print(attrs['export-ignore'])
@param repo_abspath: Absolute path to a git repository.
@type repo_abspath: str
@param attrs: Attributes to check.
@type attrs: [str]
@rtype: generator
### Response:
def check_attr(self, repo_abspath, attrs):
"""
Generator that returns attributes for given paths relative to repo_abspath.
>>> g = GitArchiver.check_attr('repo_path', ['export-ignore'])
>>> next(g)
>>> attrs = g.send('relative_path')
>>> print(attrs['export-ignore'])
@param repo_abspath: Absolute path to a git repository.
@type repo_abspath: str
@param attrs: Attributes to check.
@type attrs: [str]
@rtype: generator
"""
def make_process():
env = dict(environ, GIT_FLUSH='1')
cmd = 'git check-attr --stdin -z {0}'.format(' '.join(attrs))
return Popen(cmd, shell=True, stdin=PIPE, stdout=PIPE, cwd=repo_abspath, env=env)
def read_attrs(process, repo_file_path):
process.stdin.write(repo_file_path.encode('utf-8') + b'\0')
process.stdin.flush()
# For every attribute check-attr will output: <path> NUL <attribute> NUL <info> NUL
path, attr, info = b'', b'', b''
nuls_count = 0
nuls_expected = 3 * len(attrs)
while nuls_count != nuls_expected:
b = process.stdout.read(1)
if b == b'' and process.poll() is not None:
raise RuntimeError("check-attr exited prematurely")
elif b == b'\0':
nuls_count += 1
if nuls_count % 3 == 0:
yield map(self.decode_git_output, (path, attr, info))
path, attr, info = b'', b'', b''
elif nuls_count % 3 == 0:
path += b
elif nuls_count % 3 == 1:
attr += b
elif nuls_count % 3 == 2:
info += b
def read_attrs_old(process, repo_file_path):
"""
Compatibility with versions 1.8.5 and below that do not recognize -z for output.
"""
process.stdin.write(repo_file_path.encode('utf-8') + b'\0')
process.stdin.flush()
# For every attribute check-attr will output: <path>: <attribute>: <info>\n
# where <path> is c-quoted
path, attr, info = b'', b'', b''
lines_count = 0
lines_expected = len(attrs)
while lines_count != lines_expected:
line = process.stdout.readline()
info_start = line.rfind(b': ')
if info_start == -1:
raise RuntimeError("unexpected output of check-attr: {0}".format(line))
attr_start = line.rfind(b': ', 0, info_start)
if attr_start == -1:
raise RuntimeError("unexpected output of check-attr: {0}".format(line))
info = line[info_start + 2:len(line) - 1] # trim leading ": " and trailing \n
attr = line[attr_start + 2:info_start] # trim leading ": "
path = line[:attr_start]
yield map(self.decode_git_output, (path, attr, info))
lines_count += 1
if not attrs:
return
process = make_process()
try:
while True:
repo_file_path = yield
repo_file_attrs = {}
if self.git_version is None or self.git_version > (1, 8, 5):
reader = read_attrs
else:
reader = read_attrs_old
for path, attr, value in reader(process, repo_file_path):
repo_file_attrs[attr] = value
yield repo_file_attrs
finally:
process.stdin.close()
process.wait() |
def scale(self, percent, scaleFromCenter=True):
"""scales an Image object
Parameters:
| percent - a percent of the original size
| numbers bigger than 100 scale up
| numbers less than 100 scale down
| 100 scales to the original size
Optional keyword parameters:
| scaleFromCenter - should the image scale from the center or from the upper left hand corner
| (default is True, scale from the center)
"""
self._transmogrophy(self.angle, percent, scaleFromCenter, self.flipH, self.flipV) | scales an Image object
Parameters:
| percent - a percent of the original size
| numbers bigger than 100 scale up
| numbers less than 100 scale down
| 100 scales to the original size
Optional keyword parameters:
| scaleFromCenter - should the image scale from the center or from the upper left hand corner
| (default is True, scale from the center) | Below is the the instruction that describes the task:
### Input:
scales an Image object
Parameters:
| percent - a percent of the original size
| numbers bigger than 100 scale up
| numbers less than 100 scale down
| 100 scales to the original size
Optional keyword parameters:
| scaleFromCenter - should the image scale from the center or from the upper left hand corner
| (default is True, scale from the center)
### Response:
def scale(self, percent, scaleFromCenter=True):
"""scales an Image object
Parameters:
| percent - a percent of the original size
| numbers bigger than 100 scale up
| numbers less than 100 scale down
| 100 scales to the original size
Optional keyword parameters:
| scaleFromCenter - should the image scale from the center or from the upper left hand corner
| (default is True, scale from the center)
"""
self._transmogrophy(self.angle, percent, scaleFromCenter, self.flipH, self.flipV) |
def create_child_folder(self, name, description=None):
""" Creates a Child Folder
:param str name: the name of the new child folder
:param str description: the description of the new child folder
:return: newly created folder
:rtype: drive.Folder
"""
if not self.object_id:
return None
url = self.build_url(
self._endpoints.get('list_items').format(id=self.object_id))
data = {'name': name, 'folder': {}}
if description:
data['description'] = description
response = self.con.post(url, data=data)
if not response:
return None
folder = response.json()
return self._classifier(folder)(parent=self,
**{self._cloud_data_key: folder}) | Creates a Child Folder
:param str name: the name of the new child folder
:param str description: the description of the new child folder
:return: newly created folder
:rtype: drive.Folder | Below is the the instruction that describes the task:
### Input:
Creates a Child Folder
:param str name: the name of the new child folder
:param str description: the description of the new child folder
:return: newly created folder
:rtype: drive.Folder
### Response:
def create_child_folder(self, name, description=None):
""" Creates a Child Folder
:param str name: the name of the new child folder
:param str description: the description of the new child folder
:return: newly created folder
:rtype: drive.Folder
"""
if not self.object_id:
return None
url = self.build_url(
self._endpoints.get('list_items').format(id=self.object_id))
data = {'name': name, 'folder': {}}
if description:
data['description'] = description
response = self.con.post(url, data=data)
if not response:
return None
folder = response.json()
return self._classifier(folder)(parent=self,
**{self._cloud_data_key: folder}) |
def error(self, msg):
"""Override/enhance default error method to display tracebacks."""
print("***", msg, file=self.stdout)
if not self.config.show_traceback_on_error:
return
etype, evalue, tb = sys.exc_info()
if tb and tb.tb_frame.f_code.co_name == "default":
tb = tb.tb_next
if tb and tb.tb_frame.f_code.co_filename == "<stdin>":
tb = tb.tb_next
if tb: # only display with actual traceback.
self._remove_bdb_context(evalue)
tb_limit = self.config.show_traceback_on_error_limit
fmt_exc = traceback.format_exception(
etype, evalue, tb, limit=tb_limit
)
# Remove last line (exception string again).
if len(fmt_exc) > 1 and fmt_exc[-1][0] != " ":
fmt_exc.pop()
print("".join(fmt_exc).rstrip(), file=self.stdout) | Override/enhance default error method to display tracebacks. | Below is the the instruction that describes the task:
### Input:
Override/enhance default error method to display tracebacks.
### Response:
def error(self, msg):
"""Override/enhance default error method to display tracebacks."""
print("***", msg, file=self.stdout)
if not self.config.show_traceback_on_error:
return
etype, evalue, tb = sys.exc_info()
if tb and tb.tb_frame.f_code.co_name == "default":
tb = tb.tb_next
if tb and tb.tb_frame.f_code.co_filename == "<stdin>":
tb = tb.tb_next
if tb: # only display with actual traceback.
self._remove_bdb_context(evalue)
tb_limit = self.config.show_traceback_on_error_limit
fmt_exc = traceback.format_exception(
etype, evalue, tb, limit=tb_limit
)
# Remove last line (exception string again).
if len(fmt_exc) > 1 and fmt_exc[-1][0] != " ":
fmt_exc.pop()
print("".join(fmt_exc).rstrip(), file=self.stdout) |
def var_quadratic_sum(A, C, H, beta, x0):
r"""
Computes the expected discounted quadratic sum
.. math::
q(x_0) = \mathbb{E} \Big[ \sum_{t=0}^{\infty} \beta^t x_t' H x_t \Big]
Here :math:`{x_t}` is the VAR process :math:`x_{t+1} = A x_t + C w_t`
with :math:`{x_t}` standard normal and :math:`x_0` the initial condition.
Parameters
----------
A : array_like(float, ndim=2)
The matrix described above in description. Should be n x n
C : array_like(float, ndim=2)
The matrix described above in description. Should be n x n
H : array_like(float, ndim=2)
The matrix described above in description. Should be n x n
beta: scalar(float)
Should take a value in (0, 1)
x_0: array_like(float, ndim=1)
The initial condtion. A conformable array (of length n, or with
n rows)
Returns
-------
q0: scalar(float)
Represents the value :math:`q(x_0)`
Remarks: The formula for computing :math:`q(x_0)` is
:math:`q(x_0) = x_0' Q x_0 + v`
where
* :math:`Q` is the solution to :math:`Q = H + \beta A' Q A`, and
* :math:`v = \frac{trace(C' Q C) \beta}{(1 - \beta)}`
"""
# == Make sure that A, C, H and x0 are array_like == #
A, C, H = list(map(np.atleast_2d, (A, C, H)))
x0 = np.atleast_1d(x0)
# == Start computations == #
Q = scipy.linalg.solve_discrete_lyapunov(sqrt(beta) * A.T, H)
cq = dot(dot(C.T, Q), C)
v = np.trace(cq) * beta / (1 - beta)
q0 = dot(dot(x0.T, Q), x0) + v
return q0 | r"""
Computes the expected discounted quadratic sum
.. math::
q(x_0) = \mathbb{E} \Big[ \sum_{t=0}^{\infty} \beta^t x_t' H x_t \Big]
Here :math:`{x_t}` is the VAR process :math:`x_{t+1} = A x_t + C w_t`
with :math:`{x_t}` standard normal and :math:`x_0` the initial condition.
Parameters
----------
A : array_like(float, ndim=2)
The matrix described above in description. Should be n x n
C : array_like(float, ndim=2)
The matrix described above in description. Should be n x n
H : array_like(float, ndim=2)
The matrix described above in description. Should be n x n
beta: scalar(float)
Should take a value in (0, 1)
x_0: array_like(float, ndim=1)
The initial condtion. A conformable array (of length n, or with
n rows)
Returns
-------
q0: scalar(float)
Represents the value :math:`q(x_0)`
Remarks: The formula for computing :math:`q(x_0)` is
:math:`q(x_0) = x_0' Q x_0 + v`
where
* :math:`Q` is the solution to :math:`Q = H + \beta A' Q A`, and
* :math:`v = \frac{trace(C' Q C) \beta}{(1 - \beta)}` | Below is the the instruction that describes the task:
### Input:
r"""
Computes the expected discounted quadratic sum
.. math::
q(x_0) = \mathbb{E} \Big[ \sum_{t=0}^{\infty} \beta^t x_t' H x_t \Big]
Here :math:`{x_t}` is the VAR process :math:`x_{t+1} = A x_t + C w_t`
with :math:`{x_t}` standard normal and :math:`x_0` the initial condition.
Parameters
----------
A : array_like(float, ndim=2)
The matrix described above in description. Should be n x n
C : array_like(float, ndim=2)
The matrix described above in description. Should be n x n
H : array_like(float, ndim=2)
The matrix described above in description. Should be n x n
beta: scalar(float)
Should take a value in (0, 1)
x_0: array_like(float, ndim=1)
The initial condtion. A conformable array (of length n, or with
n rows)
Returns
-------
q0: scalar(float)
Represents the value :math:`q(x_0)`
Remarks: The formula for computing :math:`q(x_0)` is
:math:`q(x_0) = x_0' Q x_0 + v`
where
* :math:`Q` is the solution to :math:`Q = H + \beta A' Q A`, and
* :math:`v = \frac{trace(C' Q C) \beta}{(1 - \beta)}`
### Response:
def var_quadratic_sum(A, C, H, beta, x0):
r"""
Computes the expected discounted quadratic sum
.. math::
q(x_0) = \mathbb{E} \Big[ \sum_{t=0}^{\infty} \beta^t x_t' H x_t \Big]
Here :math:`{x_t}` is the VAR process :math:`x_{t+1} = A x_t + C w_t`
with :math:`{x_t}` standard normal and :math:`x_0` the initial condition.
Parameters
----------
A : array_like(float, ndim=2)
The matrix described above in description. Should be n x n
C : array_like(float, ndim=2)
The matrix described above in description. Should be n x n
H : array_like(float, ndim=2)
The matrix described above in description. Should be n x n
beta: scalar(float)
Should take a value in (0, 1)
x_0: array_like(float, ndim=1)
The initial condtion. A conformable array (of length n, or with
n rows)
Returns
-------
q0: scalar(float)
Represents the value :math:`q(x_0)`
Remarks: The formula for computing :math:`q(x_0)` is
:math:`q(x_0) = x_0' Q x_0 + v`
where
* :math:`Q` is the solution to :math:`Q = H + \beta A' Q A`, and
* :math:`v = \frac{trace(C' Q C) \beta}{(1 - \beta)}`
"""
# == Make sure that A, C, H and x0 are array_like == #
A, C, H = list(map(np.atleast_2d, (A, C, H)))
x0 = np.atleast_1d(x0)
# == Start computations == #
Q = scipy.linalg.solve_discrete_lyapunov(sqrt(beta) * A.T, H)
cq = dot(dot(C.T, Q), C)
v = np.trace(cq) * beta / (1 - beta)
q0 = dot(dot(x0.T, Q), x0) + v
return q0 |
def _parse_doms(self):
"""Extract dom information from detector file"""
self.print("Reading PMT information...")
self._det_file.seek(0, 0)
self._readline()
pmts = defaultdict(list)
pmt_index = 0
while True:
line = self._readline()
if line == '':
self.print("Done.")
break
try:
dom_id, du, floor, n_pmts = split(line, int)
except ValueError:
continue
if du != self._current_du:
log.debug("Next DU, resetting floor to 1.")
self._current_du = du
self.dus.append(du)
self._current_floor = 1
if du == 1 and floor == -1:
log.warning(
"Floor ID is -1 (Jpp conversion bug), "
"using our own floor ID!"
)
else:
self._current_floor += 1
if floor == -1:
log.debug("Setting floor ID to our own ID")
floor = self._current_floor
self.doms[dom_id] = (du, floor, n_pmts)
if self.n_pmts_per_dom is None:
self.n_pmts_per_dom = n_pmts
if self.n_pmts_per_dom != n_pmts:
log.warning(
"DOMs with different number of PMTs are "
"detected, this can cause some unexpected "
"behaviour."
)
for i in range(n_pmts):
raw_pmt_info = self._readline()
pmt_info = raw_pmt_info.split()
pmt_id, x, y, z, rest = unpack_nfirst(pmt_info, 4)
dx, dy, dz, t0, rest = unpack_nfirst(rest, 4)
pmt_id = int(pmt_id)
omkey = (du, floor, i)
pmts['pmt_id'].append(int(pmt_id))
pmts['pos_x'].append(float(x))
pmts['pos_y'].append(float(y))
pmts['pos_z'].append(float(z))
pmts['dir_x'].append(float(dx))
pmts['dir_y'].append(float(dy))
pmts['dir_z'].append(float(dz))
pmts['t0'].append(float(t0))
pmts['du'].append(int(du))
pmts['floor'].append(int(floor))
pmts['channel_id'].append(int(i))
pmts['dom_id'].append(int(dom_id))
if self.version == 'v3' and rest:
status, rest = unpack_nfirst(rest, 1)
pmts['status'].append(int(status))
if rest:
log.warning("Unexpected PMT values: {0}".format(rest))
self._pmt_index_by_omkey[omkey] = pmt_index
self._pmt_index_by_pmt_id[pmt_id] = pmt_index
pmt_index += 1
self.pmts = Table(pmts, name='PMT') | Extract dom information from detector file | Below is the the instruction that describes the task:
### Input:
Extract dom information from detector file
### Response:
def _parse_doms(self):
"""Extract dom information from detector file"""
self.print("Reading PMT information...")
self._det_file.seek(0, 0)
self._readline()
pmts = defaultdict(list)
pmt_index = 0
while True:
line = self._readline()
if line == '':
self.print("Done.")
break
try:
dom_id, du, floor, n_pmts = split(line, int)
except ValueError:
continue
if du != self._current_du:
log.debug("Next DU, resetting floor to 1.")
self._current_du = du
self.dus.append(du)
self._current_floor = 1
if du == 1 and floor == -1:
log.warning(
"Floor ID is -1 (Jpp conversion bug), "
"using our own floor ID!"
)
else:
self._current_floor += 1
if floor == -1:
log.debug("Setting floor ID to our own ID")
floor = self._current_floor
self.doms[dom_id] = (du, floor, n_pmts)
if self.n_pmts_per_dom is None:
self.n_pmts_per_dom = n_pmts
if self.n_pmts_per_dom != n_pmts:
log.warning(
"DOMs with different number of PMTs are "
"detected, this can cause some unexpected "
"behaviour."
)
for i in range(n_pmts):
raw_pmt_info = self._readline()
pmt_info = raw_pmt_info.split()
pmt_id, x, y, z, rest = unpack_nfirst(pmt_info, 4)
dx, dy, dz, t0, rest = unpack_nfirst(rest, 4)
pmt_id = int(pmt_id)
omkey = (du, floor, i)
pmts['pmt_id'].append(int(pmt_id))
pmts['pos_x'].append(float(x))
pmts['pos_y'].append(float(y))
pmts['pos_z'].append(float(z))
pmts['dir_x'].append(float(dx))
pmts['dir_y'].append(float(dy))
pmts['dir_z'].append(float(dz))
pmts['t0'].append(float(t0))
pmts['du'].append(int(du))
pmts['floor'].append(int(floor))
pmts['channel_id'].append(int(i))
pmts['dom_id'].append(int(dom_id))
if self.version == 'v3' and rest:
status, rest = unpack_nfirst(rest, 1)
pmts['status'].append(int(status))
if rest:
log.warning("Unexpected PMT values: {0}".format(rest))
self._pmt_index_by_omkey[omkey] = pmt_index
self._pmt_index_by_pmt_id[pmt_id] = pmt_index
pmt_index += 1
self.pmts = Table(pmts, name='PMT') |
def update_ptr_record(self, device, record, domain_name, data=None,
ttl=None, comment=None):
"""
Updates a PTR record with the supplied values.
"""
return self._manager.update_ptr_record(device, record, domain_name,
data=data, ttl=ttl, comment=comment) | Updates a PTR record with the supplied values. | Below is the the instruction that describes the task:
### Input:
Updates a PTR record with the supplied values.
### Response:
def update_ptr_record(self, device, record, domain_name, data=None,
ttl=None, comment=None):
"""
Updates a PTR record with the supplied values.
"""
return self._manager.update_ptr_record(device, record, domain_name,
data=data, ttl=ttl, comment=comment) |
def load_fetched_objects_into_contexts(events, model_data, context_hints_per_source):
"""
Given the fetched model data and the context hints for each source, go through each
event and populate the contexts with the loaded information.
"""
for event in events:
context_hints = context_hints_per_source.get(event.source, {})
for context_key, hints in context_hints.items():
model = get_model(hints['app_name'], hints['model_name'])
for d, value in dict_find(event.context, context_key):
if isinstance(value, list):
for i, model_id in enumerate(d[context_key]):
d[context_key][i] = model_data[model].get(model_id)
else:
d[context_key] = model_data[model].get(value) | Given the fetched model data and the context hints for each source, go through each
event and populate the contexts with the loaded information. | Below is the the instruction that describes the task:
### Input:
Given the fetched model data and the context hints for each source, go through each
event and populate the contexts with the loaded information.
### Response:
def load_fetched_objects_into_contexts(events, model_data, context_hints_per_source):
"""
Given the fetched model data and the context hints for each source, go through each
event and populate the contexts with the loaded information.
"""
for event in events:
context_hints = context_hints_per_source.get(event.source, {})
for context_key, hints in context_hints.items():
model = get_model(hints['app_name'], hints['model_name'])
for d, value in dict_find(event.context, context_key):
if isinstance(value, list):
for i, model_id in enumerate(d[context_key]):
d[context_key][i] = model_data[model].get(model_id)
else:
d[context_key] = model_data[model].get(value) |
def _get_num_interval(config, num_pre, num_post):
'''
Returns numerical interval based on optionals num_pre, num_post values
'''
post = int(num_post) if num_post else 0
pre = int(num_pre) if num_pre is not None else _get_last_snapshot(config)['id']
return pre, post | Returns numerical interval based on optionals num_pre, num_post values | Below is the the instruction that describes the task:
### Input:
Returns numerical interval based on optionals num_pre, num_post values
### Response:
def _get_num_interval(config, num_pre, num_post):
'''
Returns numerical interval based on optionals num_pre, num_post values
'''
post = int(num_post) if num_post else 0
pre = int(num_pre) if num_pre is not None else _get_last_snapshot(config)['id']
return pre, post |
def get_select_sql(self, columns, order=None, limit=0, skip=0):
    """
    Build a SELECT query based on the current state of the builder.
    :param columns:
        SQL fragment describing which columns to select i.e. 'e.obstoryID, s.statusID'
    :param order:
        Optional ordering constraint, i.e. 'e.eventTime DESC'
    :param limit:
        Optional, used to build the 'LIMIT n' clause. If not specified no limit is imposed.
    :param skip:
        Optional, used to build the 'OFFSET n' clause. If not specified results are returned from the first item
        available. Note that this parameter must be combined with 'order', otherwise there's no ordering imposed
        on the results and subsequent queries may return overlapping data randomly. It's unlikely that this will
        actually happen as almost all databases do in fact create an internal ordering, but there's no guarantee
        of this (and some operations such as indexing will definitely break this property unless explicitly set).
    :returns:
        A SQL SELECT query, which will make use of self.sql_args when executed.
    """
    # Assemble the statement piecewise; bound values are supplied
    # separately through self.sql_args at execution time.
    sql = 'SELECT '
    sql += '{0} FROM {1} '.format(columns, self.tables)
    if len(self.where_clauses) > 0:
        # All registered constraints are combined conjunctively.
        sql += ' WHERE '
        sql += ' AND '.join(self.where_clauses)
    if order is not None:
        sql += ' ORDER BY {0}'.format(order)
    if limit > 0:
        sql += ' LIMIT {0} '.format(limit)
    if skip > 0:
        # See the docstring caveat: OFFSET without ORDER BY gives
        # unstable paging.
        sql += ' OFFSET {0} '.format(skip)
    return sql | Build a SELECT query based on the current state of the builder.
:param columns:
SQL fragment describing which columns to select i.e. 'e.obstoryID, s.statusID'
:param order:
Optional ordering constraint, i.e. 'e.eventTime DESC'
:param limit:
Optional, used to build the 'LIMIT n' clause. If not specified no limit is imposed.
:param skip:
Optional, used to build the 'OFFSET n' clause. If not specified results are returned from the first item
available. Note that this parameter must be combined with 'order', otherwise there's no ordering imposed
on the results and subsequent queries may return overlapping data randomly. It's unlikely that this will
actually happen as almost all databases do in fact create an internal ordering, but there's no guarantee
of this (and some operations such as indexing will definitely break this property unless explicitly set).
:returns:
A SQL SELECT query, which will make use of self.sql_args when executed. | Below is the instruction that describes the task:
### Input:
Build a SELECT query based on the current state of the builder.
:param columns:
SQL fragment describing which columns to select i.e. 'e.obstoryID, s.statusID'
:param order:
Optional ordering constraint, i.e. 'e.eventTime DESC'
:param limit:
Optional, used to build the 'LIMIT n' clause. If not specified no limit is imposed.
:param skip:
Optional, used to build the 'OFFSET n' clause. If not specified results are returned from the first item
available. Note that this parameter must be combined with 'order', otherwise there's no ordering imposed
on the results and subsequent queries may return overlapping data randomly. It's unlikely that this will
actually happen as almost all databases do in fact create an internal ordering, but there's no guarantee
of this (and some operations such as indexing will definitely break this property unless explicitly set).
:returns:
A SQL SELECT query, which will make use of self.sql_args when executed.
### Response:
def get_select_sql(self, columns, order=None, limit=0, skip=0):
    """
    Render a SELECT statement from the builder's current tables and
    WHERE clauses.

    :param columns:
        SQL fragment naming the columns to select, e.g. 'e.obstoryID, s.statusID'
    :param order:
        Optional ORDER BY fragment, e.g. 'e.eventTime DESC'
    :param limit:
        When positive, appends a 'LIMIT n' clause.
    :param skip:
        When positive, appends an 'OFFSET n' clause. Combine with 'order'
        for stable paging: without an explicit ordering, subsequent
        queries may return overlapping data, as no internal ordering is
        guaranteed by the database.
    :returns:
        A SQL SELECT string; parameter values are supplied separately
        through self.sql_args at execution time.
    """
    fragments = ['SELECT ', '{0} FROM {1} '.format(columns, self.tables)]
    if self.where_clauses:
        fragments.append(' WHERE ')
        fragments.append(' AND '.join(self.where_clauses))
    if order is not None:
        fragments.append(' ORDER BY {0}'.format(order))
    if limit > 0:
        fragments.append(' LIMIT {0} '.format(limit))
    if skip > 0:
        fragments.append(' OFFSET {0} '.format(skip))
    return ''.join(fragments)
def start_head_processes(self):
    """Start head processes on the node.

    Launches Redis, the monitor and the raylet monitor, plus the
    dashboard when running on Python 3 with the webui enabled.
    Asserts that no Redis address has been set yet, i.e. that this
    node has not already joined an existing cluster.
    """
    logger.info(
        "Process STDOUT and STDERR is being redirected to {}.".format(
            self._logs_dir))
    # A non-None Redis address would mean we already connected to a
    # cluster, so this cannot be the head node.
    assert self._redis_address is None
    # If this is the head node, start the relevant head node processes.
    self.start_redis()
    self.start_monitor()
    self.start_raylet_monitor()
    # The dashboard is Python3.x only.
    if PY3 and self._ray_params.include_webui:
        self.start_dashboard() | Start head processes on the node. | Below is the the instruction that describes the task:
### Input:
Start head processes on the node.
### Response:
def start_head_processes(self):
    """Start head processes on the node.

    Launches Redis, the monitor and the raylet monitor, plus the
    dashboard when running on Python 3 with the webui enabled.
    Asserts that no Redis address has been set yet, i.e. that this
    node has not already joined an existing cluster.
    """
    logger.info(
        "Process STDOUT and STDERR is being redirected to {}.".format(
            self._logs_dir))
    # A non-None Redis address would mean we already connected to a
    # cluster, so this cannot be the head node.
    assert self._redis_address is None
    # If this is the head node, start the relevant head node processes.
    self.start_redis()
    self.start_monitor()
    self.start_raylet_monitor()
    # The dashboard is Python3.x only.
    if PY3 and self._ray_params.include_webui:
        self.start_dashboard()
def main(args=None):
    """
    Main function.
    This function is the command line entry point.
    Args:
        args (list of str): the arguments passed to the program.
    Returns:
        int: return code being 0 (OK), 1 (dsm empty) or 2 (error).
    """
    parser = get_parser()
    args = parser.parse_args(args=args)
    # Default to the matrix view when no output mode was requested.
    if not (args.matrix or args.dependencies or args.treemap or args.graph):
        args.matrix = True
    # split comma-separated args (deduplicating while keeping order)
    packages = []
    for arg in args.packages:
        if ',' in arg:
            for package in arg.split(','):
                if package not in packages:
                    packages.append(package)
        elif arg not in packages:
            packages.append(arg)
    # guess convenient depth
    depth = args.depth
    if depth is None:
        depth = guess_depth(packages)
    # open file if not stdout
    output = args.output
    if isinstance(output, str):
        output = open(output, 'w')
    dsm = DSM(*packages, build_tree=True, build_dependencies=True,
              enforce_init=not args.greedy)
    if dsm.empty:
        return 1
    indent = args.indent
    if indent is None:
        # CSV output stays flat; other formats get a readable indent.
        if args.format == CSV:
            indent = 0
        else:
            indent = 2
    elif indent < 0 and args.format == JSON:
        # special case for json.dumps indent argument
        indent = None
    try:
        # Exactly one output mode runs; precedence is
        # dependencies > matrix > treemap > graph.
        if args.dependencies:
            dsm.print(format=args.format, output=output, indent=indent)
        elif args.matrix:
            dsm.print_matrix(format=args.format, output=output,
                             depth=depth, indent=indent)
        elif args.treemap:
            dsm.print_treemap(format=args.format, output=output)
        elif args.graph:
            dsm.print_graph(format=args.format, output=output,
                            depth=depth, indent=indent)
    except BrokenPipeError:
        # avoid traceback
        return 2
    return 0 | Main function.
This function is the command line entry point.
Args:
args (list of str): the arguments passed to the program.
Returns:
int: return code being 0 (OK), 1 (dsm empty) or 2 (error). | Below is the instruction that describes the task:
### Input:
Main function.
This function is the command line entry point.
Args:
args (list of str): the arguments passed to the program.
Returns:
int: return code being 0 (OK), 1 (dsm empty) or 2 (error).
### Response:
def main(args=None):
    """
    Main function.
    This function is the command line entry point.
    Args:
        args (list of str): the arguments passed to the program.
    Returns:
        int: return code being 0 (OK), 1 (dsm empty) or 2 (error).
    """
    parser = get_parser()
    args = parser.parse_args(args=args)
    # Default to the matrix view when no output mode was requested.
    if not (args.matrix or args.dependencies or args.treemap or args.graph):
        args.matrix = True
    # split comma-separated args (deduplicating while keeping order)
    packages = []
    for arg in args.packages:
        if ',' in arg:
            for package in arg.split(','):
                if package not in packages:
                    packages.append(package)
        elif arg not in packages:
            packages.append(arg)
    # guess convenient depth
    depth = args.depth
    if depth is None:
        depth = guess_depth(packages)
    # open file if not stdout
    output = args.output
    if isinstance(output, str):
        output = open(output, 'w')
    dsm = DSM(*packages, build_tree=True, build_dependencies=True,
              enforce_init=not args.greedy)
    if dsm.empty:
        return 1
    indent = args.indent
    if indent is None:
        # CSV output stays flat; other formats get a readable indent.
        if args.format == CSV:
            indent = 0
        else:
            indent = 2
    elif indent < 0 and args.format == JSON:
        # special case for json.dumps indent argument
        indent = None
    try:
        # Exactly one output mode runs; precedence is
        # dependencies > matrix > treemap > graph.
        if args.dependencies:
            dsm.print(format=args.format, output=output, indent=indent)
        elif args.matrix:
            dsm.print_matrix(format=args.format, output=output,
                             depth=depth, indent=indent)
        elif args.treemap:
            dsm.print_treemap(format=args.format, output=output)
        elif args.graph:
            dsm.print_graph(format=args.format, output=output,
                            depth=depth, indent=indent)
    except BrokenPipeError:
        # avoid traceback
        return 2
    return 0
def ssim_value(self, target):
    """Compute the SSIM value from the reference image to the target image.
    Args:
        target (str or PIL.Image): Input image to compare the reference image
            to. This may be a PIL Image object or, to save time, an SSIMImage
            object (e.g. the img member of another SSIM object).
    Returns:
        Computed SSIM float value.
    """
    # Performance boost if handed a compatible SSIMImage object.
    if not isinstance(target, SSIMImage) \
            or not np.array_equal(self.gaussian_kernel_1d,
                                  target.gaussian_kernel_1d):
        target = SSIMImage(target, self.gaussian_kernel_1d, self.img.size)
    # Windowed cross term: E[xy] - E[x]E[y] under the Gaussian window.
    img_mat_12 = self.img.img_gray * target.img_gray
    img_mat_sigma_12 = convolve_gaussian_2d(
        img_mat_12, self.gaussian_kernel_1d)
    img_mat_mu_12 = self.img.img_gray_mu * target.img_gray_mu
    img_mat_sigma_12 = img_mat_sigma_12 - img_mat_mu_12
    # Numerator of SSIM
    num_ssim = ((2 * img_mat_mu_12 + self.c_1) *
                (2 * img_mat_sigma_12 + self.c_2))
    # Denominator of SSIM
    den_ssim = (
        (self.img.img_gray_mu_squared + target.img_gray_mu_squared +
         self.c_1) *
        (self.img.img_gray_sigma_squared +
         target.img_gray_sigma_squared + self.c_2))
    # The scalar index is the mean of the per-pixel SSIM map.
    ssim_map = num_ssim / den_ssim
    index = np.average(ssim_map)
    return index | Compute the SSIM value from the reference image to the target image.
Args:
target (str or PIL.Image): Input image to compare the reference image
to. This may be a PIL Image object or, to save time, an SSIMImage
object (e.g. the img member of another SSIM object).
Returns:
Computed SSIM float value. | Below is the instruction that describes the task:
### Input:
Compute the SSIM value from the reference image to the target image.
Args:
target (str or PIL.Image): Input image to compare the reference image
to. This may be a PIL Image object or, to save time, an SSIMImage
object (e.g. the img member of another SSIM object).
Returns:
Computed SSIM float value.
### Response:
def ssim_value(self, target):
    """Compute the SSIM value from the reference image to the target image.
    Args:
        target (str or PIL.Image): Input image to compare the reference image
            to. This may be a PIL Image object or, to save time, an SSIMImage
            object (e.g. the img member of another SSIM object).
    Returns:
        Computed SSIM float value.
    """
    # Performance boost if handed a compatible SSIMImage object.
    if not isinstance(target, SSIMImage) \
            or not np.array_equal(self.gaussian_kernel_1d,
                                  target.gaussian_kernel_1d):
        target = SSIMImage(target, self.gaussian_kernel_1d, self.img.size)
    # Windowed cross term: E[xy] - E[x]E[y] under the Gaussian window.
    img_mat_12 = self.img.img_gray * target.img_gray
    img_mat_sigma_12 = convolve_gaussian_2d(
        img_mat_12, self.gaussian_kernel_1d)
    img_mat_mu_12 = self.img.img_gray_mu * target.img_gray_mu
    img_mat_sigma_12 = img_mat_sigma_12 - img_mat_mu_12
    # Numerator of SSIM
    num_ssim = ((2 * img_mat_mu_12 + self.c_1) *
                (2 * img_mat_sigma_12 + self.c_2))
    # Denominator of SSIM
    den_ssim = (
        (self.img.img_gray_mu_squared + target.img_gray_mu_squared +
         self.c_1) *
        (self.img.img_gray_sigma_squared +
         target.img_gray_sigma_squared + self.c_2))
    # The scalar index is the mean of the per-pixel SSIM map.
    ssim_map = num_ssim / den_ssim
    index = np.average(ssim_map)
    return index
def create_context(self, message_queue, task_id):
    """
    Create data needed by upload_project_run(DukeDS connection info).
    :param message_queue: Queue: queue background process can send messages to us on
    :param task_id: int: id of this command's task so message will be routed correctly
    :return: DownloadContext bundling settings, download parameters and routing info.
    """
    # NOTE(review): the summary mentions upload_project_run but this builds
    # a DownloadContext -- the summary looks copy-pasted; confirm intent.
    # Positional payload consumed by the background download worker.
    params = (self.settings.dest_directory, self.file_url.json_data, self.seek_amt, self.bytes_to_read)
    return DownloadContext(self.settings, params, message_queue, task_id) | Create data needed by upload_project_run(DukeDS connection info).
:param message_queue: Queue: queue background process can send messages to us on
:param task_id: int: id of this command's task so message will be routed correctly | Below is the instruction that describes the task:
### Input:
Create data needed by upload_project_run(DukeDS connection info).
:param message_queue: Queue: queue background process can send messages to us on
:param task_id: int: id of this command's task so message will be routed correctly
### Response:
def create_context(self, message_queue, task_id):
    """
    Build the context object a background download process needs.

    :param message_queue: Queue: queue background process can send messages to us on
    :param task_id: int: id of this command's task so message will be routed correctly
    :return: DownloadContext bundling settings, download parameters and routing info.
    """
    download_params = (
        self.settings.dest_directory,
        self.file_url.json_data,
        self.seek_amt,
        self.bytes_to_read,
    )
    return DownloadContext(self.settings, download_params, message_queue, task_id)
def ndd_prefix_for_region(region_code, strip_non_digits):
    """Returns the national dialling prefix for a specific region.
    For example, this would be 1 for the United States, and 0 for New
    Zealand. Set strip_non_digits to True to strip symbols like "~" (which
    indicates a wait for a dialling tone) from the prefix returned. If no
    national prefix is present, we return None.
    Warning: Do not use this method for do-your-own formatting - for some
    regions, the national dialling prefix is used only for certain types of
    numbers. Use the library's formatting functions to prefix the national
    prefix when required.
    Arguments:
    region_code -- The region that we want to get the dialling prefix for.
    strip_non_digits -- whether to strip non-digits from the national
              dialling prefix.
    Returns the dialling prefix for the region denoted by region_code.
    """
    if region_code is None:
        return None
    # Unknown regions have no metadata and therefore no prefix.
    metadata = PhoneMetadata.metadata_for_region(region_code.upper(), None)
    if metadata is None:
        return None
    national_prefix = metadata.national_prefix
    if national_prefix is None or len(national_prefix) == 0:
        return None
    if strip_non_digits:
        # Note: if any other non-numeric symbols are ever used in national
        # prefixes, these would have to be removed here as well.
        national_prefix = re.sub(U_TILDE, U_EMPTY_STRING, national_prefix)
    return national_prefix | Returns the national dialling prefix for a specific region.
For example, this would be 1 for the United States, and 0 for New
Zealand. Set strip_non_digits to True to strip symbols like "~" (which
indicates a wait for a dialling tone) from the prefix returned. If no
national prefix is present, we return None.
Warning: Do not use this method for do-your-own formatting - for some
regions, the national dialling prefix is used only for certain types of
numbers. Use the library's formatting functions to prefix the national
prefix when required.
Arguments:
region_code -- The region that we want to get the dialling prefix for.
strip_non_digits -- whether to strip non-digits from the national
dialling prefix.
Returns the dialling prefix for the region denoted by region_code. | Below is the instruction that describes the task:
### Input:
Returns the national dialling prefix for a specific region.
For example, this would be 1 for the United States, and 0 for New
Zealand. Set strip_non_digits to True to strip symbols like "~" (which
indicates a wait for a dialling tone) from the prefix returned. If no
national prefix is present, we return None.
Warning: Do not use this method for do-your-own formatting - for some
regions, the national dialling prefix is used only for certain types of
numbers. Use the library's formatting functions to prefix the national
prefix when required.
Arguments:
region_code -- The region that we want to get the dialling prefix for.
strip_non_digits -- whether to strip non-digits from the national
dialling prefix.
Returns the dialling prefix for the region denoted by region_code.
### Response:
def ndd_prefix_for_region(region_code, strip_non_digits):
    """Returns the national dialling prefix for a specific region.
    For example, this would be 1 for the United States, and 0 for New
    Zealand. Set strip_non_digits to True to strip symbols like "~" (which
    indicates a wait for a dialling tone) from the prefix returned. If no
    national prefix is present, we return None.
    Warning: Do not use this method for do-your-own formatting - for some
    regions, the national dialling prefix is used only for certain types of
    numbers. Use the library's formatting functions to prefix the national
    prefix when required.
    Arguments:
    region_code -- The region that we want to get the dialling prefix for.
    strip_non_digits -- whether to strip non-digits from the national
              dialling prefix.
    Returns the dialling prefix for the region denoted by region_code.
    """
    if region_code is None:
        return None
    metadata = PhoneMetadata.metadata_for_region(region_code.upper(), None)
    if metadata is None:
        return None
    prefix = metadata.national_prefix
    if not prefix:
        # Missing or empty prefix means this region has none.
        return None
    if strip_non_digits:
        # Note: if any other non-numeric symbols are ever used in national
        # prefixes, these would have to be removed here as well.
        prefix = re.sub(U_TILDE, U_EMPTY_STRING, prefix)
    return prefix
def fcsp_sa_fcsp_auth_policy_switch(self, **kwargs):
    """Auto Generated Code

    Builds the XML payload
    config/fcsp-sa/fcsp/auth/policy/switch (brocade-fc-auth namespace)
    with the 'switch' kwarg as the leaf text, then hands the element
    tree to the callback.

    Kwargs:
        switch: required; text value for the policy/switch leaf.
        callback: optional; invoked with the built element tree,
            defaults to self._callback.
    """
    config = ET.Element("config")
    fcsp_sa = ET.SubElement(config, "fcsp-sa", xmlns="urn:brocade.com:mgmt:brocade-fc-auth")
    fcsp = ET.SubElement(fcsp_sa, "fcsp")
    auth = ET.SubElement(fcsp, "auth")
    policy = ET.SubElement(auth, "policy")
    switch = ET.SubElement(policy, "switch")
    switch.text = kwargs.pop('switch')
    callback = kwargs.pop('callback', self._callback)
    return callback(config) | Auto Generated Code | Below is the the instruction that describes the task:
### Input:
Auto Generated Code
### Response:
def fcsp_sa_fcsp_auth_policy_switch(self, **kwargs):
    """Auto Generated Code

    Builds the XML payload
    config/fcsp-sa/fcsp/auth/policy/switch (brocade-fc-auth namespace)
    with the 'switch' kwarg as the leaf text, then hands the element
    tree to the callback (defaults to self._callback).
    """
    config = ET.Element("config")
    node = ET.SubElement(config, "fcsp-sa", xmlns="urn:brocade.com:mgmt:brocade-fc-auth")
    # Descend through the fixed container hierarchy one tag at a time.
    for tag in ("fcsp", "auth", "policy"):
        node = ET.SubElement(node, tag)
    switch = ET.SubElement(node, "switch")
    switch.text = kwargs.pop('switch')
    callback = kwargs.pop('callback', self._callback)
    return callback(config)
def create_subnet(context, subnet):
    """Create a subnet.
    Create a subnet which represents a range of IP addresses
    that can be allocated to devices
    : param context: neutron api request context
    : param subnet: dictionary describing the subnet, with keys
        as listed in the RESOURCE_ATTRIBUTE_MAP object in
        neutron/api/v2/attributes.py. All keys will be populated.
    : returns: dict view of the created subnet, with "gateway_ip" set to
        the effective gateway (may be None when none was derived).
    """
    LOG.info("create_subnet for tenant %s" % context.tenant_id)
    net_id = subnet["subnet"]["network_id"]
    with context.session.begin():
        net = db_api.network_find(context=context, limit=None, sorts=['id'],
                                  marker=None, page_reverse=False, fields=None,
                                  id=net_id, scope=db_api.ONE)
        if not net:
            raise n_exc.NetworkNotFound(net_id=net_id)
        sub_attrs = subnet["subnet"]
        # Attributes callers may never set, plus admin-only attributes
        # stripped for non-admin requests.
        always_pop = ["enable_dhcp", "ip_version", "first_ip", "last_ip",
                      "_cidr"]
        admin_only = ["segment_id", "do_not_use", "created_at",
                      "next_auto_assign_ip"]
        utils.filter_body(context, sub_attrs, admin_only, always_pop)
        _validate_subnet_cidr(context, net_id, sub_attrs["cidr"])
        cidr = netaddr.IPNetwork(sub_attrs["cidr"])
        err_vals = {'cidr': sub_attrs["cidr"], 'network_id': net_id}
        err = _("Requested subnet with cidr: %(cidr)s for "
                "network: %(network_id)s. Prefix is too small, must be a "
                "larger subnet. A prefix less than /%(prefix)s is required.")
        # Reject prefixes too small to hold usable addresses.
        if cidr.version == 6 and cidr.prefixlen > 64:
            err_vals["prefix"] = 65
            err_msg = err % err_vals
            raise n_exc.InvalidInput(error_message=err_msg)
        elif cidr.version == 4 and cidr.prefixlen > 30:
            err_vals["prefix"] = 31
            err_msg = err % err_vals
            raise n_exc.InvalidInput(error_message=err_msg)
        # Enforce subnet quotas
        net_subnets = get_subnets(context,
                                  filters=dict(network_id=net_id))
        if not context.is_admin:
            v4_count, v6_count = 0, 0
            # NOTE(review): the loop variable shadows the 'subnet'
            # parameter (the request body); the parameter is not read
            # again afterwards, but renaming would be safer.
            for subnet in net_subnets:
                if netaddr.IPNetwork(subnet['cidr']).version == 6:
                    v6_count += 1
                else:
                    v4_count += 1
            if cidr.version == 6:
                tenant_quota_v6 = context.session.query(qdv.Quota).filter_by(
                    tenant_id=context.tenant_id,
                    resource='v6_subnets_per_network').first()
                # NOTE(review): .first() yields a Quota row or None, so
                # the comparison with -1 is always true -- confirm the
                # intended "unlimited" check.
                if tenant_quota_v6 != -1:
                    quota.QUOTAS.limit_check(
                        context, context.tenant_id,
                        v6_subnets_per_network=v6_count + 1)
            else:
                tenant_quota_v4 = context.session.query(qdv.Quota).filter_by(
                    tenant_id=context.tenant_id,
                    resource='v4_subnets_per_network').first()
                if tenant_quota_v4 != -1:
                    quota.QUOTAS.limit_check(
                        context, context.tenant_id,
                        v4_subnets_per_network=v4_count + 1)
        # See RM981. The default behavior of setting a gateway unless
        # explicitly asked to not is no longer desirable.
        gateway_ip = utils.pop_param(sub_attrs, "gateway_ip")
        dns_ips = utils.pop_param(sub_attrs, "dns_nameservers", [])
        host_routes = utils.pop_param(sub_attrs, "host_routes", [])
        allocation_pools = utils.pop_param(sub_attrs, "allocation_pools", None)
        sub_attrs["network"] = net
        new_subnet = db_api.subnet_create(context, **sub_attrs)
        cidrs = []
        alloc_pools = allocation_pool.AllocationPools(sub_attrs["cidr"],
                                                      allocation_pools)
        if isinstance(allocation_pools, list):
            cidrs = alloc_pools.get_policy_cidrs()
            quota.QUOTAS.limit_check(
                context,
                context.tenant_id,
                alloc_pools_per_subnet=len(alloc_pools))
        ip_policies.ensure_default_policy(cidrs, [new_subnet])
        new_subnet["ip_policy"] = db_api.ip_policy_create(context,
                                                          exclude=cidrs)
        quota.QUOTAS.limit_check(context, context.tenant_id,
                                 routes_per_subnet=len(host_routes))
        default_route = None
        for route in host_routes:
            netaddr_route = netaddr.IPNetwork(route["destination"])
            if netaddr_route.value == routes.DEFAULT_ROUTE.value:
                # Only one default route may be supplied; its nexthop
                # becomes the subnet gateway.
                if default_route:
                    raise q_exc.DuplicateRouteConflict(
                        subnet_id=new_subnet["id"])
                default_route = route
                gateway_ip = default_route["nexthop"]
                alloc_pools.validate_gateway_excluded(gateway_ip)
            new_subnet["routes"].append(db_api.route_create(
                context, cidr=route["destination"], gateway=route["nexthop"]))
        quota.QUOTAS.limit_check(context, context.tenant_id,
                                 dns_nameservers_per_subnet=len(dns_ips))
        for dns_ip in dns_ips:
            new_subnet["dns_nameservers"].append(db_api.dns_create(
                context, ip=netaddr.IPAddress(dns_ip)))
        # if the gateway_ip is IN the cidr for the subnet and NOT excluded by
        # policies, we should raise a 409 conflict
        if gateway_ip and default_route is None:
            alloc_pools.validate_gateway_excluded(gateway_ip)
            new_subnet["routes"].append(db_api.route_create(
                context, cidr=str(routes.DEFAULT_ROUTE), gateway=gateway_ip))
        subnet_dict = v._make_subnet_dict(new_subnet)
        subnet_dict["gateway_ip"] = gateway_ip
        return subnet_dict | Create a subnet.
Create a subnet which represents a range of IP addresses
that can be allocated to devices
: param context: neutron api request context
: param subnet: dictionary describing the subnet, with keys
as listed in the RESOURCE_ATTRIBUTE_MAP object in
neutron/api/v2/attributes.py. All keys will be populated. | Below is the instruction that describes the task:
### Input:
Create a subnet.
Create a subnet which represents a range of IP addresses
that can be allocated to devices
: param context: neutron api request context
: param subnet: dictionary describing the subnet, with keys
as listed in the RESOURCE_ATTRIBUTE_MAP object in
neutron/api/v2/attributes.py. All keys will be populated.
### Response:
def create_subnet(context, subnet):
    """Create a subnet.
    Create a subnet which represents a range of IP addresses
    that can be allocated to devices
    : param context: neutron api request context
    : param subnet: dictionary describing the subnet, with keys
        as listed in the RESOURCE_ATTRIBUTE_MAP object in
        neutron/api/v2/attributes.py. All keys will be populated.
    : returns: dict view of the created subnet, with "gateway_ip" set to
        the effective gateway (may be None when none was derived).
    """
    LOG.info("create_subnet for tenant %s" % context.tenant_id)
    net_id = subnet["subnet"]["network_id"]
    with context.session.begin():
        net = db_api.network_find(context=context, limit=None, sorts=['id'],
                                  marker=None, page_reverse=False, fields=None,
                                  id=net_id, scope=db_api.ONE)
        if not net:
            raise n_exc.NetworkNotFound(net_id=net_id)
        sub_attrs = subnet["subnet"]
        # Attributes callers may never set, plus admin-only attributes
        # stripped for non-admin requests.
        always_pop = ["enable_dhcp", "ip_version", "first_ip", "last_ip",
                      "_cidr"]
        admin_only = ["segment_id", "do_not_use", "created_at",
                      "next_auto_assign_ip"]
        utils.filter_body(context, sub_attrs, admin_only, always_pop)
        _validate_subnet_cidr(context, net_id, sub_attrs["cidr"])
        cidr = netaddr.IPNetwork(sub_attrs["cidr"])
        err_vals = {'cidr': sub_attrs["cidr"], 'network_id': net_id}
        err = _("Requested subnet with cidr: %(cidr)s for "
                "network: %(network_id)s. Prefix is too small, must be a "
                "larger subnet. A prefix less than /%(prefix)s is required.")
        # Reject prefixes too small to hold usable addresses.
        if cidr.version == 6 and cidr.prefixlen > 64:
            err_vals["prefix"] = 65
            err_msg = err % err_vals
            raise n_exc.InvalidInput(error_message=err_msg)
        elif cidr.version == 4 and cidr.prefixlen > 30:
            err_vals["prefix"] = 31
            err_msg = err % err_vals
            raise n_exc.InvalidInput(error_message=err_msg)
        # Enforce subnet quotas
        net_subnets = get_subnets(context,
                                  filters=dict(network_id=net_id))
        if not context.is_admin:
            v4_count, v6_count = 0, 0
            # Renamed from 'subnet' so the function parameter (the
            # request body) is not shadowed inside this loop.
            for existing_subnet in net_subnets:
                if netaddr.IPNetwork(existing_subnet['cidr']).version == 6:
                    v6_count += 1
                else:
                    v4_count += 1
            if cidr.version == 6:
                tenant_quota_v6 = context.session.query(qdv.Quota).filter_by(
                    tenant_id=context.tenant_id,
                    resource='v6_subnets_per_network').first()
                # NOTE(review): .first() yields a Quota row or None, so
                # the comparison with -1 is always true -- confirm the
                # intended "unlimited" check.
                if tenant_quota_v6 != -1:
                    quota.QUOTAS.limit_check(
                        context, context.tenant_id,
                        v6_subnets_per_network=v6_count + 1)
            else:
                tenant_quota_v4 = context.session.query(qdv.Quota).filter_by(
                    tenant_id=context.tenant_id,
                    resource='v4_subnets_per_network').first()
                if tenant_quota_v4 != -1:
                    quota.QUOTAS.limit_check(
                        context, context.tenant_id,
                        v4_subnets_per_network=v4_count + 1)
        # See RM981. The default behavior of setting a gateway unless
        # explicitly asked to not is no longer desirable.
        gateway_ip = utils.pop_param(sub_attrs, "gateway_ip")
        dns_ips = utils.pop_param(sub_attrs, "dns_nameservers", [])
        host_routes = utils.pop_param(sub_attrs, "host_routes", [])
        allocation_pools = utils.pop_param(sub_attrs, "allocation_pools", None)
        sub_attrs["network"] = net
        new_subnet = db_api.subnet_create(context, **sub_attrs)
        cidrs = []
        alloc_pools = allocation_pool.AllocationPools(sub_attrs["cidr"],
                                                      allocation_pools)
        if isinstance(allocation_pools, list):
            cidrs = alloc_pools.get_policy_cidrs()
            quota.QUOTAS.limit_check(
                context,
                context.tenant_id,
                alloc_pools_per_subnet=len(alloc_pools))
        ip_policies.ensure_default_policy(cidrs, [new_subnet])
        new_subnet["ip_policy"] = db_api.ip_policy_create(context,
                                                          exclude=cidrs)
        quota.QUOTAS.limit_check(context, context.tenant_id,
                                 routes_per_subnet=len(host_routes))
        default_route = None
        for route in host_routes:
            netaddr_route = netaddr.IPNetwork(route["destination"])
            if netaddr_route.value == routes.DEFAULT_ROUTE.value:
                # Only one default route may be supplied; its nexthop
                # becomes the subnet gateway.
                if default_route:
                    raise q_exc.DuplicateRouteConflict(
                        subnet_id=new_subnet["id"])
                default_route = route
                gateway_ip = default_route["nexthop"]
                alloc_pools.validate_gateway_excluded(gateway_ip)
            new_subnet["routes"].append(db_api.route_create(
                context, cidr=route["destination"], gateway=route["nexthop"]))
        quota.QUOTAS.limit_check(context, context.tenant_id,
                                 dns_nameservers_per_subnet=len(dns_ips))
        for dns_ip in dns_ips:
            new_subnet["dns_nameservers"].append(db_api.dns_create(
                context, ip=netaddr.IPAddress(dns_ip)))
        # if the gateway_ip is IN the cidr for the subnet and NOT excluded by
        # policies, we should raise a 409 conflict
        if gateway_ip and default_route is None:
            alloc_pools.validate_gateway_excluded(gateway_ip)
            new_subnet["routes"].append(db_api.route_create(
                context, cidr=str(routes.DEFAULT_ROUTE), gateway=gateway_ip))
        subnet_dict = v._make_subnet_dict(new_subnet)
        subnet_dict["gateway_ip"] = gateway_ip
        return subnet_dict
def get_autoscaling_group_properties(asg_client, env, service):
    """
    Gets the autoscaling group properties based on the service name that is provided. This function will attempt to find
    the autoscaling group based on the following logic:
    1. If the service name provided matches the autoscaling group name
    2. If the service name provided matches the Name tag of the autoscaling group
    3. If the service name provided does not match the above, return None
    Args:
        asg_client: Instantiated boto3 autoscaling client
        env: Name of the environment to search for the autoscaling group
        service: Name of the service
    Returns:
        JSON object of the autoscaling group properties if it exists
    Raises:
        RuntimeError: when the underlying boto3 call fails with a ClientError
    """
    try:
        # See if {{ENV}}-{{SERVICE}} matches ASG name
        response = asg_client.describe_auto_scaling_groups(AutoScalingGroupNames=["{}-{}".format(env, service)])
        if len(response["AutoScalingGroups"]) == 0:
            # See if {{ENV}}-{{SERVICE}} matches ASG tag name
            response = asg_client.describe_tags(Filters=[{ "Name": "Key", "Values": ["Name"] }, { "Name": "Value", "Values": ["{}-{}".format(env, service)]}])
            if len(response["Tags"]) == 0:
                # Query does not match either of the above, return None
                return None
            else:
                # Resolve the ASG name from the tag, then fetch the group.
                asg_name = response["Tags"][0]["ResourceId"]
                response = asg_client.describe_auto_scaling_groups(AutoScalingGroupNames=[asg_name])
                return response["AutoScalingGroups"]
        else:
            return response["AutoScalingGroups"]
    except ClientError as error:
        raise RuntimeError("Error in finding autoscaling group {} {}".format(env, service), error) | Gets the autoscaling group properties based on the service name that is provided. This function will attempt the find
the autoscaling group based on the following logic:
1. If the service name provided matches the autoscaling group name
2. If the service name provided matches the Name tag of the autoscaling group
3. If the service name provided does not match the above, return None
Args:
asg_client: Instantiated boto3 autoscaling client
env: Name of the environment to search for the autoscaling group
service: Name of the service
Returns:
JSON object of the autoscaling group properties if it exists | Below is the instruction that describes the task:
### Input:
Gets the autoscaling group properties based on the service name that is provided. This function will attempt to find
the autoscaling group based on the following logic:
1. If the service name provided matches the autoscaling group name
2. If the service name provided matches the Name tag of the autoscaling group
3. If the service name provided does not match the above, return None
Args:
asg_client: Instantiated boto3 autoscaling client
env: Name of the environment to search for the autoscaling group
service: Name of the service
Returns:
JSON object of the autoscaling group properties if it exists
### Response:
def get_autoscaling_group_properties(asg_client, env, service):
    """
    Gets the autoscaling group properties based on the service name that is provided. This function will attempt to find
    the autoscaling group based on the following logic:
    1. If the service name provided matches the autoscaling group name
    2. If the service name provided matches the Name tag of the autoscaling group
    3. If the service name provided does not match the above, return None
    Args:
        asg_client: Instantiated boto3 autoscaling client
        env: Name of the environment to search for the autoscaling group
        service: Name of the service
    Returns:
        JSON object of the autoscaling group properties if it exists
    Raises:
        RuntimeError: when the underlying boto3 call fails with a ClientError
    """
    try:
        # See if {{ENV}}-{{SERVICE}} matches ASG name
        response = asg_client.describe_auto_scaling_groups(AutoScalingGroupNames=["{}-{}".format(env, service)])
        if len(response["AutoScalingGroups"]) == 0:
            # See if {{ENV}}-{{SERVICE}} matches ASG tag name
            response = asg_client.describe_tags(Filters=[{ "Name": "Key", "Values": ["Name"] }, { "Name": "Value", "Values": ["{}-{}".format(env, service)]}])
            if len(response["Tags"]) == 0:
                # Query does not match either of the above, return None
                return None
            else:
                # Resolve the ASG name from the tag, then fetch the group.
                asg_name = response["Tags"][0]["ResourceId"]
                response = asg_client.describe_auto_scaling_groups(AutoScalingGroupNames=[asg_name])
                return response["AutoScalingGroups"]
        else:
            return response["AutoScalingGroups"]
    except ClientError as error:
        raise RuntimeError("Error in finding autoscaling group {} {}".format(env, service), error)
def register_bjam_action (self, action_name, function=None):
"""Informs self that 'action_name' is declared in bjam.
From this point, 'action_name' is a valid argument to the
set_update_action method. The action_name should be callable
in the global module of bjam.
"""
# We allow duplicate calls to this rule for the same
# action name. This way, jamfile rules that take action names
# can just register them without specially checking if
# action is already registered.
assert isinstance(action_name, basestring)
assert function is None or callable(function)
if action_name not in self.actions:
self.actions[action_name] = BjamNativeAction(action_name, function) | Informs self that 'action_name' is declared in bjam.
From this point, 'action_name' is a valid argument to the
set_update_action method. The action_name should be callable
in the global module of bjam. | Below is the instruction that describes the task:
### Input:
Informs self that 'action_name' is declared in bjam.
From this point, 'action_name' is a valid argument to the
set_update_action method. The action_name should be callable
in the global module of bjam.
### Response:
def register_bjam_action (self, action_name, function=None):
"""Informs self that 'action_name' is declared in bjam.
From this point, 'action_name' is a valid argument to the
set_update_action method. The action_name should be callable
in the global module of bjam.
"""
# We allow duplicate calls to this rule for the same
# action name. This way, jamfile rules that take action names
# can just register them without specially checking if
# action is already registered.
assert isinstance(action_name, basestring)
assert function is None or callable(function)
if action_name not in self.actions:
self.actions[action_name] = BjamNativeAction(action_name, function) |
def info(self):
"""
tuple of the start_pc, end_pc, handler_pc and catch_type_ref
"""
return (self.start_pc, self.end_pc,
self.handler_pc, self.get_catch_type()) | tuple of the start_pc, end_pc, handler_pc and catch_type_ref | Below is the instruction that describes the task:
### Input:
tuple of the start_pc, end_pc, handler_pc and catch_type_ref
### Response:
def info(self):
"""
tuple of the start_pc, end_pc, handler_pc and catch_type_ref
"""
return (self.start_pc, self.end_pc,
self.handler_pc, self.get_catch_type()) |
def parse_csv_header(line):
"""Parse the CSV header returned by TDS."""
units = {}
names = []
for var in line.split(','):
start = var.find('[')
if start < 0:
names.append(str(var))
continue
else:
names.append(str(var[:start]))
end = var.find(']', start)
unitstr = var[start + 1:end]
eq = unitstr.find('=')
if eq >= 0:
# go past = and ", skip final "
units[names[-1]] = unitstr[eq + 2:-1]
return names, units | Parse the CSV header returned by TDS. | Below is the instruction that describes the task:
### Input:
Parse the CSV header returned by TDS.
### Response:
def parse_csv_header(line):
"""Parse the CSV header returned by TDS."""
units = {}
names = []
for var in line.split(','):
start = var.find('[')
if start < 0:
names.append(str(var))
continue
else:
names.append(str(var[:start]))
end = var.find(']', start)
unitstr = var[start + 1:end]
eq = unitstr.find('=')
if eq >= 0:
# go past = and ", skip final "
units[names[-1]] = unitstr[eq + 2:-1]
return names, units |
def timespan(self, from_date, to_date=None, span=None, current=False):
"""
Takes a beginning date a filters entries. An optional to_date can be
specified, or a span, which is one of ('month', 'week', 'day').
N.B. - If given a to_date, it does not include that date, only before.
"""
if span and not to_date:
diff = None
if span == 'month':
diff = relativedelta(months=1)
elif span == 'week':
diff = relativedelta(days=7)
elif span == 'day':
diff = relativedelta(days=1)
if diff is not None:
to_date = from_date + diff
datesQ = Q(end_time__gte=from_date)
datesQ &= Q(end_time__lt=to_date) if to_date else Q()
datesQ |= Q(end_time__isnull=True) if current else Q()
return self.filter(datesQ) | Takes a beginning date a filters entries. An optional to_date can be
specified, or a span, which is one of ('month', 'week', 'day').
N.B. - If given a to_date, it does not include that date, only before. | Below is the instruction that describes the task:
### Input:
Takes a beginning date a filters entries. An optional to_date can be
specified, or a span, which is one of ('month', 'week', 'day').
N.B. - If given a to_date, it does not include that date, only before.
### Response:
def timespan(self, from_date, to_date=None, span=None, current=False):
"""
Takes a beginning date a filters entries. An optional to_date can be
specified, or a span, which is one of ('month', 'week', 'day').
N.B. - If given a to_date, it does not include that date, only before.
"""
if span and not to_date:
diff = None
if span == 'month':
diff = relativedelta(months=1)
elif span == 'week':
diff = relativedelta(days=7)
elif span == 'day':
diff = relativedelta(days=1)
if diff is not None:
to_date = from_date + diff
datesQ = Q(end_time__gte=from_date)
datesQ &= Q(end_time__lt=to_date) if to_date else Q()
datesQ |= Q(end_time__isnull=True) if current else Q()
return self.filter(datesQ) |
def write_chunks(self, data, start, step, count) -> None:
'''
Split data to count equal parts.
Write the chunks using offsets calculated from start, step and stop.
Args:
data (bytes): The data.
start (int): First offset.
step (int): Offset increment.
count (int): The number of offsets.
'''
self.mglo.write_chunks(data, start, step, count) | Split data to count equal parts.
Write the chunks using offsets calculated from start, step and stop.
Args:
data (bytes): The data.
start (int): First offset.
step (int): Offset increment.
count (int): The number of offsets. | Below is the instruction that describes the task:
### Input:
Split data to count equal parts.
Write the chunks using offsets calculated from start, step and stop.
Args:
data (bytes): The data.
start (int): First offset.
step (int): Offset increment.
count (int): The number of offsets.
### Response:
def write_chunks(self, data, start, step, count) -> None:
'''
Split data to count equal parts.
Write the chunks using offsets calculated from start, step and stop.
Args:
data (bytes): The data.
start (int): First offset.
step (int): Offset increment.
count (int): The number of offsets.
'''
self.mglo.write_chunks(data, start, step, count) |
def advance_robots(self):
'''Produces a new game state in which the robots have advanced
towards the player by one step. Handles the robots crashing into
one another too.'''
# move the robots towards the player
self = lens.robots.Each().call_step_towards(self.player)(self)
# robots in the same place are crashes
self = lens.crashes.call_union(duplicates(self.robots))(self)
# remove crashed robots
self = lens.robots.modify(lambda r: list(set(r) - self.crashes))(self)
return self | Produces a new game state in which the robots have advanced
towards the player by one step. Handles the robots crashing into
one another too. | Below is the instruction that describes the task:
### Input:
Produces a new game state in which the robots have advanced
towards the player by one step. Handles the robots crashing into
one another too.
### Response:
def advance_robots(self):
'''Produces a new game state in which the robots have advanced
towards the player by one step. Handles the robots crashing into
one another too.'''
# move the robots towards the player
self = lens.robots.Each().call_step_towards(self.player)(self)
# robots in the same place are crashes
self = lens.crashes.call_union(duplicates(self.robots))(self)
# remove crashed robots
self = lens.robots.modify(lambda r: list(set(r) - self.crashes))(self)
return self |
def dict(self):
"""A dict that holds key/values for all of the properties in the
object.
:return:
"""
d = {p.key: getattr(self, p.key) for p in self.__mapper__.attrs
if p.key not in ('contents', 'dataset')}
d['modified_datetime'] = self.modified_datetime
d['modified_ago'] = self.modified_ago
return d | A dict that holds key/values for all of the properties in the
object.
:return: | Below is the instruction that describes the task:
### Input:
A dict that holds key/values for all of the properties in the
object.
:return:
### Response:
def dict(self):
"""A dict that holds key/values for all of the properties in the
object.
:return:
"""
d = {p.key: getattr(self, p.key) for p in self.__mapper__.attrs
if p.key not in ('contents', 'dataset')}
d['modified_datetime'] = self.modified_datetime
d['modified_ago'] = self.modified_ago
return d |
def extractLargestRegion(actor):
"""Keep only the largest connected part of a mesh and discard all the smaller pieces.
.. hint:: |largestregion.py|_
"""
conn = vtk.vtkConnectivityFilter()
conn.SetExtractionModeToLargestRegion()
conn.ScalarConnectivityOff()
poly = actor.GetMapper().GetInput()
conn.SetInputData(poly)
conn.Update()
epoly = conn.GetOutput()
eact = Actor(epoly)
pr = vtk.vtkProperty()
pr.DeepCopy(actor.GetProperty())
eact.SetProperty(pr)
return eact | Keep only the largest connected part of a mesh and discard all the smaller pieces.
.. hint:: |largestregion.py|_ | Below is the instruction that describes the task:
### Input:
Keep only the largest connected part of a mesh and discard all the smaller pieces.
.. hint:: |largestregion.py|_
### Response:
def extractLargestRegion(actor):
"""Keep only the largest connected part of a mesh and discard all the smaller pieces.
.. hint:: |largestregion.py|_
"""
conn = vtk.vtkConnectivityFilter()
conn.SetExtractionModeToLargestRegion()
conn.ScalarConnectivityOff()
poly = actor.GetMapper().GetInput()
conn.SetInputData(poly)
conn.Update()
epoly = conn.GetOutput()
eact = Actor(epoly)
pr = vtk.vtkProperty()
pr.DeepCopy(actor.GetProperty())
eact.SetProperty(pr)
return eact |
def _install(self, name, autoinstall):
'''Check existence of Python module and install it using command
pip install if necessary.'''
import importlib
import pkg_resources
spam_spec = importlib.util.find_spec(name)
reinstall = False
if spam_spec is not None:
if self._version:
mod = importlib.__import__(name)
if hasattr(mod, '__version__'):
ver = mod.__version__
else:
try:
ver = pkg_resources.get_distribution(name).version
except Exception as e:
env.logger.debug(
f'Failed to get version of {name}: {e}')
env.logger.debug(
f'Comparing exiting version {ver} against requested version {self._version}'
)
if self._version.startswith(
'==') and pkg_resources.parse_version(
ver) == pkg_resources.parse_version(
self._version[2:]):
pass
elif self._version.startswith(
'<=') and pkg_resources.parse_version(
ver) <= pkg_resources.parse_version(
self._version[2:]):
pass
elif self._version.startswith(
'<') and not self._version.startswith(
'<=') and pkg_resources.parse_version(
ver) < pkg_resources.parse_version(
self._version[1:]):
pass
elif self._version.startswith(
'>=') and pkg_resources.parse_version(
ver) >= pkg_resources.parse_version(
self._version[2:]):
pass
# the case of >
elif self._version.startswith(
'>') and not self._version.startswith(
'>=') and pkg_resources.parse_version(
ver) > pkg_resources.parse_version(
self._version[1:]):
pass
elif self._version.startswith(
'!=') and pkg_resources.parse_version(
ver) != pkg_resources.parse_version(
self._version[2:]):
pass
elif self._version[0] not in (
'=', '>', '<', '!') and pkg_resources.parse_version(
ver) == pkg_resources.parse_version(self._version):
pass
else:
env.logger.warning(
f'Version {ver} of installed {name} does not match specified version {self._version}.'
)
reinstall = True
if spam_spec and not reinstall:
return True
if not autoinstall:
return False
# try to install it?
import subprocess
cmd = ['pip', 'install'] + ([] if self._version else ['-U']) + [
self._module + (self._version if self._version else '')
if self._autoinstall is True else self._autoinstall
]
env.logger.info(
f'Installing python module {name} with command {" ".join(cmd)}')
ret = subprocess.call(cmd)
if reinstall:
import sys
importlib.reload(sys.modules[name])
# try to check version
return ret == 0 and self._install(name, False) | Check existence of Python module and install it using command
pip install if necessary. | Below is the instruction that describes the task:
### Input:
Check existence of Python module and install it using command
pip install if necessary.
### Response:
def _install(self, name, autoinstall):
'''Check existence of Python module and install it using command
pip install if necessary.'''
import importlib
import pkg_resources
spam_spec = importlib.util.find_spec(name)
reinstall = False
if spam_spec is not None:
if self._version:
mod = importlib.__import__(name)
if hasattr(mod, '__version__'):
ver = mod.__version__
else:
try:
ver = pkg_resources.get_distribution(name).version
except Exception as e:
env.logger.debug(
f'Failed to get version of {name}: {e}')
env.logger.debug(
f'Comparing exiting version {ver} against requested version {self._version}'
)
if self._version.startswith(
'==') and pkg_resources.parse_version(
ver) == pkg_resources.parse_version(
self._version[2:]):
pass
elif self._version.startswith(
'<=') and pkg_resources.parse_version(
ver) <= pkg_resources.parse_version(
self._version[2:]):
pass
elif self._version.startswith(
'<') and not self._version.startswith(
'<=') and pkg_resources.parse_version(
ver) < pkg_resources.parse_version(
self._version[1:]):
pass
elif self._version.startswith(
'>=') and pkg_resources.parse_version(
ver) >= pkg_resources.parse_version(
self._version[2:]):
pass
# the case of >
elif self._version.startswith(
'>') and not self._version.startswith(
'>=') and pkg_resources.parse_version(
ver) > pkg_resources.parse_version(
self._version[1:]):
pass
elif self._version.startswith(
'!=') and pkg_resources.parse_version(
ver) != pkg_resources.parse_version(
self._version[2:]):
pass
elif self._version[0] not in (
'=', '>', '<', '!') and pkg_resources.parse_version(
ver) == pkg_resources.parse_version(self._version):
pass
else:
env.logger.warning(
f'Version {ver} of installed {name} does not match specified version {self._version}.'
)
reinstall = True
if spam_spec and not reinstall:
return True
if not autoinstall:
return False
# try to install it?
import subprocess
cmd = ['pip', 'install'] + ([] if self._version else ['-U']) + [
self._module + (self._version if self._version else '')
if self._autoinstall is True else self._autoinstall
]
env.logger.info(
f'Installing python module {name} with command {" ".join(cmd)}')
ret = subprocess.call(cmd)
if reinstall:
import sys
importlib.reload(sys.modules[name])
# try to check version
return ret == 0 and self._install(name, False) |
def _get_referenced_apps(specs):
"""
Returns a set of all apps that are required to run any bundle in specs[constants.CONFIG_BUNDLES_KEY]
"""
activated_bundles = specs[constants.CONFIG_BUNDLES_KEY].keys()
all_active_apps = set()
for active_bundle in activated_bundles:
bundle_spec = specs[constants.CONFIG_BUNDLES_KEY].get(active_bundle)
for app_name in bundle_spec['apps']:
all_active_apps.add(app_name)
all_active_apps |= _get_dependent('apps', app_name, specs, 'apps')
return all_active_apps | Returns a set of all apps that are required to run any bundle in specs[constants.CONFIG_BUNDLES_KEY] | Below is the instruction that describes the task:
### Input:
Returns a set of all apps that are required to run any bundle in specs[constants.CONFIG_BUNDLES_KEY]
### Response:
def _get_referenced_apps(specs):
"""
Returns a set of all apps that are required to run any bundle in specs[constants.CONFIG_BUNDLES_KEY]
"""
activated_bundles = specs[constants.CONFIG_BUNDLES_KEY].keys()
all_active_apps = set()
for active_bundle in activated_bundles:
bundle_spec = specs[constants.CONFIG_BUNDLES_KEY].get(active_bundle)
for app_name in bundle_spec['apps']:
all_active_apps.add(app_name)
all_active_apps |= _get_dependent('apps', app_name, specs, 'apps')
return all_active_apps |
def get_j_ty_callback(self):
""" Generates a callback for evaluating the jacobian. """
j_exprs = self.get_jac()
if j_exprs is False:
return None
cb = self._callback_factory(j_exprs)
if self.sparse:
from scipy.sparse import csc_matrix
def sparse_cb(x, y, p=()):
data = cb(x, y, p).flatten()
return csc_matrix((data, self._rowvals, self._colptrs))
return sparse_cb
else:
return cb | Generates a callback for evaluating the jacobian. | Below is the instruction that describes the task:
### Input:
Generates a callback for evaluating the jacobian.
### Response:
def get_j_ty_callback(self):
""" Generates a callback for evaluating the jacobian. """
j_exprs = self.get_jac()
if j_exprs is False:
return None
cb = self._callback_factory(j_exprs)
if self.sparse:
from scipy.sparse import csc_matrix
def sparse_cb(x, y, p=()):
data = cb(x, y, p).flatten()
return csc_matrix((data, self._rowvals, self._colptrs))
return sparse_cb
else:
return cb |
def device_id(self):
"""
Return a unique and persistent identifier for the device.
This is the basename (last path component) of the symlink in
`/dev/disk/by-id/`.
"""
if self.is_block:
for filename in self._P.Block.Symlinks:
parts = decode_ay(filename).split('/')
if parts[-2] == 'by-id':
return parts[-1]
elif self.is_drive:
return self._assocdrive._P.Drive.Id
return '' | Return a unique and persistent identifier for the device.
This is the basename (last path component) of the symlink in
`/dev/disk/by-id/`. | Below is the instruction that describes the task:
### Input:
Return a unique and persistent identifier for the device.
This is the basename (last path component) of the symlink in
`/dev/disk/by-id/`.
### Response:
def device_id(self):
"""
Return a unique and persistent identifier for the device.
This is the basename (last path component) of the symlink in
`/dev/disk/by-id/`.
"""
if self.is_block:
for filename in self._P.Block.Symlinks:
parts = decode_ay(filename).split('/')
if parts[-2] == 'by-id':
return parts[-1]
elif self.is_drive:
return self._assocdrive._P.Drive.Id
return '' |
def unescape_quoted_string(string):
r'''
This function implementes the recommended functionality described in the
tor control-spec to be compatible with older tor versions:
* Read \\n \\t \\r and \\0 ... \\377 as C escapes.
* Treat a backslash followed by any other character as that character.
Except the legacy support for the escape sequences above this function
implements parsing of QuotedString using qcontent from
QuotedString = DQUOTE *qcontent DQUOTE
:param string: The escaped quoted string.
:returns: The unescaped string.
:raises ValueError: If the string is in a invalid form
(e.g. a single backslash)
'''
match = re.match(r'''^"((?:[^"\\]|\\.)*)"$''', string)
if not match:
raise ValueError("Invalid quoted string", string)
string = match.group(1)
# remove backslash before all characters which should not be
# handeled as escape codes by string.decode('string-escape').
# This is needed so e.g. '\x00' is not unescaped as '\0'
string = re.sub(r'((?:^|[^\\])(?:\\\\)*)\\([^ntr0-7\\])', r'\1\2', string)
if six.PY3:
# XXX hmmm?
return bytes(string, 'ascii').decode('unicode-escape')
return string.decode('string-escape') | r'''
This function implementes the recommended functionality described in the
tor control-spec to be compatible with older tor versions:
* Read \\n \\t \\r and \\0 ... \\377 as C escapes.
* Treat a backslash followed by any other character as that character.
Except the legacy support for the escape sequences above this function
implements parsing of QuotedString using qcontent from
QuotedString = DQUOTE *qcontent DQUOTE
:param string: The escaped quoted string.
:returns: The unescaped string.
:raises ValueError: If the string is in a invalid form
(e.g. a single backslash) | Below is the instruction that describes the task:
### Input:
r'''
This function implementes the recommended functionality described in the
tor control-spec to be compatible with older tor versions:
* Read \\n \\t \\r and \\0 ... \\377 as C escapes.
* Treat a backslash followed by any other character as that character.
Except the legacy support for the escape sequences above this function
implements parsing of QuotedString using qcontent from
QuotedString = DQUOTE *qcontent DQUOTE
:param string: The escaped quoted string.
:returns: The unescaped string.
:raises ValueError: If the string is in a invalid form
(e.g. a single backslash)
### Response:
def unescape_quoted_string(string):
r'''
This function implementes the recommended functionality described in the
tor control-spec to be compatible with older tor versions:
* Read \\n \\t \\r and \\0 ... \\377 as C escapes.
* Treat a backslash followed by any other character as that character.
Except the legacy support for the escape sequences above this function
implements parsing of QuotedString using qcontent from
QuotedString = DQUOTE *qcontent DQUOTE
:param string: The escaped quoted string.
:returns: The unescaped string.
:raises ValueError: If the string is in a invalid form
(e.g. a single backslash)
'''
match = re.match(r'''^"((?:[^"\\]|\\.)*)"$''', string)
if not match:
raise ValueError("Invalid quoted string", string)
string = match.group(1)
# remove backslash before all characters which should not be
# handeled as escape codes by string.decode('string-escape').
# This is needed so e.g. '\x00' is not unescaped as '\0'
string = re.sub(r'((?:^|[^\\])(?:\\\\)*)\\([^ntr0-7\\])', r'\1\2', string)
if six.PY3:
# XXX hmmm?
return bytes(string, 'ascii').decode('unicode-escape')
return string.decode('string-escape') |
def _flatten_ex_dict(self):
"""Flatten structure of exceptions dictionary."""
odict = {}
for _, fdict in self._ex_dict.items():
for (extype, exmsg), value in fdict.items():
key = value["name"]
odict[key] = copy.deepcopy(value)
del odict[key]["name"]
odict[key]["type"] = extype
odict[key]["msg"] = exmsg
return odict | Flatten structure of exceptions dictionary. | Below is the instruction that describes the task:
### Input:
Flatten structure of exceptions dictionary.
### Response:
def _flatten_ex_dict(self):
"""Flatten structure of exceptions dictionary."""
odict = {}
for _, fdict in self._ex_dict.items():
for (extype, exmsg), value in fdict.items():
key = value["name"]
odict[key] = copy.deepcopy(value)
del odict[key]["name"]
odict[key]["type"] = extype
odict[key]["msg"] = exmsg
return odict |
def _generate_examples(self, file_path):
"""Generate features and target given the directory path.
Args:
file_path: path where the csv file is stored
Yields:
The features and the target
"""
with tf.io.gfile.GFile(file_path) as f:
raw_data = csv.DictReader(f)
for row in raw_data:
survive_val = row.pop("survived")
yield {
"survived": convert_to_label(survive_val, _SURVIVED_DICT),
"features": {
name: FEATURE_DICT[name][1](value)
for name, value in row.items()
}
} | Generate features and target given the directory path.
Args:
file_path: path where the csv file is stored
Yields:
The features and the target | Below is the instruction that describes the task:
### Input:
Generate features and target given the directory path.
Args:
file_path: path where the csv file is stored
Yields:
The features and the target
### Response:
def _generate_examples(self, file_path):
"""Generate features and target given the directory path.
Args:
file_path: path where the csv file is stored
Yields:
The features and the target
"""
with tf.io.gfile.GFile(file_path) as f:
raw_data = csv.DictReader(f)
for row in raw_data:
survive_val = row.pop("survived")
yield {
"survived": convert_to_label(survive_val, _SURVIVED_DICT),
"features": {
name: FEATURE_DICT[name][1](value)
for name, value in row.items()
}
} |
def server_online(self):
"""
Returns True if the Joe Sandbox servers are running or False if they are in maintenance mode.
"""
response = self._post(self.apiurl + '/v2/server/online', data={'apikey': self.apikey})
return self._raise_or_extract(response) | Returns True if the Joe Sandbox servers are running or False if they are in maintenance mode. | Below is the instruction that describes the task:
### Input:
Returns True if the Joe Sandbox servers are running or False if they are in maintenance mode.
### Response:
def server_online(self):
"""
Returns True if the Joe Sandbox servers are running or False if they are in maintenance mode.
"""
response = self._post(self.apiurl + '/v2/server/online', data={'apikey': self.apikey})
return self._raise_or_extract(response) |
def set_editor(self, editor, refresh=True):
"""
Set associated editor/web page:
codeeditor.base.TextEditBaseWidget
browser.WebView
"""
self.editor = editor
# Note: This is necessary to test widgets/editor.py
# in Qt builds that don't have web widgets
try:
from qtpy.QtWebEngineWidgets import QWebEngineView
except ImportError:
QWebEngineView = type(None)
self.words_button.setVisible(not isinstance(editor, QWebEngineView))
self.re_button.setVisible(not isinstance(editor, QWebEngineView))
from spyder.plugins.editor.widgets.codeeditor import CodeEditor
self.is_code_editor = isinstance(editor, CodeEditor)
self.highlight_button.setVisible(self.is_code_editor)
if refresh:
self.refresh()
if self.isHidden() and editor is not None:
self.clear_matches() | Set associated editor/web page:
codeeditor.base.TextEditBaseWidget
browser.WebView | Below is the instruction that describes the task:
### Input:
Set associated editor/web page:
codeeditor.base.TextEditBaseWidget
browser.WebView
### Response:
def set_editor(self, editor, refresh=True):
"""
Set associated editor/web page:
codeeditor.base.TextEditBaseWidget
browser.WebView
"""
self.editor = editor
# Note: This is necessary to test widgets/editor.py
# in Qt builds that don't have web widgets
try:
from qtpy.QtWebEngineWidgets import QWebEngineView
except ImportError:
QWebEngineView = type(None)
self.words_button.setVisible(not isinstance(editor, QWebEngineView))
self.re_button.setVisible(not isinstance(editor, QWebEngineView))
from spyder.plugins.editor.widgets.codeeditor import CodeEditor
self.is_code_editor = isinstance(editor, CodeEditor)
self.highlight_button.setVisible(self.is_code_editor)
if refresh:
self.refresh()
if self.isHidden() and editor is not None:
self.clear_matches() |
def _data_build(self, data, modname, path):
"""Build tree node from data and add some informations"""
try:
node = _parse(data + "\n")
except (TypeError, ValueError, SyntaxError) as exc:
raise exceptions.AstroidSyntaxError(
"Parsing Python code failed:\n{error}",
source=data,
modname=modname,
path=path,
error=exc,
) from exc
if path is not None:
node_file = os.path.abspath(path)
else:
node_file = "<?>"
if modname.endswith(".__init__"):
modname = modname[:-9]
package = True
else:
package = (
path is not None
and os.path.splitext(os.path.basename(path))[0] == "__init__"
)
builder = rebuilder.TreeRebuilder(self._manager)
module = builder.visit_module(node, modname, node_file, package)
module._import_from_nodes = builder._import_from_nodes
module._delayed_assattr = builder._delayed_assattr
return module | Build tree node from data and add some informations | Below is the instruction that describes the task:
### Input:
Build tree node from data and add some informations
### Response:
def _data_build(self, data, modname, path):
"""Build tree node from data and add some informations"""
try:
node = _parse(data + "\n")
except (TypeError, ValueError, SyntaxError) as exc:
raise exceptions.AstroidSyntaxError(
"Parsing Python code failed:\n{error}",
source=data,
modname=modname,
path=path,
error=exc,
) from exc
if path is not None:
node_file = os.path.abspath(path)
else:
node_file = "<?>"
if modname.endswith(".__init__"):
modname = modname[:-9]
package = True
else:
package = (
path is not None
and os.path.splitext(os.path.basename(path))[0] == "__init__"
)
builder = rebuilder.TreeRebuilder(self._manager)
module = builder.visit_module(node, modname, node_file, package)
module._import_from_nodes = builder._import_from_nodes
module._delayed_assattr = builder._delayed_assattr
return module |
def rename_nodes(self, renaming_map):
'''Rename nodes in this ``Tree``
Args:
``renaming_map`` (``dict``): A dictionary mapping old labels (keys) to new labels (values)
'''
if not isinstance(renaming_map, dict):
raise TypeError("renaming_map must be a dict")
for node in self.traverse_preorder():
if node.label in renaming_map:
node.label = renaming_map[node.label] | Rename nodes in this ``Tree``
Args:
``renaming_map`` (``dict``): A dictionary mapping old labels (keys) to new labels (values) | Below is the instruction that describes the task:
### Input:
Rename nodes in this ``Tree``
Args:
``renaming_map`` (``dict``): A dictionary mapping old labels (keys) to new labels (values)
### Response:
def rename_nodes(self, renaming_map):
'''Rename nodes in this ``Tree``
Args:
``renaming_map`` (``dict``): A dictionary mapping old labels (keys) to new labels (values)
'''
if not isinstance(renaming_map, dict):
raise TypeError("renaming_map must be a dict")
for node in self.traverse_preorder():
if node.label in renaming_map:
node.label = renaming_map[node.label] |
def verify_declared_bit(self, obj):
"""Verify a qubit id against the gate prototype."""
# We are verifying gate args against the formal parameters of a
# gate prototype.
if obj.name not in self.current_symtab:
raise QasmError("Cannot find symbol '" + obj.name
+ "' in argument list for gate, line",
str(obj.line), 'file', obj.file)
# This insures the thing is from the bitlist and not from the
# argument list.
sym = self.current_symtab[obj.name]
if not (sym.type == 'id' and sym.is_bit):
raise QasmError("Bit", obj.name,
'is not declared as a bit in the gate.') | Verify a qubit id against the gate prototype. | Below is the instruction that describes the task:
### Input:
Verify a qubit id against the gate prototype.
### Response:
def verify_declared_bit(self, obj):
"""Verify a qubit id against the gate prototype."""
# We are verifying gate args against the formal parameters of a
# gate prototype.
if obj.name not in self.current_symtab:
raise QasmError("Cannot find symbol '" + obj.name
+ "' in argument list for gate, line",
str(obj.line), 'file', obj.file)
# This insures the thing is from the bitlist and not from the
# argument list.
sym = self.current_symtab[obj.name]
if not (sym.type == 'id' and sym.is_bit):
raise QasmError("Bit", obj.name,
'is not declared as a bit in the gate.') |
def hydrophobic_interactions(atom_set_a, atom_set_b):
"""Detection of hydrophobic pliprofiler between atom_set_a (binding site) and atom_set_b (ligand).
Definition: All pairs of qualified carbon atoms within a distance of HYDROPH_DIST_MAX
"""
data = namedtuple('hydroph_interaction', 'bsatom bsatom_orig_idx ligatom ligatom_orig_idx '
'distance restype resnr reschain restype_l, resnr_l, reschain_l')
pairings = []
for a, b in itertools.product(atom_set_a, atom_set_b):
if a.orig_idx == b.orig_idx:
continue
e = euclidean3d(a.atom.coords, b.atom.coords)
if not config.MIN_DIST < e < config.HYDROPH_DIST_MAX:
continue
restype, resnr, reschain = whichrestype(a.atom), whichresnumber(a.atom), whichchain(a.atom)
restype_l, resnr_l, reschain_l = whichrestype(b.orig_atom), whichresnumber(b.orig_atom), whichchain(b.orig_atom)
contact = data(bsatom=a.atom, bsatom_orig_idx=a.orig_idx, ligatom=b.atom, ligatom_orig_idx=b.orig_idx,
distance=e, restype=restype, resnr=resnr,
reschain=reschain, restype_l=restype_l,
resnr_l=resnr_l, reschain_l=reschain_l)
pairings.append(contact)
return filter_contacts(pairings) | Detection of hydrophobic pliprofiler between atom_set_a (binding site) and atom_set_b (ligand).
Definition: All pairs of qualified carbon atoms within a distance of HYDROPH_DIST_MAX | Below is the instruction that describes the task:
### Input:
Detection of hydrophobic pliprofiler between atom_set_a (binding site) and atom_set_b (ligand).
Definition: All pairs of qualified carbon atoms within a distance of HYDROPH_DIST_MAX
### Response:
def hydrophobic_interactions(atom_set_a, atom_set_b):
"""Detection of hydrophobic pliprofiler between atom_set_a (binding site) and atom_set_b (ligand).
Definition: All pairs of qualified carbon atoms within a distance of HYDROPH_DIST_MAX
"""
data = namedtuple('hydroph_interaction', 'bsatom bsatom_orig_idx ligatom ligatom_orig_idx '
'distance restype resnr reschain restype_l, resnr_l, reschain_l')
pairings = []
for a, b in itertools.product(atom_set_a, atom_set_b):
if a.orig_idx == b.orig_idx:
continue
e = euclidean3d(a.atom.coords, b.atom.coords)
if not config.MIN_DIST < e < config.HYDROPH_DIST_MAX:
continue
restype, resnr, reschain = whichrestype(a.atom), whichresnumber(a.atom), whichchain(a.atom)
restype_l, resnr_l, reschain_l = whichrestype(b.orig_atom), whichresnumber(b.orig_atom), whichchain(b.orig_atom)
contact = data(bsatom=a.atom, bsatom_orig_idx=a.orig_idx, ligatom=b.atom, ligatom_orig_idx=b.orig_idx,
distance=e, restype=restype, resnr=resnr,
reschain=reschain, restype_l=restype_l,
resnr_l=resnr_l, reschain_l=reschain_l)
pairings.append(contact)
return filter_contacts(pairings) |
def _lookup_by_mapping():
"""Return a the init system based on a constant mapping of
distribution+version to init system..
See constants.py for the mapping.
A failover of the version is proposed for when no version is supplied.
For instance, Arch Linux's version will most probably be "rolling" at
any given time, which means that the init system cannot be idenfied
by the version of the distro.
On top of trying to identify by the distro's ID, if /etc/os-release
contains an "ID_LIKE" field, it will be tried. That, again is true
for Arch where the distro's ID changes (Manjaro, Antergos, etc...)
But the "ID_LIKE" field is always (?) `arch`.
"""
like = distro.like().lower()
distribution_id = distro.id().lower()
version = distro.major_version()
if 'arch' in (distribution_id, like):
version = 'any'
init_sys = constants.DIST_TO_INITSYS.get(
distribution_id, constants.DIST_TO_INITSYS.get(like))
if init_sys:
system = init_sys.get(version)
return [system] if system else [] | Return a the init system based on a constant mapping of
distribution+version to init system..
See constants.py for the mapping.
A failover of the version is proposed for when no version is supplied.
For instance, Arch Linux's version will most probably be "rolling" at
any given time, which means that the init system cannot be idenfied
by the version of the distro.
On top of trying to identify by the distro's ID, if /etc/os-release
contains an "ID_LIKE" field, it will be tried. That, again is true
for Arch where the distro's ID changes (Manjaro, Antergos, etc...)
But the "ID_LIKE" field is always (?) `arch`. | Below is the the instruction that describes the task:
### Input:
Return a the init system based on a constant mapping of
distribution+version to init system..
See constants.py for the mapping.
A failover of the version is proposed for when no version is supplied.
For instance, Arch Linux's version will most probably be "rolling" at
any given time, which means that the init system cannot be idenfied
by the version of the distro.
On top of trying to identify by the distro's ID, if /etc/os-release
contains an "ID_LIKE" field, it will be tried. That, again is true
for Arch where the distro's ID changes (Manjaro, Antergos, etc...)
But the "ID_LIKE" field is always (?) `arch`.
### Response:
def _lookup_by_mapping():
"""Return a the init system based on a constant mapping of
distribution+version to init system..
See constants.py for the mapping.
A failover of the version is proposed for when no version is supplied.
For instance, Arch Linux's version will most probably be "rolling" at
any given time, which means that the init system cannot be idenfied
by the version of the distro.
On top of trying to identify by the distro's ID, if /etc/os-release
contains an "ID_LIKE" field, it will be tried. That, again is true
for Arch where the distro's ID changes (Manjaro, Antergos, etc...)
But the "ID_LIKE" field is always (?) `arch`.
"""
like = distro.like().lower()
distribution_id = distro.id().lower()
version = distro.major_version()
if 'arch' in (distribution_id, like):
version = 'any'
init_sys = constants.DIST_TO_INITSYS.get(
distribution_id, constants.DIST_TO_INITSYS.get(like))
if init_sys:
system = init_sys.get(version)
return [system] if system else [] |
def evolve(self, rho: Density) -> Density:
"""Apply the action of this channel upon a density"""
N = rho.qubit_nb
qubits = rho.qubits
indices = list([qubits.index(q) for q in self.qubits]) + \
list([qubits.index(q) + N for q in self.qubits])
tensor = bk.tensormul(self.tensor, rho.tensor, indices)
return Density(tensor, qubits, rho.memory) | Apply the action of this channel upon a density | Below is the the instruction that describes the task:
### Input:
Apply the action of this channel upon a density
### Response:
def evolve(self, rho: Density) -> Density:
"""Apply the action of this channel upon a density"""
N = rho.qubit_nb
qubits = rho.qubits
indices = list([qubits.index(q) for q in self.qubits]) + \
list([qubits.index(q) + N for q in self.qubits])
tensor = bk.tensormul(self.tensor, rho.tensor, indices)
return Density(tensor, qubits, rho.memory) |
def write(self):
""" Writes generated presentation code into the destination file.
"""
html = self.render()
if self.file_type == 'pdf':
self.write_pdf(html)
else:
with codecs.open(self.destination_file, 'w',
encoding='utf_8') as outfile:
outfile.write(html) | Writes generated presentation code into the destination file. | Below is the the instruction that describes the task:
### Input:
Writes generated presentation code into the destination file.
### Response:
def write(self):
""" Writes generated presentation code into the destination file.
"""
html = self.render()
if self.file_type == 'pdf':
self.write_pdf(html)
else:
with codecs.open(self.destination_file, 'w',
encoding='utf_8') as outfile:
outfile.write(html) |
def generate_adhoc_ssl_context():
"""Generates an adhoc SSL context for the development server."""
crypto = _get_openssl_crypto_module()
import tempfile
import atexit
cert, pkey = generate_adhoc_ssl_pair()
cert_handle, cert_file = tempfile.mkstemp()
pkey_handle, pkey_file = tempfile.mkstemp()
atexit.register(os.remove, pkey_file)
atexit.register(os.remove, cert_file)
os.write(cert_handle, crypto.dump_certificate(crypto.FILETYPE_PEM, cert))
os.write(pkey_handle, crypto.dump_privatekey(crypto.FILETYPE_PEM, pkey))
os.close(cert_handle)
os.close(pkey_handle)
ctx = load_ssl_context(cert_file, pkey_file)
return ctx | Generates an adhoc SSL context for the development server. | Below is the the instruction that describes the task:
### Input:
Generates an adhoc SSL context for the development server.
### Response:
def generate_adhoc_ssl_context():
"""Generates an adhoc SSL context for the development server."""
crypto = _get_openssl_crypto_module()
import tempfile
import atexit
cert, pkey = generate_adhoc_ssl_pair()
cert_handle, cert_file = tempfile.mkstemp()
pkey_handle, pkey_file = tempfile.mkstemp()
atexit.register(os.remove, pkey_file)
atexit.register(os.remove, cert_file)
os.write(cert_handle, crypto.dump_certificate(crypto.FILETYPE_PEM, cert))
os.write(pkey_handle, crypto.dump_privatekey(crypto.FILETYPE_PEM, pkey))
os.close(cert_handle)
os.close(pkey_handle)
ctx = load_ssl_context(cert_file, pkey_file)
return ctx |
def approxEqual(x, y, *args, **kwargs):
"""approxEqual(float1, float2[, tol=1e-18, rel=1e-7]) -> True|False
approxEqual(obj1, obj2[, *args, **kwargs]) -> True|False
Return True if x and y are approximately equal, otherwise False.
If x and y are floats, return True if y is within either absolute error
tol or relative error rel of x. You can disable either the absolute or
relative check by passing None as tol or rel (but not both).
For any other objects, x and y are checked in that order for a method
__approxEqual__, and the result of that is returned as a bool. Any
optional arguments are passed to the __approxEqual__ method.
__approxEqual__ can return NotImplemented to signal that it doesn't know
how to perform that specific comparison, in which case the other object is
checked instead. If neither object have the method, or both defer by
returning NotImplemented, approxEqual falls back on the same numeric
comparison used for floats.
>>> almost_equal(1.2345678, 1.2345677)
True
>>> almost_equal(1.234, 1.235)
False
"""
if not (type(x) is type(y) is float):
# Skip checking for __approxEqual__ in the common case of two floats.
methodname = '__approxEqual__'
# Allow the objects to specify what they consider "approximately equal",
# giving precedence to x. If either object has the appropriate method, we
# pass on any optional arguments untouched.
for a,b in ((x, y), (y, x)):
try:
method = getattr(a, methodname)
except AttributeError:
continue
else:
result = method(b, *args, **kwargs)
if result is NotImplemented:
print "WARNING: NotImplemented approxEqual for types"
continue
return bool(result)
# If we get here without returning, then neither x nor y knows how to do an
# approximate equal comparison (or are both floats). Fall back to a numeric
# comparison.
return _float_approxEqual(x, y, *args, **kwargs) | approxEqual(float1, float2[, tol=1e-18, rel=1e-7]) -> True|False
approxEqual(obj1, obj2[, *args, **kwargs]) -> True|False
Return True if x and y are approximately equal, otherwise False.
If x and y are floats, return True if y is within either absolute error
tol or relative error rel of x. You can disable either the absolute or
relative check by passing None as tol or rel (but not both).
For any other objects, x and y are checked in that order for a method
__approxEqual__, and the result of that is returned as a bool. Any
optional arguments are passed to the __approxEqual__ method.
__approxEqual__ can return NotImplemented to signal that it doesn't know
how to perform that specific comparison, in which case the other object is
checked instead. If neither object have the method, or both defer by
returning NotImplemented, approxEqual falls back on the same numeric
comparison used for floats.
>>> almost_equal(1.2345678, 1.2345677)
True
>>> almost_equal(1.234, 1.235)
False | Below is the the instruction that describes the task:
### Input:
approxEqual(float1, float2[, tol=1e-18, rel=1e-7]) -> True|False
approxEqual(obj1, obj2[, *args, **kwargs]) -> True|False
Return True if x and y are approximately equal, otherwise False.
If x and y are floats, return True if y is within either absolute error
tol or relative error rel of x. You can disable either the absolute or
relative check by passing None as tol or rel (but not both).
For any other objects, x and y are checked in that order for a method
__approxEqual__, and the result of that is returned as a bool. Any
optional arguments are passed to the __approxEqual__ method.
__approxEqual__ can return NotImplemented to signal that it doesn't know
how to perform that specific comparison, in which case the other object is
checked instead. If neither object have the method, or both defer by
returning NotImplemented, approxEqual falls back on the same numeric
comparison used for floats.
>>> almost_equal(1.2345678, 1.2345677)
True
>>> almost_equal(1.234, 1.235)
False
### Response:
def approxEqual(x, y, *args, **kwargs):
"""approxEqual(float1, float2[, tol=1e-18, rel=1e-7]) -> True|False
approxEqual(obj1, obj2[, *args, **kwargs]) -> True|False
Return True if x and y are approximately equal, otherwise False.
If x and y are floats, return True if y is within either absolute error
tol or relative error rel of x. You can disable either the absolute or
relative check by passing None as tol or rel (but not both).
For any other objects, x and y are checked in that order for a method
__approxEqual__, and the result of that is returned as a bool. Any
optional arguments are passed to the __approxEqual__ method.
__approxEqual__ can return NotImplemented to signal that it doesn't know
how to perform that specific comparison, in which case the other object is
checked instead. If neither object have the method, or both defer by
returning NotImplemented, approxEqual falls back on the same numeric
comparison used for floats.
>>> almost_equal(1.2345678, 1.2345677)
True
>>> almost_equal(1.234, 1.235)
False
"""
if not (type(x) is type(y) is float):
# Skip checking for __approxEqual__ in the common case of two floats.
methodname = '__approxEqual__'
# Allow the objects to specify what they consider "approximately equal",
# giving precedence to x. If either object has the appropriate method, we
# pass on any optional arguments untouched.
for a,b in ((x, y), (y, x)):
try:
method = getattr(a, methodname)
except AttributeError:
continue
else:
result = method(b, *args, **kwargs)
if result is NotImplemented:
print "WARNING: NotImplemented approxEqual for types"
continue
return bool(result)
# If we get here without returning, then neither x nor y knows how to do an
# approximate equal comparison (or are both floats). Fall back to a numeric
# comparison.
return _float_approxEqual(x, y, *args, **kwargs) |
def _run__cherrypy(app, config, mode):
"""Run WsgiDAV using cherrypy.wsgiserver if CherryPy is installed."""
assert mode == "cherrypy-wsgiserver"
try:
from cherrypy import wsgiserver
from cherrypy.wsgiserver.ssl_builtin import BuiltinSSLAdapter
_logger.warning("WARNING: cherrypy.wsgiserver is deprecated.")
_logger.warning(
" Starting with CherryPy 9.0 the functionality from cherrypy.wsgiserver"
)
_logger.warning(" was moved to the cheroot project.")
_logger.warning(" Consider using --server=cheroot.")
except ImportError:
_logger.error("*" * 78)
_logger.error("ERROR: Could not import cherrypy.wsgiserver.")
_logger.error(
"Try `pip install cherrypy` or specify another server using the --server option."
)
_logger.error("Note that starting with CherryPy 9.0, the server was moved to")
_logger.error(
"the cheroot project, so it is recommended to use `-server=cheroot`"
)
_logger.error("and run `pip install cheroot` instead.")
_logger.error("*" * 78)
raise
server_name = "WsgiDAV/{} {} Python/{}".format(
__version__, wsgiserver.CherryPyWSGIServer.version, util.PYTHON_VERSION
)
wsgiserver.CherryPyWSGIServer.version = server_name
# Support SSL
ssl_certificate = _get_checked_path(config.get("ssl_certificate"), config)
ssl_private_key = _get_checked_path(config.get("ssl_private_key"), config)
ssl_certificate_chain = _get_checked_path(
config.get("ssl_certificate_chain"), config
)
protocol = "http"
if ssl_certificate:
assert ssl_private_key
wsgiserver.CherryPyWSGIServer.ssl_adapter = BuiltinSSLAdapter(
ssl_certificate, ssl_private_key, ssl_certificate_chain
)
protocol = "https"
_logger.info("SSL / HTTPS enabled.")
_logger.info("Running {}".format(server_name))
_logger.info(
"Serving on {}://{}:{} ...".format(protocol, config["host"], config["port"])
)
server_args = {
"bind_addr": (config["host"], config["port"]),
"wsgi_app": app,
"server_name": server_name,
}
# Override or add custom args
server_args.update(config.get("server_args", {}))
server = wsgiserver.CherryPyWSGIServer(**server_args)
# If the caller passed a startup event, monkey patch the server to set it
# when the request handler loop is entered
startup_event = config.get("startup_event")
if startup_event:
def _patched_tick():
server.tick = org_tick # undo the monkey patch
org_tick()
_logger.info("CherryPyWSGIServer is ready")
startup_event.set()
org_tick = server.tick
server.tick = _patched_tick
try:
server.start()
except KeyboardInterrupt:
_logger.warning("Caught Ctrl-C, shutting down...")
finally:
server.stop()
return | Run WsgiDAV using cherrypy.wsgiserver if CherryPy is installed. | Below is the the instruction that describes the task:
### Input:
Run WsgiDAV using cherrypy.wsgiserver if CherryPy is installed.
### Response:
def _run__cherrypy(app, config, mode):
"""Run WsgiDAV using cherrypy.wsgiserver if CherryPy is installed."""
assert mode == "cherrypy-wsgiserver"
try:
from cherrypy import wsgiserver
from cherrypy.wsgiserver.ssl_builtin import BuiltinSSLAdapter
_logger.warning("WARNING: cherrypy.wsgiserver is deprecated.")
_logger.warning(
" Starting with CherryPy 9.0 the functionality from cherrypy.wsgiserver"
)
_logger.warning(" was moved to the cheroot project.")
_logger.warning(" Consider using --server=cheroot.")
except ImportError:
_logger.error("*" * 78)
_logger.error("ERROR: Could not import cherrypy.wsgiserver.")
_logger.error(
"Try `pip install cherrypy` or specify another server using the --server option."
)
_logger.error("Note that starting with CherryPy 9.0, the server was moved to")
_logger.error(
"the cheroot project, so it is recommended to use `-server=cheroot`"
)
_logger.error("and run `pip install cheroot` instead.")
_logger.error("*" * 78)
raise
server_name = "WsgiDAV/{} {} Python/{}".format(
__version__, wsgiserver.CherryPyWSGIServer.version, util.PYTHON_VERSION
)
wsgiserver.CherryPyWSGIServer.version = server_name
# Support SSL
ssl_certificate = _get_checked_path(config.get("ssl_certificate"), config)
ssl_private_key = _get_checked_path(config.get("ssl_private_key"), config)
ssl_certificate_chain = _get_checked_path(
config.get("ssl_certificate_chain"), config
)
protocol = "http"
if ssl_certificate:
assert ssl_private_key
wsgiserver.CherryPyWSGIServer.ssl_adapter = BuiltinSSLAdapter(
ssl_certificate, ssl_private_key, ssl_certificate_chain
)
protocol = "https"
_logger.info("SSL / HTTPS enabled.")
_logger.info("Running {}".format(server_name))
_logger.info(
"Serving on {}://{}:{} ...".format(protocol, config["host"], config["port"])
)
server_args = {
"bind_addr": (config["host"], config["port"]),
"wsgi_app": app,
"server_name": server_name,
}
# Override or add custom args
server_args.update(config.get("server_args", {}))
server = wsgiserver.CherryPyWSGIServer(**server_args)
# If the caller passed a startup event, monkey patch the server to set it
# when the request handler loop is entered
startup_event = config.get("startup_event")
if startup_event:
def _patched_tick():
server.tick = org_tick # undo the monkey patch
org_tick()
_logger.info("CherryPyWSGIServer is ready")
startup_event.set()
org_tick = server.tick
server.tick = _patched_tick
try:
server.start()
except KeyboardInterrupt:
_logger.warning("Caught Ctrl-C, shutting down...")
finally:
server.stop()
return |
def _wrap(self, value, priority=0):
"""
Given a function/method, this will automatically wrap a method using
weakref to avoid circular references.
"""
if not callable(value):
raise TypeError("Only callable values can be stored in CallbackContainer")
elif self.is_bound_method(value):
# We are dealing with a bound method. Method references aren't
# persistent, so instead we store a reference to the function
# and instance.
value = (weakref.ref(value.__func__),
weakref.ref(value.__self__, self._auto_remove),
priority)
else:
value = (value, priority)
return value | Given a function/method, this will automatically wrap a method using
weakref to avoid circular references. | Below is the the instruction that describes the task:
### Input:
Given a function/method, this will automatically wrap a method using
weakref to avoid circular references.
### Response:
def _wrap(self, value, priority=0):
"""
Given a function/method, this will automatically wrap a method using
weakref to avoid circular references.
"""
if not callable(value):
raise TypeError("Only callable values can be stored in CallbackContainer")
elif self.is_bound_method(value):
# We are dealing with a bound method. Method references aren't
# persistent, so instead we store a reference to the function
# and instance.
value = (weakref.ref(value.__func__),
weakref.ref(value.__self__, self._auto_remove),
priority)
else:
value = (value, priority)
return value |
def letterSequence(letters, w=40):
"""
Return a list of input vectors corresponding to sequence of letters.
The vector for each letter has w contiguous bits ON and represented as a
sequence of non-zero indices.
"""
sequence = []
for letter in letters:
i = ord(letter) - ord('A')
sequence.append(set(range(i*w,(i+1)*w)))
return sequence | Return a list of input vectors corresponding to sequence of letters.
The vector for each letter has w contiguous bits ON and represented as a
sequence of non-zero indices. | Below is the the instruction that describes the task:
### Input:
Return a list of input vectors corresponding to sequence of letters.
The vector for each letter has w contiguous bits ON and represented as a
sequence of non-zero indices.
### Response:
def letterSequence(letters, w=40):
"""
Return a list of input vectors corresponding to sequence of letters.
The vector for each letter has w contiguous bits ON and represented as a
sequence of non-zero indices.
"""
sequence = []
for letter in letters:
i = ord(letter) - ord('A')
sequence.append(set(range(i*w,(i+1)*w)))
return sequence |
def scan_directory(self, dirname, exclude_exts=(), exclude_fnames=()):
"""
Analyze the files contained in directory dirname.
Args:
dirname: directory path
exclude_exts: list of file extensions that should be skipped.
exclude_fnames: list of file names that should be skipped.
Returns:
List of pseudopotential objects.
"""
for i, ext in enumerate(exclude_exts):
if not ext.strip().startswith("."):
exclude_exts[i] = "." + ext.strip()
# Exclude files depending on the extension.
paths = []
for fname in os.listdir(dirname):
root, ext = os.path.splitext(fname)
path = os.path.join(dirname, fname)
if (ext in exclude_exts or fname in exclude_fnames or
fname.startswith(".") or not os.path.isfile(path)): continue
paths.append(path)
pseudos = []
for path in paths:
# Parse the file and generate the pseudo.
try:
pseudo = self.parse(path)
except:
pseudo = None
if pseudo is not None:
pseudos.append(pseudo)
self._parsed_paths.extend(path)
else:
self._wrong_paths.extend(path)
return pseudos | Analyze the files contained in directory dirname.
Args:
dirname: directory path
exclude_exts: list of file extensions that should be skipped.
exclude_fnames: list of file names that should be skipped.
Returns:
List of pseudopotential objects. | Below is the the instruction that describes the task:
### Input:
Analyze the files contained in directory dirname.
Args:
dirname: directory path
exclude_exts: list of file extensions that should be skipped.
exclude_fnames: list of file names that should be skipped.
Returns:
List of pseudopotential objects.
### Response:
def scan_directory(self, dirname, exclude_exts=(), exclude_fnames=()):
"""
Analyze the files contained in directory dirname.
Args:
dirname: directory path
exclude_exts: list of file extensions that should be skipped.
exclude_fnames: list of file names that should be skipped.
Returns:
List of pseudopotential objects.
"""
for i, ext in enumerate(exclude_exts):
if not ext.strip().startswith("."):
exclude_exts[i] = "." + ext.strip()
# Exclude files depending on the extension.
paths = []
for fname in os.listdir(dirname):
root, ext = os.path.splitext(fname)
path = os.path.join(dirname, fname)
if (ext in exclude_exts or fname in exclude_fnames or
fname.startswith(".") or not os.path.isfile(path)): continue
paths.append(path)
pseudos = []
for path in paths:
# Parse the file and generate the pseudo.
try:
pseudo = self.parse(path)
except:
pseudo = None
if pseudo is not None:
pseudos.append(pseudo)
self._parsed_paths.extend(path)
else:
self._wrong_paths.extend(path)
return pseudos |
def append(self, item):
"""Append `item` (:class:`StyledText` or :class:`str`) to the end of
this mixed-styled text.
The parent of `item` is set to this mixed-styled text."""
if isinstance(item, str):
item = SingleStyledText(item)
item.parent = self
list.append(self, item) | Append `item` (:class:`StyledText` or :class:`str`) to the end of
this mixed-styled text.
The parent of `item` is set to this mixed-styled text. | Below is the the instruction that describes the task:
### Input:
Append `item` (:class:`StyledText` or :class:`str`) to the end of
this mixed-styled text.
The parent of `item` is set to this mixed-styled text.
### Response:
def append(self, item):
"""Append `item` (:class:`StyledText` or :class:`str`) to the end of
this mixed-styled text.
The parent of `item` is set to this mixed-styled text."""
if isinstance(item, str):
item = SingleStyledText(item)
item.parent = self
list.append(self, item) |
def upload_module(self, local_path=None, remote_path="/tmp/lime.ko"):
"""
Upload LiME kernel module to remote host
:type local_path: str
:param local_path: local path to lime kernel module
:type remote_path: str
:param remote_path: remote path to upload lime kernel module
"""
if local_path is None:
raise FileNotFoundFoundError(local_path)
self.shell.upload_file(local_path, remote_path) | Upload LiME kernel module to remote host
:type local_path: str
:param local_path: local path to lime kernel module
:type remote_path: str
:param remote_path: remote path to upload lime kernel module | Below is the the instruction that describes the task:
### Input:
Upload LiME kernel module to remote host
:type local_path: str
:param local_path: local path to lime kernel module
:type remote_path: str
:param remote_path: remote path to upload lime kernel module
### Response:
def upload_module(self, local_path=None, remote_path="/tmp/lime.ko"):
"""
Upload LiME kernel module to remote host
:type local_path: str
:param local_path: local path to lime kernel module
:type remote_path: str
:param remote_path: remote path to upload lime kernel module
"""
if local_path is None:
raise FileNotFoundFoundError(local_path)
self.shell.upload_file(local_path, remote_path) |
def validate_supported_property_type_id(property_name, property_type_id):
"""Ensure that the given property type_id is supported by the graph."""
if property_type_id not in PROPERTY_TYPE_ID_TO_NAME:
raise AssertionError(u'Property "{}" has unsupported property type id: '
u'{}'.format(property_name, property_type_id)) | Ensure that the given property type_id is supported by the graph. | Below is the the instruction that describes the task:
### Input:
Ensure that the given property type_id is supported by the graph.
### Response:
def validate_supported_property_type_id(property_name, property_type_id):
"""Ensure that the given property type_id is supported by the graph."""
if property_type_id not in PROPERTY_TYPE_ID_TO_NAME:
raise AssertionError(u'Property "{}" has unsupported property type id: '
u'{}'.format(property_name, property_type_id)) |
def cart_modify(self, items, CartId=None, HMAC=None, **kwargs):
"""CartAdd.
:param items:
A dictionary containing the items to be added to the cart.
Or a list containing these dictionaries.
example: [{'cart_item_id': 'rt2ofih3f389nwiuhf8934z87o3f4h',
'quantity': 1}]
:param CartId: Id of Cart
:param HMAC: HMAC of Cart, see CartCreate for more info
:return:
An :class:`~.AmazonCart`.
"""
if not CartId or not HMAC:
raise CartException('CartId required for CartModify call')
if isinstance(items, dict):
items = [items]
if len(items) > 10:
raise CartException("You can't add more than 10 items at once")
cart_item_id_key_template = 'Item.{0}.CartItemId'
quantity_key_template = 'Item.{0}.Quantity'
for i, item in enumerate(items):
kwargs[cart_item_id_key_template.format(i)] = item['cart_item_id']
kwargs[quantity_key_template.format(i)] = item['quantity']
response = self.api.CartModify(CartId=CartId, HMAC=HMAC, **kwargs)
root = objectify.fromstring(response)
new_cart = AmazonCart(root)
self._check_for_cart_error(new_cart)
return new_cart | CartAdd.
:param items:
A dictionary containing the items to be added to the cart.
Or a list containing these dictionaries.
example: [{'cart_item_id': 'rt2ofih3f389nwiuhf8934z87o3f4h',
'quantity': 1}]
:param CartId: Id of Cart
:param HMAC: HMAC of Cart, see CartCreate for more info
:return:
An :class:`~.AmazonCart`. | Below is the the instruction that describes the task:
### Input:
CartAdd.
:param items:
A dictionary containing the items to be added to the cart.
Or a list containing these dictionaries.
example: [{'cart_item_id': 'rt2ofih3f389nwiuhf8934z87o3f4h',
'quantity': 1}]
:param CartId: Id of Cart
:param HMAC: HMAC of Cart, see CartCreate for more info
:return:
An :class:`~.AmazonCart`.
### Response:
def cart_modify(self, items, CartId=None, HMAC=None, **kwargs):
"""CartAdd.
:param items:
A dictionary containing the items to be added to the cart.
Or a list containing these dictionaries.
example: [{'cart_item_id': 'rt2ofih3f389nwiuhf8934z87o3f4h',
'quantity': 1}]
:param CartId: Id of Cart
:param HMAC: HMAC of Cart, see CartCreate for more info
:return:
An :class:`~.AmazonCart`.
"""
if not CartId or not HMAC:
raise CartException('CartId required for CartModify call')
if isinstance(items, dict):
items = [items]
if len(items) > 10:
raise CartException("You can't add more than 10 items at once")
cart_item_id_key_template = 'Item.{0}.CartItemId'
quantity_key_template = 'Item.{0}.Quantity'
for i, item in enumerate(items):
kwargs[cart_item_id_key_template.format(i)] = item['cart_item_id']
kwargs[quantity_key_template.format(i)] = item['quantity']
response = self.api.CartModify(CartId=CartId, HMAC=HMAC, **kwargs)
root = objectify.fromstring(response)
new_cart = AmazonCart(root)
self._check_for_cart_error(new_cart)
return new_cart |
def send_message(self, author, content):
"""发送私信给一个用户
:param Author author: 接收私信用户对象
:param string content: 发送给用户的私信内容
:return: 成功返回 True,失败返回 False
:rtype: bool
"""
if isinstance(author, Author) is False:
raise ValueError('argument answer need to be Zhihu.Author object.')
if not content:
raise ValueError('answer content cannot be empty')
if author.url == self.url:
return False
data = {
'member_id': author.hash_id,
'content': content,
'token': '',
'_xsrf': author.xsrf
}
res = self._session.post(Send_Message_Url,
data=data)
return res.json()['r'] == 0 | 发送私信给一个用户
:param Author author: 接收私信用户对象
:param string content: 发送给用户的私信内容
:return: 成功返回 True,失败返回 False
:rtype: bool | Below is the the instruction that describes the task:
### Input:
发送私信给一个用户
:param Author author: 接收私信用户对象
:param string content: 发送给用户的私信内容
:return: 成功返回 True,失败返回 False
:rtype: bool
### Response:
def send_message(self, author, content):
"""发送私信给一个用户
:param Author author: 接收私信用户对象
:param string content: 发送给用户的私信内容
:return: 成功返回 True,失败返回 False
:rtype: bool
"""
if isinstance(author, Author) is False:
raise ValueError('argument answer need to be Zhihu.Author object.')
if not content:
raise ValueError('answer content cannot be empty')
if author.url == self.url:
return False
data = {
'member_id': author.hash_id,
'content': content,
'token': '',
'_xsrf': author.xsrf
}
res = self._session.post(Send_Message_Url,
data=data)
return res.json()['r'] == 0 |
def lambdanu_to_Rz(l,n,ac=5.,Delta=1.):
"""
NAME:
lambdanu_to_Rz
PURPOSE:
calculate galactocentric cylindrical coordinates (R,z)
from prolate spheroidal coordinates (lambda,nu),
cf. eq. (2.2) in Dejonghe & de Zeeuw (1988a)
INPUT:
l - prolate spheroidal coordinate lambda
n - prolate spheroidal coordinate nu
ac - axis ratio of the coordinate surfaces
(a/c) = sqrt(-a) / sqrt(-g) (default: 5.)
Delta - focal distance that defines the spheroidal coordinate system (default: 1.)
Delta=sqrt(g-a)
OUTPUT:
(R,z)
HISTORY:
2015-02-13 - Written - Trick (MPIA)
"""
g = Delta**2 / (1.-ac**2)
a = g - Delta**2
r2 = (l + a) * (n + a) / (a - g)
z2 = (l + g) * (n + g) / (g - a)
index = (r2 < 0.) * ((n+a) > 0.) * ((n+a) < 1e-10)
if nu.any(index):
if isinstance(r2,nu.ndarray): r2[index] = 0.
else: r2 = 0.
index = (z2 < 0.) * ((n+g) < 0.) * ((n+g) > -1e-10)
if nu.any(index):
if isinstance(z2,nu.ndarray): z2[index] = 0.
else: z2 = 0.
return (nu.sqrt(r2),nu.sqrt(z2)) | NAME:
lambdanu_to_Rz
PURPOSE:
calculate galactocentric cylindrical coordinates (R,z)
from prolate spheroidal coordinates (lambda,nu),
cf. eq. (2.2) in Dejonghe & de Zeeuw (1988a)
INPUT:
l - prolate spheroidal coordinate lambda
n - prolate spheroidal coordinate nu
ac - axis ratio of the coordinate surfaces
(a/c) = sqrt(-a) / sqrt(-g) (default: 5.)
Delta - focal distance that defines the spheroidal coordinate system (default: 1.)
Delta=sqrt(g-a)
OUTPUT:
(R,z)
HISTORY:
       2015-02-13 - Written - Trick (MPIA) | Below is the instruction that describes the task:
### Input:
NAME:
lambdanu_to_Rz
PURPOSE:
calculate galactocentric cylindrical coordinates (R,z)
from prolate spheroidal coordinates (lambda,nu),
cf. eq. (2.2) in Dejonghe & de Zeeuw (1988a)
INPUT:
l - prolate spheroidal coordinate lambda
n - prolate spheroidal coordinate nu
ac - axis ratio of the coordinate surfaces
(a/c) = sqrt(-a) / sqrt(-g) (default: 5.)
Delta - focal distance that defines the spheroidal coordinate system (default: 1.)
Delta=sqrt(g-a)
OUTPUT:
(R,z)
HISTORY:
2015-02-13 - Written - Trick (MPIA)
### Response:
def lambdanu_to_Rz(l,n,ac=5.,Delta=1.):
    """
    NAME:
       lambdanu_to_Rz
    PURPOSE:
       convert prolate spheroidal coordinates (lambda,nu) to
       galactocentric cylindrical coordinates (R,z),
       cf. eq. (2.2) in Dejonghe & de Zeeuw (1988a)
    INPUT:
       l - prolate spheroidal coordinate lambda
       n - prolate spheroidal coordinate nu
       ac - axis ratio of the coordinate surfaces
              (a/c) = sqrt(-a) / sqrt(-g) (default: 5.)
       Delta - focal distance of the spheroidal coordinate system,
               Delta=sqrt(g-a) (default: 1.)
    OUTPUT:
       (R,z)
    HISTORY:
       2015-02-13 - Written - Trick (MPIA)
    """
    g = Delta**2 / (1. - ac**2)
    a = g - Delta**2
    R2 = (l + a) * (n + a) / (a - g)
    z2 = (l + g) * (n + g) / (g - a)
    # Rounding error can push R2 / z2 marginally below zero right at the
    # coordinate boundaries nu -> -a and nu -> -g; clamp those to zero so
    # the square roots below stay real.
    negR = (R2 < 0.) * ((n + a) > 0.) * ((n + a) < 1e-10)
    if nu.any(negR):
        if isinstance(R2, nu.ndarray):
            R2[negR] = 0.
        else:
            R2 = 0.
    negz = (z2 < 0.) * ((n + g) < 0.) * ((n + g) > -1e-10)
    if nu.any(negz):
        if isinstance(z2, nu.ndarray):
            z2[negz] = 0.
        else:
            z2 = 0.
    return (nu.sqrt(R2), nu.sqrt(z2))
def TENSES(self):
""" Yields a list of tenses for this language, excluding negations.
Each tense is a (tense, person, number, mood, aspect)-tuple.
"""
a = set(TENSES[id] for id in self._format)
a = a.union(set(TENSES[id] for id in self._default.keys()))
a = a.union(set(TENSES[id] for id in self._default.values()))
a = sorted(x[:-2] for x in a if x[-2] is False) # Exclude negation.
return a | Yields a list of tenses for this language, excluding negations.
        Each tense is a (tense, person, number, mood, aspect)-tuple. | Below is the instruction that describes the task:
### Input:
Yields a list of tenses for this language, excluding negations.
Each tense is a (tense, person, number, mood, aspect)-tuple.
### Response:
def TENSES(self):
    """ Returns the sorted list of tenses available in this language,
        with negated forms filtered out.
        Every entry is a (tense, person, number, mood, aspect)-tuple.
    """
    # Gather every tense id referenced by the format table and by both
    # sides of the defaults mapping, then map the ids through TENSES once.
    ids = set(self._format)
    ids.update(self._default.keys())
    ids.update(self._default.values())
    tenses = set(TENSES[tid] for tid in ids)
    # Keep non-negated tenses only and drop the last two fields of each.
    return sorted(t[:-2] for t in tenses if t[-2] is False)
def save_asset(self, asset_form, *args, **kwargs):
"""Pass through to provider AssetAdminSession.update_asset"""
# Implemented from kitosid template for -
# osid.resource.ResourceAdminSession.update_resource
if asset_form.is_for_update():
return self.update_asset(asset_form, *args, **kwargs)
else:
        return self.create_asset(asset_form, *args, **kwargs) | Pass through to provider AssetAdminSession.update_asset | Below is the instruction that describes the task:
### Input:
Pass through to provider AssetAdminSession.update_asset
### Response:
def save_asset(self, asset_form, *args, **kwargs):
    """Pass through to provider AssetAdminSession.update_asset"""
    # Dispatch on the form's state: brand-new forms go to create_asset,
    # forms obtained for an existing asset go to update_asset.
    if not asset_form.is_for_update():
        return self.create_asset(asset_form, *args, **kwargs)
    return self.update_asset(asset_form, *args, **kwargs)
def build_model(self, n_features, regtype='none'):
"""Build the Restricted Boltzmann Machine model in TensorFlow.
:param n_features: number of features
:param regtype: regularization type
:return: self
"""
self._create_placeholders(n_features)
self._create_variables(n_features)
self.encode = self.sample_hidden_from_visible(self.input_data)[0]
self.reconstruction = self.sample_visible_from_hidden(
self.encode, n_features)
hprob0, hstate0, vprob, hprob1, hstate1 = self.gibbs_sampling_step(
self.input_data, n_features)
positive = self.compute_positive_association(self.input_data,
hprob0, hstate0)
nn_input = vprob
for step in range(self.gibbs_sampling_steps - 1):
hprob, hstate, vprob, hprob1, hstate1 = self.gibbs_sampling_step(
nn_input, n_features)
nn_input = vprob
negative = tf.matmul(tf.transpose(vprob), hprob1)
self.w_upd8 = self.W.assign_add(
self.learning_rate * (positive - negative) / self.batch_size)
self.bh_upd8 = self.bh_.assign_add(tf.multiply(self.learning_rate, tf.reduce_mean(
tf.subtract(hprob0, hprob1), 0)))
self.bv_upd8 = self.bv_.assign_add(tf.multiply(self.learning_rate, tf.reduce_mean(
tf.subtract(self.input_data, vprob), 0)))
variables = [self.W, self.bh_, self.bv_]
regterm = Layers.regularization(variables, self.regtype, self.regcoef)
self.cost = self.loss.compile(vprob, self.input_data, regterm=regterm) | Build the Restricted Boltzmann Machine model in TensorFlow.
:param n_features: number of features
:param regtype: regularization type
    :return: self | Below is the instruction that describes the task:
### Input:
Build the Restricted Boltzmann Machine model in TensorFlow.
:param n_features: number of features
:param regtype: regularization type
:return: self
### Response:
def build_model(self, n_features, regtype='none'):
    """Build the Restricted Boltzmann Machine model in TensorFlow.

    Wires the contrastive-divergence update ops and the reconstruction
    cost onto the default graph; everything is stored on ``self`` as a
    side effect.

    :param n_features: number of features (visible units)
    :param regtype: regularization type
        NOTE(review): this parameter is currently ignored — the
        regularization term below uses ``self.regtype`` instead; confirm
        which one is intended.
    :return: None (ops are attached to ``self``)
    """
    self._create_placeholders(n_features)
    self._create_variables(n_features)
    # Encoder / decoder ops exposed for transform() style usage.
    self.encode = self.sample_hidden_from_visible(self.input_data)[0]
    self.reconstruction = self.sample_visible_from_hidden(
        self.encode, n_features)
    # First Gibbs step from the data; hprob0/hstate0 feed the positive phase.
    hprob0, hstate0, vprob, hprob1, hstate1 = self.gibbs_sampling_step(
        self.input_data, n_features)
    positive = self.compute_positive_association(self.input_data,
                                                 hprob0, hstate0)
    # Run the remaining (k - 1) Gibbs steps, chaining each step's visible
    # probabilities into the next (CD-k style sampling).
    nn_input = vprob
    for step in range(self.gibbs_sampling_steps - 1):
        hprob, hstate, vprob, hprob1, hstate1 = self.gibbs_sampling_step(
            nn_input, n_features)
        nn_input = vprob
    # Negative-phase association from the final sample of the chain.
    negative = tf.matmul(tf.transpose(vprob), hprob1)
    # Parameter update ops: weights plus hidden/visible biases.
    self.w_upd8 = self.W.assign_add(
        self.learning_rate * (positive - negative) / self.batch_size)
    self.bh_upd8 = self.bh_.assign_add(tf.multiply(self.learning_rate, tf.reduce_mean(
        tf.subtract(hprob0, hprob1), 0)))
    self.bv_upd8 = self.bv_.assign_add(tf.multiply(self.learning_rate, tf.reduce_mean(
        tf.subtract(self.input_data, vprob), 0)))
    variables = [self.W, self.bh_, self.bv_]
    regterm = Layers.regularization(variables, self.regtype, self.regcoef)
    # Reconstruction cost (plus regularization) used for monitoring/training.
    self.cost = self.loss.compile(vprob, self.input_data, regterm=regterm)
def addCases(self, tupesValStmnts):
"""
    Add multiple case statements from iterable of tuples
(caseVal, statements)
"""
s = self
for val, statements in tupesValStmnts:
s = s.Case(val, statements)
    return s | Add multiple case statements from iterable of tuples
    (caseVal, statements) | Below is the instruction that describes the task:
### Input:
Add multiple case statements from iterable of tuples
(caseVal, statements)
### Response:
def addCases(self, tupesValStmnts):
    """
    Add multiple case statements from an iterable of
    (caseVal, statements) tuples.
    """
    # Each Case() call returns the switch object, so fold the items
    # through it one by one and hand back the final object for chaining.
    switch = self
    for item in tupesValStmnts:
        switch = switch.Case(*item)
    return switch
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.