code stringlengths 75 104k | docstring stringlengths 1 46.9k | text stringlengths 164 112k |
|---|---|---|
def flatten4d3d(x):
"""Flatten a 4d-tensor into a 3d-tensor by joining width and height."""
xshape = shape_list(x)
result = tf.reshape(x, [xshape[0], xshape[1] * xshape[2], xshape[3]])
return result | Flatten a 4d-tensor into a 3d-tensor by joining width and height. | Below is the the instruction that describes the task:
### Input:
Flatten a 4d-tensor into a 3d-tensor by joining width and height.
### Response:
def flatten4d3d(x):
"""Flatten a 4d-tensor into a 3d-tensor by joining width and height."""
xshape = shape_list(x)
result = tf.reshape(x, [xshape[0], xshape[1] * xshape[2], xshape[3]])
return result |
def lambda_vpc_execution_statements():
"""Allow Lambda to manipuate EC2 ENIs for VPC support."""
return [
Statement(
Effect=Allow,
Resource=['*'],
Action=[
ec2.CreateNetworkInterface,
ec2.DescribeNetworkInterfaces,
ec2.DeleteNetworkInterface,
]
)
] | Allow Lambda to manipuate EC2 ENIs for VPC support. | Below is the the instruction that describes the task:
### Input:
Allow Lambda to manipuate EC2 ENIs for VPC support.
### Response:
def lambda_vpc_execution_statements():
"""Allow Lambda to manipuate EC2 ENIs for VPC support."""
return [
Statement(
Effect=Allow,
Resource=['*'],
Action=[
ec2.CreateNetworkInterface,
ec2.DescribeNetworkInterfaces,
ec2.DeleteNetworkInterface,
]
)
] |
def validate_object(obj, field_validators=None, non_field_validators=None,
schema=None, context=None):
"""
Takes a mapping and applies a mapping of validator functions to it
collecting and reraising any validation errors that occur.
"""
if schema is None:
schema = {}
if context is None:
context = {}
if field_validators is None:
field_validators = ValidationDict()
if non_field_validators is None:
non_field_validators = ValidationList()
from flex.validation.schema import (
construct_schema_validators,
)
schema_validators = construct_schema_validators(schema, context)
if '$ref' in schema_validators and hasattr(schema_validators['$ref'], 'validators'):
ref_ = field_validators.pop('$ref')
for k, v in ref_.validators.items():
if k not in schema_validators:
schema_validators.add_validator(k, v)
if 'discriminator' in schema:
schema_validators = add_polymorphism_requirements(obj, schema, context, schema_validators)
# delete resolved discriminator to avoid infinite recursion
del schema['discriminator']
schema_validators.update(field_validators)
schema_validators.validate_object(obj, context=context)
non_field_validators.validate_object(obj, context=context)
return obj | Takes a mapping and applies a mapping of validator functions to it
collecting and reraising any validation errors that occur. | Below is the the instruction that describes the task:
### Input:
Takes a mapping and applies a mapping of validator functions to it
collecting and reraising any validation errors that occur.
### Response:
def validate_object(obj, field_validators=None, non_field_validators=None,
schema=None, context=None):
"""
Takes a mapping and applies a mapping of validator functions to it
collecting and reraising any validation errors that occur.
"""
if schema is None:
schema = {}
if context is None:
context = {}
if field_validators is None:
field_validators = ValidationDict()
if non_field_validators is None:
non_field_validators = ValidationList()
from flex.validation.schema import (
construct_schema_validators,
)
schema_validators = construct_schema_validators(schema, context)
if '$ref' in schema_validators and hasattr(schema_validators['$ref'], 'validators'):
ref_ = field_validators.pop('$ref')
for k, v in ref_.validators.items():
if k not in schema_validators:
schema_validators.add_validator(k, v)
if 'discriminator' in schema:
schema_validators = add_polymorphism_requirements(obj, schema, context, schema_validators)
# delete resolved discriminator to avoid infinite recursion
del schema['discriminator']
schema_validators.update(field_validators)
schema_validators.validate_object(obj, context=context)
non_field_validators.validate_object(obj, context=context)
return obj |
def mincost_diameter_augment(graph, max_cost, candidates=None, weight=None, cost=None):
"""
PROBLEM: Bounded Cost Minimum Diameter Edge Addition (BCMD)
Args:
graph (nx.Graph): input graph
max_cost (float): maximum weighted diamter of the graph
weight (str): key of the edge weight attribute
cost (str): key of the edge cost attribute
candidates (list): set of non-edges, optional, defaults
to the complement of the graph
Returns:
None: if no solution exists
list: minimum cost edges if solution exists
Notes:
We are given a graph G = (V, E) with an edge weight function w, an edge
cost function c, an a maximum cost B.
The goal is to find a set of candidate non-edges F.
Let x[e] in {0, 1} denote if a non-edge e is excluded or included.
minimize sum(c(e) * x[e] for e in F)
such that
weighted_diamter(graph.union({e for e in F if x[e]})) <= B
References:
https://www.cse.unsw.edu.au/~sergeg/papers/FratiGGM13isaac.pdf
http://www.cis.upenn.edu/~sanjeev/papers/diameter.pdf
http://dl.acm.org/citation.cfm?id=2953882
Notes:
There is a 4-Approximation of the BCMD problem
Running time is O((3 ** B * B ** 3 + n + log(B * n)) * B * n ** 2)
This algorithm usexs a clustering approach to find a set C, of B + 1
cluster centers. Then we create a minimum height rooted tree, T = (U
\subseteq V, D) so that C \subseteq U. This tree T approximates an
optimal B-augmentation.
Example:
>>> # ENABLE_DOCTEST
>>> from utool.util_graph import * # NOQA
>>> import utool as ut
>>> graph = nx.Graph()
>>> if nx.__version__.startswith('1'):
>>> nx.add_path = nx.Graph.add_path
>>> nx.add_path(graph, range(6))
>>> #cost_func = lambda e: e[0] + e[1]
>>> cost_func = lambda e: 1
>>> weight_func = lambda e: (e[0]) / e[1]
>>> comp_graph = nx.complement(graph)
>>> nx.set_edge_attributes(graph, name='cost', values={e: cost_func(e) for e in graph.edges()})
>>> nx.set_edge_attributes(graph, name='weight', values={e: weight_func(e) for e in graph.edges()})
>>> nx.set_edge_attributes(comp_graph, name='cost', values={e: cost_func(e) for e in comp_graph.edges()})
>>> nx.set_edge_attributes(comp_graph, name='weight', values={e: weight_func(e) for e in comp_graph.edges()})
>>> candidates = list(comp_graph.edges(data=True))
>>> max_cost = 2
>>> cost = 'cost'
>>> weight = 'weight'
>>> best_edges = mincost_diameter_augment(graph, max_cost, candidates, weight, cost)
>>> print('best_edges = %r' % (best_edges,))
>>> soln_edges = greedy_mincost_diameter_augment(graph, max_cost, candidates, weight, cost)
>>> print('soln_edges = %r' % (soln_edges,))
"""
import utool as ut
import operator as op
if candidates is None:
candidates = list(graph.complement().edges(data=True))
def augment_add(graph, edges):
aug_graph = graph.copy()
aug_graph.add_edges_from(edges)
return aug_graph
def solution_energy(chosen_edges):
if weight is None:
return len(chosen_edges)
else:
return sum(d[weight] for (u, v, d) in chosen_edges)
variable_basis = [(0, 1) for _ in candidates]
best_energy = np.inf
best_soln = None
soln_generator = ut.product(*variable_basis)
length = reduce(op.mul, map(len, variable_basis), 1)
if length > 3000:
# Let the user know that it might take some time to find a solution
soln_generator = ut.ProgIter(soln_generator, label='BruteForce BCMD',
length=length)
# Brute force solution
for x in soln_generator:
chosen_edges = ut.compress(candidates, x)
aug_graph = augment_add(graph, chosen_edges)
total_cost = weighted_diamter(aug_graph, weight=cost)
energy = solution_energy(chosen_edges)
if total_cost <= max_cost:
if energy < best_energy:
best_energy = energy
best_soln = x
best_edges = ut.compress(candidates, best_soln)
return best_edges | PROBLEM: Bounded Cost Minimum Diameter Edge Addition (BCMD)
Args:
graph (nx.Graph): input graph
max_cost (float): maximum weighted diamter of the graph
weight (str): key of the edge weight attribute
cost (str): key of the edge cost attribute
candidates (list): set of non-edges, optional, defaults
to the complement of the graph
Returns:
None: if no solution exists
list: minimum cost edges if solution exists
Notes:
We are given a graph G = (V, E) with an edge weight function w, an edge
cost function c, an a maximum cost B.
The goal is to find a set of candidate non-edges F.
Let x[e] in {0, 1} denote if a non-edge e is excluded or included.
minimize sum(c(e) * x[e] for e in F)
such that
weighted_diamter(graph.union({e for e in F if x[e]})) <= B
References:
https://www.cse.unsw.edu.au/~sergeg/papers/FratiGGM13isaac.pdf
http://www.cis.upenn.edu/~sanjeev/papers/diameter.pdf
http://dl.acm.org/citation.cfm?id=2953882
Notes:
There is a 4-Approximation of the BCMD problem
Running time is O((3 ** B * B ** 3 + n + log(B * n)) * B * n ** 2)
This algorithm usexs a clustering approach to find a set C, of B + 1
cluster centers. Then we create a minimum height rooted tree, T = (U
\subseteq V, D) so that C \subseteq U. This tree T approximates an
optimal B-augmentation.
Example:
>>> # ENABLE_DOCTEST
>>> from utool.util_graph import * # NOQA
>>> import utool as ut
>>> graph = nx.Graph()
>>> if nx.__version__.startswith('1'):
>>> nx.add_path = nx.Graph.add_path
>>> nx.add_path(graph, range(6))
>>> #cost_func = lambda e: e[0] + e[1]
>>> cost_func = lambda e: 1
>>> weight_func = lambda e: (e[0]) / e[1]
>>> comp_graph = nx.complement(graph)
>>> nx.set_edge_attributes(graph, name='cost', values={e: cost_func(e) for e in graph.edges()})
>>> nx.set_edge_attributes(graph, name='weight', values={e: weight_func(e) for e in graph.edges()})
>>> nx.set_edge_attributes(comp_graph, name='cost', values={e: cost_func(e) for e in comp_graph.edges()})
>>> nx.set_edge_attributes(comp_graph, name='weight', values={e: weight_func(e) for e in comp_graph.edges()})
>>> candidates = list(comp_graph.edges(data=True))
>>> max_cost = 2
>>> cost = 'cost'
>>> weight = 'weight'
>>> best_edges = mincost_diameter_augment(graph, max_cost, candidates, weight, cost)
>>> print('best_edges = %r' % (best_edges,))
>>> soln_edges = greedy_mincost_diameter_augment(graph, max_cost, candidates, weight, cost)
>>> print('soln_edges = %r' % (soln_edges,)) | Below is the the instruction that describes the task:
### Input:
PROBLEM: Bounded Cost Minimum Diameter Edge Addition (BCMD)
Args:
graph (nx.Graph): input graph
max_cost (float): maximum weighted diamter of the graph
weight (str): key of the edge weight attribute
cost (str): key of the edge cost attribute
candidates (list): set of non-edges, optional, defaults
to the complement of the graph
Returns:
None: if no solution exists
list: minimum cost edges if solution exists
Notes:
We are given a graph G = (V, E) with an edge weight function w, an edge
cost function c, an a maximum cost B.
The goal is to find a set of candidate non-edges F.
Let x[e] in {0, 1} denote if a non-edge e is excluded or included.
minimize sum(c(e) * x[e] for e in F)
such that
weighted_diamter(graph.union({e for e in F if x[e]})) <= B
References:
https://www.cse.unsw.edu.au/~sergeg/papers/FratiGGM13isaac.pdf
http://www.cis.upenn.edu/~sanjeev/papers/diameter.pdf
http://dl.acm.org/citation.cfm?id=2953882
Notes:
There is a 4-Approximation of the BCMD problem
Running time is O((3 ** B * B ** 3 + n + log(B * n)) * B * n ** 2)
This algorithm usexs a clustering approach to find a set C, of B + 1
cluster centers. Then we create a minimum height rooted tree, T = (U
\subseteq V, D) so that C \subseteq U. This tree T approximates an
optimal B-augmentation.
Example:
>>> # ENABLE_DOCTEST
>>> from utool.util_graph import * # NOQA
>>> import utool as ut
>>> graph = nx.Graph()
>>> if nx.__version__.startswith('1'):
>>> nx.add_path = nx.Graph.add_path
>>> nx.add_path(graph, range(6))
>>> #cost_func = lambda e: e[0] + e[1]
>>> cost_func = lambda e: 1
>>> weight_func = lambda e: (e[0]) / e[1]
>>> comp_graph = nx.complement(graph)
>>> nx.set_edge_attributes(graph, name='cost', values={e: cost_func(e) for e in graph.edges()})
>>> nx.set_edge_attributes(graph, name='weight', values={e: weight_func(e) for e in graph.edges()})
>>> nx.set_edge_attributes(comp_graph, name='cost', values={e: cost_func(e) for e in comp_graph.edges()})
>>> nx.set_edge_attributes(comp_graph, name='weight', values={e: weight_func(e) for e in comp_graph.edges()})
>>> candidates = list(comp_graph.edges(data=True))
>>> max_cost = 2
>>> cost = 'cost'
>>> weight = 'weight'
>>> best_edges = mincost_diameter_augment(graph, max_cost, candidates, weight, cost)
>>> print('best_edges = %r' % (best_edges,))
>>> soln_edges = greedy_mincost_diameter_augment(graph, max_cost, candidates, weight, cost)
>>> print('soln_edges = %r' % (soln_edges,))
### Response:
def mincost_diameter_augment(graph, max_cost, candidates=None, weight=None, cost=None):
"""
PROBLEM: Bounded Cost Minimum Diameter Edge Addition (BCMD)
Args:
graph (nx.Graph): input graph
max_cost (float): maximum weighted diamter of the graph
weight (str): key of the edge weight attribute
cost (str): key of the edge cost attribute
candidates (list): set of non-edges, optional, defaults
to the complement of the graph
Returns:
None: if no solution exists
list: minimum cost edges if solution exists
Notes:
We are given a graph G = (V, E) with an edge weight function w, an edge
cost function c, an a maximum cost B.
The goal is to find a set of candidate non-edges F.
Let x[e] in {0, 1} denote if a non-edge e is excluded or included.
minimize sum(c(e) * x[e] for e in F)
such that
weighted_diamter(graph.union({e for e in F if x[e]})) <= B
References:
https://www.cse.unsw.edu.au/~sergeg/papers/FratiGGM13isaac.pdf
http://www.cis.upenn.edu/~sanjeev/papers/diameter.pdf
http://dl.acm.org/citation.cfm?id=2953882
Notes:
There is a 4-Approximation of the BCMD problem
Running time is O((3 ** B * B ** 3 + n + log(B * n)) * B * n ** 2)
This algorithm usexs a clustering approach to find a set C, of B + 1
cluster centers. Then we create a minimum height rooted tree, T = (U
\subseteq V, D) so that C \subseteq U. This tree T approximates an
optimal B-augmentation.
Example:
>>> # ENABLE_DOCTEST
>>> from utool.util_graph import * # NOQA
>>> import utool as ut
>>> graph = nx.Graph()
>>> if nx.__version__.startswith('1'):
>>> nx.add_path = nx.Graph.add_path
>>> nx.add_path(graph, range(6))
>>> #cost_func = lambda e: e[0] + e[1]
>>> cost_func = lambda e: 1
>>> weight_func = lambda e: (e[0]) / e[1]
>>> comp_graph = nx.complement(graph)
>>> nx.set_edge_attributes(graph, name='cost', values={e: cost_func(e) for e in graph.edges()})
>>> nx.set_edge_attributes(graph, name='weight', values={e: weight_func(e) for e in graph.edges()})
>>> nx.set_edge_attributes(comp_graph, name='cost', values={e: cost_func(e) for e in comp_graph.edges()})
>>> nx.set_edge_attributes(comp_graph, name='weight', values={e: weight_func(e) for e in comp_graph.edges()})
>>> candidates = list(comp_graph.edges(data=True))
>>> max_cost = 2
>>> cost = 'cost'
>>> weight = 'weight'
>>> best_edges = mincost_diameter_augment(graph, max_cost, candidates, weight, cost)
>>> print('best_edges = %r' % (best_edges,))
>>> soln_edges = greedy_mincost_diameter_augment(graph, max_cost, candidates, weight, cost)
>>> print('soln_edges = %r' % (soln_edges,))
"""
import utool as ut
import operator as op
if candidates is None:
candidates = list(graph.complement().edges(data=True))
def augment_add(graph, edges):
aug_graph = graph.copy()
aug_graph.add_edges_from(edges)
return aug_graph
def solution_energy(chosen_edges):
if weight is None:
return len(chosen_edges)
else:
return sum(d[weight] for (u, v, d) in chosen_edges)
variable_basis = [(0, 1) for _ in candidates]
best_energy = np.inf
best_soln = None
soln_generator = ut.product(*variable_basis)
length = reduce(op.mul, map(len, variable_basis), 1)
if length > 3000:
# Let the user know that it might take some time to find a solution
soln_generator = ut.ProgIter(soln_generator, label='BruteForce BCMD',
length=length)
# Brute force solution
for x in soln_generator:
chosen_edges = ut.compress(candidates, x)
aug_graph = augment_add(graph, chosen_edges)
total_cost = weighted_diamter(aug_graph, weight=cost)
energy = solution_energy(chosen_edges)
if total_cost <= max_cost:
if energy < best_energy:
best_energy = energy
best_soln = x
best_edges = ut.compress(candidates, best_soln)
return best_edges |
def get_newsentry_meta_description(newsentry):
"""Returns the meta description for the given entry."""
if newsentry.meta_description:
return newsentry.meta_description
# If there is no seo addon found, take the info from the placeholders
text = newsentry.get_description()
if len(text) > 160:
return u'{}...'.format(text[:160])
return text | Returns the meta description for the given entry. | Below is the the instruction that describes the task:
### Input:
Returns the meta description for the given entry.
### Response:
def get_newsentry_meta_description(newsentry):
"""Returns the meta description for the given entry."""
if newsentry.meta_description:
return newsentry.meta_description
# If there is no seo addon found, take the info from the placeholders
text = newsentry.get_description()
if len(text) > 160:
return u'{}...'.format(text[:160])
return text |
def _parse_command_response(response):
"""Parse an SCI command response into ElementTree XML
This is a helper method that takes a Requests Response object
of an SCI command response and will parse it into an ElementTree Element
representing the root of the XML response.
:param response: The requests response object
:return: An ElementTree Element that is the root of the response XML
:raises ResponseParseError: If the response XML is not well formed
"""
try:
root = ET.fromstring(response.text)
except ET.ParseError:
raise ResponseParseError(
"Unexpected response format, could not parse XML. Response: {}".format(response.text))
return root | Parse an SCI command response into ElementTree XML
This is a helper method that takes a Requests Response object
of an SCI command response and will parse it into an ElementTree Element
representing the root of the XML response.
:param response: The requests response object
:return: An ElementTree Element that is the root of the response XML
:raises ResponseParseError: If the response XML is not well formed | Below is the the instruction that describes the task:
### Input:
Parse an SCI command response into ElementTree XML
This is a helper method that takes a Requests Response object
of an SCI command response and will parse it into an ElementTree Element
representing the root of the XML response.
:param response: The requests response object
:return: An ElementTree Element that is the root of the response XML
:raises ResponseParseError: If the response XML is not well formed
### Response:
def _parse_command_response(response):
"""Parse an SCI command response into ElementTree XML
This is a helper method that takes a Requests Response object
of an SCI command response and will parse it into an ElementTree Element
representing the root of the XML response.
:param response: The requests response object
:return: An ElementTree Element that is the root of the response XML
:raises ResponseParseError: If the response XML is not well formed
"""
try:
root = ET.fromstring(response.text)
except ET.ParseError:
raise ResponseParseError(
"Unexpected response format, could not parse XML. Response: {}".format(response.text))
return root |
async def _handle_message(self,
message: BaseMessage,
responder: Responder) -> Optional[Dict]:
"""
Handles a message: find a state and run it.
:return: The register that was saved
"""
async def noop(request: Request, responder: Responder):
pass
mm = MiddlewareManager.instance()
reg_manager = self.register\
.work_on_register(message.get_conversation().id)
async with reg_manager as reg:
request = Request(message, reg)
await request.transform()
if not request.stack.layers:
return
logger.debug('Incoming message: %s', request.stack)
await mm.get('pre_handle', noop)(request, responder)
# noinspection PyBroadException
try:
state, trigger, dnr = \
await self._build_state(request, message, responder)
except Exception:
logger.exception('Error while finding a transition from %s',
reg.get(Register.STATE))
reporter.report(request, None)
return
if state is None:
logger.debug(
'No next state found but "%s" is not confusing, stopping',
request.message,
)
return
state = await self._run_state(responder, state, trigger, request)
# noinspection PyBroadException
try:
await responder.flush(request)
except MissingTranslationError as e:
responder.clear()
responder.send([RawText(str(e))])
await responder.flush(request)
reporter.report(request, state.name())
logger.exception('Missing translation in state %s',
state.name())
except Exception:
reporter.report(request, state.name())
logger.exception('Could not flush content after %s',
state.name())
else:
if not dnr:
reg.replacement = await self._build_state_register(
state,
request,
responder,
)
return reg.replacement | Handles a message: find a state and run it.
:return: The register that was saved | Below is the the instruction that describes the task:
### Input:
Handles a message: find a state and run it.
:return: The register that was saved
### Response:
async def _handle_message(self,
message: BaseMessage,
responder: Responder) -> Optional[Dict]:
"""
Handles a message: find a state and run it.
:return: The register that was saved
"""
async def noop(request: Request, responder: Responder):
pass
mm = MiddlewareManager.instance()
reg_manager = self.register\
.work_on_register(message.get_conversation().id)
async with reg_manager as reg:
request = Request(message, reg)
await request.transform()
if not request.stack.layers:
return
logger.debug('Incoming message: %s', request.stack)
await mm.get('pre_handle', noop)(request, responder)
# noinspection PyBroadException
try:
state, trigger, dnr = \
await self._build_state(request, message, responder)
except Exception:
logger.exception('Error while finding a transition from %s',
reg.get(Register.STATE))
reporter.report(request, None)
return
if state is None:
logger.debug(
'No next state found but "%s" is not confusing, stopping',
request.message,
)
return
state = await self._run_state(responder, state, trigger, request)
# noinspection PyBroadException
try:
await responder.flush(request)
except MissingTranslationError as e:
responder.clear()
responder.send([RawText(str(e))])
await responder.flush(request)
reporter.report(request, state.name())
logger.exception('Missing translation in state %s',
state.name())
except Exception:
reporter.report(request, state.name())
logger.exception('Could not flush content after %s',
state.name())
else:
if not dnr:
reg.replacement = await self._build_state_register(
state,
request,
responder,
)
return reg.replacement |
def create_device(name,
role,
model,
manufacturer,
site):
'''
.. versionadded:: 2019.2.0
Create a new device with a name, role, model, manufacturer and site.
All these components need to be already in Netbox.
name
The name of the device, e.g., ``edge_router``
role
String of device role, e.g., ``router``
model
String of device model, e.g., ``MX480``
manufacturer
String of device manufacturer, e.g., ``Juniper``
site
String of device site, e.g., ``BRU``
CLI Example:
.. code-block:: bash
salt myminion netbox.create_device edge_router router MX480 Juniper BRU
'''
try:
nb_role = get_('dcim', 'device-roles', name=role)
if not nb_role:
return False
nb_type = get_('dcim', 'device-types', model=model)
if not nb_type:
return False
nb_site = get_('dcim', 'sites', name=site)
if not nb_site:
return False
status = {'label': "Active", 'value': 1}
except RequestError as e:
log.error('%s, %s, %s', e.req.request.headers, e.request_body, e.error)
return False
payload = {'name': name, 'display_name': name, 'slug': slugify(name), 'device_type': nb_type['id'],
'device_role': nb_role['id'], 'site': nb_site['id']}
new_dev = _add('dcim', 'devices', payload)
if new_dev:
return {'dcim': {'devices': payload}}
else:
return False | .. versionadded:: 2019.2.0
Create a new device with a name, role, model, manufacturer and site.
All these components need to be already in Netbox.
name
The name of the device, e.g., ``edge_router``
role
String of device role, e.g., ``router``
model
String of device model, e.g., ``MX480``
manufacturer
String of device manufacturer, e.g., ``Juniper``
site
String of device site, e.g., ``BRU``
CLI Example:
.. code-block:: bash
salt myminion netbox.create_device edge_router router MX480 Juniper BRU | Below is the the instruction that describes the task:
### Input:
.. versionadded:: 2019.2.0
Create a new device with a name, role, model, manufacturer and site.
All these components need to be already in Netbox.
name
The name of the device, e.g., ``edge_router``
role
String of device role, e.g., ``router``
model
String of device model, e.g., ``MX480``
manufacturer
String of device manufacturer, e.g., ``Juniper``
site
String of device site, e.g., ``BRU``
CLI Example:
.. code-block:: bash
salt myminion netbox.create_device edge_router router MX480 Juniper BRU
### Response:
def create_device(name,
role,
model,
manufacturer,
site):
'''
.. versionadded:: 2019.2.0
Create a new device with a name, role, model, manufacturer and site.
All these components need to be already in Netbox.
name
The name of the device, e.g., ``edge_router``
role
String of device role, e.g., ``router``
model
String of device model, e.g., ``MX480``
manufacturer
String of device manufacturer, e.g., ``Juniper``
site
String of device site, e.g., ``BRU``
CLI Example:
.. code-block:: bash
salt myminion netbox.create_device edge_router router MX480 Juniper BRU
'''
try:
nb_role = get_('dcim', 'device-roles', name=role)
if not nb_role:
return False
nb_type = get_('dcim', 'device-types', model=model)
if not nb_type:
return False
nb_site = get_('dcim', 'sites', name=site)
if not nb_site:
return False
status = {'label': "Active", 'value': 1}
except RequestError as e:
log.error('%s, %s, %s', e.req.request.headers, e.request_body, e.error)
return False
payload = {'name': name, 'display_name': name, 'slug': slugify(name), 'device_type': nb_type['id'],
'device_role': nb_role['id'], 'site': nb_site['id']}
new_dev = _add('dcim', 'devices', payload)
if new_dev:
return {'dcim': {'devices': payload}}
else:
return False |
def group_membership_show(self, id, **kwargs):
"https://developer.zendesk.com/rest_api/docs/core/group_memberships#show-membership"
api_path = "/api/v2/group_memberships/{id}.json"
api_path = api_path.format(id=id)
return self.call(api_path, **kwargs) | https://developer.zendesk.com/rest_api/docs/core/group_memberships#show-membership | Below is the the instruction that describes the task:
### Input:
https://developer.zendesk.com/rest_api/docs/core/group_memberships#show-membership
### Response:
def group_membership_show(self, id, **kwargs):
"https://developer.zendesk.com/rest_api/docs/core/group_memberships#show-membership"
api_path = "/api/v2/group_memberships/{id}.json"
api_path = api_path.format(id=id)
return self.call(api_path, **kwargs) |
def image_predict(self, X):
"""
Predicts class label for the entire image.
Parameters:
-----------
X: array, shape = [n_samples, n_pixels_y, n_pixels_x, n_bands]
Array of training images
y: array, shape = [n_samples] or [n_samples, n_pixels_y, n_pixels_x]
Target labels or masks.
"""
self._check_image(X)
patches, patches_shape = self._to_patches(X)
predictions = self.classifier.predict(self._transform_input(patches))
image_predictions = predictions.reshape(patches_shape[0:3])
image_results = np.zeros((self._samples,) + self._image_size)
nx, ny = self.receptive_field
row_steps = self._image_size[0] // nx
col_steps = self._image_size[1] // ny
# how can this be optimised?
for i, j, k in itertools.product(range(row_steps), range(col_steps), range(self._samples)):
image_results[k, nx * i:nx * (i + 1), ny * j:ny * (j + 1)] = image_predictions[k, i, j]
return image_results | Predicts class label for the entire image.
Parameters:
-----------
X: array, shape = [n_samples, n_pixels_y, n_pixels_x, n_bands]
Array of training images
y: array, shape = [n_samples] or [n_samples, n_pixels_y, n_pixels_x]
Target labels or masks. | Below is the the instruction that describes the task:
### Input:
Predicts class label for the entire image.
Parameters:
-----------
X: array, shape = [n_samples, n_pixels_y, n_pixels_x, n_bands]
Array of training images
y: array, shape = [n_samples] or [n_samples, n_pixels_y, n_pixels_x]
Target labels or masks.
### Response:
def image_predict(self, X):
"""
Predicts class label for the entire image.
Parameters:
-----------
X: array, shape = [n_samples, n_pixels_y, n_pixels_x, n_bands]
Array of training images
y: array, shape = [n_samples] or [n_samples, n_pixels_y, n_pixels_x]
Target labels or masks.
"""
self._check_image(X)
patches, patches_shape = self._to_patches(X)
predictions = self.classifier.predict(self._transform_input(patches))
image_predictions = predictions.reshape(patches_shape[0:3])
image_results = np.zeros((self._samples,) + self._image_size)
nx, ny = self.receptive_field
row_steps = self._image_size[0] // nx
col_steps = self._image_size[1] // ny
# how can this be optimised?
for i, j, k in itertools.product(range(row_steps), range(col_steps), range(self._samples)):
image_results[k, nx * i:nx * (i + 1), ny * j:ny * (j + 1)] = image_predictions[k, i, j]
return image_results |
def last_continuous_indexes_slice(ol,value):
'''
from elist.elist import *
ol = [1,"a","a",2,3,"a",4,"a","a","a",5]
last_continuous_indexes_slice(ol,"a")
'''
length = ol.__len__()
end = None
slice = []
for i in range(length-1,-1,-1):
if(ol[i]==value):
end = i
break
else:
pass
if(end == None):
return(None)
else:
slice.append(end)
for i in range(end-1,-1,-1):
if(ol[i]==value):
slice.append(i)
else:
break
slice.reverse()
return(slice) | from elist.elist import *
ol = [1,"a","a",2,3,"a",4,"a","a","a",5]
last_continuous_indexes_slice(ol,"a") | Below is the the instruction that describes the task:
### Input:
from elist.elist import *
ol = [1,"a","a",2,3,"a",4,"a","a","a",5]
last_continuous_indexes_slice(ol,"a")
### Response:
def last_continuous_indexes_slice(ol,value):
'''
from elist.elist import *
ol = [1,"a","a",2,3,"a",4,"a","a","a",5]
last_continuous_indexes_slice(ol,"a")
'''
length = ol.__len__()
end = None
slice = []
for i in range(length-1,-1,-1):
if(ol[i]==value):
end = i
break
else:
pass
if(end == None):
return(None)
else:
slice.append(end)
for i in range(end-1,-1,-1):
if(ol[i]==value):
slice.append(i)
else:
break
slice.reverse()
return(slice) |
def get_relevant_versions(self, package_name: str):
"""Return a tuple: (latest release, latest stable)
If there are different, it means the latest is not a stable
"""
versions = self.get_ordered_versions(package_name)
pre_releases = [version for version in versions if not version.is_prerelease]
return (
versions[-1],
pre_releases[-1]
) | Return a tuple: (latest release, latest stable)
If there are different, it means the latest is not a stable | Below is the the instruction that describes the task:
### Input:
Return a tuple: (latest release, latest stable)
If there are different, it means the latest is not a stable
### Response:
def get_relevant_versions(self, package_name: str):
"""Return a tuple: (latest release, latest stable)
If there are different, it means the latest is not a stable
"""
versions = self.get_ordered_versions(package_name)
pre_releases = [version for version in versions if not version.is_prerelease]
return (
versions[-1],
pre_releases[-1]
) |
def infographic_header_element(impact_function_name, feature, parent):
"""Get a formatted infographic header sentence for an impact function.
For instance:
* infographic_header_element('flood') -> 'Estimated impact of a flood'
"""
_ = feature, parent # NOQA
string_format = infographic_header['string_format']
if impact_function_name:
header = string_format.format(
impact_function_name=impact_function_name)
return header.capitalize()
return None | Get a formatted infographic header sentence for an impact function.
For instance:
* infographic_header_element('flood') -> 'Estimated impact of a flood' | Below is the the instruction that describes the task:
### Input:
Get a formatted infographic header sentence for an impact function.
For instance:
* infographic_header_element('flood') -> 'Estimated impact of a flood'
### Response:
def infographic_header_element(impact_function_name, feature, parent):
"""Get a formatted infographic header sentence for an impact function.
For instance:
* infographic_header_element('flood') -> 'Estimated impact of a flood'
"""
_ = feature, parent # NOQA
string_format = infographic_header['string_format']
if impact_function_name:
header = string_format.format(
impact_function_name=impact_function_name)
return header.capitalize()
return None |
def clone_with_git(repo_uri, dest_path):
"""Create a clone by cloning a git repository.
Args:
repo_uri: The URI of the git repository to clone.
dest_path: The location to clone to.
"""
log.info('Cloning git repo %s to %s', repo_uri, dest_path)
git.Repo.clone_from(repo_uri, dest_path, depth=1) | Create a clone by cloning a git repository.
Args:
repo_uri: The URI of the git repository to clone.
dest_path: The location to clone to. | Below is the the instruction that describes the task:
### Input:
Create a clone by cloning a git repository.
Args:
repo_uri: The URI of the git repository to clone.
dest_path: The location to clone to.
### Response:
def clone_with_git(repo_uri, dest_path):
"""Create a clone by cloning a git repository.
Args:
repo_uri: The URI of the git repository to clone.
dest_path: The location to clone to.
"""
log.info('Cloning git repo %s to %s', repo_uri, dest_path)
git.Repo.clone_from(repo_uri, dest_path, depth=1) |
def _handleAuth(fn):
''' Decorator to re-try API calls after asking the user for authentication. '''
@functools.wraps(fn)
def wrapped(*args, **kwargs):
# auth, , authenticate users, internal
from yotta.lib import auth
# if yotta is being run noninteractively, then we never retry, but we
# do call auth.authorizeUser, so that a login URL can be displayed:
interactive = globalconf.get('interactive')
try:
return fn(*args, **kwargs)
except requests.exceptions.HTTPError as e:
if e.response.status_code == requests.codes.unauthorized: #pylint: disable=no-member
logger.debug('%s unauthorised', fn)
# any provider is sufficient for registry auth
auth.authorizeUser(provider=None, interactive=interactive)
if interactive:
logger.debug('retrying after authentication...')
return fn(*args, **kwargs)
raise
return wrapped | Decorator to re-try API calls after asking the user for authentication. | Below is the the instruction that describes the task:
### Input:
Decorator to re-try API calls after asking the user for authentication.
### Response:
def _handleAuth(fn):
''' Decorator to re-try API calls after asking the user for authentication. '''
@functools.wraps(fn)
def wrapped(*args, **kwargs):
# auth, , authenticate users, internal
from yotta.lib import auth
# if yotta is being run noninteractively, then we never retry, but we
# do call auth.authorizeUser, so that a login URL can be displayed:
interactive = globalconf.get('interactive')
try:
return fn(*args, **kwargs)
except requests.exceptions.HTTPError as e:
if e.response.status_code == requests.codes.unauthorized: #pylint: disable=no-member
logger.debug('%s unauthorised', fn)
# any provider is sufficient for registry auth
auth.authorizeUser(provider=None, interactive=interactive)
if interactive:
logger.debug('retrying after authentication...')
return fn(*args, **kwargs)
raise
return wrapped |
def _init_stages(self, config, name):
'''Create a list of indirect stages.
`name` should be the name of a config item that holds a list
of names of stages, for instance, ``writers``. This looks up
the names of those stages, then creates and returns the
corresponding list of stage objects. For instance, if the
config says
.. code-block:: yaml
incremental_transforms: [clean_html, clean_visible]
then calling ``self._init_stages(scp_config,
'incremental_transforms')`` will return a list of the two
named stage instances.
:param dict config: `streamcorpus_pipeline` configuration block
:param str name: name of the stage name list entry
:return: list of new stage instances
'''
if name not in config:
return []
return [self.create(stage, config) for stage in config[name]] | Create a list of indirect stages.
`name` should be the name of a config item that holds a list
of names of stages, for instance, ``writers``. This looks up
the names of those stages, then creates and returns the
corresponding list of stage objects. For instance, if the
config says
.. code-block:: yaml
incremental_transforms: [clean_html, clean_visible]
then calling ``self._init_stages(scp_config,
'incremental_transforms')`` will return a list of the two
named stage instances.
:param dict config: `streamcorpus_pipeline` configuration block
:param str name: name of the stage name list entry
:return: list of new stage instances | Below is the the instruction that describes the task:
### Input:
Create a list of indirect stages.
`name` should be the name of a config item that holds a list
of names of stages, for instance, ``writers``. This looks up
the names of those stages, then creates and returns the
corresponding list of stage objects. For instance, if the
config says
.. code-block:: yaml
incremental_transforms: [clean_html, clean_visible]
then calling ``self._init_stages(scp_config,
'incremental_transforms')`` will return a list of the two
named stage instances.
:param dict config: `streamcorpus_pipeline` configuration block
:param str name: name of the stage name list entry
:return: list of new stage instances
### Response:
def _init_stages(self, config, name):
'''Create a list of indirect stages.
`name` should be the name of a config item that holds a list
of names of stages, for instance, ``writers``. This looks up
the names of those stages, then creates and returns the
corresponding list of stage objects. For instance, if the
config says
.. code-block:: yaml
incremental_transforms: [clean_html, clean_visible]
then calling ``self._init_stages(scp_config,
'incremental_transforms')`` will return a list of the two
named stage instances.
:param dict config: `streamcorpus_pipeline` configuration block
:param str name: name of the stage name list entry
:return: list of new stage instances
'''
if name not in config:
return []
return [self.create(stage, config) for stage in config[name]] |
def envelope(self, data, many):
"""Wrap result in envelope."""
if not many:
return data
result = dict(
hits=dict(
hits=data,
total=self.context.get('total', len(data))
)
)
page = self.context.get('page')
if page:
links_pagination_factory = self.context.get(
'links_pagination_factory',
default_links_pagination_factory
)
urlkwargs = self.context.get('urlkwargs', {})
result['links'] = links_pagination_factory(page, urlkwargs)
return result | Wrap result in envelope. | Below is the the instruction that describes the task:
### Input:
Wrap result in envelope.
### Response:
def envelope(self, data, many):
"""Wrap result in envelope."""
if not many:
return data
result = dict(
hits=dict(
hits=data,
total=self.context.get('total', len(data))
)
)
page = self.context.get('page')
if page:
links_pagination_factory = self.context.get(
'links_pagination_factory',
default_links_pagination_factory
)
urlkwargs = self.context.get('urlkwargs', {})
result['links'] = links_pagination_factory(page, urlkwargs)
return result |
def _update_callbacks(self, plot):
"""
Iterates over all subplots and updates existing CustomJS
callbacks with models that were replaced when compositing
subplots into a CompositePlot and sets the plot id to match
the root level bokeh model.
"""
subplots = self.traverse(lambda x: x, [GenericElementPlot])
merged_tools = {t: list(plot.select({'type': TOOL_TYPES[t]}))
for t in self._merged_tools}
for subplot in subplots:
for cb in subplot.callbacks:
for c in cb.callbacks:
for tool, objs in merged_tools.items():
if tool in c.args and objs:
c.args[tool] = objs[0]
if self.top_level:
c.code = c.code.replace('PLACEHOLDER_PLOT_ID', self.id) | Iterates over all subplots and updates existing CustomJS
callbacks with models that were replaced when compositing
subplots into a CompositePlot and sets the plot id to match
the root level bokeh model. | Below is the the instruction that describes the task:
### Input:
Iterates over all subplots and updates existing CustomJS
callbacks with models that were replaced when compositing
subplots into a CompositePlot and sets the plot id to match
the root level bokeh model.
### Response:
def _update_callbacks(self, plot):
"""
Iterates over all subplots and updates existing CustomJS
callbacks with models that were replaced when compositing
subplots into a CompositePlot and sets the plot id to match
the root level bokeh model.
"""
subplots = self.traverse(lambda x: x, [GenericElementPlot])
merged_tools = {t: list(plot.select({'type': TOOL_TYPES[t]}))
for t in self._merged_tools}
for subplot in subplots:
for cb in subplot.callbacks:
for c in cb.callbacks:
for tool, objs in merged_tools.items():
if tool in c.args and objs:
c.args[tool] = objs[0]
if self.top_level:
c.code = c.code.replace('PLACEHOLDER_PLOT_ID', self.id) |
def decode(self, encoded, parentFieldName=''):
""" See the function description in base.py
"""
# Get the scalar values from the underlying scalar encoder
(fieldsDict, fieldNames) = self.encoder.decode(encoded)
if len(fieldsDict) == 0:
return (fieldsDict, fieldNames)
# Expect only 1 field
assert(len(fieldsDict) == 1)
# Get the list of categories the scalar values correspond to and
# generate the description from the category name(s).
(inRanges, inDesc) = fieldsDict.values()[0]
outRanges = []
desc = ""
for (minV, maxV) in inRanges:
minV = int(round(minV))
maxV = int(round(maxV))
outRanges.append((minV, maxV))
while minV <= maxV:
if len(desc) > 0:
desc += ", "
desc += self.indexToCategory[minV]
minV += 1
# Return result
if parentFieldName != '':
fieldName = "%s.%s" % (parentFieldName, self.name)
else:
fieldName = self.name
return ({fieldName: (outRanges, desc)}, [fieldName]) | See the function description in base.py | Below is the the instruction that describes the task:
### Input:
See the function description in base.py
### Response:
def decode(self, encoded, parentFieldName=''):
""" See the function description in base.py
"""
# Get the scalar values from the underlying scalar encoder
(fieldsDict, fieldNames) = self.encoder.decode(encoded)
if len(fieldsDict) == 0:
return (fieldsDict, fieldNames)
# Expect only 1 field
assert(len(fieldsDict) == 1)
# Get the list of categories the scalar values correspond to and
# generate the description from the category name(s).
(inRanges, inDesc) = fieldsDict.values()[0]
outRanges = []
desc = ""
for (minV, maxV) in inRanges:
minV = int(round(minV))
maxV = int(round(maxV))
outRanges.append((minV, maxV))
while minV <= maxV:
if len(desc) > 0:
desc += ", "
desc += self.indexToCategory[minV]
minV += 1
# Return result
if parentFieldName != '':
fieldName = "%s.%s" % (parentFieldName, self.name)
else:
fieldName = self.name
return ({fieldName: (outRanges, desc)}, [fieldName]) |
def read(self, timeout=READ_TIMEOUT, raw=False):
''' Read data from the arm. Data is returned as a latin_1 encoded
string, or raw bytes if 'raw' is True. '''
time.sleep(READ_SLEEP_TIME)
raw_out = self.ser.read(self.ser.in_waiting)
out = raw_out.decode(OUTPUT_ENCODING)
time_waiting = 0
while len(out) == 0 or ending_in(out.strip(OUTPUT_STRIP_CHARS), RESPONSE_END_WORDS) is None:
time.sleep(READ_SLEEP_TIME)
time_waiting += READ_SLEEP_TIME
raw_out += self.ser.read(self.ser.in_waiting)
out = raw_out.decode(OUTPUT_ENCODING)
# TODO how to handle timeouts, if they're now unexpected?
if time_waiting >= timeout:
break
if raw:
return raw_out
return out | Read data from the arm. Data is returned as a latin_1 encoded
string, or raw bytes if 'raw' is True. | Below is the the instruction that describes the task:
### Input:
Read data from the arm. Data is returned as a latin_1 encoded
string, or raw bytes if 'raw' is True.
### Response:
def read(self, timeout=READ_TIMEOUT, raw=False):
''' Read data from the arm. Data is returned as a latin_1 encoded
string, or raw bytes if 'raw' is True. '''
time.sleep(READ_SLEEP_TIME)
raw_out = self.ser.read(self.ser.in_waiting)
out = raw_out.decode(OUTPUT_ENCODING)
time_waiting = 0
while len(out) == 0 or ending_in(out.strip(OUTPUT_STRIP_CHARS), RESPONSE_END_WORDS) is None:
time.sleep(READ_SLEEP_TIME)
time_waiting += READ_SLEEP_TIME
raw_out += self.ser.read(self.ser.in_waiting)
out = raw_out.decode(OUTPUT_ENCODING)
# TODO how to handle timeouts, if they're now unexpected?
if time_waiting >= timeout:
break
if raw:
return raw_out
return out |
def reads_generator_to_protein_sequences_generator(
variant_and_overlapping_reads_generator,
transcript_id_whitelist=None,
protein_sequence_length=PROTEIN_SEQUENCE_LENGTH,
min_alt_rna_reads=MIN_ALT_RNA_READS,
min_variant_sequence_coverage=MIN_VARIANT_SEQUENCE_COVERAGE,
min_transcript_prefix_length=MIN_TRANSCRIPT_PREFIX_LENGTH,
max_transcript_mismatches=MAX_REFERENCE_TRANSCRIPT_MISMATCHES,
include_mismatches_after_variant=INCLUDE_MISMATCHES_AFTER_VARIANT,
max_protein_sequences_per_variant=MAX_PROTEIN_SEQUENCES_PER_VARIANT,
variant_sequence_assembly=VARIANT_SEQUENCE_ASSEMBLY):
""""
Translates each coding variant in a collection to one or more
Translation objects, which are then aggregated into equivalent
ProteinSequence objects.
Parameters
----------
variant_and_overlapping_reads_generator : generator
Yields sequence of varcode.Variant objects paired with sequences
of AlleleRead objects that support that variant.
transcript_id_whitelist : set, optional
If given, expected to be a set of transcript IDs which we should use
for determining the reading frame around a variant. If omitted, then
try to use all overlapping reference transcripts.
protein_sequence_length : int
Try to translate protein sequences of this length, though sometimes
we'll have to return something shorter (depending on the RNAseq data,
and presence of stop codons).
min_alt_rna_reads : int
Drop variant sequences at loci with fewer than this number of reads
supporting the alt allele.
min_variant_sequence_coverage : int
Trim variant sequences to positions supported by at least this number
of RNA reads.
min_transcript_prefix_length : int
Minimum number of bases we need to try matching between the reference
context and variant sequence.
max_transcript_mismatches : int
Don't try to determine the reading frame for a transcript if more
than this number of bases differ.
include_mismatches_after_variant : bool
Include mismatches after the variant locus in the count compared
against max_transcript_mismatches.
max_protein_sequences_per_variant : int
Number of protein sequences to return for each ProteinSequence
variant_cdna_sequence_assembly : bool
If True, then assemble variant cDNA sequences based on overlap of
RNA reads. If False, then variant cDNA sequences must be fully spanned
and contained within RNA reads.
Yields pairs of a Variant and a list of ProteinSequence objects
"""
for (variant, overlapping_reads) in variant_and_overlapping_reads_generator:
overlapping_transcript_ids = [
t.id
for t in variant.transcripts
if t.is_protein_coding
]
_, ref, alt = trim_variant(variant)
overlapping_reads = list(overlapping_reads)
reads_grouped_by_allele = group_reads_by_allele(overlapping_reads)
ref_reads = reads_grouped_by_allele.get(ref, [])
alt_reads = reads_grouped_by_allele.get(alt, [])
translations = translate_variant_reads(
variant=variant,
variant_reads=alt_reads,
transcript_id_whitelist=transcript_id_whitelist,
protein_sequence_length=protein_sequence_length,
min_alt_rna_reads=min_alt_rna_reads,
min_variant_sequence_coverage=min_variant_sequence_coverage,
min_transcript_prefix_length=min_transcript_prefix_length,
max_transcript_mismatches=max_transcript_mismatches,
include_mismatches_after_variant=include_mismatches_after_variant,
variant_sequence_assembly=variant_sequence_assembly)
protein_sequences = []
for (key, equivalent_translations) in groupby(
translations, key_fn=Translation.as_translation_key).items():
# get the variant read names, transcript IDs and gene names for
# protein sequence we're about to construct
alt_reads_supporting_protein_sequence, group_transcript_ids, group_gene_names = \
ProteinSequence._summarize_translations(equivalent_translations)
logger.info(
"%s: %s alt reads supporting protein sequence (gene names = %s)",
key,
len(alt_reads_supporting_protein_sequence),
group_gene_names)
protein_sequence = ProteinSequence.from_translation_key(
translation_key=key,
translations=equivalent_translations,
overlapping_reads=overlapping_reads,
alt_reads=alt_reads,
ref_reads=ref_reads,
alt_reads_supporting_protein_sequence=alt_reads_supporting_protein_sequence,
transcripts_supporting_protein_sequence=group_transcript_ids,
transcripts_overlapping_variant=overlapping_transcript_ids,
gene=list(group_gene_names))
logger.info("%s: protein sequence = %s" % (key, protein_sequence.amino_acids))
protein_sequences.append(protein_sequence)
# sort protein sequences before returning the top results
protein_sequences = sort_protein_sequences(protein_sequences)
yield variant, protein_sequences[:max_protein_sequences_per_variant] | Translates each coding variant in a collection to one or more
Translation objects, which are then aggregated into equivalent
ProteinSequence objects.
Parameters
----------
variant_and_overlapping_reads_generator : generator
Yields sequence of varcode.Variant objects paired with sequences
of AlleleRead objects that support that variant.
transcript_id_whitelist : set, optional
If given, expected to be a set of transcript IDs which we should use
for determining the reading frame around a variant. If omitted, then
try to use all overlapping reference transcripts.
protein_sequence_length : int
Try to translate protein sequences of this length, though sometimes
we'll have to return something shorter (depending on the RNAseq data,
and presence of stop codons).
min_alt_rna_reads : int
Drop variant sequences at loci with fewer than this number of reads
supporting the alt allele.
min_variant_sequence_coverage : int
Trim variant sequences to positions supported by at least this number
of RNA reads.
min_transcript_prefix_length : int
Minimum number of bases we need to try matching between the reference
context and variant sequence.
max_transcript_mismatches : int
Don't try to determine the reading frame for a transcript if more
than this number of bases differ.
include_mismatches_after_variant : bool
Include mismatches after the variant locus in the count compared
against max_transcript_mismatches.
max_protein_sequences_per_variant : int
Number of protein sequences to return for each ProteinSequence
variant_cdna_sequence_assembly : bool
If True, then assemble variant cDNA sequences based on overlap of
RNA reads. If False, then variant cDNA sequences must be fully spanned
and contained within RNA reads.
Yields pairs of a Variant and a list of ProteinSequence objects | Below is the the instruction that describes the task:
### Input:
Translates each coding variant in a collection to one or more
Translation objects, which are then aggregated into equivalent
ProteinSequence objects.
Parameters
----------
variant_and_overlapping_reads_generator : generator
Yields sequence of varcode.Variant objects paired with sequences
of AlleleRead objects that support that variant.
transcript_id_whitelist : set, optional
If given, expected to be a set of transcript IDs which we should use
for determining the reading frame around a variant. If omitted, then
try to use all overlapping reference transcripts.
protein_sequence_length : int
Try to translate protein sequences of this length, though sometimes
we'll have to return something shorter (depending on the RNAseq data,
and presence of stop codons).
min_alt_rna_reads : int
Drop variant sequences at loci with fewer than this number of reads
supporting the alt allele.
min_variant_sequence_coverage : int
Trim variant sequences to positions supported by at least this number
of RNA reads.
min_transcript_prefix_length : int
Minimum number of bases we need to try matching between the reference
context and variant sequence.
max_transcript_mismatches : int
Don't try to determine the reading frame for a transcript if more
than this number of bases differ.
include_mismatches_after_variant : bool
Include mismatches after the variant locus in the count compared
against max_transcript_mismatches.
max_protein_sequences_per_variant : int
Number of protein sequences to return for each ProteinSequence
variant_cdna_sequence_assembly : bool
If True, then assemble variant cDNA sequences based on overlap of
RNA reads. If False, then variant cDNA sequences must be fully spanned
and contained within RNA reads.
Yields pairs of a Variant and a list of ProteinSequence objects
### Response:
def reads_generator_to_protein_sequences_generator(
variant_and_overlapping_reads_generator,
transcript_id_whitelist=None,
protein_sequence_length=PROTEIN_SEQUENCE_LENGTH,
min_alt_rna_reads=MIN_ALT_RNA_READS,
min_variant_sequence_coverage=MIN_VARIANT_SEQUENCE_COVERAGE,
min_transcript_prefix_length=MIN_TRANSCRIPT_PREFIX_LENGTH,
max_transcript_mismatches=MAX_REFERENCE_TRANSCRIPT_MISMATCHES,
include_mismatches_after_variant=INCLUDE_MISMATCHES_AFTER_VARIANT,
max_protein_sequences_per_variant=MAX_PROTEIN_SEQUENCES_PER_VARIANT,
variant_sequence_assembly=VARIANT_SEQUENCE_ASSEMBLY):
""""
Translates each coding variant in a collection to one or more
Translation objects, which are then aggregated into equivalent
ProteinSequence objects.
Parameters
----------
variant_and_overlapping_reads_generator : generator
Yields sequence of varcode.Variant objects paired with sequences
of AlleleRead objects that support that variant.
transcript_id_whitelist : set, optional
If given, expected to be a set of transcript IDs which we should use
for determining the reading frame around a variant. If omitted, then
try to use all overlapping reference transcripts.
protein_sequence_length : int
Try to translate protein sequences of this length, though sometimes
we'll have to return something shorter (depending on the RNAseq data,
and presence of stop codons).
min_alt_rna_reads : int
Drop variant sequences at loci with fewer than this number of reads
supporting the alt allele.
min_variant_sequence_coverage : int
Trim variant sequences to positions supported by at least this number
of RNA reads.
min_transcript_prefix_length : int
Minimum number of bases we need to try matching between the reference
context and variant sequence.
max_transcript_mismatches : int
Don't try to determine the reading frame for a transcript if more
than this number of bases differ.
include_mismatches_after_variant : bool
Include mismatches after the variant locus in the count compared
against max_transcript_mismatches.
max_protein_sequences_per_variant : int
Number of protein sequences to return for each ProteinSequence
variant_cdna_sequence_assembly : bool
If True, then assemble variant cDNA sequences based on overlap of
RNA reads. If False, then variant cDNA sequences must be fully spanned
and contained within RNA reads.
Yields pairs of a Variant and a list of ProteinSequence objects
"""
for (variant, overlapping_reads) in variant_and_overlapping_reads_generator:
overlapping_transcript_ids = [
t.id
for t in variant.transcripts
if t.is_protein_coding
]
_, ref, alt = trim_variant(variant)
overlapping_reads = list(overlapping_reads)
reads_grouped_by_allele = group_reads_by_allele(overlapping_reads)
ref_reads = reads_grouped_by_allele.get(ref, [])
alt_reads = reads_grouped_by_allele.get(alt, [])
translations = translate_variant_reads(
variant=variant,
variant_reads=alt_reads,
transcript_id_whitelist=transcript_id_whitelist,
protein_sequence_length=protein_sequence_length,
min_alt_rna_reads=min_alt_rna_reads,
min_variant_sequence_coverage=min_variant_sequence_coverage,
min_transcript_prefix_length=min_transcript_prefix_length,
max_transcript_mismatches=max_transcript_mismatches,
include_mismatches_after_variant=include_mismatches_after_variant,
variant_sequence_assembly=variant_sequence_assembly)
protein_sequences = []
for (key, equivalent_translations) in groupby(
translations, key_fn=Translation.as_translation_key).items():
# get the variant read names, transcript IDs and gene names for
# protein sequence we're about to construct
alt_reads_supporting_protein_sequence, group_transcript_ids, group_gene_names = \
ProteinSequence._summarize_translations(equivalent_translations)
logger.info(
"%s: %s alt reads supporting protein sequence (gene names = %s)",
key,
len(alt_reads_supporting_protein_sequence),
group_gene_names)
protein_sequence = ProteinSequence.from_translation_key(
translation_key=key,
translations=equivalent_translations,
overlapping_reads=overlapping_reads,
alt_reads=alt_reads,
ref_reads=ref_reads,
alt_reads_supporting_protein_sequence=alt_reads_supporting_protein_sequence,
transcripts_supporting_protein_sequence=group_transcript_ids,
transcripts_overlapping_variant=overlapping_transcript_ids,
gene=list(group_gene_names))
logger.info("%s: protein sequence = %s" % (key, protein_sequence.amino_acids))
protein_sequences.append(protein_sequence)
# sort protein sequences before returning the top results
protein_sequences = sort_protein_sequences(protein_sequences)
yield variant, protein_sequences[:max_protein_sequences_per_variant] |
def create_spectra_lookup(lookup, fn_spectra):
"""Stores all spectra rt, injection time, and scan nr in db"""
to_store = []
mzmlmap = lookup.get_mzmlfile_map()
for fn, spectrum in fn_spectra:
spec_id = '{}_{}'.format(mzmlmap[fn], spectrum['scan'])
mzml_rt = round(float(spectrum['rt']), 12)
mzml_iit = round(float(spectrum['iit']), 12)
mz = float(spectrum['mz'])
to_store.append((spec_id, mzmlmap[fn], spectrum['scan'],
spectrum['charge'], mz, mzml_rt, mzml_iit))
if len(to_store) == DB_STORE_CHUNK:
lookup.store_mzmls(to_store)
to_store = []
lookup.store_mzmls(to_store)
lookup.index_mzml() | Stores all spectra rt, injection time, and scan nr in db | Below is the the instruction that describes the task:
### Input:
Stores all spectra rt, injection time, and scan nr in db
### Response:
def create_spectra_lookup(lookup, fn_spectra):
"""Stores all spectra rt, injection time, and scan nr in db"""
to_store = []
mzmlmap = lookup.get_mzmlfile_map()
for fn, spectrum in fn_spectra:
spec_id = '{}_{}'.format(mzmlmap[fn], spectrum['scan'])
mzml_rt = round(float(spectrum['rt']), 12)
mzml_iit = round(float(spectrum['iit']), 12)
mz = float(spectrum['mz'])
to_store.append((spec_id, mzmlmap[fn], spectrum['scan'],
spectrum['charge'], mz, mzml_rt, mzml_iit))
if len(to_store) == DB_STORE_CHUNK:
lookup.store_mzmls(to_store)
to_store = []
lookup.store_mzmls(to_store)
lookup.index_mzml() |
def param (self, param, kwargs, default_value=False):
"""gets a param from kwargs, or uses a default_value. if found, it's
removed from kwargs"""
if param in kwargs:
value= kwargs[param]
del kwargs[param]
else:
value= default_value
setattr (self, param, value) | gets a param from kwargs, or uses a default_value. if found, it's
removed from kwargs | Below is the the instruction that describes the task:
### Input:
gets a param from kwargs, or uses a default_value. if found, it's
removed from kwargs
### Response:
def param (self, param, kwargs, default_value=False):
"""gets a param from kwargs, or uses a default_value. if found, it's
removed from kwargs"""
if param in kwargs:
value= kwargs[param]
del kwargs[param]
else:
value= default_value
setattr (self, param, value) |
def populateWidget(self):
"""
Populate the widget using data stored in the state
object. The order in which the individual widgets are populated
follows their arrangment.
The models are recreated every time the function is called.
This might seem to be an overkill, but in practice it is very fast.
Don't try to move the model creation outside this function; is not
worth the effort, and there is nothing to gain from it.
"""
self.elementComboBox.setItems(self.state._elements, self.state.element)
self.chargeComboBox.setItems(self.state._charges, self.state.charge)
self.symmetryComboBox.setItems(
self.state._symmetries, self.state.symmetry)
self.experimentComboBox.setItems(
self.state._experiments, self.state.experiment)
self.edgeComboBox.setItems(self.state._edges, self.state.edge)
self.temperatureLineEdit.setValue(self.state.temperature)
self.magneticFieldLineEdit.setValue(self.state.magneticField)
self.axesTabWidget.setTabText(0, str(self.state.xLabel))
self.xMinLineEdit.setValue(self.state.xMin)
self.xMaxLineEdit.setValue(self.state.xMax)
self.xNPointsLineEdit.setValue(self.state.xNPoints)
self.xLorentzianLineEdit.setList(self.state.xLorentzian)
self.xGaussianLineEdit.setValue(self.state.xGaussian)
self.k1LineEdit.setVector(self.state.k1)
self.eps11LineEdit.setVector(self.state.eps11)
self.eps12LineEdit.setVector(self.state.eps12)
if self.state.experiment in ['RIXS', ]:
if self.axesTabWidget.count() == 1:
tab = self.axesTabWidget.findChild(QWidget, 'yTab')
self.axesTabWidget.addTab(tab, tab.objectName())
self.axesTabWidget.setTabText(1, self.state.yLabel)
self.yMinLineEdit.setValue(self.state.yMin)
self.yMaxLineEdit.setValue(self.state.yMax)
self.yNPointsLineEdit.setValue(self.state.yNPoints)
self.yLorentzianLineEdit.setList(self.state.yLorentzian)
self.yGaussianLineEdit.setValue(self.state.yGaussian)
self.k2LineEdit.setVector(self.state.k2)
self.eps21LineEdit.setVector(self.state.eps21)
self.eps22LineEdit.setVector(self.state.eps22)
text = self.eps11Label.text()
text = re.sub('>[vσ]', '>σ', text)
self.eps11Label.setText(text)
text = self.eps12Label.text()
text = re.sub('>[hπ]', '>π', text)
self.eps12Label.setText(text)
else:
self.axesTabWidget.removeTab(1)
text = self.eps11Label.text()
text = re.sub('>[vσ]', '>v', text)
self.eps11Label.setText(text)
text = self.eps12Label.text()
text = re.sub('>[hπ]', '>h', text)
self.eps12Label.setText(text)
# Create the spectra selection model.
self.spectraModel = SpectraModel(parent=self)
self.spectraModel.setModelData(
self.state.spectra.toCalculate,
self.state.spectra.toCalculateChecked)
self.spectraModel.checkStateChanged.connect(
self.updateSpectraCheckState)
self.spectraListView.setModel(self.spectraModel)
self.spectraListView.selectionModel().setCurrentIndex(
self.spectraModel.index(0, 0), QItemSelectionModel.Select)
self.fkLineEdit.setValue(self.state.fk)
self.gkLineEdit.setValue(self.state.gk)
self.zetaLineEdit.setValue(self.state.zeta)
# Create the Hamiltonian model.
self.hamiltonianModel = HamiltonianModel(parent=self)
self.hamiltonianModel.setModelData(self.state.hamiltonianData)
self.hamiltonianModel.setNodesCheckState(self.state.hamiltonianState)
if self.syncParametersCheckBox.isChecked():
self.hamiltonianModel.setSyncState(True)
else:
self.hamiltonianModel.setSyncState(False)
self.hamiltonianModel.dataChanged.connect(self.updateHamiltonianData)
self.hamiltonianModel.itemCheckStateChanged.connect(
self.updateHamiltonianNodeCheckState)
# Assign the Hamiltonian model to the Hamiltonian terms view.
self.hamiltonianTermsView.setModel(self.hamiltonianModel)
self.hamiltonianTermsView.selectionModel().setCurrentIndex(
self.hamiltonianModel.index(0, 0), QItemSelectionModel.Select)
self.hamiltonianTermsView.selectionModel().selectionChanged.connect(
self.selectedHamiltonianTermChanged)
# Assign the Hamiltonian model to the Hamiltonian parameters view.
self.hamiltonianParametersView.setModel(self.hamiltonianModel)
self.hamiltonianParametersView.expandAll()
self.hamiltonianParametersView.resizeAllColumnsToContents()
self.hamiltonianParametersView.setColumnWidth(0, 130)
self.hamiltonianParametersView.setRootIndex(
self.hamiltonianTermsView.currentIndex())
self.nPsisLineEdit.setValue(self.state.nPsis)
self.nPsisAutoCheckBox.setChecked(self.state.nPsisAuto)
self.nConfigurationsLineEdit.setValue(self.state.nConfigurations)
self.nConfigurationsLineEdit.setEnabled(False)
name = '{}-Ligands Hybridization'.format(self.state.block)
for termName in self.state.hamiltonianData:
if name in termName:
termState = self.state.hamiltonianState[termName]
if termState == 0:
continue
else:
self.nConfigurationsLineEdit.setEnabled(True)
if not hasattr(self, 'resultsModel'):
# Create the results model.
self.resultsModel = ResultsModel(parent=self)
self.resultsModel.itemNameChanged.connect(
self.updateCalculationName)
self.resultsModel.itemCheckStateChanged.connect(
self.updatePlotWidget)
self.resultsModel.dataChanged.connect(self.updatePlotWidget)
self.resultsModel.dataChanged.connect(self.updateResultsView)
# Assign the results model to the results view.
self.resultsView.setModel(self.resultsModel)
self.resultsView.selectionModel().selectionChanged.connect(
self.selectedResultsChanged)
self.resultsView.resizeColumnsToContents()
self.resultsView.horizontalHeader().setSectionsMovable(False)
self.resultsView.horizontalHeader().setSectionsClickable(False)
if sys.platform == 'darwin':
self.resultsView.horizontalHeader().setMaximumHeight(17)
# Add a context menu to the view.
self.resultsView.setContextMenuPolicy(Qt.CustomContextMenu)
self.resultsView.customContextMenuRequested[QPoint].connect(
self.showResultsContextMenu)
if not hasattr(self, 'resultDetailsDialog'):
self.resultDetailsDialog = QuantyResultDetailsDialog(parent=self)
self.updateMainWindowTitle(self.state.baseName) | Populate the widget using data stored in the state
object. The order in which the individual widgets are populated
follows their arrangment.
The models are recreated every time the function is called.
This might seem to be an overkill, but in practice it is very fast.
Don't try to move the model creation outside this function; is not
worth the effort, and there is nothing to gain from it. | Below is the the instruction that describes the task:
### Input:
Populate the widget using data stored in the state
object. The order in which the individual widgets are populated
follows their arrangment.
The models are recreated every time the function is called.
This might seem to be an overkill, but in practice it is very fast.
Don't try to move the model creation outside this function; is not
worth the effort, and there is nothing to gain from it.
### Response:
def populateWidget(self):
"""
Populate the widget using data stored in the state
object. The order in which the individual widgets are populated
follows their arrangment.
The models are recreated every time the function is called.
This might seem to be an overkill, but in practice it is very fast.
Don't try to move the model creation outside this function; is not
worth the effort, and there is nothing to gain from it.
"""
self.elementComboBox.setItems(self.state._elements, self.state.element)
self.chargeComboBox.setItems(self.state._charges, self.state.charge)
self.symmetryComboBox.setItems(
self.state._symmetries, self.state.symmetry)
self.experimentComboBox.setItems(
self.state._experiments, self.state.experiment)
self.edgeComboBox.setItems(self.state._edges, self.state.edge)
self.temperatureLineEdit.setValue(self.state.temperature)
self.magneticFieldLineEdit.setValue(self.state.magneticField)
self.axesTabWidget.setTabText(0, str(self.state.xLabel))
self.xMinLineEdit.setValue(self.state.xMin)
self.xMaxLineEdit.setValue(self.state.xMax)
self.xNPointsLineEdit.setValue(self.state.xNPoints)
self.xLorentzianLineEdit.setList(self.state.xLorentzian)
self.xGaussianLineEdit.setValue(self.state.xGaussian)
self.k1LineEdit.setVector(self.state.k1)
self.eps11LineEdit.setVector(self.state.eps11)
self.eps12LineEdit.setVector(self.state.eps12)
if self.state.experiment in ['RIXS', ]:
if self.axesTabWidget.count() == 1:
tab = self.axesTabWidget.findChild(QWidget, 'yTab')
self.axesTabWidget.addTab(tab, tab.objectName())
self.axesTabWidget.setTabText(1, self.state.yLabel)
self.yMinLineEdit.setValue(self.state.yMin)
self.yMaxLineEdit.setValue(self.state.yMax)
self.yNPointsLineEdit.setValue(self.state.yNPoints)
self.yLorentzianLineEdit.setList(self.state.yLorentzian)
self.yGaussianLineEdit.setValue(self.state.yGaussian)
self.k2LineEdit.setVector(self.state.k2)
self.eps21LineEdit.setVector(self.state.eps21)
self.eps22LineEdit.setVector(self.state.eps22)
text = self.eps11Label.text()
text = re.sub('>[vσ]', '>σ', text)
self.eps11Label.setText(text)
text = self.eps12Label.text()
text = re.sub('>[hπ]', '>π', text)
self.eps12Label.setText(text)
else:
self.axesTabWidget.removeTab(1)
text = self.eps11Label.text()
text = re.sub('>[vσ]', '>v', text)
self.eps11Label.setText(text)
text = self.eps12Label.text()
text = re.sub('>[hπ]', '>h', text)
self.eps12Label.setText(text)
# Create the spectra selection model.
self.spectraModel = SpectraModel(parent=self)
self.spectraModel.setModelData(
self.state.spectra.toCalculate,
self.state.spectra.toCalculateChecked)
self.spectraModel.checkStateChanged.connect(
self.updateSpectraCheckState)
self.spectraListView.setModel(self.spectraModel)
self.spectraListView.selectionModel().setCurrentIndex(
self.spectraModel.index(0, 0), QItemSelectionModel.Select)
self.fkLineEdit.setValue(self.state.fk)
self.gkLineEdit.setValue(self.state.gk)
self.zetaLineEdit.setValue(self.state.zeta)
# Create the Hamiltonian model.
self.hamiltonianModel = HamiltonianModel(parent=self)
self.hamiltonianModel.setModelData(self.state.hamiltonianData)
self.hamiltonianModel.setNodesCheckState(self.state.hamiltonianState)
if self.syncParametersCheckBox.isChecked():
self.hamiltonianModel.setSyncState(True)
else:
self.hamiltonianModel.setSyncState(False)
self.hamiltonianModel.dataChanged.connect(self.updateHamiltonianData)
self.hamiltonianModel.itemCheckStateChanged.connect(
self.updateHamiltonianNodeCheckState)
# Assign the Hamiltonian model to the Hamiltonian terms view.
self.hamiltonianTermsView.setModel(self.hamiltonianModel)
self.hamiltonianTermsView.selectionModel().setCurrentIndex(
self.hamiltonianModel.index(0, 0), QItemSelectionModel.Select)
self.hamiltonianTermsView.selectionModel().selectionChanged.connect(
self.selectedHamiltonianTermChanged)
# Assign the Hamiltonian model to the Hamiltonian parameters view.
self.hamiltonianParametersView.setModel(self.hamiltonianModel)
self.hamiltonianParametersView.expandAll()
self.hamiltonianParametersView.resizeAllColumnsToContents()
self.hamiltonianParametersView.setColumnWidth(0, 130)
self.hamiltonianParametersView.setRootIndex(
self.hamiltonianTermsView.currentIndex())
self.nPsisLineEdit.setValue(self.state.nPsis)
self.nPsisAutoCheckBox.setChecked(self.state.nPsisAuto)
self.nConfigurationsLineEdit.setValue(self.state.nConfigurations)
self.nConfigurationsLineEdit.setEnabled(False)
name = '{}-Ligands Hybridization'.format(self.state.block)
for termName in self.state.hamiltonianData:
if name in termName:
termState = self.state.hamiltonianState[termName]
if termState == 0:
continue
else:
self.nConfigurationsLineEdit.setEnabled(True)
if not hasattr(self, 'resultsModel'):
# Create the results model.
self.resultsModel = ResultsModel(parent=self)
self.resultsModel.itemNameChanged.connect(
self.updateCalculationName)
self.resultsModel.itemCheckStateChanged.connect(
self.updatePlotWidget)
self.resultsModel.dataChanged.connect(self.updatePlotWidget)
self.resultsModel.dataChanged.connect(self.updateResultsView)
# Assign the results model to the results view.
self.resultsView.setModel(self.resultsModel)
self.resultsView.selectionModel().selectionChanged.connect(
self.selectedResultsChanged)
self.resultsView.resizeColumnsToContents()
self.resultsView.horizontalHeader().setSectionsMovable(False)
self.resultsView.horizontalHeader().setSectionsClickable(False)
if sys.platform == 'darwin':
self.resultsView.horizontalHeader().setMaximumHeight(17)
# Add a context menu to the view.
self.resultsView.setContextMenuPolicy(Qt.CustomContextMenu)
self.resultsView.customContextMenuRequested[QPoint].connect(
self.showResultsContextMenu)
if not hasattr(self, 'resultDetailsDialog'):
self.resultDetailsDialog = QuantyResultDetailsDialog(parent=self)
self.updateMainWindowTitle(self.state.baseName) |
def _configureShortcuts(self):
'''Add keyboard shortcuts to navigate the filesystem.'''
self._upShortcut = QtGui.QShortcut(
QtGui.QKeySequence('Backspace'), self
)
self._upShortcut.setAutoRepeat(False)
        self._upShortcut.activated.connect(self._onNavigateUpButtonClicked) | Add keyboard shortcuts to navigate the filesystem. | Below is the instruction that describes the task:
### Input:
Add keyboard shortcuts to navigate the filesystem.
### Response:
def _configureShortcuts(self):
'''Add keyboard shortcuts to navigate the filesystem.'''
self._upShortcut = QtGui.QShortcut(
QtGui.QKeySequence('Backspace'), self
)
self._upShortcut.setAutoRepeat(False)
self._upShortcut.activated.connect(self._onNavigateUpButtonClicked) |
def _commit_timer_stopped(self, lCall):
"""We're shutting down, clean up our looping call..."""
if self._commit_looper is not lCall:
log.warning('_commit_timer_stopped with wrong timer:%s not:%s',
lCall, self._commit_looper)
else:
log.debug('_commit_timer_stopped: %s %s', lCall,
self._commit_looper)
self._commit_looper = None
            self._commit_looper_d = None | We're shutting down, clean up our looping call... | Below is the instruction that describes the task:
### Input:
We're shutting down, clean up our looping call...
### Response:
def _commit_timer_stopped(self, lCall):
"""We're shutting down, clean up our looping call..."""
if self._commit_looper is not lCall:
log.warning('_commit_timer_stopped with wrong timer:%s not:%s',
lCall, self._commit_looper)
else:
log.debug('_commit_timer_stopped: %s %s', lCall,
self._commit_looper)
self._commit_looper = None
self._commit_looper_d = None |
def pication(rings, pos_charged, protcharged):
"""Return all pi-Cation interaction between aromatic rings and positively charged groups.
For tertiary and quaternary amines, check also the angle between the ring and the nitrogen.
"""
data = namedtuple(
'pication', 'ring charge distance offset type restype resnr reschain restype_l resnr_l reschain_l protcharged')
pairings = []
if len(rings) == 0 or len(pos_charged) == 0:
return pairings
for ring in rings:
c = ring.center
for p in pos_charged:
d = euclidean3d(c, p.center)
# Project the center of charge into the ring and measure distance to ring center
proj = projection(ring.normal, ring.center, p.center)
offset = euclidean3d(proj, ring.center)
if not config.MIN_DIST < d < config.PICATION_DIST_MAX or not offset < config.PISTACK_OFFSET_MAX:
continue
if type(p).__name__ == 'lcharge' and p.fgroup == 'tertamine':
# Special case here if the ligand has a tertiary amine, check an additional angle
# Otherwise, we might have have a pi-cation interaction 'through' the ligand
n_atoms = [a_neighbor for a_neighbor in OBAtomAtomIter(p.atoms[0].OBAtom)]
n_atoms_coords = [(a.x(), a.y(), a.z()) for a in n_atoms]
amine_normal = np.cross(vector(n_atoms_coords[0], n_atoms_coords[1]),
vector(n_atoms_coords[2], n_atoms_coords[0]))
b = vecangle(ring.normal, amine_normal)
# Smallest of two angles, depending on direction of normal
a = min(b, 180 - b if not 180 - b < 0 else b)
if not a > 30.0:
resnr, restype = whichresnumber(ring.atoms[0]), whichrestype(ring.atoms[0])
reschain = whichchain(ring.atoms[0])
resnr_l, restype_l = whichresnumber(p.orig_atoms[0]), whichrestype(p.orig_atoms[0])
reschain_l = whichchain(p.orig_atoms[0])
contact = data(ring=ring, charge=p, distance=d, offset=offset, type='regular',
restype=restype, resnr=resnr, reschain=reschain,
restype_l=restype_l, resnr_l=resnr_l, reschain_l=reschain_l,
protcharged=protcharged)
pairings.append(contact)
break
resnr = whichresnumber(p.atoms[0]) if protcharged else whichresnumber(ring.atoms[0])
resnr_l = whichresnumber(ring.orig_atoms[0]) if protcharged else whichresnumber(p.orig_atoms[0])
restype = whichrestype(p.atoms[0]) if protcharged else whichrestype(ring.atoms[0])
restype_l = whichrestype(ring.orig_atoms[0]) if protcharged else whichrestype(p.orig_atoms[0])
reschain = whichchain(p.atoms[0]) if protcharged else whichchain(ring.atoms[0])
reschain_l = whichchain(ring.orig_atoms[0]) if protcharged else whichchain(p.orig_atoms[0])
contact = data(ring=ring, charge=p, distance=d, offset=offset, type='regular', restype=restype,
resnr=resnr, reschain=reschain, restype_l=restype_l, resnr_l=resnr_l,
reschain_l=reschain_l, protcharged=protcharged)
pairings.append(contact)
return filter_contacts(pairings) | Return all pi-Cation interaction between aromatic rings and positively charged groups.
    For tertiary and quaternary amines, check also the angle between the ring and the nitrogen. | Below is the instruction that describes the task:
### Input:
Return all pi-Cation interaction between aromatic rings and positively charged groups.
For tertiary and quaternary amines, check also the angle between the ring and the nitrogen.
### Response:
def pication(rings, pos_charged, protcharged):
"""Return all pi-Cation interaction between aromatic rings and positively charged groups.
For tertiary and quaternary amines, check also the angle between the ring and the nitrogen.
"""
data = namedtuple(
'pication', 'ring charge distance offset type restype resnr reschain restype_l resnr_l reschain_l protcharged')
pairings = []
if len(rings) == 0 or len(pos_charged) == 0:
return pairings
for ring in rings:
c = ring.center
for p in pos_charged:
d = euclidean3d(c, p.center)
# Project the center of charge into the ring and measure distance to ring center
proj = projection(ring.normal, ring.center, p.center)
offset = euclidean3d(proj, ring.center)
if not config.MIN_DIST < d < config.PICATION_DIST_MAX or not offset < config.PISTACK_OFFSET_MAX:
continue
if type(p).__name__ == 'lcharge' and p.fgroup == 'tertamine':
# Special case here if the ligand has a tertiary amine, check an additional angle
# Otherwise, we might have have a pi-cation interaction 'through' the ligand
n_atoms = [a_neighbor for a_neighbor in OBAtomAtomIter(p.atoms[0].OBAtom)]
n_atoms_coords = [(a.x(), a.y(), a.z()) for a in n_atoms]
amine_normal = np.cross(vector(n_atoms_coords[0], n_atoms_coords[1]),
vector(n_atoms_coords[2], n_atoms_coords[0]))
b = vecangle(ring.normal, amine_normal)
# Smallest of two angles, depending on direction of normal
a = min(b, 180 - b if not 180 - b < 0 else b)
if not a > 30.0:
resnr, restype = whichresnumber(ring.atoms[0]), whichrestype(ring.atoms[0])
reschain = whichchain(ring.atoms[0])
resnr_l, restype_l = whichresnumber(p.orig_atoms[0]), whichrestype(p.orig_atoms[0])
reschain_l = whichchain(p.orig_atoms[0])
contact = data(ring=ring, charge=p, distance=d, offset=offset, type='regular',
restype=restype, resnr=resnr, reschain=reschain,
restype_l=restype_l, resnr_l=resnr_l, reschain_l=reschain_l,
protcharged=protcharged)
pairings.append(contact)
break
resnr = whichresnumber(p.atoms[0]) if protcharged else whichresnumber(ring.atoms[0])
resnr_l = whichresnumber(ring.orig_atoms[0]) if protcharged else whichresnumber(p.orig_atoms[0])
restype = whichrestype(p.atoms[0]) if protcharged else whichrestype(ring.atoms[0])
restype_l = whichrestype(ring.orig_atoms[0]) if protcharged else whichrestype(p.orig_atoms[0])
reschain = whichchain(p.atoms[0]) if protcharged else whichchain(ring.atoms[0])
reschain_l = whichchain(ring.orig_atoms[0]) if protcharged else whichchain(p.orig_atoms[0])
contact = data(ring=ring, charge=p, distance=d, offset=offset, type='regular', restype=restype,
resnr=resnr, reschain=reschain, restype_l=restype_l, resnr_l=resnr_l,
reschain_l=reschain_l, protcharged=protcharged)
pairings.append(contact)
return filter_contacts(pairings) |
def soapX(args):
"""
%prog soapX folder tag [*.fastq]
Run SOAP on a folder of paired reads and apply tag before assembly.
Optional *.fastq in the argument list will be symlinked in each folder and
co-assembled.
"""
p = OptionParser(soapX.__doc__)
opts, args = p.parse_args(args)
if len(args) < 2:
sys.exit(not p.print_help())
folder, tag = args[:2]
extra = args[2:]
extra = [get_abs_path(x) for x in extra]
tag = tag.split(",")
for p, pf in iter_project(folder, n=3):
soap_trios(p, pf, tag, extra) | %prog soapX folder tag [*.fastq]
Run SOAP on a folder of paired reads and apply tag before assembly.
Optional *.fastq in the argument list will be symlinked in each folder and
    co-assembled. | Below is the instruction that describes the task:
### Input:
%prog soapX folder tag [*.fastq]
Run SOAP on a folder of paired reads and apply tag before assembly.
Optional *.fastq in the argument list will be symlinked in each folder and
co-assembled.
### Response:
def soapX(args):
"""
%prog soapX folder tag [*.fastq]
Run SOAP on a folder of paired reads and apply tag before assembly.
Optional *.fastq in the argument list will be symlinked in each folder and
co-assembled.
"""
p = OptionParser(soapX.__doc__)
opts, args = p.parse_args(args)
if len(args) < 2:
sys.exit(not p.print_help())
folder, tag = args[:2]
extra = args[2:]
extra = [get_abs_path(x) for x in extra]
tag = tag.split(",")
for p, pf in iter_project(folder, n=3):
soap_trios(p, pf, tag, extra) |
def stem(self, word, early_english=False):
"""Return Porter stem.
Parameters
----------
word : str
The word to stem
early_english : bool
Set to True in order to remove -eth & -est (2nd & 3rd person
singular verbal agreement suffixes)
Returns
-------
str
Word stem
Examples
--------
>>> stmr = Porter()
>>> stmr.stem('reading')
'read'
>>> stmr.stem('suspension')
'suspens'
>>> stmr.stem('elusiveness')
'elus'
>>> stmr.stem('eateth', early_english=True)
'eat'
"""
# lowercase, normalize, and compose
word = normalize('NFC', text_type(word.lower()))
# Return word if stem is shorter than 2
if len(word) < 3:
return word
# Re-map consonantal y to Y (Y will be C, y will be V)
if word[0] == 'y':
word = 'Y' + word[1:]
for i in range(1, len(word)):
if word[i] == 'y' and word[i - 1] in self._vowels:
word = word[:i] + 'Y' + word[i + 1 :]
# Step 1a
if word[-1] == 's':
if word[-4:] == 'sses':
word = word[:-2]
elif word[-3:] == 'ies':
word = word[:-2]
elif word[-2:] == 'ss':
pass
else:
word = word[:-1]
# Step 1b
step1b_flag = False
if word[-3:] == 'eed':
if self._m_degree(word[:-3]) > 0:
word = word[:-1]
elif word[-2:] == 'ed':
if self._has_vowel(word[:-2]):
word = word[:-2]
step1b_flag = True
elif word[-3:] == 'ing':
if self._has_vowel(word[:-3]):
word = word[:-3]
step1b_flag = True
elif early_english:
if word[-3:] == 'est':
if self._has_vowel(word[:-3]):
word = word[:-3]
step1b_flag = True
elif word[-3:] == 'eth':
if self._has_vowel(word[:-3]):
word = word[:-3]
step1b_flag = True
if step1b_flag:
if word[-2:] in {'at', 'bl', 'iz'}:
word += 'e'
elif self._ends_in_doubled_cons(word) and word[-1] not in {
'l',
's',
'z',
}:
word = word[:-1]
elif self._m_degree(word) == 1 and self._ends_in_cvc(word):
word += 'e'
# Step 1c
if word[-1] in {'Y', 'y'} and self._has_vowel(word[:-1]):
word = word[:-1] + 'i'
# Step 2
if len(word) > 1:
if word[-2] == 'a':
if word[-7:] == 'ational':
if self._m_degree(word[:-7]) > 0:
word = word[:-5] + 'e'
elif word[-6:] == 'tional':
if self._m_degree(word[:-6]) > 0:
word = word[:-2]
elif word[-2] == 'c':
if word[-4:] in {'enci', 'anci'}:
if self._m_degree(word[:-4]) > 0:
word = word[:-1] + 'e'
elif word[-2] == 'e':
if word[-4:] == 'izer':
if self._m_degree(word[:-4]) > 0:
word = word[:-1]
elif word[-2] == 'g':
if word[-4:] == 'logi':
if self._m_degree(word[:-4]) > 0:
word = word[:-1]
elif word[-2] == 'l':
if word[-3:] == 'bli':
if self._m_degree(word[:-3]) > 0:
word = word[:-1] + 'e'
elif word[-4:] == 'alli':
if self._m_degree(word[:-4]) > 0:
word = word[:-2]
elif word[-5:] == 'entli':
if self._m_degree(word[:-5]) > 0:
word = word[:-2]
elif word[-3:] == 'eli':
if self._m_degree(word[:-3]) > 0:
word = word[:-2]
elif word[-5:] == 'ousli':
if self._m_degree(word[:-5]) > 0:
word = word[:-2]
elif word[-2] == 'o':
if word[-7:] == 'ization':
if self._m_degree(word[:-7]) > 0:
word = word[:-5] + 'e'
elif word[-5:] == 'ation':
if self._m_degree(word[:-5]) > 0:
word = word[:-3] + 'e'
elif word[-4:] == 'ator':
if self._m_degree(word[:-4]) > 0:
word = word[:-2] + 'e'
elif word[-2] == 's':
if word[-5:] == 'alism':
if self._m_degree(word[:-5]) > 0:
word = word[:-3]
elif word[-7:] in {'iveness', 'fulness', 'ousness'}:
if self._m_degree(word[:-7]) > 0:
word = word[:-4]
elif word[-2] == 't':
if word[-5:] == 'aliti':
if self._m_degree(word[:-5]) > 0:
word = word[:-3]
elif word[-5:] == 'iviti':
if self._m_degree(word[:-5]) > 0:
word = word[:-3] + 'e'
elif word[-6:] == 'biliti':
if self._m_degree(word[:-6]) > 0:
word = word[:-5] + 'le'
# Step 3
if word[-5:] in 'icate':
if self._m_degree(word[:-5]) > 0:
word = word[:-3]
elif word[-5:] == 'ative':
if self._m_degree(word[:-5]) > 0:
word = word[:-5]
elif word[-5:] in {'alize', 'iciti'}:
if self._m_degree(word[:-5]) > 0:
word = word[:-3]
elif word[-4:] == 'ical':
if self._m_degree(word[:-4]) > 0:
word = word[:-2]
elif word[-3:] == 'ful':
if self._m_degree(word[:-3]) > 0:
word = word[:-3]
elif word[-4:] == 'ness':
if self._m_degree(word[:-4]) > 0:
word = word[:-4]
# Step 4
if word[-2:] == 'al':
if self._m_degree(word[:-2]) > 1:
word = word[:-2]
elif word[-4:] in {'ance', 'ence'}:
if self._m_degree(word[:-4]) > 1:
word = word[:-4]
elif word[-2:] in {'er', 'ic'}:
if self._m_degree(word[:-2]) > 1:
word = word[:-2]
elif word[-4:] in {'able', 'ible'}:
if self._m_degree(word[:-4]) > 1:
word = word[:-4]
elif word[-3:] == 'ant':
if self._m_degree(word[:-3]) > 1:
word = word[:-3]
elif word[-5:] == 'ement':
if self._m_degree(word[:-5]) > 1:
word = word[:-5]
elif word[-4:] == 'ment':
if self._m_degree(word[:-4]) > 1:
word = word[:-4]
elif word[-3:] == 'ent':
if self._m_degree(word[:-3]) > 1:
word = word[:-3]
elif word[-4:] in {'sion', 'tion'}:
if self._m_degree(word[:-3]) > 1:
word = word[:-3]
elif word[-2:] == 'ou':
if self._m_degree(word[:-2]) > 1:
word = word[:-2]
elif word[-3:] in {'ism', 'ate', 'iti', 'ous', 'ive', 'ize'}:
if self._m_degree(word[:-3]) > 1:
word = word[:-3]
# Step 5a
if word[-1] == 'e':
if self._m_degree(word[:-1]) > 1:
word = word[:-1]
elif self._m_degree(word[:-1]) == 1 and not self._ends_in_cvc(
word[:-1]
):
word = word[:-1]
# Step 5b
if word[-2:] == 'll' and self._m_degree(word) > 1:
word = word[:-1]
# Change 'Y' back to 'y' if it survived stemming
for i in range(len(word)):
if word[i] == 'Y':
word = word[:i] + 'y' + word[i + 1 :]
return word | Return Porter stem.
Parameters
----------
word : str
The word to stem
early_english : bool
Set to True in order to remove -eth & -est (2nd & 3rd person
singular verbal agreement suffixes)
Returns
-------
str
Word stem
Examples
--------
>>> stmr = Porter()
>>> stmr.stem('reading')
'read'
>>> stmr.stem('suspension')
'suspens'
>>> stmr.stem('elusiveness')
'elus'
>>> stmr.stem('eateth', early_english=True)
        'eat' | Below is the instruction that describes the task:
### Input:
Return Porter stem.
Parameters
----------
word : str
The word to stem
early_english : bool
Set to True in order to remove -eth & -est (2nd & 3rd person
singular verbal agreement suffixes)
Returns
-------
str
Word stem
Examples
--------
>>> stmr = Porter()
>>> stmr.stem('reading')
'read'
>>> stmr.stem('suspension')
'suspens'
>>> stmr.stem('elusiveness')
'elus'
>>> stmr.stem('eateth', early_english=True)
'eat'
### Response:
def stem(self, word, early_english=False):
"""Return Porter stem.
Parameters
----------
word : str
The word to stem
early_english : bool
Set to True in order to remove -eth & -est (2nd & 3rd person
singular verbal agreement suffixes)
Returns
-------
str
Word stem
Examples
--------
>>> stmr = Porter()
>>> stmr.stem('reading')
'read'
>>> stmr.stem('suspension')
'suspens'
>>> stmr.stem('elusiveness')
'elus'
>>> stmr.stem('eateth', early_english=True)
'eat'
"""
# lowercase, normalize, and compose
word = normalize('NFC', text_type(word.lower()))
# Return word if stem is shorter than 2
if len(word) < 3:
return word
# Re-map consonantal y to Y (Y will be C, y will be V)
if word[0] == 'y':
word = 'Y' + word[1:]
for i in range(1, len(word)):
if word[i] == 'y' and word[i - 1] in self._vowels:
word = word[:i] + 'Y' + word[i + 1 :]
# Step 1a
if word[-1] == 's':
if word[-4:] == 'sses':
word = word[:-2]
elif word[-3:] == 'ies':
word = word[:-2]
elif word[-2:] == 'ss':
pass
else:
word = word[:-1]
# Step 1b
step1b_flag = False
if word[-3:] == 'eed':
if self._m_degree(word[:-3]) > 0:
word = word[:-1]
elif word[-2:] == 'ed':
if self._has_vowel(word[:-2]):
word = word[:-2]
step1b_flag = True
elif word[-3:] == 'ing':
if self._has_vowel(word[:-3]):
word = word[:-3]
step1b_flag = True
elif early_english:
if word[-3:] == 'est':
if self._has_vowel(word[:-3]):
word = word[:-3]
step1b_flag = True
elif word[-3:] == 'eth':
if self._has_vowel(word[:-3]):
word = word[:-3]
step1b_flag = True
if step1b_flag:
if word[-2:] in {'at', 'bl', 'iz'}:
word += 'e'
elif self._ends_in_doubled_cons(word) and word[-1] not in {
'l',
's',
'z',
}:
word = word[:-1]
elif self._m_degree(word) == 1 and self._ends_in_cvc(word):
word += 'e'
# Step 1c
if word[-1] in {'Y', 'y'} and self._has_vowel(word[:-1]):
word = word[:-1] + 'i'
# Step 2
if len(word) > 1:
if word[-2] == 'a':
if word[-7:] == 'ational':
if self._m_degree(word[:-7]) > 0:
word = word[:-5] + 'e'
elif word[-6:] == 'tional':
if self._m_degree(word[:-6]) > 0:
word = word[:-2]
elif word[-2] == 'c':
if word[-4:] in {'enci', 'anci'}:
if self._m_degree(word[:-4]) > 0:
word = word[:-1] + 'e'
elif word[-2] == 'e':
if word[-4:] == 'izer':
if self._m_degree(word[:-4]) > 0:
word = word[:-1]
elif word[-2] == 'g':
if word[-4:] == 'logi':
if self._m_degree(word[:-4]) > 0:
word = word[:-1]
elif word[-2] == 'l':
if word[-3:] == 'bli':
if self._m_degree(word[:-3]) > 0:
word = word[:-1] + 'e'
elif word[-4:] == 'alli':
if self._m_degree(word[:-4]) > 0:
word = word[:-2]
elif word[-5:] == 'entli':
if self._m_degree(word[:-5]) > 0:
word = word[:-2]
elif word[-3:] == 'eli':
if self._m_degree(word[:-3]) > 0:
word = word[:-2]
elif word[-5:] == 'ousli':
if self._m_degree(word[:-5]) > 0:
word = word[:-2]
elif word[-2] == 'o':
if word[-7:] == 'ization':
if self._m_degree(word[:-7]) > 0:
word = word[:-5] + 'e'
elif word[-5:] == 'ation':
if self._m_degree(word[:-5]) > 0:
word = word[:-3] + 'e'
elif word[-4:] == 'ator':
if self._m_degree(word[:-4]) > 0:
word = word[:-2] + 'e'
elif word[-2] == 's':
if word[-5:] == 'alism':
if self._m_degree(word[:-5]) > 0:
word = word[:-3]
elif word[-7:] in {'iveness', 'fulness', 'ousness'}:
if self._m_degree(word[:-7]) > 0:
word = word[:-4]
elif word[-2] == 't':
if word[-5:] == 'aliti':
if self._m_degree(word[:-5]) > 0:
word = word[:-3]
elif word[-5:] == 'iviti':
if self._m_degree(word[:-5]) > 0:
word = word[:-3] + 'e'
elif word[-6:] == 'biliti':
if self._m_degree(word[:-6]) > 0:
word = word[:-5] + 'le'
# Step 3
if word[-5:] in 'icate':
if self._m_degree(word[:-5]) > 0:
word = word[:-3]
elif word[-5:] == 'ative':
if self._m_degree(word[:-5]) > 0:
word = word[:-5]
elif word[-5:] in {'alize', 'iciti'}:
if self._m_degree(word[:-5]) > 0:
word = word[:-3]
elif word[-4:] == 'ical':
if self._m_degree(word[:-4]) > 0:
word = word[:-2]
elif word[-3:] == 'ful':
if self._m_degree(word[:-3]) > 0:
word = word[:-3]
elif word[-4:] == 'ness':
if self._m_degree(word[:-4]) > 0:
word = word[:-4]
# Step 4
if word[-2:] == 'al':
if self._m_degree(word[:-2]) > 1:
word = word[:-2]
elif word[-4:] in {'ance', 'ence'}:
if self._m_degree(word[:-4]) > 1:
word = word[:-4]
elif word[-2:] in {'er', 'ic'}:
if self._m_degree(word[:-2]) > 1:
word = word[:-2]
elif word[-4:] in {'able', 'ible'}:
if self._m_degree(word[:-4]) > 1:
word = word[:-4]
elif word[-3:] == 'ant':
if self._m_degree(word[:-3]) > 1:
word = word[:-3]
elif word[-5:] == 'ement':
if self._m_degree(word[:-5]) > 1:
word = word[:-5]
elif word[-4:] == 'ment':
if self._m_degree(word[:-4]) > 1:
word = word[:-4]
elif word[-3:] == 'ent':
if self._m_degree(word[:-3]) > 1:
word = word[:-3]
elif word[-4:] in {'sion', 'tion'}:
if self._m_degree(word[:-3]) > 1:
word = word[:-3]
elif word[-2:] == 'ou':
if self._m_degree(word[:-2]) > 1:
word = word[:-2]
elif word[-3:] in {'ism', 'ate', 'iti', 'ous', 'ive', 'ize'}:
if self._m_degree(word[:-3]) > 1:
word = word[:-3]
# Step 5a
if word[-1] == 'e':
if self._m_degree(word[:-1]) > 1:
word = word[:-1]
elif self._m_degree(word[:-1]) == 1 and not self._ends_in_cvc(
word[:-1]
):
word = word[:-1]
# Step 5b
if word[-2:] == 'll' and self._m_degree(word) > 1:
word = word[:-1]
# Change 'Y' back to 'y' if it survived stemming
for i in range(len(word)):
if word[i] == 'Y':
word = word[:i] + 'y' + word[i + 1 :]
return word |
def get_report_status(self, report):
"""
Returns the status of a report.
https://canvas.instructure.com/doc/api/account_reports.html#method.account_reports.show
"""
if (report.account_id is None or report.type is None or
report.report_id is None):
raise ReportFailureException(report)
url = ACCOUNTS_API.format(report.account_id) + "/reports/{}/{}".format(
report.type, report.report_id)
data = self._get_resource(url)
data["account_id"] = report.account_id
return Report(data=data) | Returns the status of a report.
        https://canvas.instructure.com/doc/api/account_reports.html#method.account_reports.show | Below is the instruction that describes the task:
### Input:
Returns the status of a report.
https://canvas.instructure.com/doc/api/account_reports.html#method.account_reports.show
### Response:
def get_report_status(self, report):
"""
Returns the status of a report.
https://canvas.instructure.com/doc/api/account_reports.html#method.account_reports.show
"""
if (report.account_id is None or report.type is None or
report.report_id is None):
raise ReportFailureException(report)
url = ACCOUNTS_API.format(report.account_id) + "/reports/{}/{}".format(
report.type, report.report_id)
data = self._get_resource(url)
data["account_id"] = report.account_id
return Report(data=data) |
async def delete(self, service_id: str) -> bool:
"""
Remove a service
Args:
service_id: ID or name of the service
Returns:
True if successful
"""
await self.docker._query(
"services/{service_id}".format(service_id=service_id), method="DELETE"
)
return True | Remove a service
Args:
service_id: ID or name of the service
Returns:
            True if successful | Below is the instruction that describes the task:
### Input:
Remove a service
Args:
service_id: ID or name of the service
Returns:
True if successful
### Response:
async def delete(self, service_id: str) -> bool:
"""
Remove a service
Args:
service_id: ID or name of the service
Returns:
True if successful
"""
await self.docker._query(
"services/{service_id}".format(service_id=service_id), method="DELETE"
)
return True |
def update_schema(schema_old, schema_new):
"""
Given an old BigQuery schema, update it with a new one.
Where a field name is the same, the new will replace the old. Any
new fields not present in the old schema will be added.
Arguments:
schema_old: the old schema to update
schema_new: the new schema which will overwrite/extend the old
"""
old_fields = schema_old["fields"]
new_fields = schema_new["fields"]
output_fields = list(old_fields)
field_indices = {field["name"]: i for i, field in enumerate(output_fields)}
for field in new_fields:
name = field["name"]
if name in field_indices:
# replace old field with new field of same name
output_fields[field_indices[name]] = field
else:
# add new field
output_fields.append(field)
return {"fields": output_fields} | Given an old BigQuery schema, update it with a new one.
Where a field name is the same, the new will replace the old. Any
new fields not present in the old schema will be added.
Arguments:
schema_old: the old schema to update
        schema_new: the new schema which will overwrite/extend the old | Below is the instruction that describes the task:
### Input:
Given an old BigQuery schema, update it with a new one.
Where a field name is the same, the new will replace the old. Any
new fields not present in the old schema will be added.
Arguments:
schema_old: the old schema to update
schema_new: the new schema which will overwrite/extend the old
### Response:
def update_schema(schema_old, schema_new):
"""
Given an old BigQuery schema, update it with a new one.
Where a field name is the same, the new will replace the old. Any
new fields not present in the old schema will be added.
Arguments:
schema_old: the old schema to update
schema_new: the new schema which will overwrite/extend the old
"""
old_fields = schema_old["fields"]
new_fields = schema_new["fields"]
output_fields = list(old_fields)
field_indices = {field["name"]: i for i, field in enumerate(output_fields)}
for field in new_fields:
name = field["name"]
if name in field_indices:
# replace old field with new field of same name
output_fields[field_indices[name]] = field
else:
# add new field
output_fields.append(field)
return {"fields": output_fields} |
def add_role(self, role, description=None):
""" Creates a new group """
new_group = AuthGroup(role=role, creator=self.client)
try:
new_group.save()
return True
except NotUniqueError:
            return False | Creates a new group | Below is the instruction that describes the task:
### Input:
Creates a new group
### Response:
def add_role(self, role, description=None):
""" Creates a new group """
new_group = AuthGroup(role=role, creator=self.client)
try:
new_group.save()
return True
except NotUniqueError:
return False |
def server(self):
"""
UDP server to listen for responses.
"""
server = getattr(self, "_server", None)
if server is None:
server = WSGIServer(('', self.port), self._handle, log=None)
self._server = server
        return server | UDP server to listen for responses. | Below is the instruction that describes the task:
### Input:
UDP server to listen for responses.
### Response:
def server(self):
"""
UDP server to listen for responses.
"""
server = getattr(self, "_server", None)
if server is None:
server = WSGIServer(('', self.port), self._handle, log=None)
self._server = server
return server |
def is_in_plane(self, pp, dist_tolerance):
"""
Determines if point pp is in the plane within the tolerance dist_tolerance
:param pp: point to be tested
:param dist_tolerance: tolerance on the distance to the plane within which point pp is considered in the plane
:return: True if pp is in the plane, False otherwise
"""
return np.abs(np.dot(self.normal_vector, pp) + self._coefficients[3]) <= dist_tolerance | Determines if point pp is in the plane within the tolerance dist_tolerance
:param pp: point to be tested
:param dist_tolerance: tolerance on the distance to the plane within which point pp is considered in the plane
:return: True if pp is in the plane, False otherwise | Below is the the instruction that describes the task:
### Input:
Determines if point pp is in the plane within the tolerance dist_tolerance
:param pp: point to be tested
:param dist_tolerance: tolerance on the distance to the plane within which point pp is considered in the plane
:return: True if pp is in the plane, False otherwise
### Response:
def is_in_plane(self, pp, dist_tolerance):
"""
Determines if point pp is in the plane within the tolerance dist_tolerance
:param pp: point to be tested
:param dist_tolerance: tolerance on the distance to the plane within which point pp is considered in the plane
:return: True if pp is in the plane, False otherwise
"""
return np.abs(np.dot(self.normal_vector, pp) + self._coefficients[3]) <= dist_tolerance |
def start_receive(self, stream):
"""
Mark the :attr:`receive_side <Stream.receive_side>` on `stream` as
ready for reading. Safe to call from any thread. When the associated
file descriptor becomes ready for reading,
:meth:`BasicStream.on_receive` will be called.
"""
_vv and IOLOG.debug('%r.start_receive(%r)', self, stream)
side = stream.receive_side
assert side and side.fd is not None
self.defer(self.poller.start_receive,
side.fd, (side, stream.on_receive)) | Mark the :attr:`receive_side <Stream.receive_side>` on `stream` as
ready for reading. Safe to call from any thread. When the associated
file descriptor becomes ready for reading,
:meth:`BasicStream.on_receive` will be called. | Below is the the instruction that describes the task:
### Input:
Mark the :attr:`receive_side <Stream.receive_side>` on `stream` as
ready for reading. Safe to call from any thread. When the associated
file descriptor becomes ready for reading,
:meth:`BasicStream.on_receive` will be called.
### Response:
def start_receive(self, stream):
"""
Mark the :attr:`receive_side <Stream.receive_side>` on `stream` as
ready for reading. Safe to call from any thread. When the associated
file descriptor becomes ready for reading,
:meth:`BasicStream.on_receive` will be called.
"""
_vv and IOLOG.debug('%r.start_receive(%r)', self, stream)
side = stream.receive_side
assert side and side.fd is not None
self.defer(self.poller.start_receive,
side.fd, (side, stream.on_receive)) |
def get_xid_devices():
"""
Returns a list of all Xid devices connected to your computer.
"""
devices = []
scanner = XidScanner()
for i in range(scanner.device_count()):
com = scanner.device_at_index(i)
com.open()
device = XidDevice(com)
devices.append(device)
return devices | Returns a list of all Xid devices connected to your computer. | Below is the the instruction that describes the task:
### Input:
Returns a list of all Xid devices connected to your computer.
### Response:
def get_xid_devices():
"""
Returns a list of all Xid devices connected to your computer.
"""
devices = []
scanner = XidScanner()
for i in range(scanner.device_count()):
com = scanner.device_at_index(i)
com.open()
device = XidDevice(com)
devices.append(device)
return devices |
def create_access_service(price, service_endpoint, consume_endpoint, timeout=None):
"""
Publish an asset with an `Access` service according to the supplied attributes.
:param price: Asset price, int
:param service_endpoint: str URL for initiating service access request
:param consume_endpoint: str URL to consume service
:param timeout: int amount of time in seconds before the agreement expires
:return: Service instance or None
"""
timeout = timeout or 3600 # default to one hour timeout
service = ServiceDescriptor.access_service_descriptor(
price, service_endpoint, consume_endpoint, timeout, ''
)
return service | Publish an asset with an `Access` service according to the supplied attributes.
:param price: Asset price, int
:param service_endpoint: str URL for initiating service access request
:param consume_endpoint: str URL to consume service
:param timeout: int amount of time in seconds before the agreement expires
:return: Service instance or None | Below is the the instruction that describes the task:
### Input:
Publish an asset with an `Access` service according to the supplied attributes.
:param price: Asset price, int
:param service_endpoint: str URL for initiating service access request
:param consume_endpoint: str URL to consume service
:param timeout: int amount of time in seconds before the agreement expires
:return: Service instance or None
### Response:
def create_access_service(price, service_endpoint, consume_endpoint, timeout=None):
"""
Publish an asset with an `Access` service according to the supplied attributes.
:param price: Asset price, int
:param service_endpoint: str URL for initiating service access request
:param consume_endpoint: str URL to consume service
:param timeout: int amount of time in seconds before the agreement expires
:return: Service instance or None
"""
timeout = timeout or 3600 # default to one hour timeout
service = ServiceDescriptor.access_service_descriptor(
price, service_endpoint, consume_endpoint, timeout, ''
)
return service |
def setMethod(self, method, override_analyses=False):
""" Sets the specified method to the Analyses from the
Worksheet. Only sets the method if the Analysis
allows to keep the integrity.
If an analysis has already been assigned to a method, it won't
be overriden.
Returns the number of analyses affected.
"""
analyses = [an for an in self.getAnalyses()
if (not an.getMethod() or
not an.getInstrument() or
override_analyses) and an.isMethodAllowed(method)]
total = 0
for an in analyses:
success = False
if an.isMethodAllowed(method):
success = an.setMethod(method)
if success is True:
total += 1
self.getField('Method').set(self, method)
return total | Sets the specified method to the Analyses from the
Worksheet. Only sets the method if the Analysis
allows to keep the integrity.
If an analysis has already been assigned to a method, it won't
be overriden.
Returns the number of analyses affected. | Below is the the instruction that describes the task:
### Input:
Sets the specified method to the Analyses from the
Worksheet. Only sets the method if the Analysis
allows to keep the integrity.
If an analysis has already been assigned to a method, it won't
be overriden.
Returns the number of analyses affected.
### Response:
def setMethod(self, method, override_analyses=False):
""" Sets the specified method to the Analyses from the
Worksheet. Only sets the method if the Analysis
allows to keep the integrity.
If an analysis has already been assigned to a method, it won't
be overriden.
Returns the number of analyses affected.
"""
analyses = [an for an in self.getAnalyses()
if (not an.getMethod() or
not an.getInstrument() or
override_analyses) and an.isMethodAllowed(method)]
total = 0
for an in analyses:
success = False
if an.isMethodAllowed(method):
success = an.setMethod(method)
if success is True:
total += 1
self.getField('Method').set(self, method)
return total |
def get_sqltext(self, format_=1):
"""retourne les requêtes actuellement lancées sur le serveur"""
if format_ == 1:
_sql = """SELECT u.sid, substr(u.username,1,12) user_name, s.sql_text
FROM v$sql s,v$session u
WHERE s.hash_value = u.sql_hash_value
AND sql_text NOT LIKE '%from v$sql s, v$session u%'
AND u.username NOT LIKE 'None'
ORDER BY u.sid"""
if format_ == 2:
_sql = """SELECT u.username, s.first_load_time, s.executions, s.sql_text
FROM dba_users u,v$sqlarea s
WHERE u.user_id=s.parsing_user_id
AND u.username LIKE 'LIONEL'
AND sql_text NOT LIKE '%FROM dba_users u,v$sqlarea s%'
ORDER BY s.first_load_time"""
return psql.read_sql(_sql, self.conn) | retourne les requêtes actuellement lancées sur le serveur | Below is the the instruction that describes the task:
### Input:
retourne les requêtes actuellement lancées sur le serveur
### Response:
def get_sqltext(self, format_=1):
"""retourne les requêtes actuellement lancées sur le serveur"""
if format_ == 1:
_sql = """SELECT u.sid, substr(u.username,1,12) user_name, s.sql_text
FROM v$sql s,v$session u
WHERE s.hash_value = u.sql_hash_value
AND sql_text NOT LIKE '%from v$sql s, v$session u%'
AND u.username NOT LIKE 'None'
ORDER BY u.sid"""
if format_ == 2:
_sql = """SELECT u.username, s.first_load_time, s.executions, s.sql_text
FROM dba_users u,v$sqlarea s
WHERE u.user_id=s.parsing_user_id
AND u.username LIKE 'LIONEL'
AND sql_text NOT LIKE '%FROM dba_users u,v$sqlarea s%'
ORDER BY s.first_load_time"""
return psql.read_sql(_sql, self.conn) |
def WriteVarBytes(self, value, endian="<"):
"""
Write an integer value in a space saving way to the stream.
Read more about variable size encoding here: http://docs.neo.org/en-us/node/network-protocol.html#convention
Args:
value (bytes):
endian (str): specify the endianness. (Default) Little endian ('<'). Use '>' for big endian.
Returns:
int: the number of bytes written.
"""
length = len(value)
self.WriteVarInt(length, endian)
return self.WriteBytes(value, unhex=False) | Write an integer value in a space saving way to the stream.
Read more about variable size encoding here: http://docs.neo.org/en-us/node/network-protocol.html#convention
Args:
value (bytes):
endian (str): specify the endianness. (Default) Little endian ('<'). Use '>' for big endian.
Returns:
int: the number of bytes written. | Below is the the instruction that describes the task:
### Input:
Write an integer value in a space saving way to the stream.
Read more about variable size encoding here: http://docs.neo.org/en-us/node/network-protocol.html#convention
Args:
value (bytes):
endian (str): specify the endianness. (Default) Little endian ('<'). Use '>' for big endian.
Returns:
int: the number of bytes written.
### Response:
def WriteVarBytes(self, value, endian="<"):
"""
Write an integer value in a space saving way to the stream.
Read more about variable size encoding here: http://docs.neo.org/en-us/node/network-protocol.html#convention
Args:
value (bytes):
endian (str): specify the endianness. (Default) Little endian ('<'). Use '>' for big endian.
Returns:
int: the number of bytes written.
"""
length = len(value)
self.WriteVarInt(length, endian)
return self.WriteBytes(value, unhex=False) |
def Read(self, length=None):
"""Reads data."""
if length is None:
length = self._length - self._offset
if length > self._max_unbound_read:
raise OversizedReadError("Attempted to read %d bytes when "
"Server.max_unbound_read_size is %d" %
(length, self._max_unbound_read))
result = io.BytesIO()
while result.tell() < length:
chunk, ref = self._GetChunk()
if not chunk:
break
part = chunk[self._offset - ref.offset:]
if not part:
break
result.write(part)
self._offset += min(length, len(part))
return result.getvalue()[:length] | Reads data. | Below is the the instruction that describes the task:
### Input:
Reads data.
### Response:
def Read(self, length=None):
"""Reads data."""
if length is None:
length = self._length - self._offset
if length > self._max_unbound_read:
raise OversizedReadError("Attempted to read %d bytes when "
"Server.max_unbound_read_size is %d" %
(length, self._max_unbound_read))
result = io.BytesIO()
while result.tell() < length:
chunk, ref = self._GetChunk()
if not chunk:
break
part = chunk[self._offset - ref.offset:]
if not part:
break
result.write(part)
self._offset += min(length, len(part))
return result.getvalue()[:length] |
def split_batches(self, data, minibatch_size= None):
"""Split data into minibatches with a specified size
Parameters
----------
data: iterable and indexable
List-like data to be split into batches. Includes spark_contextipy matrices and Pandas DataFrames.
minibatch_size: int
Expected sizes of minibatches split from the data.
Returns
-------
data_split: list
List of minibatches, each entry is a list-like object representing the data subset in a batch.
"""
if minibatch_size==None: minibatch_size= self.minibatch_size
if isinstance(data, list) or isinstance(data, tuple): len_data= len(data)
else: len_data= data.shape[0]
if isinstance(data,pd.DataFrame):
data_split = [data.iloc[x * minibatch_size:(x + 1) * minibatch_size] for x in
range(int(ceil(len_data / minibatch_size)))]
else:
data_split= [data[x* minibatch_size:min(len_data, (x+1)*minibatch_size)]
for x in range(int(ceil(len_data/minibatch_size)))]
return data_split | Split data into minibatches with a specified size
Parameters
----------
data: iterable and indexable
List-like data to be split into batches. Includes spark_contextipy matrices and Pandas DataFrames.
minibatch_size: int
Expected sizes of minibatches split from the data.
Returns
-------
data_split: list
List of minibatches, each entry is a list-like object representing the data subset in a batch. | Below is the the instruction that describes the task:
### Input:
Split data into minibatches with a specified size
Parameters
----------
data: iterable and indexable
List-like data to be split into batches. Includes spark_contextipy matrices and Pandas DataFrames.
minibatch_size: int
Expected sizes of minibatches split from the data.
Returns
-------
data_split: list
List of minibatches, each entry is a list-like object representing the data subset in a batch.
### Response:
def split_batches(self, data, minibatch_size= None):
"""Split data into minibatches with a specified size
Parameters
----------
data: iterable and indexable
List-like data to be split into batches. Includes spark_contextipy matrices and Pandas DataFrames.
minibatch_size: int
Expected sizes of minibatches split from the data.
Returns
-------
data_split: list
List of minibatches, each entry is a list-like object representing the data subset in a batch.
"""
if minibatch_size==None: minibatch_size= self.minibatch_size
if isinstance(data, list) or isinstance(data, tuple): len_data= len(data)
else: len_data= data.shape[0]
if isinstance(data,pd.DataFrame):
data_split = [data.iloc[x * minibatch_size:(x + 1) * minibatch_size] for x in
range(int(ceil(len_data / minibatch_size)))]
else:
data_split= [data[x* minibatch_size:min(len_data, (x+1)*minibatch_size)]
for x in range(int(ceil(len_data/minibatch_size)))]
return data_split |
def endnodes(self):
"""|Nodes| object containing all |Node| objects currently handled by
the |HydPy| object which define a downstream end point of a network."""
endnodes = devicetools.Nodes()
for node in self.nodes:
for element in node.exits:
if ((element in self.elements) and
(node not in element.receivers)):
break
else:
endnodes += node
return endnodes | |Nodes| object containing all |Node| objects currently handled by
the |HydPy| object which define a downstream end point of a network. | Below is the the instruction that describes the task:
### Input:
|Nodes| object containing all |Node| objects currently handled by
the |HydPy| object which define a downstream end point of a network.
### Response:
def endnodes(self):
"""|Nodes| object containing all |Node| objects currently handled by
the |HydPy| object which define a downstream end point of a network."""
endnodes = devicetools.Nodes()
for node in self.nodes:
for element in node.exits:
if ((element in self.elements) and
(node not in element.receivers)):
break
else:
endnodes += node
return endnodes |
def start_worker(
self):
"""start_worker
Start the helper worker process to package queued messages
and send them to Splunk
"""
# Start a worker thread responsible for sending logs
if not self.is_shutting_down(shutdown_event=self.shutdown_event) \
and self.sleep_interval > 0.1:
self.processes = []
self.debug_log(
'start_worker - start multiprocessing.Process')
p = multiprocessing.Process(
target=self.perform_work,
args=(
self.queue,
self.shutdown_event,
self.shutdown_ack,
self.already_done))
p.daemon = True
p.start()
self.processes.append({
'process': p,
'shutdown_event': self.shutdown_event,
'shutdown_ack_event': self.shutdown_ack,
'already_done_event': self.already_done
})
self.debug_log(
'start_worker - done') | start_worker
Start the helper worker process to package queued messages
and send them to Splunk | Below is the the instruction that describes the task:
### Input:
start_worker
Start the helper worker process to package queued messages
and send them to Splunk
### Response:
def start_worker(
self):
"""start_worker
Start the helper worker process to package queued messages
and send them to Splunk
"""
# Start a worker thread responsible for sending logs
if not self.is_shutting_down(shutdown_event=self.shutdown_event) \
and self.sleep_interval > 0.1:
self.processes = []
self.debug_log(
'start_worker - start multiprocessing.Process')
p = multiprocessing.Process(
target=self.perform_work,
args=(
self.queue,
self.shutdown_event,
self.shutdown_ack,
self.already_done))
p.daemon = True
p.start()
self.processes.append({
'process': p,
'shutdown_event': self.shutdown_event,
'shutdown_ack_event': self.shutdown_ack,
'already_done_event': self.already_done
})
self.debug_log(
'start_worker - done') |
def get_filename_and_extension_of(target_file):
"""Gets the base filename and extension of the target file.
:param target_file: the complete path of the target file
:returns: base filename and extension
"""
base_target_filename = os.path.basename(target_file)
file_name, file_ext_with_dot = os.path.splitext(base_target_filename)
return file_name, file_ext_with_dot | Gets the base filename and extension of the target file.
:param target_file: the complete path of the target file
:returns: base filename and extension | Below is the the instruction that describes the task:
### Input:
Gets the base filename and extension of the target file.
:param target_file: the complete path of the target file
:returns: base filename and extension
### Response:
def get_filename_and_extension_of(target_file):
"""Gets the base filename and extension of the target file.
:param target_file: the complete path of the target file
:returns: base filename and extension
"""
base_target_filename = os.path.basename(target_file)
file_name, file_ext_with_dot = os.path.splitext(base_target_filename)
return file_name, file_ext_with_dot |
def schedule(self, kind='R'):
"""Returns a list of BoxScore IDs for every game in the season.
Only needs to handle 'R' or 'P' options because decorator handles 'B'.
:param kind: 'R' for regular season, 'P' for playoffs, 'B' for both.
Defaults to 'R'.
:returns: DataFrame of schedule information.
:rtype: pd.DataFrame
"""
kind = kind.upper()[0]
dfs = []
# get games from each month
for month in ('october', 'november', 'december', 'january', 'february',
'march', 'april', 'may', 'june'):
try:
doc = self.get_sub_doc('games-{}'.format(month))
except ValueError:
continue
table = doc('table#schedule')
df = sportsref.utils.parse_table(table)
dfs.append(df)
df = pd.concat(dfs).reset_index(drop=True)
# figure out how many regular season games
try:
sportsref.utils.get_html('{}/playoffs/NBA_{}.html'.format(
sportsref.nba.BASE_URL, self.yr)
)
is_past_season = True
except ValueError:
is_past_season = False
if is_past_season:
team_per_game = self.team_stats_per_game()
n_reg_games = int(team_per_game.g.sum() // 2)
else:
n_reg_games = len(df)
# subset appropriately based on `kind`
if kind == 'P':
return df.iloc[n_reg_games:]
else:
return df.iloc[:n_reg_games] | Returns a list of BoxScore IDs for every game in the season.
Only needs to handle 'R' or 'P' options because decorator handles 'B'.
:param kind: 'R' for regular season, 'P' for playoffs, 'B' for both.
Defaults to 'R'.
:returns: DataFrame of schedule information.
:rtype: pd.DataFrame | Below is the the instruction that describes the task:
### Input:
Returns a list of BoxScore IDs for every game in the season.
Only needs to handle 'R' or 'P' options because decorator handles 'B'.
:param kind: 'R' for regular season, 'P' for playoffs, 'B' for both.
Defaults to 'R'.
:returns: DataFrame of schedule information.
:rtype: pd.DataFrame
### Response:
def schedule(self, kind='R'):
"""Returns a list of BoxScore IDs for every game in the season.
Only needs to handle 'R' or 'P' options because decorator handles 'B'.
:param kind: 'R' for regular season, 'P' for playoffs, 'B' for both.
Defaults to 'R'.
:returns: DataFrame of schedule information.
:rtype: pd.DataFrame
"""
kind = kind.upper()[0]
dfs = []
# get games from each month
for month in ('october', 'november', 'december', 'january', 'february',
'march', 'april', 'may', 'june'):
try:
doc = self.get_sub_doc('games-{}'.format(month))
except ValueError:
continue
table = doc('table#schedule')
df = sportsref.utils.parse_table(table)
dfs.append(df)
df = pd.concat(dfs).reset_index(drop=True)
# figure out how many regular season games
try:
sportsref.utils.get_html('{}/playoffs/NBA_{}.html'.format(
sportsref.nba.BASE_URL, self.yr)
)
is_past_season = True
except ValueError:
is_past_season = False
if is_past_season:
team_per_game = self.team_stats_per_game()
n_reg_games = int(team_per_game.g.sum() // 2)
else:
n_reg_games = len(df)
# subset appropriately based on `kind`
if kind == 'P':
return df.iloc[n_reg_games:]
else:
return df.iloc[:n_reg_games] |
async def _reconnect(self, last_error):
"""
Cleanly disconnects and then reconnects.
"""
self._log.debug('Closing current connection...')
await self._connection.disconnect()
await helpers._cancel(
self._log,
send_loop_handle=self._send_loop_handle,
recv_loop_handle=self._recv_loop_handle
)
# TODO See comment in `_start_reconnect`
# Perhaps this should be the last thing to do?
# But _connect() creates tasks which may run and,
# if they see that reconnecting is True, they will end.
# Perhaps that task creation should not belong in connect?
self._reconnecting = False
# Start with a clean state (and thus session ID) to avoid old msgs
self._state.reset()
retries = self._retries if self._auto_reconnect else 0
for attempt in retry_range(retries):
try:
await self._connect()
except (IOError, asyncio.TimeoutError) as e:
last_error = e
self._log.info('Failed reconnection attempt %d with %s',
attempt, e.__class__.__name__)
await asyncio.sleep(self._delay)
except Exception as e:
last_error = e
self._log.exception('Unexpected exception reconnecting on '
'attempt %d', attempt)
await asyncio.sleep(self._delay)
else:
self._send_queue.extend(self._pending_state.values())
self._pending_state.clear()
if self._auto_reconnect_callback:
self._loop.create_task(self._auto_reconnect_callback())
break
else:
self._log.error('Automatic reconnection failed {} time(s)'
.format(attempt))
await self._disconnect(error=last_error.with_traceback(None)) | Cleanly disconnects and then reconnects. | Below is the the instruction that describes the task:
### Input:
Cleanly disconnects and then reconnects.
### Response:
async def _reconnect(self, last_error):
"""
Cleanly disconnects and then reconnects.
"""
self._log.debug('Closing current connection...')
await self._connection.disconnect()
await helpers._cancel(
self._log,
send_loop_handle=self._send_loop_handle,
recv_loop_handle=self._recv_loop_handle
)
# TODO See comment in `_start_reconnect`
# Perhaps this should be the last thing to do?
# But _connect() creates tasks which may run and,
# if they see that reconnecting is True, they will end.
# Perhaps that task creation should not belong in connect?
self._reconnecting = False
# Start with a clean state (and thus session ID) to avoid old msgs
self._state.reset()
retries = self._retries if self._auto_reconnect else 0
for attempt in retry_range(retries):
try:
await self._connect()
except (IOError, asyncio.TimeoutError) as e:
last_error = e
self._log.info('Failed reconnection attempt %d with %s',
attempt, e.__class__.__name__)
await asyncio.sleep(self._delay)
except Exception as e:
last_error = e
self._log.exception('Unexpected exception reconnecting on '
'attempt %d', attempt)
await asyncio.sleep(self._delay)
else:
self._send_queue.extend(self._pending_state.values())
self._pending_state.clear()
if self._auto_reconnect_callback:
self._loop.create_task(self._auto_reconnect_callback())
break
else:
self._log.error('Automatic reconnection failed {} time(s)'
.format(attempt))
await self._disconnect(error=last_error.with_traceback(None)) |
def _get_config_path(self):
"""
Return a sensible configuration path for caching config
settings.
"""
org = self.service.space.org.name
space = self.service.space.name
name = self.name
return "~/.predix/%s/%s/%s.json" % (org, space, name) | Return a sensible configuration path for caching config
settings. | Below is the the instruction that describes the task:
### Input:
Return a sensible configuration path for caching config
settings.
### Response:
def _get_config_path(self):
"""
Return a sensible configuration path for caching config
settings.
"""
org = self.service.space.org.name
space = self.service.space.name
name = self.name
return "~/.predix/%s/%s/%s.json" % (org, space, name) |
def is_audit_type(*args):
"""This audit is included in the specified kinds of audits.
:param *args: List of AuditTypes to include this audit in
:type args: List[AuditType]
:rtype: Callable[Dict]
"""
def _is_audit_type(audit_options):
if audit_options.get('audit_type') in args:
return True
else:
return False
return _is_audit_type | This audit is included in the specified kinds of audits.
:param *args: List of AuditTypes to include this audit in
:type args: List[AuditType]
:rtype: Callable[Dict] | Below is the the instruction that describes the task:
### Input:
This audit is included in the specified kinds of audits.
:param *args: List of AuditTypes to include this audit in
:type args: List[AuditType]
:rtype: Callable[Dict]
### Response:
def is_audit_type(*args):
"""This audit is included in the specified kinds of audits.
:param *args: List of AuditTypes to include this audit in
:type args: List[AuditType]
:rtype: Callable[Dict]
"""
def _is_audit_type(audit_options):
if audit_options.get('audit_type') in args:
return True
else:
return False
return _is_audit_type |
def to_json(self, schema_info=True):
""" JSON serialization method that account for all information-wise important part of breakpoint graph
"""
genomes = set()
result = {}
result["edges"] = []
for bgedge in self.edges():
genomes |= bgedge.multicolor.colors
result["edges"].append(bgedge.to_json(schema_info=schema_info))
result["vertices"] = [bgvertex.to_json(schema_info=schema_info) for bgvertex in self.nodes()]
result["genomes"] = [bggenome.to_json(schema_info=schema_info) for bggenome in genomes]
return result | JSON serialization method that account for all information-wise important part of breakpoint graph | Below is the the instruction that describes the task:
### Input:
JSON serialization method that account for all information-wise important part of breakpoint graph
### Response:
def to_json(self, schema_info=True):
""" JSON serialization method that account for all information-wise important part of breakpoint graph
"""
genomes = set()
result = {}
result["edges"] = []
for bgedge in self.edges():
genomes |= bgedge.multicolor.colors
result["edges"].append(bgedge.to_json(schema_info=schema_info))
result["vertices"] = [bgvertex.to_json(schema_info=schema_info) for bgvertex in self.nodes()]
result["genomes"] = [bggenome.to_json(schema_info=schema_info) for bggenome in genomes]
return result |
def to_frame(self, data, state):
"""
Extract a single frame from the data buffer. The consumed
data should be removed from the buffer. If no complete frame
can be read, must raise a ``NoFrames`` exception.
:param data: A ``bytearray`` instance containing the data so
far read.
:param state: An instance of ``FramerState``. If the buffer
contains a partial frame, this object can be
used to store state information to allow the
remainder of the frame to be read.
:returns: A frame. The frame may be any object. The stock
framers always return bytes.
"""
# Find the next newline
data_len = data.find(b'\n')
if data_len < 0:
# No line to extract
raise exc.NoFrames()
# Track how much to exclude
frame_len = data_len + 1
# Are we to exclude carriage returns?
if (self.carriage_return and data_len and
data[data_len - 1] == ord(b'\r')):
data_len -= 1
# Extract the frame
frame = six.binary_type(data[:data_len])
del data[:frame_len]
# Return the frame
return frame | Extract a single frame from the data buffer. The consumed
data should be removed from the buffer. If no complete frame
can be read, must raise a ``NoFrames`` exception.
:param data: A ``bytearray`` instance containing the data so
far read.
:param state: An instance of ``FramerState``. If the buffer
contains a partial frame, this object can be
used to store state information to allow the
remainder of the frame to be read.
:returns: A frame. The frame may be any object. The stock
framers always return bytes. | Below is the the instruction that describes the task:
### Input:
Extract a single frame from the data buffer. The consumed
data should be removed from the buffer. If no complete frame
can be read, must raise a ``NoFrames`` exception.
:param data: A ``bytearray`` instance containing the data so
far read.
:param state: An instance of ``FramerState``. If the buffer
contains a partial frame, this object can be
used to store state information to allow the
remainder of the frame to be read.
:returns: A frame. The frame may be any object. The stock
framers always return bytes.
### Response:
def to_frame(self, data, state):
"""
Extract a single frame from the data buffer. The consumed
data should be removed from the buffer. If no complete frame
can be read, must raise a ``NoFrames`` exception.
:param data: A ``bytearray`` instance containing the data so
far read.
:param state: An instance of ``FramerState``. If the buffer
contains a partial frame, this object can be
used to store state information to allow the
remainder of the frame to be read.
:returns: A frame. The frame may be any object. The stock
framers always return bytes.
"""
# Find the next newline
data_len = data.find(b'\n')
if data_len < 0:
# No line to extract
raise exc.NoFrames()
# Track how much to exclude
frame_len = data_len + 1
# Are we to exclude carriage returns?
if (self.carriage_return and data_len and
data[data_len - 1] == ord(b'\r')):
data_len -= 1
# Extract the frame
frame = six.binary_type(data[:data_len])
del data[:frame_len]
# Return the frame
return frame |
def validate_query(self, query):
"""Validate a query.
Determines whether `query` is well-formed. This includes checking for all
required parameters, as well as checking parameters for valid values.
Parameters
----------
query : RadarQuery
The query to validate
Returns
-------
valid : bool
Whether `query` is valid.
"""
valid = True
# Make sure all stations are in the table
if 'stn' in query.spatial_query:
valid = valid and all(stid in self.stations
for stid in query.spatial_query['stn'])
if query.var:
valid = valid and all(var in self.variables for var in query.var)
return valid | Validate a query.
Determines whether `query` is well-formed. This includes checking for all
required parameters, as well as checking parameters for valid values.
Parameters
----------
query : RadarQuery
The query to validate
Returns
-------
valid : bool
Whether `query` is valid. | Below is the the instruction that describes the task:
### Input:
Validate a query.
Determines whether `query` is well-formed. This includes checking for all
required parameters, as well as checking parameters for valid values.
Parameters
----------
query : RadarQuery
The query to validate
Returns
-------
valid : bool
Whether `query` is valid.
### Response:
def validate_query(self, query):
"""Validate a query.
Determines whether `query` is well-formed. This includes checking for all
required parameters, as well as checking parameters for valid values.
Parameters
----------
query : RadarQuery
The query to validate
Returns
-------
valid : bool
Whether `query` is valid.
"""
valid = True
# Make sure all stations are in the table
if 'stn' in query.spatial_query:
valid = valid and all(stid in self.stations
for stid in query.spatial_query['stn'])
if query.var:
valid = valid and all(var in self.variables for var in query.var)
return valid |
def set_model_config(model_name, config, replace=False):
"""
This function should be only used in initialization phrase
:param model_name: model name it's should be string
:param config: config should be dict. e.g.
{'__mapping_only__', '__tablename__', '__ext_model__'}
:param replace: if True, then replace original config, False will update
"""
assert isinstance(model_name, str)
assert isinstance(config, dict)
d = __models__.setdefault(model_name, {})
if replace:
d['config'] = config
else:
c = d.setdefault('config', {})
c.update(config) | This function should be only used in initialization phrase
:param model_name: model name it's should be string
:param config: config should be dict. e.g.
{'__mapping_only__', '__tablename__', '__ext_model__'}
:param replace: if True, then replace original config, False will update | Below is the instruction that describes the task:
### Input:
This function should be only used in initialization phrase
:param model_name: model name it's should be string
:param config: config should be dict. e.g.
{'__mapping_only__', '__tablename__', '__ext_model__'}
:param replace: if True, then replace original config, False will update
### Response:
def set_model_config(model_name, config, replace=False):
"""
This function should be only used in initialization phrase
:param model_name: model name it's should be string
:param config: config should be dict. e.g.
{'__mapping_only__', '__tablename__', '__ext_model__'}
:param replace: if True, then replace original config, False will update
"""
assert isinstance(model_name, str)
assert isinstance(config, dict)
d = __models__.setdefault(model_name, {})
if replace:
d['config'] = config
else:
c = d.setdefault('config', {})
c.update(config) |
def parse_str(text):
"""
Given a string of characters, for each normal ASCII character, yields
a directive consisting of a 'putch' instruction followed by the character
itself.
If a valid ANSI escape sequence is detected within the string, the
supported codes are translated into directives. For example ``\\033[42m``
would emit a directive of ``["background_color", "green"]``.
Note that unrecognised escape sequences are silently ignored: Only reset,
reverse colours and 8 foreground and background colours are supported.
It is up to the consumer to interpret the directives and update its state
accordingly.
:param text: An ASCII string which may or may not include valid ANSI Color
escape codes.
:type text: str
"""
prog = re.compile(r'^\033\[(\d+(;\d+)*)m', re.UNICODE)
while text != "":
result = prog.match(text)
if result:
for code in result.group(1).split(";"):
directive = valid_attributes.get(int(code), None)
if directive:
yield directive
n = len(result.group(0))
text = text[n:]
else:
yield ["putch", text[0]]
text = text[1:] | Given a string of characters, for each normal ASCII character, yields
a directive consisting of a 'putch' instruction followed by the character
itself.
If a valid ANSI escape sequence is detected within the string, the
supported codes are translated into directives. For example ``\\033[42m``
would emit a directive of ``["background_color", "green"]``.
Note that unrecognised escape sequences are silently ignored: Only reset,
reverse colours and 8 foreground and background colours are supported.
It is up to the consumer to interpret the directives and update its state
accordingly.
:param text: An ASCII string which may or may not include valid ANSI Color
escape codes.
:type text: str | Below is the instruction that describes the task:
### Input:
Given a string of characters, for each normal ASCII character, yields
a directive consisting of a 'putch' instruction followed by the character
itself.
If a valid ANSI escape sequence is detected within the string, the
supported codes are translated into directives. For example ``\\033[42m``
would emit a directive of ``["background_color", "green"]``.
Note that unrecognised escape sequences are silently ignored: Only reset,
reverse colours and 8 foreground and background colours are supported.
It is up to the consumer to interpret the directives and update its state
accordingly.
:param text: An ASCII string which may or may not include valid ANSI Color
escape codes.
:type text: str
### Response:
def parse_str(text):
"""
Given a string of characters, for each normal ASCII character, yields
a directive consisting of a 'putch' instruction followed by the character
itself.
If a valid ANSI escape sequence is detected within the string, the
supported codes are translated into directives. For example ``\\033[42m``
would emit a directive of ``["background_color", "green"]``.
Note that unrecognised escape sequences are silently ignored: Only reset,
reverse colours and 8 foreground and background colours are supported.
It is up to the consumer to interpret the directives and update its state
accordingly.
:param text: An ASCII string which may or may not include valid ANSI Color
escape codes.
:type text: str
"""
prog = re.compile(r'^\033\[(\d+(;\d+)*)m', re.UNICODE)
while text != "":
result = prog.match(text)
if result:
for code in result.group(1).split(";"):
directive = valid_attributes.get(int(code), None)
if directive:
yield directive
n = len(result.group(0))
text = text[n:]
else:
yield ["putch", text[0]]
text = text[1:] |
def get_recipe(self, recipe_name):
"""Get a recipe by name.
Args:
recipe_name (str): The name of the recipe to fetch. Can be either the
yaml file name or the name of the recipe.
"""
if recipe_name.endswith('.yaml'):
recipe = self._recipes.get(RecipeObject.FromFile(recipe_name, self._recipe_actions, self._recipe_resources).name)
else:
recipe = self._recipes.get(recipe_name)
if recipe is None:
raise RecipeNotFoundError("Could not find recipe", recipe_name=recipe_name, known_recipes=[x for x in self._recipes.keys()])
return recipe | Get a recipe by name.
Args:
recipe_name (str): The name of the recipe to fetch. Can be either the
yaml file name or the name of the recipe. | Below is the instruction that describes the task:
### Input:
Get a recipe by name.
Args:
recipe_name (str): The name of the recipe to fetch. Can be either the
yaml file name or the name of the recipe.
### Response:
def get_recipe(self, recipe_name):
"""Get a recipe by name.
Args:
recipe_name (str): The name of the recipe to fetch. Can be either the
yaml file name or the name of the recipe.
"""
if recipe_name.endswith('.yaml'):
recipe = self._recipes.get(RecipeObject.FromFile(recipe_name, self._recipe_actions, self._recipe_resources).name)
else:
recipe = self._recipes.get(recipe_name)
if recipe is None:
raise RecipeNotFoundError("Could not find recipe", recipe_name=recipe_name, known_recipes=[x for x in self._recipes.keys()])
return recipe |
def get_org_smarthost(self, orgid, serverid):
"""Get an organization smarthost"""
return self.api_call(
ENDPOINTS['orgsmarthosts']['get'],
dict(orgid=orgid, serverid=serverid)) | Get an organization smarthost | Below is the instruction that describes the task:
### Input:
Get an organization smarthost
### Response:
def get_org_smarthost(self, orgid, serverid):
"""Get an organization smarthost"""
return self.api_call(
ENDPOINTS['orgsmarthosts']['get'],
dict(orgid=orgid, serverid=serverid)) |
def _to_dict(self):
"""Return a json dictionary representing this model."""
_dict = {}
if hasattr(self, 'scope') and self.scope is not None:
_dict['scope'] = self.scope
if hasattr(self, 'status') and self.status is not None:
_dict['status'] = self.status
if hasattr(
self,
'status_description') and self.status_description is not None:
_dict['status_description'] = self.status_description
if hasattr(self, 'last_trained') and self.last_trained is not None:
_dict['last_trained'] = self.last_trained
return _dict | Return a json dictionary representing this model. | Below is the instruction that describes the task:
### Input:
Return a json dictionary representing this model.
### Response:
def _to_dict(self):
"""Return a json dictionary representing this model."""
_dict = {}
if hasattr(self, 'scope') and self.scope is not None:
_dict['scope'] = self.scope
if hasattr(self, 'status') and self.status is not None:
_dict['status'] = self.status
if hasattr(
self,
'status_description') and self.status_description is not None:
_dict['status_description'] = self.status_description
if hasattr(self, 'last_trained') and self.last_trained is not None:
_dict['last_trained'] = self.last_trained
return _dict |
def our_IsUsableForDesktopGUI(m):
""" A more leniant version of CGDisplayModeIsUsableForDesktopGUI """
if guess_bitDepth(Q.CGDisplayModeCopyPixelEncoding(m)) != 24:
return False
if Q.CGDisplayModeGetWidth(m) < 640:
return False
if Q.CGDisplayModeGetHeight(m) < 480:
return False
return True | A more leniant version of CGDisplayModeIsUsableForDesktopGUI | Below is the instruction that describes the task:
### Input:
A more leniant version of CGDisplayModeIsUsableForDesktopGUI
### Response:
def our_IsUsableForDesktopGUI(m):
""" A more leniant version of CGDisplayModeIsUsableForDesktopGUI """
if guess_bitDepth(Q.CGDisplayModeCopyPixelEncoding(m)) != 24:
return False
if Q.CGDisplayModeGetWidth(m) < 640:
return False
if Q.CGDisplayModeGetHeight(m) < 480:
return False
return True |
def get_queryset(self, request):
''' Restrict the listed submission files for the current user.'''
qs = super(SubmissionFileAdmin, self).get_queryset(request)
if request.user.is_superuser:
return qs
else:
return qs.filter(Q(submissions__assignment__course__tutors__pk=request.user.pk) | Q(submissions__assignment__course__owner=request.user)).distinct() | Restrict the listed submission files for the current user. | Below is the instruction that describes the task:
### Input:
Restrict the listed submission files for the current user.
### Response:
def get_queryset(self, request):
''' Restrict the listed submission files for the current user.'''
qs = super(SubmissionFileAdmin, self).get_queryset(request)
if request.user.is_superuser:
return qs
else:
return qs.filter(Q(submissions__assignment__course__tutors__pk=request.user.pk) | Q(submissions__assignment__course__owner=request.user)).distinct() |
def _Aff4Read(aff4_obj, offset, length):
"""Reads contents of given AFF4 file.
Args:
aff4_obj: An AFF4 stream instance to retrieve contents for.
offset: An offset to start the reading from.
length: A number of bytes to read. Reads the whole file if 0.
Returns:
Contents of specified AFF4 stream.
Raises:
TypeError: If `aff4_obj` is not an instance of AFF4 stream.
"""
length = length or (_Aff4Size(aff4_obj) - offset)
aff4_obj.Seek(offset)
return aff4_obj.Read(length) | Reads contents of given AFF4 file.
Args:
aff4_obj: An AFF4 stream instance to retrieve contents for.
offset: An offset to start the reading from.
length: A number of bytes to read. Reads the whole file if 0.
Returns:
Contents of specified AFF4 stream.
Raises:
TypeError: If `aff4_obj` is not an instance of AFF4 stream. | Below is the instruction that describes the task:
### Input:
Reads contents of given AFF4 file.
Args:
aff4_obj: An AFF4 stream instance to retrieve contents for.
offset: An offset to start the reading from.
length: A number of bytes to read. Reads the whole file if 0.
Returns:
Contents of specified AFF4 stream.
Raises:
TypeError: If `aff4_obj` is not an instance of AFF4 stream.
### Response:
def _Aff4Read(aff4_obj, offset, length):
"""Reads contents of given AFF4 file.
Args:
aff4_obj: An AFF4 stream instance to retrieve contents for.
offset: An offset to start the reading from.
length: A number of bytes to read. Reads the whole file if 0.
Returns:
Contents of specified AFF4 stream.
Raises:
TypeError: If `aff4_obj` is not an instance of AFF4 stream.
"""
length = length or (_Aff4Size(aff4_obj) - offset)
aff4_obj.Seek(offset)
return aff4_obj.Read(length) |
def get_data_iters_and_vocabs(args: argparse.Namespace,
model_folder: Optional[str]) -> Tuple['data_io.BaseParallelSampleIter',
List[vocab.Vocab], vocab.Vocab, model.ModelConfig]:
"""
Loads the data iterators and vocabularies.
:param args: Arguments as returned by argparse.
:param model_folder: Output folder.
:return: The scoring data iterator as well as the source and target vocabularies.
"""
model_config = model.SockeyeModel.load_config(os.path.join(args.model, C.CONFIG_NAME))
if args.max_seq_len is None:
max_seq_len_source = model_config.config_data.max_seq_len_source
max_seq_len_target = model_config.config_data.max_seq_len_target
else:
max_seq_len_source, max_seq_len_target = args.max_seq_len
batch_num_devices = 1 if args.use_cpu else sum(-di if di < 0 else 1 for di in args.device_ids)
# Load the existing vocabs created when starting the training run.
source_vocabs = vocab.load_source_vocabs(model_folder)
target_vocab = vocab.load_target_vocab(model_folder)
sources = [args.source] + args.source_factors
sources = [str(os.path.abspath(source)) for source in sources]
score_iter = data_io.get_scoring_data_iters(
sources=sources,
target=os.path.abspath(args.target),
source_vocabs=source_vocabs,
target_vocab=target_vocab,
batch_size=args.batch_size,
batch_num_devices=batch_num_devices,
max_seq_len_source=max_seq_len_source,
max_seq_len_target=max_seq_len_target)
return score_iter, source_vocabs, target_vocab, model_config | Loads the data iterators and vocabularies.
:param args: Arguments as returned by argparse.
:param model_folder: Output folder.
:return: The scoring data iterator as well as the source and target vocabularies. | Below is the instruction that describes the task:
### Input:
Loads the data iterators and vocabularies.
:param args: Arguments as returned by argparse.
:param model_folder: Output folder.
:return: The scoring data iterator as well as the source and target vocabularies.
### Response:
def get_data_iters_and_vocabs(args: argparse.Namespace,
model_folder: Optional[str]) -> Tuple['data_io.BaseParallelSampleIter',
List[vocab.Vocab], vocab.Vocab, model.ModelConfig]:
"""
Loads the data iterators and vocabularies.
:param args: Arguments as returned by argparse.
:param model_folder: Output folder.
:return: The scoring data iterator as well as the source and target vocabularies.
"""
model_config = model.SockeyeModel.load_config(os.path.join(args.model, C.CONFIG_NAME))
if args.max_seq_len is None:
max_seq_len_source = model_config.config_data.max_seq_len_source
max_seq_len_target = model_config.config_data.max_seq_len_target
else:
max_seq_len_source, max_seq_len_target = args.max_seq_len
batch_num_devices = 1 if args.use_cpu else sum(-di if di < 0 else 1 for di in args.device_ids)
# Load the existing vocabs created when starting the training run.
source_vocabs = vocab.load_source_vocabs(model_folder)
target_vocab = vocab.load_target_vocab(model_folder)
sources = [args.source] + args.source_factors
sources = [str(os.path.abspath(source)) for source in sources]
score_iter = data_io.get_scoring_data_iters(
sources=sources,
target=os.path.abspath(args.target),
source_vocabs=source_vocabs,
target_vocab=target_vocab,
batch_size=args.batch_size,
batch_num_devices=batch_num_devices,
max_seq_len_source=max_seq_len_source,
max_seq_len_target=max_seq_len_target)
return score_iter, source_vocabs, target_vocab, model_config |
def remove_child_objectives(self, objective_id=None):
"""Removes all children from an objective.
arg: objective_id (osid.id.Id): the Id of an objective
raise: NotFound - objective_id not found
raise: NullArgument - objective_id is null
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure
compliance: mandatory - This method must be implemented.
"""
if objective_id is None:
raise NullArgument()
ols = ObjectiveLookupSession(self._objective_bank_id, runtime=self._runtime)
try:
ols.get_objective(objective_id)
except:
raise # If no objective, get_objectives will raise NotFound
ids_arg = {'ids': []}
url_path = construct_url('childids',
bank_id=self._catalog_idstr,
obj_id=objective_id)
try:
result = self._put_request(url_path, ids_arg)
except Exception:
raise
id_list = list()
for identifier in result['ids']:
id_list.append(Id(idstr=identifier))
return id_objects.IdList(id_list) | Removes all children from an objective.
arg: objective_id (osid.id.Id): the Id of an objective
raise: NotFound - objective_id not found
raise: NullArgument - objective_id is null
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure
compliance: mandatory - This method must be implemented. | Below is the instruction that describes the task:
### Input:
Removes all children from an objective.
arg: objective_id (osid.id.Id): the Id of an objective
raise: NotFound - objective_id not found
raise: NullArgument - objective_id is null
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure
compliance: mandatory - This method must be implemented.
### Response:
def remove_child_objectives(self, objective_id=None):
"""Removes all children from an objective.
arg: objective_id (osid.id.Id): the Id of an objective
raise: NotFound - objective_id not found
raise: NullArgument - objective_id is null
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure
compliance: mandatory - This method must be implemented.
"""
if objective_id is None:
raise NullArgument()
ols = ObjectiveLookupSession(self._objective_bank_id, runtime=self._runtime)
try:
ols.get_objective(objective_id)
except:
raise # If no objective, get_objectives will raise NotFound
ids_arg = {'ids': []}
url_path = construct_url('childids',
bank_id=self._catalog_idstr,
obj_id=objective_id)
try:
result = self._put_request(url_path, ids_arg)
except Exception:
raise
id_list = list()
for identifier in result['ids']:
id_list.append(Id(idstr=identifier))
return id_objects.IdList(id_list) |
def _getConfig(general_config_dir: str = None):
"""
Reads a file called config.py in the project directory
:raises: FileNotFoundError
:return: the configuration as a python object
"""
stp_config = STPConfig()
plenum_config = import_module("plenum.config")
config = stp_config
config.__dict__.update(plenum_config.__dict__)
if general_config_dir:
config.GENERAL_CONFIG_DIR = general_config_dir
if not config.GENERAL_CONFIG_DIR:
raise Exception('GENERAL_CONFIG_DIR must be set')
extend_with_external_config(config, (config.GENERAL_CONFIG_DIR,
config.GENERAL_CONFIG_FILE))
# "unsafe" is a set of attributes that can set certain behaviors that
# are not safe, for example, 'disable_view_change' disables view changes
# from happening. This might be useful in testing scenarios, but never
# in a live network.
if not hasattr(config, 'unsafe'):
setattr(config, 'unsafe', set())
return config | Reads a file called config.py in the project directory
:raises: FileNotFoundError
:return: the configuration as a python object | Below is the instruction that describes the task:
### Input:
Reads a file called config.py in the project directory
:raises: FileNotFoundError
:return: the configuration as a python object
### Response:
def _getConfig(general_config_dir: str = None):
"""
Reads a file called config.py in the project directory
:raises: FileNotFoundError
:return: the configuration as a python object
"""
stp_config = STPConfig()
plenum_config = import_module("plenum.config")
config = stp_config
config.__dict__.update(plenum_config.__dict__)
if general_config_dir:
config.GENERAL_CONFIG_DIR = general_config_dir
if not config.GENERAL_CONFIG_DIR:
raise Exception('GENERAL_CONFIG_DIR must be set')
extend_with_external_config(config, (config.GENERAL_CONFIG_DIR,
config.GENERAL_CONFIG_FILE))
# "unsafe" is a set of attributes that can set certain behaviors that
# are not safe, for example, 'disable_view_change' disables view changes
# from happening. This might be useful in testing scenarios, but never
# in a live network.
if not hasattr(config, 'unsafe'):
setattr(config, 'unsafe', set())
return config |
def fetch(self, path, sender, msg):
"""
Start a transfer for a registered path.
:param str path:
File path.
:param mitogen.core.Sender sender:
Sender to receive file data.
:returns:
Dict containing the file metadata:
* ``size``: File size in bytes.
* ``mode``: Integer file mode.
* ``owner``: Owner account name on host machine.
* ``group``: Owner group name on host machine.
* ``mtime``: Floating point modification time.
* ``ctime``: Floating point change time.
:raises Error:
Unregistered path, or Sender did not match requestee context.
"""
if path not in self._paths and not self._prefix_is_authorized(path):
msg.reply(mitogen.core.CallError(
Error(self.unregistered_msg % (path,))
))
return
if msg.src_id != sender.context.context_id:
msg.reply(mitogen.core.CallError(
Error(self.context_mismatch_msg)
))
return
LOG.debug('Serving %r', path)
# Response must arrive first so requestee can begin receive loop,
# otherwise first ack won't arrive until all pending chunks were
# delivered. In that case max BDP would always be 128KiB, aka. max
# ~10Mbit/sec over a 100ms link.
try:
fp = open(path, 'rb', self.IO_SIZE)
msg.reply(self._generate_stat(path))
except IOError:
msg.reply(mitogen.core.CallError(
sys.exc_info()[1]
))
return
stream = self.router.stream_by_id(sender.context.context_id)
state = self._state_by_stream.setdefault(stream, FileStreamState())
state.lock.acquire()
try:
state.jobs.append((sender, fp))
self._schedule_pending_unlocked(state)
finally:
state.lock.release() | Start a transfer for a registered path.
:param str path:
File path.
:param mitogen.core.Sender sender:
Sender to receive file data.
:returns:
Dict containing the file metadata:
* ``size``: File size in bytes.
* ``mode``: Integer file mode.
* ``owner``: Owner account name on host machine.
* ``group``: Owner group name on host machine.
* ``mtime``: Floating point modification time.
* ``ctime``: Floating point change time.
:raises Error:
Unregistered path, or Sender did not match requestee context. | Below is the instruction that describes the task:
### Input:
Start a transfer for a registered path.
:param str path:
File path.
:param mitogen.core.Sender sender:
Sender to receive file data.
:returns:
Dict containing the file metadata:
* ``size``: File size in bytes.
* ``mode``: Integer file mode.
* ``owner``: Owner account name on host machine.
* ``group``: Owner group name on host machine.
* ``mtime``: Floating point modification time.
* ``ctime``: Floating point change time.
:raises Error:
Unregistered path, or Sender did not match requestee context.
### Response:
def fetch(self, path, sender, msg):
"""
Start a transfer for a registered path.
:param str path:
File path.
:param mitogen.core.Sender sender:
Sender to receive file data.
:returns:
Dict containing the file metadata:
* ``size``: File size in bytes.
* ``mode``: Integer file mode.
* ``owner``: Owner account name on host machine.
* ``group``: Owner group name on host machine.
* ``mtime``: Floating point modification time.
* ``ctime``: Floating point change time.
:raises Error:
Unregistered path, or Sender did not match requestee context.
"""
if path not in self._paths and not self._prefix_is_authorized(path):
msg.reply(mitogen.core.CallError(
Error(self.unregistered_msg % (path,))
))
return
if msg.src_id != sender.context.context_id:
msg.reply(mitogen.core.CallError(
Error(self.context_mismatch_msg)
))
return
LOG.debug('Serving %r', path)
# Response must arrive first so requestee can begin receive loop,
# otherwise first ack won't arrive until all pending chunks were
# delivered. In that case max BDP would always be 128KiB, aka. max
# ~10Mbit/sec over a 100ms link.
try:
fp = open(path, 'rb', self.IO_SIZE)
msg.reply(self._generate_stat(path))
except IOError:
msg.reply(mitogen.core.CallError(
sys.exc_info()[1]
))
return
stream = self.router.stream_by_id(sender.context.context_id)
state = self._state_by_stream.setdefault(stream, FileStreamState())
state.lock.acquire()
try:
state.jobs.append((sender, fp))
self._schedule_pending_unlocked(state)
finally:
state.lock.release() |
def allStockQoutation(self):
'''
订阅多只股票的行情数据
:return:
'''
logger = Logs().getNewLogger('allStockQoutation', QoutationAsynPush.dir)
markets= [Market.HK,Market.US,Market.SH,Market.SZ] #,Market.HK_FUTURE,Market.US_OPTION
stockTypes = [SecurityType.STOCK,SecurityType.WARRANT,SecurityType.IDX,SecurityType.BOND,SecurityType.ETF]
for stockType in stockTypes:
for market in markets:
ret_code_stock_basicinfo ,ret_data_stock_basicinfo = self.quote_ctx.get_stock_basicinfo(market,stockType)
codes = ret_data_stock_basicinfo['code'].tolist()
codes_len = len(codes)
code_sub = 0
code_sub_succ = 0
for code in codes:
ret_code = self.aStockQoutation(code)
code_sub += 1
if ret_code is RET_OK:
code_sub_succ += 1
logger.info('市场 = %s,股票类型 = %s, 股票总数 = %d, 已发起订阅 = %d,订阅成功 = %d' % (market, stockType, codes_len, code_sub,code_sub_succ)) # 记录
logger.info('end-------->市场 = %s,股票类型 = %s, 股票总数 = %d, 已发起订阅 = %d,订阅成功 = %d' % ( market, stockType, codes_len, code_sub,code_sub_succ)) # 记录
time.sleep(5)
self.quote_ctx.stop()
self.quote_ctx.close() | 订阅多只股票的行情数据
:return: | Below is the instruction that describes the task:
### Input:
订阅多只股票的行情数据
:return:
### Response:
def allStockQoutation(self):
'''
订阅多只股票的行情数据
:return:
'''
logger = Logs().getNewLogger('allStockQoutation', QoutationAsynPush.dir)
markets= [Market.HK,Market.US,Market.SH,Market.SZ] #,Market.HK_FUTURE,Market.US_OPTION
stockTypes = [SecurityType.STOCK,SecurityType.WARRANT,SecurityType.IDX,SecurityType.BOND,SecurityType.ETF]
for stockType in stockTypes:
for market in markets:
ret_code_stock_basicinfo ,ret_data_stock_basicinfo = self.quote_ctx.get_stock_basicinfo(market,stockType)
codes = ret_data_stock_basicinfo['code'].tolist()
codes_len = len(codes)
code_sub = 0
code_sub_succ = 0
for code in codes:
ret_code = self.aStockQoutation(code)
code_sub += 1
if ret_code is RET_OK:
code_sub_succ += 1
logger.info('市场 = %s,股票类型 = %s, 股票总数 = %d, 已发起订阅 = %d,订阅成功 = %d' % (market, stockType, codes_len, code_sub,code_sub_succ)) # 记录
logger.info('end-------->市场 = %s,股票类型 = %s, 股票总数 = %d, 已发起订阅 = %d,订阅成功 = %d' % ( market, stockType, codes_len, code_sub,code_sub_succ)) # 记录
time.sleep(5)
self.quote_ctx.stop()
self.quote_ctx.close() |
def content():
"""Helper method that returns just the content.
This method was added so that the text could be reused in the
dock_help module.
.. versionadded:: 3.2.2
:returns: A message object without brand element.
:rtype: safe.messaging.message.Message
"""
message = m.Message()
message.add(m.Paragraph(tr(
'To start report generation you need to click on the Print '
'button in the buttons area. This will open the Impact report '
'dialog which has three main areas.')))
bullets = m.BulletedList()
bullets.add(m.Text(
m.ImportantText(tr('InaSAFE reports')),
tr(
' - There are four checkboxes available which are representing '
'the type of report component that will be generated.'
)))
text = tr(
' - Here you can select desired template for your report. All '
'templates bundled with InaSAFE are available here, plus '
'templates from user-defined template directory (see Options '
'for information how to set templates directory) and from qgis '
'setting directory ({qgis_directory}). It is also '
'possible to select custom template from any location: just '
'activate radiobutton under combobox and provide path to template '
'using the "..." button.')
qgis_directory = join(QgsApplication.qgisSettingsDirPath(), 'inasafe')
bullets.add(m.Text(
m.ImportantText(
tr('Map reports')),
text.format(qgis_directory=qgis_directory)))
bullets.add(m.Text(
m.ImportantText(tr('Buttons area')),
tr(
' - In this area you will find buttons to open the report as '
'a PDF or in the QGIS print composer. You can also get help by '
'clicking on the help button or using the close button to close '
'the print dialog.'
)))
message.add(bullets)
message.add(m.Paragraph(tr(
'There are four options on which template would you use to generate '
'a map report.')))
bullets = m.BulletedList()
bullets.add(m.Text(
m.ImportantText(tr('InaSAFE default templates')),
tr(
' - The map report will be generated using InaSAFE default '
'landscape and portrait map templates. Override template will '
'not be used.')))
bullets.add(m.Text(
m.ImportantText(tr('Override template')),
tr(
' - The map report will be generated using override template '
'found from qgis setting directory. InaSAFE default map templates '
'will not be printed.'
)))
bullets.add(m.Text(
m.ImportantText(tr('Template from search directory')),
tr(
' - The map report will be generated using selected template on '
'template dropdown selector. InaSAFE default map templates will '
'not be printed and override template will not be used.'
)))
bullets.add(m.Text(
m.ImportantText(tr('Template from file system')),
tr(
' - The map report will be generated using selected template on '
'file system. InaSAFE default map templates will not be printed '
'and override template will not be used.'
)))
message.add(bullets)
return message | Helper method that returns just the content.
This method was added so that the text could be reused in the
dock_help module.
.. versionadded:: 3.2.2
:returns: A message object without brand element.
:rtype: safe.messaging.message.Message | Below is the instruction that describes the task:
### Input:
Helper method that returns just the content.
This method was added so that the text could be reused in the
dock_help module.
.. versionadded:: 3.2.2
:returns: A message object without brand element.
:rtype: safe.messaging.message.Message
### Response:
def content():
"""Helper method that returns just the content.
This method was added so that the text could be reused in the
dock_help module.
.. versionadded:: 3.2.2
:returns: A message object without brand element.
:rtype: safe.messaging.message.Message
"""
message = m.Message()
message.add(m.Paragraph(tr(
'To start report generation you need to click on the Print '
'button in the buttons area. This will open the Impact report '
'dialog which has three main areas.')))
bullets = m.BulletedList()
bullets.add(m.Text(
m.ImportantText(tr('InaSAFE reports')),
tr(
' - There are four checkboxes available which are representing '
'the type of report component that will be generated.'
)))
text = tr(
' - Here you can select desired template for your report. All '
'templates bundled with InaSAFE are available here, plus '
'templates from user-defined template directory (see Options '
'for information how to set templates directory) and from qgis '
'setting directory ({qgis_directory}). It is also '
'possible to select custom template from any location: just '
'activate radiobutton under combobox and provide path to template '
'using the "..." button.')
qgis_directory = join(QgsApplication.qgisSettingsDirPath(), 'inasafe')
bullets.add(m.Text(
m.ImportantText(
tr('Map reports')),
text.format(qgis_directory=qgis_directory)))
bullets.add(m.Text(
m.ImportantText(tr('Buttons area')),
tr(
' - In this area you will find buttons to open the report as '
'a PDF or in the QGIS print composer. You can also get help by '
'clicking on the help button or using the close button to close '
'the print dialog.'
)))
message.add(bullets)
message.add(m.Paragraph(tr(
'There are four options on which template would you use to generate '
'a map report.')))
bullets = m.BulletedList()
bullets.add(m.Text(
m.ImportantText(tr('InaSAFE default templates')),
tr(
' - The map report will be generated using InaSAFE default '
'landscape and portrait map templates. Override template will '
'not be used.')))
bullets.add(m.Text(
m.ImportantText(tr('Override template')),
tr(
' - The map report will be generated using override template '
'found from qgis setting directory. InaSAFE default map templates '
'will not be printed.'
)))
bullets.add(m.Text(
m.ImportantText(tr('Template from search directory')),
tr(
' - The map report will be generated using selected template on '
'template dropdown selector. InaSAFE default map templates will '
'not be printed and override template will not be used.'
)))
bullets.add(m.Text(
m.ImportantText(tr('Template from file system')),
tr(
' - The map report will be generated using selected template on '
'file system. InaSAFE default map templates will not be printed '
'and override template will not be used.'
)))
message.add(bullets)
return message |
def setup_column(self, widget, column=0, attribute=None, renderer=None,
property=None, from_python=None, to_python=None, model=None):
# Maybe this is too overloaded.
"""
Set up a :class:`TreeView` to display attributes of Python objects
stored in its :class:`TreeModel`.
This assumes that :class:`TreeViewColumn` instances have already
been added and :class:`CellRenderer` instances packed into them.
Both can be done in Glade.
*model* is the instance displayed by the widget. You only need to pass
this if you set *renderer* to be editable.
If you use sorting or filtering this may not be the actual data store,
but all tree paths and column indexes are relative to this.
Defaults to our model.
*widget* is a column, or a string naming one in our view.
*column* is an integer addressing the column in *model* that holds your
objects.
*attribute* is a string naming an object attribute to display. Defaults
to the name of *widget*.
*renderer* defaults to the first one found in *widget*.
*property* is a string naming the property of *renderer* to set. If not
given this is guessed based on the type of *renderer*.
*from_python* is a callable. It gets passed a value from the object and
must return it in a format suitable for *renderer*. If not given this
is guessed based on *property*.
*to_python* is a callable. It gets passed a value from *renderer* and
must return it in a format suitable for the attribute. If not given a
cast to the type of the previous attribute value is attempted.
If you need more flexibility, like setting multiple properties, setting
your own cell data function will override the internal one.
Returns an integer you can use to disconnect the internal editing
callback from *renderer*, or None.
.. versionadded:: 1.99.2
"""
if isinstance(widget, str):
widget = self.view[widget]
if not model and isinstance(self.model, Gtk.TreeModel):
model = self.model
return setup_column(widget, column=column, attribute=attribute,
renderer=renderer, property=property, from_python=from_python,
to_python=to_python, model=model) | Set up a :class:`TreeView` to display attributes of Python objects
stored in its :class:`TreeModel`.
This assumes that :class:`TreeViewColumn` instances have already
been added and :class:`CellRenderer` instances packed into them.
Both can be done in Glade.
*model* is the instance displayed by the widget. You only need to pass
this if you set *renderer* to be editable.
If you use sorting or filtering this may not be the actual data store,
but all tree paths and column indexes are relative to this.
Defaults to our model.
*widget* is a column, or a string naming one in our view.
*column* is an integer addressing the column in *model* that holds your
objects.
*attribute* is a string naming an object attribute to display. Defaults
to the name of *widget*.
*renderer* defaults to the first one found in *widget*.
*property* is a string naming the property of *renderer* to set. If not
given this is guessed based on the type of *renderer*.
*from_python* is a callable. It gets passed a value from the object and
must return it in a format suitable for *renderer*. If not given this
is guessed based on *property*.
*to_python* is a callable. It gets passed a value from *renderer* and
must return it in a format suitable for the attribute. If not given a
cast to the type of the previous attribute value is attempted.
If you need more flexibility, like setting multiple properties, setting
your own cell data function will override the internal one.
Returns an integer you can use to disconnect the internal editing
callback from *renderer*, or None.
.. versionadded:: 1.99.2 | Below is the the instruction that describes the task:
### Input:
Set up a :class:`TreeView` to display attributes of Python objects
stored in its :class:`TreeModel`.
This assumes that :class:`TreeViewColumn` instances have already
been added and :class:`CellRenderer` instances packed into them.
Both can be done in Glade.
*model* is the instance displayed by the widget. You only need to pass
this if you set *renderer* to be editable.
If you use sorting or filtering this may not be the actual data store,
but all tree paths and column indexes are relative to this.
Defaults to our model.
*widget* is a column, or a string naming one in our view.
*column* is an integer addressing the column in *model* that holds your
objects.
*attribute* is a string naming an object attribute to display. Defaults
to the name of *widget*.
*renderer* defaults to the first one found in *widget*.
*property* is a string naming the property of *renderer* to set. If not
given this is guessed based on the type of *renderer*.
*from_python* is a callable. It gets passed a value from the object and
must return it in a format suitable for *renderer*. If not given this
is guessed based on *property*.
*to_python* is a callable. It gets passed a value from *renderer* and
must return it in a format suitable for the attribute. If not given a
cast to the type of the previous attribute value is attempted.
If you need more flexibility, like setting multiple properties, setting
your own cell data function will override the internal one.
Returns an integer you can use to disconnect the internal editing
callback from *renderer*, or None.
.. versionadded:: 1.99.2
### Response:
def setup_column(self, widget, column=0, attribute=None, renderer=None,
property=None, from_python=None, to_python=None, model=None):
# Maybe this is too overloaded.
"""
Set up a :class:`TreeView` to display attributes of Python objects
stored in its :class:`TreeModel`.
This assumes that :class:`TreeViewColumn` instances have already
been added and :class:`CellRenderer` instances packed into them.
Both can be done in Glade.
*model* is the instance displayed by the widget. You only need to pass
this if you set *renderer* to be editable.
If you use sorting or filtering this may not be the actual data store,
but all tree paths and column indexes are relative to this.
Defaults to our model.
*widget* is a column, or a string naming one in our view.
*column* is an integer addressing the column in *model* that holds your
objects.
*attribute* is a string naming an object attribute to display. Defaults
to the name of *widget*.
*renderer* defaults to the first one found in *widget*.
*property* is a string naming the property of *renderer* to set. If not
given this is guessed based on the type of *renderer*.
*from_python* is a callable. It gets passed a value from the object and
must return it in a format suitable for *renderer*. If not given this
is guessed based on *property*.
*to_python* is a callable. It gets passed a value from *renderer* and
must return it in a format suitable for the attribute. If not given a
cast to the type of the previous attribute value is attempted.
If you need more flexibility, like setting multiple properties, setting
your own cell data function will override the internal one.
Returns an integer you can use to disconnect the internal editing
callback from *renderer*, or None.
.. versionadded:: 1.99.2
"""
if isinstance(widget, str):
widget = self.view[widget]
if not model and isinstance(self.model, Gtk.TreeModel):
model = self.model
return setup_column(widget, column=column, attribute=attribute,
renderer=renderer, property=property, from_python=from_python,
to_python=to_python, model=model) |
def connect(self, dest_pair):
"""
Connects to the specified destination through a proxy.
Uses the same API as socket's connect().
To select the proxy server, use set_proxy().
dest_pair - 2-tuple of (IP/hostname, port).
"""
if len(dest_pair) != 2 or dest_pair[0].startswith("["):
# Probably IPv6, not supported -- raise an error, and hope
# Happy Eyeballs (RFC6555) makes sure at least the IPv4
# connection works...
raise socket.error("PySocks doesn't support IPv6: %s" % str(dest_pair))
dest_addr, dest_port = dest_pair
if self.type == socket.SOCK_DGRAM:
if not self._proxyconn:
self.bind(("", 0))
dest_addr = socket.gethostbyname(dest_addr)
# If the host address is INADDR_ANY or similar, reset the peer
# address so that packets are received from any peer
if dest_addr == "0.0.0.0" and not dest_port:
self.proxy_peername = None
else:
self.proxy_peername = (dest_addr, dest_port)
return
proxy_type, proxy_addr, proxy_port, rdns, username, password = self.proxy
# Do a minimal input check first
if (not isinstance(dest_pair, (list, tuple))
or len(dest_pair) != 2
or not dest_addr
or not isinstance(dest_port, int)):
raise GeneralProxyError("Invalid destination-connection (host, port) pair")
# We set the timeout here so that we don't hang in connection or during
# negotiation.
super(socksocket, self).settimeout(self._timeout)
if proxy_type is None:
# Treat like regular socket object
self.proxy_peername = dest_pair
super(socksocket, self).settimeout(self._timeout)
super(socksocket, self).connect((dest_addr, dest_port))
return
proxy_addr = self._proxy_addr()
try:
# Initial connection to proxy server.
super(socksocket, self).connect(proxy_addr)
except socket.error as error:
# Error while connecting to proxy
self.close()
proxy_addr, proxy_port = proxy_addr
proxy_server = "{0}:{1}".format(proxy_addr, proxy_port)
printable_type = PRINTABLE_PROXY_TYPES[proxy_type]
msg = "Error connecting to {0} proxy {1}".format(printable_type,
proxy_server)
log.debug("%s due to: %s", msg, error)
raise ProxyConnectionError(msg, error)
else:
# Connected to proxy server, now negotiate
try:
# Calls negotiate_{SOCKS4, SOCKS5, HTTP}
negotiate = self._proxy_negotiators[proxy_type]
negotiate(self, dest_addr, dest_port)
except socket.error as error:
# Wrap socket errors
self.close()
raise GeneralProxyError("Socket error", error)
except ProxyError:
# Protocol error while negotiating with proxy
self.close()
raise | Connects to the specified destination through a proxy.
Uses the same API as socket's connect().
To select the proxy server, use set_proxy().
dest_pair - 2-tuple of (IP/hostname, port). | Below is the the instruction that describes the task:
### Input:
Connects to the specified destination through a proxy.
Uses the same API as socket's connect().
To select the proxy server, use set_proxy().
dest_pair - 2-tuple of (IP/hostname, port).
### Response:
def connect(self, dest_pair):
"""
Connects to the specified destination through a proxy.
Uses the same API as socket's connect().
To select the proxy server, use set_proxy().
dest_pair - 2-tuple of (IP/hostname, port).
"""
if len(dest_pair) != 2 or dest_pair[0].startswith("["):
# Probably IPv6, not supported -- raise an error, and hope
# Happy Eyeballs (RFC6555) makes sure at least the IPv4
# connection works...
raise socket.error("PySocks doesn't support IPv6: %s" % str(dest_pair))
dest_addr, dest_port = dest_pair
if self.type == socket.SOCK_DGRAM:
if not self._proxyconn:
self.bind(("", 0))
dest_addr = socket.gethostbyname(dest_addr)
# If the host address is INADDR_ANY or similar, reset the peer
# address so that packets are received from any peer
if dest_addr == "0.0.0.0" and not dest_port:
self.proxy_peername = None
else:
self.proxy_peername = (dest_addr, dest_port)
return
proxy_type, proxy_addr, proxy_port, rdns, username, password = self.proxy
# Do a minimal input check first
if (not isinstance(dest_pair, (list, tuple))
or len(dest_pair) != 2
or not dest_addr
or not isinstance(dest_port, int)):
raise GeneralProxyError("Invalid destination-connection (host, port) pair")
# We set the timeout here so that we don't hang in connection or during
# negotiation.
super(socksocket, self).settimeout(self._timeout)
if proxy_type is None:
# Treat like regular socket object
self.proxy_peername = dest_pair
super(socksocket, self).settimeout(self._timeout)
super(socksocket, self).connect((dest_addr, dest_port))
return
proxy_addr = self._proxy_addr()
try:
# Initial connection to proxy server.
super(socksocket, self).connect(proxy_addr)
except socket.error as error:
# Error while connecting to proxy
self.close()
proxy_addr, proxy_port = proxy_addr
proxy_server = "{0}:{1}".format(proxy_addr, proxy_port)
printable_type = PRINTABLE_PROXY_TYPES[proxy_type]
msg = "Error connecting to {0} proxy {1}".format(printable_type,
proxy_server)
log.debug("%s due to: %s", msg, error)
raise ProxyConnectionError(msg, error)
else:
# Connected to proxy server, now negotiate
try:
# Calls negotiate_{SOCKS4, SOCKS5, HTTP}
negotiate = self._proxy_negotiators[proxy_type]
negotiate(self, dest_addr, dest_port)
except socket.error as error:
# Wrap socket errors
self.close()
raise GeneralProxyError("Socket error", error)
except ProxyError:
# Protocol error while negotiating with proxy
self.close()
raise |
def data_filler_simple_registration(self, number_of_rows, db):
'''creates and fills the table with simple regis. information
'''
try:
simple_registration = db
for i in range(0, number_of_rows):
post_simple_reg = {
"id": rnd_id_generator(self),
"email": self.faker.safe_email(),
"password": self.faker.md5(raw_output=False)
}
simple_registration.save(post_simple_reg)
logger.warning(
'simple_registration Commits are successful after write job!',
extra=d)
except Exception as e:
logger.error(e, extra=d) | creates and fills the table with simple regis. information | Below is the the instruction that describes the task:
### Input:
creates and fills the table with simple regis. information
### Response:
def data_filler_simple_registration(self, number_of_rows, db):
'''creates and fills the table with simple regis. information
'''
try:
simple_registration = db
for i in range(0, number_of_rows):
post_simple_reg = {
"id": rnd_id_generator(self),
"email": self.faker.safe_email(),
"password": self.faker.md5(raw_output=False)
}
simple_registration.save(post_simple_reg)
logger.warning(
'simple_registration Commits are successful after write job!',
extra=d)
except Exception as e:
logger.error(e, extra=d) |
def backout_last(self, updated_singles, num_coincs):
"""Remove the recently added singles and coincs
Parameters
----------
updated_singles: dict of numpy.ndarrays
Array of indices that have been just updated in the internal
buffers of single detector triggers.
num_coincs: int
The number of coincs that were just added to the internal buffer
of coincident triggers
"""
for ifo in updated_singles:
self.singles[ifo].discard_last(updated_singles[ifo])
self.coincs.remove(num_coincs) | Remove the recently added singles and coincs
Parameters
----------
updated_singles: dict of numpy.ndarrays
Array of indices that have been just updated in the internal
buffers of single detector triggers.
num_coincs: int
The number of coincs that were just added to the internal buffer
of coincident triggers | Below is the the instruction that describes the task:
### Input:
Remove the recently added singles and coincs
Parameters
----------
updated_singles: dict of numpy.ndarrays
Array of indices that have been just updated in the internal
buffers of single detector triggers.
num_coincs: int
The number of coincs that were just added to the internal buffer
of coincident triggers
### Response:
def backout_last(self, updated_singles, num_coincs):
"""Remove the recently added singles and coincs
Parameters
----------
updated_singles: dict of numpy.ndarrays
Array of indices that have been just updated in the internal
buffers of single detector triggers.
num_coincs: int
The number of coincs that were just added to the internal buffer
of coincident triggers
"""
for ifo in updated_singles:
self.singles[ifo].discard_last(updated_singles[ifo])
self.coincs.remove(num_coincs) |
def _forObject(self, obj):
"""
Create a new `Router` instance, with it's own set of routes, for
``obj``.
"""
router = type(self)()
router._routes = list(self._routes)
router._self = obj
return router | Create a new `Router` instance, with it's own set of routes, for
``obj``. | Below is the the instruction that describes the task:
### Input:
Create a new `Router` instance, with it's own set of routes, for
``obj``.
### Response:
def _forObject(self, obj):
"""
Create a new `Router` instance, with it's own set of routes, for
``obj``.
"""
router = type(self)()
router._routes = list(self._routes)
router._self = obj
return router |
def _visit_pyfiles(list, dirname, names):
"""Helper for getFilesForName()."""
# get extension for python source files
if not globals().has_key('_py_ext'):
global _py_ext
# _py_ext = [triple[0] for triple in imp.get_suffixes()
# if triple[2] == imp.PY_SOURCE][0]
_py_ext = [triple[0] for triple in imp.get_suffixes()
if triple[2] == imp.PY_SOURCE]
# don't recurse into CVS directories
if 'CVS' in names:
names.remove('CVS')
if '.svn' in names:
names.remove('.svn')
if '.git' in names:
names.remove('.git')
if 'static' in names:
names.remove('static')
# add all *.py files to list
list.extend(
[os.path.join(dirname, file) for file in names
if os.path.splitext(file)[1] in _py_ext]
) | Helper for getFilesForName(). | Below is the the instruction that describes the task:
### Input:
Helper for getFilesForName().
### Response:
def _visit_pyfiles(list, dirname, names):
"""Helper for getFilesForName()."""
# get extension for python source files
if not globals().has_key('_py_ext'):
global _py_ext
# _py_ext = [triple[0] for triple in imp.get_suffixes()
# if triple[2] == imp.PY_SOURCE][0]
_py_ext = [triple[0] for triple in imp.get_suffixes()
if triple[2] == imp.PY_SOURCE]
# don't recurse into CVS directories
if 'CVS' in names:
names.remove('CVS')
if '.svn' in names:
names.remove('.svn')
if '.git' in names:
names.remove('.git')
if 'static' in names:
names.remove('static')
# add all *.py files to list
list.extend(
[os.path.join(dirname, file) for file in names
if os.path.splitext(file)[1] in _py_ext]
) |
def ind_zero_freq(self):
"""
Index of the first point for which the freqencies are equal or greater than zero.
"""
ind = np.searchsorted(self.frequencies, 0)
if ind >= len(self.frequencies):
raise ValueError("No positive frequencies found")
return ind | Index of the first point for which the freqencies are equal or greater than zero. | Below is the the instruction that describes the task:
### Input:
Index of the first point for which the freqencies are equal or greater than zero.
### Response:
def ind_zero_freq(self):
"""
Index of the first point for which the freqencies are equal or greater than zero.
"""
ind = np.searchsorted(self.frequencies, 0)
if ind >= len(self.frequencies):
raise ValueError("No positive frequencies found")
return ind |
def f_get_results(self, fast_access=False, copy=True):
""" Returns a dictionary containing the full result names as keys and the corresponding
result objects or result data items as values.
:param fast_access:
Determines whether the result objects or their values are returned
in the dictionary. Works only for results if they contain a single item with
the name of the result.
:param copy:
Whether the original dictionary or a shallow copy is returned.
If you want the real dictionary please do not modify it at all!
Not Copying and fast access do not work at the same time! Raises ValueError
if fast access is true and copy false.
:return: Dictionary containing the results.
:raises: ValueError
"""
return self._return_item_dictionary(self._results, fast_access, copy) | Returns a dictionary containing the full result names as keys and the corresponding
result objects or result data items as values.
:param fast_access:
Determines whether the result objects or their values are returned
in the dictionary. Works only for results if they contain a single item with
the name of the result.
:param copy:
Whether the original dictionary or a shallow copy is returned.
If you want the real dictionary please do not modify it at all!
Not Copying and fast access do not work at the same time! Raises ValueError
if fast access is true and copy false.
:return: Dictionary containing the results.
:raises: ValueError | Below is the the instruction that describes the task:
### Input:
Returns a dictionary containing the full result names as keys and the corresponding
result objects or result data items as values.
:param fast_access:
Determines whether the result objects or their values are returned
in the dictionary. Works only for results if they contain a single item with
the name of the result.
:param copy:
Whether the original dictionary or a shallow copy is returned.
If you want the real dictionary please do not modify it at all!
Not Copying and fast access do not work at the same time! Raises ValueError
if fast access is true and copy false.
:return: Dictionary containing the results.
:raises: ValueError
### Response:
def f_get_results(self, fast_access=False, copy=True):
""" Returns a dictionary containing the full result names as keys and the corresponding
result objects or result data items as values.
:param fast_access:
Determines whether the result objects or their values are returned
in the dictionary. Works only for results if they contain a single item with
the name of the result.
:param copy:
Whether the original dictionary or a shallow copy is returned.
If you want the real dictionary please do not modify it at all!
Not Copying and fast access do not work at the same time! Raises ValueError
if fast access is true and copy false.
:return: Dictionary containing the results.
:raises: ValueError
"""
return self._return_item_dictionary(self._results, fast_access, copy) |
def alias_exists(aliases, indices=None, hosts=None, profile=None):
'''
Return a boolean indicating whether given alias exists
indices
Single or multiple indices separated by comma, use _all to perform the operation on all indices.
aliases
Alias names separated by comma
CLI example::
salt myminion elasticsearch.alias_exists None testindex
'''
es = _get_instance(hosts, profile)
try:
return es.indices.exists_alias(name=aliases, index=indices)
except elasticsearch.exceptions.NotFoundError:
return False
except elasticsearch.TransportError as e:
raise CommandExecutionError("Cannot get alias {0} in index {1}, server returned code {2} with message {3}".format(aliases, indices, e.status_code, e.error)) | Return a boolean indicating whether given alias exists
indices
Single or multiple indices separated by comma, use _all to perform the operation on all indices.
aliases
Alias names separated by comma
CLI example::
salt myminion elasticsearch.alias_exists None testindex | Below is the the instruction that describes the task:
### Input:
Return a boolean indicating whether given alias exists
indices
Single or multiple indices separated by comma, use _all to perform the operation on all indices.
aliases
Alias names separated by comma
CLI example::
salt myminion elasticsearch.alias_exists None testindex
### Response:
def alias_exists(aliases, indices=None, hosts=None, profile=None):
'''
Return a boolean indicating whether given alias exists
indices
Single or multiple indices separated by comma, use _all to perform the operation on all indices.
aliases
Alias names separated by comma
CLI example::
salt myminion elasticsearch.alias_exists None testindex
'''
es = _get_instance(hosts, profile)
try:
return es.indices.exists_alias(name=aliases, index=indices)
except elasticsearch.exceptions.NotFoundError:
return False
except elasticsearch.TransportError as e:
raise CommandExecutionError("Cannot get alias {0} in index {1}, server returned code {2} with message {3}".format(aliases, indices, e.status_code, e.error)) |
def patch_worker_factory():
"""
Patches the ``luigi.interface._WorkerSchedulerFactory`` to include sandboxing information when
create a worker instance.
"""
def create_worker(self, scheduler, worker_processes, assistant=False):
worker = luigi.worker.Worker(scheduler=scheduler, worker_processes=worker_processes,
assistant=assistant, worker_id=os.getenv("LAW_SANDBOX_WORKER_ID"))
worker._first_task = os.getenv("LAW_SANDBOX_WORKER_TASK")
return worker
luigi.interface._WorkerSchedulerFactory.create_worker = create_worker | Patches the ``luigi.interface._WorkerSchedulerFactory`` to include sandboxing information when
create a worker instance. | Below is the the instruction that describes the task:
### Input:
Patches the ``luigi.interface._WorkerSchedulerFactory`` to include sandboxing information when
create a worker instance.
### Response:
def patch_worker_factory():
"""
Patches the ``luigi.interface._WorkerSchedulerFactory`` to include sandboxing information when
create a worker instance.
"""
def create_worker(self, scheduler, worker_processes, assistant=False):
worker = luigi.worker.Worker(scheduler=scheduler, worker_processes=worker_processes,
assistant=assistant, worker_id=os.getenv("LAW_SANDBOX_WORKER_ID"))
worker._first_task = os.getenv("LAW_SANDBOX_WORKER_TASK")
return worker
luigi.interface._WorkerSchedulerFactory.create_worker = create_worker |
def press(self, key_code):
""" Sends a 'down' event for the specified scan code """
if key_code >= 128:
# Media key
ev = NSEvent.otherEventWithType_location_modifierFlags_timestamp_windowNumber_context_subtype_data1_data2_(
14, # type
(0, 0), # location
0xa00, # flags
0, # timestamp
0, # window
0, # ctx
8, # subtype
((key_code-128) << 16) | (0xa << 8), # data1
-1 # data2
)
Quartz.CGEventPost(0, ev.CGEvent())
else:
# Regular key
# Apply modifiers if necessary
event_flags = 0
if self.current_modifiers["shift"]:
event_flags += Quartz.kCGEventFlagMaskShift
if self.current_modifiers["caps"]:
event_flags += Quartz.kCGEventFlagMaskAlphaShift
if self.current_modifiers["alt"]:
event_flags += Quartz.kCGEventFlagMaskAlternate
if self.current_modifiers["ctrl"]:
event_flags += Quartz.kCGEventFlagMaskControl
if self.current_modifiers["cmd"]:
event_flags += Quartz.kCGEventFlagMaskCommand
# Update modifiers if necessary
if key_code == 0x37: # cmd
self.current_modifiers["cmd"] = True
elif key_code == 0x38 or key_code == 0x3C: # shift or right shift
self.current_modifiers["shift"] = True
elif key_code == 0x39: # caps lock
self.current_modifiers["caps"] = True
elif key_code == 0x3A: # alt
self.current_modifiers["alt"] = True
elif key_code == 0x3B: # ctrl
self.current_modifiers["ctrl"] = True
event = Quartz.CGEventCreateKeyboardEvent(None, key_code, True)
Quartz.CGEventSetFlags(event, event_flags)
Quartz.CGEventPost(Quartz.kCGHIDEventTap, event)
time.sleep(0.01) | Sends a 'down' event for the specified scan code | Below is the the instruction that describes the task:
### Input:
Sends a 'down' event for the specified scan code
### Response:
def press(self, key_code):
""" Sends a 'down' event for the specified scan code """
if key_code >= 128:
# Media key
ev = NSEvent.otherEventWithType_location_modifierFlags_timestamp_windowNumber_context_subtype_data1_data2_(
14, # type
(0, 0), # location
0xa00, # flags
0, # timestamp
0, # window
0, # ctx
8, # subtype
((key_code-128) << 16) | (0xa << 8), # data1
-1 # data2
)
Quartz.CGEventPost(0, ev.CGEvent())
else:
# Regular key
# Apply modifiers if necessary
event_flags = 0
if self.current_modifiers["shift"]:
event_flags += Quartz.kCGEventFlagMaskShift
if self.current_modifiers["caps"]:
event_flags += Quartz.kCGEventFlagMaskAlphaShift
if self.current_modifiers["alt"]:
event_flags += Quartz.kCGEventFlagMaskAlternate
if self.current_modifiers["ctrl"]:
event_flags += Quartz.kCGEventFlagMaskControl
if self.current_modifiers["cmd"]:
event_flags += Quartz.kCGEventFlagMaskCommand
# Update modifiers if necessary
if key_code == 0x37: # cmd
self.current_modifiers["cmd"] = True
elif key_code == 0x38 or key_code == 0x3C: # shift or right shift
self.current_modifiers["shift"] = True
elif key_code == 0x39: # caps lock
self.current_modifiers["caps"] = True
elif key_code == 0x3A: # alt
self.current_modifiers["alt"] = True
elif key_code == 0x3B: # ctrl
self.current_modifiers["ctrl"] = True
event = Quartz.CGEventCreateKeyboardEvent(None, key_code, True)
Quartz.CGEventSetFlags(event, event_flags)
Quartz.CGEventPost(Quartz.kCGHIDEventTap, event)
time.sleep(0.01) |
def publish(self, cat, **kwargs):
"""
This method is used for creating objects in the facebook graph.
The first paramter is "cat", the category of publish. In addition to "cat"
"id" must also be passed and is catched by "kwargs"
"""
res=request.publish_cat1("POST", self.con, self.token, cat, kwargs)
return res | This method is used for creating objects in the facebook graph.
The first paramter is "cat", the category of publish. In addition to "cat"
"id" must also be passed and is catched by "kwargs" | Below is the the instruction that describes the task:
### Input:
This method is used for creating objects in the facebook graph.
The first paramter is "cat", the category of publish. In addition to "cat"
"id" must also be passed and is catched by "kwargs"
### Response:
def publish(self, cat, **kwargs):
"""
This method is used for creating objects in the facebook graph.
The first paramter is "cat", the category of publish. In addition to "cat"
"id" must also be passed and is catched by "kwargs"
"""
res=request.publish_cat1("POST", self.con, self.token, cat, kwargs)
return res |
def findNestedDirectories(self, lst):
'''
Recursive helper function for finding nested directories. If this node is a
directory node, it is appended to ``lst``. Each node also calls each of its
child ``findNestedDirectories`` with the same list.
:Parameters:
``lst`` (list)
The list each directory node is to be appended to.
'''
if self.kind == "dir":
lst.append(self)
for c in self.children:
c.findNestedDirectories(lst) | Recursive helper function for finding nested directories. If this node is a
directory node, it is appended to ``lst``. Each node also calls each of its
child ``findNestedDirectories`` with the same list.
:Parameters:
``lst`` (list)
The list each directory node is to be appended to. | Below is the the instruction that describes the task:
### Input:
Recursive helper function for finding nested directories. If this node is a
directory node, it is appended to ``lst``. Each node also calls each of its
child ``findNestedDirectories`` with the same list.
:Parameters:
``lst`` (list)
The list each directory node is to be appended to.
### Response:
def findNestedDirectories(self, lst):
'''
Recursive helper function for finding nested directories. If this node is a
directory node, it is appended to ``lst``. Each node also calls each of its
child ``findNestedDirectories`` with the same list.
:Parameters:
``lst`` (list)
The list each directory node is to be appended to.
'''
if self.kind == "dir":
lst.append(self)
for c in self.children:
c.findNestedDirectories(lst) |
def GrabObject(self, identifier):
"""Grabs a cached object based on the identifier.
This method increments the cache value reference count.
Args:
identifier (str): VFS object identifier.
Raises:
KeyError: if the VFS object is not found in the cache.
RuntimeError: if the cache value is missing.
"""
if identifier not in self._values:
raise KeyError('Missing cached object for identifier: {0:s}'.format(
identifier))
cache_value = self._values[identifier]
if not cache_value:
raise RuntimeError('Missing cache value for identifier: {0:s}'.format(
identifier))
cache_value.IncrementReferenceCount() | Grabs a cached object based on the identifier.
This method increments the cache value reference count.
Args:
identifier (str): VFS object identifier.
Raises:
KeyError: if the VFS object is not found in the cache.
RuntimeError: if the cache value is missing. | Below is the the instruction that describes the task:
### Input:
Grabs a cached object based on the identifier.
This method increments the cache value reference count.
Args:
identifier (str): VFS object identifier.
Raises:
KeyError: if the VFS object is not found in the cache.
RuntimeError: if the cache value is missing.
### Response:
def GrabObject(self, identifier):
"""Grabs a cached object based on the identifier.
This method increments the cache value reference count.
Args:
identifier (str): VFS object identifier.
Raises:
KeyError: if the VFS object is not found in the cache.
RuntimeError: if the cache value is missing.
"""
if identifier not in self._values:
raise KeyError('Missing cached object for identifier: {0:s}'.format(
identifier))
cache_value = self._values[identifier]
if not cache_value:
raise RuntimeError('Missing cache value for identifier: {0:s}'.format(
identifier))
cache_value.IncrementReferenceCount() |
def rlist_classes(module, cls_filter=None):
"""
Attempts to list all of the classes within a given module namespace.
This method, unlike list_classes, will recurse into discovered
submodules.
If a type filter is set, it will be called with each class as its
parameter. This filter's return value must be interpretable as a
boolean. Results that evaluate as True will include the type in the
list of returned classes. Results that evaluate as False will exclude
the type in the list of returned classes.
:param mname: of the module to descend into
:param cls_filter: a function to call to determine what classes should be
included.
"""
found = list()
mnames = rlist_modules(module)
for mname in mnames:
[found.append(c) for c in list_classes(mname, cls_filter)]
return found | Attempts to list all of the classes within a given module namespace.
This method, unlike list_classes, will recurse into discovered
submodules.
If a type filter is set, it will be called with each class as its
parameter. This filter's return value must be interpretable as a
boolean. Results that evaluate as True will include the type in the
list of returned classes. Results that evaluate as False will exclude
the type in the list of returned classes.
:param mname: of the module to descend into
:param cls_filter: a function to call to determine what classes should be
included. | Below is the the instruction that describes the task:
### Input:
Attempts to list all of the classes within a given module namespace.
This method, unlike list_classes, will recurse into discovered
submodules.
If a type filter is set, it will be called with each class as its
parameter. This filter's return value must be interpretable as a
boolean. Results that evaluate as True will include the type in the
list of returned classes. Results that evaluate as False will exclude
the type in the list of returned classes.
:param mname: of the module to descend into
:param cls_filter: a function to call to determine what classes should be
included.
### Response:
def rlist_classes(module, cls_filter=None):
    """
    Attempts to list all of the classes within a given module namespace.

    This method, unlike list_classes, will recurse into discovered
    submodules.

    If a type filter is set, it will be called with each class as its
    parameter. This filter's return value must be interpretable as a
    boolean. Results that evaluate as True will include the type in the
    list of returned classes. Results that evaluate as False will exclude
    the type in the list of returned classes.

    :param module: the module to descend into
    :param cls_filter: a function to call to determine what classes should be
        included.
    """
    found = list()
    # Accumulate the matches from every discovered submodule.
    # (list_classes applies cls_filter itself.)  Using extend() instead
    # of a side-effecting list comprehension avoids building a throwaway
    # list of None values.
    for mname in rlist_modules(module):
        found.extend(list_classes(mname, cls_filter))
    return found
def _get_non_empty_list(cls, iter):
"""Return a list of the input, excluding all ``None`` values."""
res = []
for value in iter:
if hasattr(value, 'items'):
value = cls._get_non_empty_dict(value) or None
if value is not None:
res.append(value)
return res | Return a list of the input, excluding all ``None`` values. | Below is the the instruction that describes the task:
### Input:
Return a list of the input, excluding all ``None`` values.
### Response:
def _get_non_empty_list(cls, iter):
"""Return a list of the input, excluding all ``None`` values."""
res = []
for value in iter:
if hasattr(value, 'items'):
value = cls._get_non_empty_dict(value) or None
if value is not None:
res.append(value)
return res |
def ensure_indexes(self):
"""Update the indexes"""
for collection_name in INDEXES:
existing_indexes = self.indexes(collection_name)
indexes = INDEXES[collection_name]
for index in indexes:
index_name = index.document.get('name')
if index_name in existing_indexes:
logger.debug("Index exists: %s" % index_name)
self.db[collection_name].drop_index(index_name)
logger.info("creating indexes for collection {0}: {1}".format(
collection_name,
', '.join([index.document.get('name') for index in indexes]),
)
)
self.db[collection_name].create_indexes(indexes) | Update the indexes | Below is the the instruction that describes the task:
### Input:
Update the indexes
### Response:
def ensure_indexes(self):
    """Update the indexes"""
    # INDEXES maps collection names to the index definitions each
    # collection should carry; self.db is presumably a pymongo Database
    # — TODO confirm.
    for collection_name in INDEXES:
        existing_indexes = self.indexes(collection_name)
        indexes = INDEXES[collection_name]
        for index in indexes:
            index_name = index.document.get('name')
            # Drop any index that already exists so create_indexes()
            # below rebuilds it with the current definition.
            if index_name in existing_indexes:
                logger.debug("Index exists: %s" % index_name)
                self.db[collection_name].drop_index(index_name)
        logger.info("creating indexes for collection {0}: {1}".format(
            collection_name,
            ', '.join([index.document.get('name') for index in indexes]),
            )
        )
        self.db[collection_name].create_indexes(indexes)
def set_property(self, key, value):
"""
Update only one property in the dict
"""
self.properties[key] = value
self.sync_properties() | Update only one property in the dict | Below is the the instruction that describes the task:
### Input:
Update only one property in the dict
### Response:
def set_property(self, key, value):
    """Set a single entry in ``self.properties`` and persist the change.

    :param key: name of the property to update
    :param value: new value for that property
    """
    self.properties[key] = value
    self.sync_properties()
def set(key, value={}, reset=False, init=False):
"""
Set data
:param key: A unique to set, best to use __name__
:param value: dict - the value to save
:param reset: bool - If true, it will reset the value to the current one.
if False, it will just update the stored value with the current
one
:param init: bool - If True, it will create the entry if it doesn't exits
next time invoked, it will not save anything
:return:
"""
if not isinstance(value, dict):
raise ValueError("App Data value must be a dict")
k = AppData.get_by_key(key, True)
if not k:
AppData.create(key=make_key(key), value=value)
else:
if init is False:
if reset is False:
nv = copy.deepcopy(value)
value = copy.deepcopy(k.value)
value.update(nv)
k.update(value=value) | Set data
:param key: A unique to set, best to use __name__
:param value: dict - the value to save
:param reset: bool - If true, it will reset the value to the current one.
if False, it will just update the stored value with the current
one
:param init: bool - If True, it will create the entry if it doesn't exits
next time invoked, it will not save anything
:return: | Below is the the instruction that describes the task:
### Input:
Set data
:param key: A unique to set, best to use __name__
:param value: dict - the value to save
:param reset: bool - If true, it will reset the value to the current one.
if False, it will just update the stored value with the current
one
:param init: bool - If True, it will create the entry if it doesn't exits
next time invoked, it will not save anything
:return:
### Response:
def set(key, value=None, reset=False, init=False):
    """
    Set data

    :param key: A unique to set, best to use __name__
    :param value: dict - the value to save (defaults to an empty dict)
    :param reset: bool - If true, it will reset the value to the current one.
        if False, it will just update the stored value with the current
        one
    :param init: bool - If True, it will create the entry if it doesn't exits
        next time invoked, it will not save anything
    :return:
    """
    # Use a None sentinel instead of a mutable ``{}`` default: the shared
    # default dict could be mutated downstream (e.g. by AppData.create)
    # and would then leak state between calls.
    if value is None:
        value = {}
    if not isinstance(value, dict):
        raise ValueError("App Data value must be a dict")
    k = AppData.get_by_key(key, True)
    if not k:
        AppData.create(key=make_key(key), value=value)
    else:
        if init is False:
            if reset is False:
                # Merge the new keys over a deep copy of the stored value
                # so neither input dict is mutated in place.
                nv = copy.deepcopy(value)
                value = copy.deepcopy(k.value)
                value.update(nv)
            k.update(value=value)
def sens_power_send(self, adc121_vspb_volt, adc121_cspb_amp, adc121_cs1_amp, adc121_cs2_amp, force_mavlink1=False):
'''
Voltage and current sensor data
adc121_vspb_volt : Power board voltage sensor reading in volts (float)
adc121_cspb_amp : Power board current sensor reading in amps (float)
adc121_cs1_amp : Board current sensor 1 reading in amps (float)
adc121_cs2_amp : Board current sensor 2 reading in amps (float)
'''
return self.send(self.sens_power_encode(adc121_vspb_volt, adc121_cspb_amp, adc121_cs1_amp, adc121_cs2_amp), force_mavlink1=force_mavlink1) | Voltage and current sensor data
adc121_vspb_volt : Power board voltage sensor reading in volts (float)
adc121_cspb_amp : Power board current sensor reading in amps (float)
adc121_cs1_amp : Board current sensor 1 reading in amps (float)
adc121_cs2_amp : Board current sensor 2 reading in amps (float) | Below is the the instruction that describes the task:
### Input:
Voltage and current sensor data
adc121_vspb_volt : Power board voltage sensor reading in volts (float)
adc121_cspb_amp : Power board current sensor reading in amps (float)
adc121_cs1_amp : Board current sensor 1 reading in amps (float)
adc121_cs2_amp : Board current sensor 2 reading in amps (float)
### Response:
def sens_power_send(self, adc121_vspb_volt, adc121_cspb_amp, adc121_cs1_amp, adc121_cs2_amp, force_mavlink1=False):
    '''
    Voltage and current sensor data

    adc121_vspb_volt          : Power board voltage sensor reading in volts (float)
    adc121_cspb_amp           : Power board current sensor reading in amps (float)
    adc121_cs1_amp            : Board current sensor 1 reading in amps (float)
    adc121_cs2_amp            : Board current sensor 2 reading in amps (float)
    '''
    # Encode the message first, then hand it to the transport layer.
    message = self.sens_power_encode(
        adc121_vspb_volt, adc121_cspb_amp, adc121_cs1_amp, adc121_cs2_amp)
    return self.send(message, force_mavlink1=force_mavlink1)
def MaskSolveSlow(A, b, w=5, progress=True, niter=None):
'''
Identical to `MaskSolve`, but computes the solution
the brute-force way.
'''
# Number of data points
N = b.shape[0]
# How many iterations? Default is to go through
# the entire dataset
if niter is None:
niter = N - w + 1
# Our result matrix
X = np.empty((niter, N - w))
# Iterate! The mask at step `n` goes from
# data index `n` to data index `n+w-1` (inclusive).
for n in prange(niter):
mask = np.arange(n, n + w)
An = np.delete(np.delete(A, mask, axis=0), mask, axis=1)
Un = cholesky(An)
bn = np.delete(b, mask)
X[n] = cho_solve((Un, False), bn)
return X | Identical to `MaskSolve`, but computes the solution
the brute-force way. | Below is the the instruction that describes the task:
### Input:
Identical to `MaskSolve`, but computes the solution
the brute-force way.
### Response:
def MaskSolveSlow(A, b, w=5, progress=True, niter=None):
    '''
    Identical to `MaskSolve`, but computes the solution
    the brute-force way.

    :param A: presumably the (N, N) symmetric positive-definite matrix
        (it is Cholesky-factorized below) — TODO confirm against
        `MaskSolve`.
    :param b: length-N right-hand-side vector.
    :param w: width of the contiguous mask window.
    :param progress: NOTE(review): unused in this implementation; kept
        for signature compatibility.
    :param niter: number of mask positions to solve; defaults to all
        N - w + 1 positions.
    :returns: (niter, N - w) array whose row `n` is the solution with
        samples n..n+w-1 removed.
    '''
    # Number of data points
    N = b.shape[0]
    # How many iterations? Default is to go through
    # the entire dataset
    if niter is None:
        niter = N - w + 1
    # Our result matrix
    X = np.empty((niter, N - w))
    # Iterate! The mask at step `n` goes from
    # data index `n` to data index `n+w-1` (inclusive).
    # NOTE(review): `prange` is imported elsewhere (likely numba.prange
    # or an alias of range) — iterations are independent, so parallel
    # execution would be safe. Confirm the import site.
    for n in prange(niter):
        mask = np.arange(n, n + w)
        # Delete the masked rows and columns, refactorize, and solve
        # the reduced system from scratch each iteration (brute force).
        An = np.delete(np.delete(A, mask, axis=0), mask, axis=1)
        Un = cholesky(An)
        bn = np.delete(b, mask)
        X[n] = cho_solve((Un, False), bn)
    return X
def calculate_nfft(samplerate, winlen):
"""Calculates the FFT size as a power of two greater than or equal to
the number of samples in a single window length.
Having an FFT less than the window length loses precision by dropping
many of the samples; a longer FFT than the window allows zero-padding
of the FFT buffer which is neutral in terms of frequency domain conversion.
:param samplerate: The sample rate of the signal we are working with, in Hz.
:param winlen: The length of the analysis window in seconds.
"""
window_length_samples = winlen * samplerate
nfft = 1
while nfft < window_length_samples:
nfft *= 2
return nfft | Calculates the FFT size as a power of two greater than or equal to
the number of samples in a single window length.
Having an FFT less than the window length loses precision by dropping
many of the samples; a longer FFT than the window allows zero-padding
of the FFT buffer which is neutral in terms of frequency domain conversion.
:param samplerate: The sample rate of the signal we are working with, in Hz.
:param winlen: The length of the analysis window in seconds. | Below is the the instruction that describes the task:
### Input:
Calculates the FFT size as a power of two greater than or equal to
the number of samples in a single window length.
Having an FFT less than the window length loses precision by dropping
many of the samples; a longer FFT than the window allows zero-padding
of the FFT buffer which is neutral in terms of frequency domain conversion.
:param samplerate: The sample rate of the signal we are working with, in Hz.
:param winlen: The length of the analysis window in seconds.
### Response:
def calculate_nfft(samplerate, winlen):
    """Calculates the FFT size as a power of two greater than or equal to
    the number of samples in a single window length.

    Having an FFT less than the window length loses precision by dropping
    many of the samples; a longer FFT than the window allows zero-padding
    of the FFT buffer which is neutral in terms of frequency domain conversion.

    :param samplerate: The sample rate of the signal we are working with, in Hz.
    :param winlen: The length of the analysis window in seconds.
    """
    samples_per_window = winlen * samplerate
    # Double via left shift until the FFT covers the whole window.
    size = 1
    while size < samples_per_window:
        size <<= 1
    return size
def to_JSON(self):
"""Dumps object fields into a JSON formatted string
:returns: the JSON string
"""
return json.dumps({"reception_time": self._reception_time,
"Location": json.loads(self._location.to_JSON()),
"Weather": json.loads(self._weather.to_JSON())
}) | Dumps object fields into a JSON formatted string
:returns: the JSON string | Below is the the instruction that describes the task:
### Input:
Dumps object fields into a JSON formatted string
:returns: the JSON string
### Response:
def to_JSON(self):
    """Dumps object fields into a JSON formatted string

    :returns: the JSON string
    """
    # Members serialize themselves to JSON strings; round-trip them
    # through json.loads so they nest as objects, not escaped strings.
    payload = {
        "reception_time": self._reception_time,
        "Location": json.loads(self._location.to_JSON()),
        "Weather": json.loads(self._weather.to_JSON()),
    }
    return json.dumps(payload)
def concat(bed_files, catted=None):
"""
recursively concat a set of BED files, returning a
sorted bedtools object of the result
"""
bed_files = [x for x in bed_files if x]
if len(bed_files) == 0:
if catted:
# move to a .bed extension for downstream tools if not already
sorted_bed = catted.sort()
if not sorted_bed.fn.endswith(".bed"):
return sorted_bed.moveto(sorted_bed.fn + ".bed")
else:
return sorted_bed
else:
return catted
if not catted:
bed_files = list(bed_files)
catted = bt.BedTool(bed_files.pop())
else:
catted = catted.cat(bed_files.pop(), postmerge=False,
force_truncate=False)
return concat(bed_files, catted) | recursively concat a set of BED files, returning a
sorted bedtools object of the result | Below is the the instruction that describes the task:
### Input:
recursively concat a set of BED files, returning a
sorted bedtools object of the result
### Response:
def concat(bed_files, catted=None):
    """
    recursively concat a set of BED files, returning a
    sorted bedtools object of the result

    :param bed_files: iterable of BED file paths; falsy entries are
        skipped.
    :param catted: accumulator used by the recursion — presumably a
        pybedtools BedTool; callers should leave it as None.
    """
    # Drop empty/None entries before recursing.
    bed_files = [x for x in bed_files if x]
    if len(bed_files) == 0:
        # Base case: everything has been concatenated.
        if catted:
            # move to a .bed extension for downstream tools if not already
            sorted_bed = catted.sort()
            if not sorted_bed.fn.endswith(".bed"):
                return sorted_bed.moveto(sorted_bed.fn + ".bed")
            else:
                return sorted_bed
        else:
            return catted
    if not catted:
        # First call: seed the accumulator with one file.
        bed_files = list(bed_files)
        catted = bt.BedTool(bed_files.pop())
    else:
        # Append one more file; postmerge=False keeps intervals as-is.
        catted = catted.cat(bed_files.pop(), postmerge=False,
                            force_truncate=False)
    return concat(bed_files, catted)
def saveAsTable(self, name, format=None, mode=None, partitionBy=None, **options):
"""Saves the content of the :class:`DataFrame` as the specified table.
In the case the table already exists, behavior of this function depends on the
save mode, specified by the `mode` function (default to throwing an exception).
When `mode` is `Overwrite`, the schema of the :class:`DataFrame` does not need to be
the same as that of the existing table.
* `append`: Append contents of this :class:`DataFrame` to existing data.
* `overwrite`: Overwrite existing data.
* `error` or `errorifexists`: Throw an exception if data already exists.
* `ignore`: Silently ignore this operation if data already exists.
:param name: the table name
:param format: the format used to save
:param mode: one of `append`, `overwrite`, `error`, `errorifexists`, `ignore` \
(default: error)
:param partitionBy: names of partitioning columns
:param options: all other string options
"""
self.mode(mode).options(**options)
if partitionBy is not None:
self.partitionBy(partitionBy)
if format is not None:
self.format(format)
self._jwrite.saveAsTable(name) | Saves the content of the :class:`DataFrame` as the specified table.
In the case the table already exists, behavior of this function depends on the
save mode, specified by the `mode` function (default to throwing an exception).
When `mode` is `Overwrite`, the schema of the :class:`DataFrame` does not need to be
the same as that of the existing table.
* `append`: Append contents of this :class:`DataFrame` to existing data.
* `overwrite`: Overwrite existing data.
* `error` or `errorifexists`: Throw an exception if data already exists.
* `ignore`: Silently ignore this operation if data already exists.
:param name: the table name
:param format: the format used to save
:param mode: one of `append`, `overwrite`, `error`, `errorifexists`, `ignore` \
(default: error)
:param partitionBy: names of partitioning columns
:param options: all other string options | Below is the the instruction that describes the task:
### Input:
Saves the content of the :class:`DataFrame` as the specified table.
In the case the table already exists, behavior of this function depends on the
save mode, specified by the `mode` function (default to throwing an exception).
When `mode` is `Overwrite`, the schema of the :class:`DataFrame` does not need to be
the same as that of the existing table.
* `append`: Append contents of this :class:`DataFrame` to existing data.
* `overwrite`: Overwrite existing data.
* `error` or `errorifexists`: Throw an exception if data already exists.
* `ignore`: Silently ignore this operation if data already exists.
:param name: the table name
:param format: the format used to save
:param mode: one of `append`, `overwrite`, `error`, `errorifexists`, `ignore` \
(default: error)
:param partitionBy: names of partitioning columns
:param options: all other string options
### Response:
def saveAsTable(self, name, format=None, mode=None, partitionBy=None, **options):
    """Saves the content of the :class:`DataFrame` as the specified table.

    In the case the table already exists, behavior of this function depends on the
    save mode, specified by the `mode` function (default to throwing an exception).
    When `mode` is `Overwrite`, the schema of the :class:`DataFrame` does not need to be
    the same as that of the existing table.

    * `append`: Append contents of this :class:`DataFrame` to existing data.
    * `overwrite`: Overwrite existing data.
    * `error` or `errorifexists`: Throw an exception if data already exists.
    * `ignore`: Silently ignore this operation if data already exists.

    :param name: the table name
    :param format: the format used to save
    :param mode: one of `append`, `overwrite`, `error`, `errorifexists`, `ignore` \
        (default: error)
    :param partitionBy: names of partitioning columns
    :param options: all other string options
    """
    # Configure the writer first, then delegate the actual write to the
    # JVM-side writer. NOTE(review): mode(None) presumably keeps the
    # writer's default ("error") — confirm in DataFrameWriter.mode.
    self.mode(mode).options(**options)
    if partitionBy is not None:
        self.partitionBy(partitionBy)
    if format is not None:
        self.format(format)
    self._jwrite.saveAsTable(name)
def resolve_all(self, import_items):
"""Resolves a list of imports.
Yields filenames.
"""
for import_item in import_items:
try:
yield self.resolve_import(import_item)
except ImportException as err:
logging.info('unknown module %s', err.module_name) | Resolves a list of imports.
Yields filenames. | Below is the the instruction that describes the task:
### Input:
Resolves a list of imports.
Yields filenames.
### Response:
def resolve_all(self, import_items):
    """Resolves a list of imports.

    Yields filenames.
    """
    for import_item in import_items:
        try:
            yield self.resolve_import(import_item)
        except ImportException as err:
            # Unresolvable modules are logged and skipped rather than
            # aborting the whole resolution pass.
            logging.info('unknown module %s', err.module_name)
def trace_parser(self):
"""Method that parses the trace file once and updates the
:attr:`status_info` attribute with the new entries.
"""
# Check the timestamp of the tracefile. Only proceed with the parsing
# if it changed from the previous time.
size_stamp = os.path.getsize(self.trace_file)
self.trace_retry = 0
if size_stamp and size_stamp == self.trace_sizestamp:
return
else:
logger.debug("Updating trace size stamp to: {}".format(size_stamp))
self.trace_sizestamp = size_stamp
with open(self.trace_file) as fh:
# Skip potential empty lines at the start of file
header = next(fh).strip()
while not header:
header = next(fh).strip()
# Get header mappings before parsing the file
hm = self._header_mapping(header)
for line in fh:
# Skip empty lines
if line.strip() == "":
continue
fields = line.strip().split("\t")
# Skip if task ID was already processes
if fields[hm["task_id"]] in self.stored_ids:
continue
# Parse trace entry and update status_info attribute
self._update_trace_info(fields, hm)
self.send = True
self._update_process_stats()
self._update_barrier_status() | Method that parses the trace file once and updates the
:attr:`status_info` attribute with the new entries. | Below is the the instruction that describes the task:
### Input:
Method that parses the trace file once and updates the
:attr:`status_info` attribute with the new entries.
### Response:
def trace_parser(self):
    """Method that parses the trace file once and updates the
    :attr:`status_info` attribute with the new entries.
    """
    # Check the timestamp of the tracefile. Only proceed with the parsing
    # if it changed from the previous time.
    # (The "stamp" is actually the file size, which is cheaper than
    # hashing and good enough to detect appended lines.)
    size_stamp = os.path.getsize(self.trace_file)
    self.trace_retry = 0
    if size_stamp and size_stamp == self.trace_sizestamp:
        return
    else:
        logger.debug("Updating trace size stamp to: {}".format(size_stamp))
        self.trace_sizestamp = size_stamp
    with open(self.trace_file) as fh:
        # Skip potential empty lines at the start of file
        header = next(fh).strip()
        while not header:
            header = next(fh).strip()
        # Get header mappings before parsing the file
        hm = self._header_mapping(header)
        for line in fh:
            # Skip empty lines
            if line.strip() == "":
                continue
            fields = line.strip().split("\t")
            # Skip if task ID was already processes
            if fields[hm["task_id"]] in self.stored_ids:
                continue
            # Parse trace entry and update status_info attribute
            self._update_trace_info(fields, hm)
    # NOTE(review): self.send presumably flags that fresh data is ready
    # for dispatch elsewhere — confirm against the consumer.
    self.send = True
    self._update_process_stats()
    self._update_barrier_status()
def get(key, default=-1):
"""Backport support for original codes."""
if isinstance(key, int):
return ErrorCode(key)
if key not in ErrorCode._member_map_:
extend_enum(ErrorCode, key, default)
return ErrorCode[key] | Backport support for original codes. | Below is the the instruction that describes the task:
### Input:
Backport support for original codes.
### Response:
def get(key, default=-1):
    """Backport support for original codes."""
    # Integer keys map directly onto existing enum values.
    if isinstance(key, int):
        return ErrorCode(key)
    # Unknown names are presumably registered on the fly via extend_enum
    # (with `default` as the new member's value) so the lookup below
    # never raises — confirm against the aenum API.
    if key not in ErrorCode._member_map_:
        extend_enum(ErrorCode, key, default)
    return ErrorCode[key]
def _get_example_csv(self):
"""For dimension parsing
"""
station_key = self.json["station"][0]["key"]
period = "corrected-archive"
url = self.url\
.replace(".json", "/station/{}/period/{}/data.csv"\
.format(station_key, period))
r = requests.get(url)
if r.status_code == 200:
return DataCsv().from_string(r.content)
else:
raise Exception("Error connecting to api") | For dimension parsing | Below is the the instruction that describes the task:
### Input:
For dimension parsing
### Response:
def _get_example_csv(self):
    """For dimension parsing

    Fetches one sample CSV (first station, corrected archive period) so
    the column layout can be inspected.
    """
    station_key = self.json["station"][0]["key"]
    period = "corrected-archive"
    url = self.url\
        .replace(".json", "/station/{}/period/{}/data.csv"\
        .format(station_key, period))
    r = requests.get(url)
    if r.status_code == 200:
        return DataCsv().from_string(r.content)
    else:
        # NOTE(review): any non-200 status (including 404) is collapsed
        # into this generic Exception — a narrower error type would help
        # callers.
        raise Exception("Error connecting to api")
def inject_method(self, func, name=None):
"""
Injects a function into an object instance as a bound method
The main use case of this function is for monkey patching. While monkey
patching is sometimes necessary it should generally be avoided. Thus, we
simply remind the developer that there might be a better way.
Args:
self (object): instance to inject a function into
func (func): the function to inject (must contain an arg for self)
name (str): name of the method. optional. If not specified the name
of the function is used.
Example:
>>> class Foo(object):
>>> def bar(self):
>>> return 'bar'
>>> def baz(self):
>>> return 'baz'
>>> self = Foo()
>>> assert self.bar() == 'bar'
>>> assert not hasattr(self, 'baz')
>>> inject_method(self, baz)
>>> assert not hasattr(Foo, 'baz'), 'should only change one instance'
>>> assert self.baz() == 'baz'
>>> inject_method(self, baz, 'bar')
>>> assert self.bar() == 'baz'
"""
# TODO: if func is a bound method we should probably unbind it
new_method = func.__get__(self, self.__class__)
if name is None:
name = func.__name__
setattr(self, name, new_method) | Injects a function into an object instance as a bound method
The main use case of this function is for monkey patching. While monkey
patching is sometimes necessary it should generally be avoided. Thus, we
simply remind the developer that there might be a better way.
Args:
self (object): instance to inject a function into
func (func): the function to inject (must contain an arg for self)
name (str): name of the method. optional. If not specified the name
of the function is used.
Example:
>>> class Foo(object):
>>> def bar(self):
>>> return 'bar'
>>> def baz(self):
>>> return 'baz'
>>> self = Foo()
>>> assert self.bar() == 'bar'
>>> assert not hasattr(self, 'baz')
>>> inject_method(self, baz)
>>> assert not hasattr(Foo, 'baz'), 'should only change one instance'
>>> assert self.baz() == 'baz'
>>> inject_method(self, baz, 'bar')
>>> assert self.bar() == 'baz' | Below is the the instruction that describes the task:
### Input:
Injects a function into an object instance as a bound method
The main use case of this function is for monkey patching. While monkey
patching is sometimes necessary it should generally be avoided. Thus, we
simply remind the developer that there might be a better way.
Args:
self (object): instance to inject a function into
func (func): the function to inject (must contain an arg for self)
name (str): name of the method. optional. If not specified the name
of the function is used.
Example:
>>> class Foo(object):
>>> def bar(self):
>>> return 'bar'
>>> def baz(self):
>>> return 'baz'
>>> self = Foo()
>>> assert self.bar() == 'bar'
>>> assert not hasattr(self, 'baz')
>>> inject_method(self, baz)
>>> assert not hasattr(Foo, 'baz'), 'should only change one instance'
>>> assert self.baz() == 'baz'
>>> inject_method(self, baz, 'bar')
>>> assert self.bar() == 'baz'
### Response:
def inject_method(self, func, name=None):
    """
    Bind *func* to *self* as an instance method.

    The main use case is monkey patching a single object without
    touching its class. Monkey patching should generally be avoided;
    there is often a better way.

    Args:
        self (object): instance that receives the new method
        func (func): the function to bind (its first argument receives
            the instance)
        name (str): attribute name for the method; defaults to
            ``func.__name__``.
    """
    # Descriptor protocol: calling __get__ on a plain function produces
    # a method bound to this specific instance, leaving the class alone.
    bound = func.__get__(self, self.__class__)
    if name is None:
        name = func.__name__
    setattr(self, name, bound)
def search_device_by_id(self, deviceID) -> Device:
""" searches a device by given id
Args:
deviceID(str): the device to search for
Returns
the Device object or None if it couldn't find a device
"""
for d in self.devices:
if d.id == deviceID:
return d
return None | searches a device by given id
Args:
deviceID(str): the device to search for
Returns
the Device object or None if it couldn't find a device | Below is the the instruction that describes the task:
### Input:
searches a device by given id
Args:
deviceID(str): the device to search for
Returns
the Device object or None if it couldn't find a device
### Response:
def search_device_by_id(self, deviceID) -> Device:
    """Look up a device in ``self.devices`` by its id.

    Args:
        deviceID(str): the device to search for
    Returns
        the Device object or None if it couldn't find a device
    """
    # next() with a default gives the first match or None without an
    # explicit loop.
    return next((d for d in self.devices if d.id == deviceID), None)
def serializationDecision(cls, obj, serializedClasses,
serializedConfiguredUnits):
"""
Decide if this unit should be serialized or not eventually fix name
to fit same already serialized unit
:param obj: object to serialize
:param serializedClasses: dict {unitCls : unitobj}
:param serializedConfiguredUnits: (unitCls, paramsValues) : unitObj
where paramsValues are named tuple name:value
"""
isDeclaration = isinstance(obj, Entity)
isDefinition = isinstance(obj, Architecture)
if isDeclaration:
unit = obj.origin
elif isDefinition:
unit = obj.entity.origin
else:
return True
assert isinstance(unit, Unit)
sd = unit._serializeDecision
if sd is None:
return True
else:
prevPriv = serializedClasses.get(unit.__class__, None)
seriazlize, nextPriv = sd(unit, obj, isDeclaration, prevPriv)
serializedClasses[unit.__class__] = nextPriv
return seriazlize | Decide if this unit should be serialized or not eventually fix name
to fit same already serialized unit
:param obj: object to serialize
:param serializedClasses: dict {unitCls : unitobj}
:param serializedConfiguredUnits: (unitCls, paramsValues) : unitObj
where paramsValues are named tuple name:value | Below is the the instruction that describes the task:
### Input:
Decide if this unit should be serialized or not eventually fix name
to fit same already serialized unit
:param obj: object to serialize
:param serializedClasses: dict {unitCls : unitobj}
:param serializedConfiguredUnits: (unitCls, paramsValues) : unitObj
where paramsValues are named tuple name:value
### Response:
def serializationDecision(cls, obj, serializedClasses,
                          serializedConfiguredUnits):
    """
    Decide if this unit should be serialized or not eventually fix name
    to fit same already serialized unit

    :param obj: object to serialize
    :param serializedClasses: dict {unitCls : unitobj}
    :param serializedConfiguredUnits: (unitCls, paramsValues) : unitObj
        where paramsValues are named tuple name:value
    """
    isDeclaration = isinstance(obj, Entity)
    isDefinition = isinstance(obj, Architecture)
    if isDeclaration:
        unit = obj.origin
    elif isDefinition:
        unit = obj.entity.origin
    else:
        # Anything that is neither an entity declaration nor an
        # architecture definition is always serialized.
        return True
    assert isinstance(unit, Unit)
    # A unit may carry a custom decision hook; without one, serialize.
    sd = unit._serializeDecision
    if sd is None:
        return True
    else:
        # The hook receives the private state from the previous decision
        # for this unit class and returns (decision, new private state).
        prevPriv = serializedClasses.get(unit.__class__, None)
        seriazlize, nextPriv = sd(unit, obj, isDeclaration, prevPriv)
        serializedClasses[unit.__class__] = nextPriv
        return seriazlize
def emit_stanza(self, element):
""""Serialize a stanza.
Must be called after `emit_head`.
:Parameters:
- `element`: the element to serialize
:Types:
- `element`: :etree:`ElementTree.Element`
:Return: serialized element
:Returntype: `unicode`
"""
if not self._head_emitted:
raise RuntimeError(".emit_head() must be called first.")
string = self._emit_element(element, level = 1,
declared_prefixes = self._root_prefixes)
return remove_evil_characters(string) | Serialize a stanza.
Must be called after `emit_head`.
:Parameters:
- `element`: the element to serialize
:Types:
- `element`: :etree:`ElementTree.Element`
:Return: serialized element
:Returntype: `unicode` | Below is the the instruction that describes the task:
### Input:
Serialize a stanza.
Must be called after `emit_head`.
:Parameters:
- `element`: the element to serialize
:Types:
- `element`: :etree:`ElementTree.Element`
:Return: serialized element
:Returntype: `unicode`
### Response:
def emit_stanza(self, element):
    """Serialize a stanza.

    Must be called after `emit_head`.

    :Parameters:
        - `element`: the element to serialize
    :Types:
        - `element`: :etree:`ElementTree.Element`

    :Return: serialized element
    :Returntype: `unicode`
    """
    if not self._head_emitted:
        raise RuntimeError(".emit_head() must be called first.")
    # Serialize relative to the prefixes already declared on the root,
    # then strip characters that are illegal in an XML stream.
    string = self._emit_element(element, level = 1,
                                declared_prefixes = self._root_prefixes)
    return remove_evil_characters(string)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.