code stringlengths 75 104k | docstring stringlengths 1 46.9k | text stringlengths 164 112k |
|---|---|---|
def __checkLock(self):
"""
Lock checking.
Check if there is lock pending and check if enough time
passed so requests can be unlocked.
"""
if self._isLocked:
if (time.time() - self._timer) > TIMEOUT_TIMES[self._operation]:
self.__unLock()
return False
return True
return False | Lock checking.
Check if there is lock pending and check if enough time
passed so requests can be unlocked. | Below is the the instruction that describes the task:
### Input:
Lock checking.
Check if there is lock pending and check if enough time
passed so requests can be unlocked.
### Response:
def __checkLock(self):
"""
Lock checking.
Check if there is lock pending and check if enough time
passed so requests can be unlocked.
"""
if self._isLocked:
if (time.time() - self._timer) > TIMEOUT_TIMES[self._operation]:
self.__unLock()
return False
return True
return False |
def rate_limit(client, headers, atexit=False):
"""Pause the process based on suggested rate interval."""
if not client or not headers:
return False
if not getattr(client.config, "rate_limit", False):
return False
rate_info = RateLimitsInfo.from_headers(headers)
if not rate_info or not rate_info.interval:
return False
if rate_info.interval:
cb = getattr(client.config, "rate_limit_callback", None)
if cb and callable(cb):
cb(rate_info, atexit=atexit)
time.sleep(rate_info.interval)
return True | Pause the process based on suggested rate interval. | Below is the instruction that describes the task:
### Input:
Pause the process based on suggested rate interval.
### Response:
def rate_limit(client, headers, atexit=False):
"""Pause the process based on suggested rate interval."""
if not client or not headers:
return False
if not getattr(client.config, "rate_limit", False):
return False
rate_info = RateLimitsInfo.from_headers(headers)
if not rate_info or not rate_info.interval:
return False
if rate_info.interval:
cb = getattr(client.config, "rate_limit_callback", None)
if cb and callable(cb):
cb(rate_info, atexit=atexit)
time.sleep(rate_info.interval)
return True |
def evaluate(self, env):
"""Evaluate the entire expression in the environment, returning
a Unicode string.
"""
out = []
for part in self.parts:
if isinstance(part, str):
out.append(part)
else:
out.append(part.evaluate(env))
return u''.join(map(str, out)) | Evaluate the entire expression in the environment, returning
a Unicode string. | Below is the instruction that describes the task:
### Input:
Evaluate the entire expression in the environment, returning
a Unicode string.
### Response:
def evaluate(self, env):
"""Evaluate the entire expression in the environment, returning
a Unicode string.
"""
out = []
for part in self.parts:
if isinstance(part, str):
out.append(part)
else:
out.append(part.evaluate(env))
return u''.join(map(str, out)) |
def parse_doc_fields(self, doc_term):
"""Parses the version, data license, name, SPDX Identifier, namespace,
and comment."""
try:
self.builder.set_doc_spdx_id(self.doc, doc_term)
except SPDXValueError:
self.value_error('DOC_SPDX_ID_VALUE', doc_term)
try:
if doc_term.count('#', 0, len(doc_term)) <= 1:
doc_namespace = doc_term.split('#')[0]
self.builder.set_doc_namespace(self.doc, doc_namespace)
else:
self.value_error('DOC_NAMESPACE_VALUE', doc_term)
except SPDXValueError:
self.value_error('DOC_NAMESPACE_VALUE', doc_term)
for _s, _p, o in self.graph.triples((doc_term, self.spdx_namespace['specVersion'], None)):
try:
self.builder.set_doc_version(self.doc, six.text_type(o))
except SPDXValueError:
self.value_error('DOC_VERS_VALUE', o)
except CardinalityError:
self.more_than_one_error('specVersion')
break
for _s, _p, o in self.graph.triples((doc_term, self.spdx_namespace['dataLicense'], None)):
try:
self.builder.set_doc_data_lic(self.doc, six.text_type(o))
except SPDXValueError:
self.value_error('DOC_D_LICS', o)
except CardinalityError:
self.more_than_one_error('dataLicense')
break
for _s, _p, o in self.graph.triples(
(doc_term, self.spdx_namespace['name'], None)):
try:
self.builder.set_doc_name(self.doc, six.text_type(o))
except CardinalityError:
self.more_than_one_error('name')
break
for _s, _p, o in self.graph.triples((doc_term, RDFS.comment, None)):
try:
self.builder.set_doc_comment(self.doc, six.text_type(o))
except CardinalityError:
self.more_than_one_error('Document comment')
break | Parses the version, data license, name, SPDX Identifier, namespace,
and comment. | Below is the instruction that describes the task:
### Input:
Parses the version, data license, name, SPDX Identifier, namespace,
and comment.
### Response:
def parse_doc_fields(self, doc_term):
"""Parses the version, data license, name, SPDX Identifier, namespace,
and comment."""
try:
self.builder.set_doc_spdx_id(self.doc, doc_term)
except SPDXValueError:
self.value_error('DOC_SPDX_ID_VALUE', doc_term)
try:
if doc_term.count('#', 0, len(doc_term)) <= 1:
doc_namespace = doc_term.split('#')[0]
self.builder.set_doc_namespace(self.doc, doc_namespace)
else:
self.value_error('DOC_NAMESPACE_VALUE', doc_term)
except SPDXValueError:
self.value_error('DOC_NAMESPACE_VALUE', doc_term)
for _s, _p, o in self.graph.triples((doc_term, self.spdx_namespace['specVersion'], None)):
try:
self.builder.set_doc_version(self.doc, six.text_type(o))
except SPDXValueError:
self.value_error('DOC_VERS_VALUE', o)
except CardinalityError:
self.more_than_one_error('specVersion')
break
for _s, _p, o in self.graph.triples((doc_term, self.spdx_namespace['dataLicense'], None)):
try:
self.builder.set_doc_data_lic(self.doc, six.text_type(o))
except SPDXValueError:
self.value_error('DOC_D_LICS', o)
except CardinalityError:
self.more_than_one_error('dataLicense')
break
for _s, _p, o in self.graph.triples(
(doc_term, self.spdx_namespace['name'], None)):
try:
self.builder.set_doc_name(self.doc, six.text_type(o))
except CardinalityError:
self.more_than_one_error('name')
break
for _s, _p, o in self.graph.triples((doc_term, RDFS.comment, None)):
try:
self.builder.set_doc_comment(self.doc, six.text_type(o))
except CardinalityError:
self.more_than_one_error('Document comment')
break |
def depth(self):
"""
The number of hierarchy levels in this category graph. Returns 0 if
it contains no categories.
"""
categories = self._categories
if not categories:
return 0
first_depth = categories[0].depth
for category in categories[1:]:
if category.depth != first_depth:
raise ValueError('category depth not uniform')
return first_depth | The number of hierarchy levels in this category graph. Returns 0 if
it contains no categories. | Below is the instruction that describes the task:
### Input:
The number of hierarchy levels in this category graph. Returns 0 if
it contains no categories.
### Response:
def depth(self):
"""
The number of hierarchy levels in this category graph. Returns 0 if
it contains no categories.
"""
categories = self._categories
if not categories:
return 0
first_depth = categories[0].depth
for category in categories[1:]:
if category.depth != first_depth:
raise ValueError('category depth not uniform')
return first_depth |
def _proxy(self):
"""
Generate an instance context for the instance, the context is capable of
performing various actions. All instance actions are proxied to the context
:returns: CompositionHookContext for this CompositionHookInstance
:rtype: twilio.rest.video.v1.composition_hook.CompositionHookContext
"""
if self._context is None:
self._context = CompositionHookContext(self._version, sid=self._solution['sid'], )
return self._context | Generate an instance context for the instance, the context is capable of
performing various actions. All instance actions are proxied to the context
:returns: CompositionHookContext for this CompositionHookInstance
:rtype: twilio.rest.video.v1.composition_hook.CompositionHookContext | Below is the instruction that describes the task:
### Input:
Generate an instance context for the instance, the context is capable of
performing various actions. All instance actions are proxied to the context
:returns: CompositionHookContext for this CompositionHookInstance
:rtype: twilio.rest.video.v1.composition_hook.CompositionHookContext
### Response:
def _proxy(self):
"""
Generate an instance context for the instance, the context is capable of
performing various actions. All instance actions are proxied to the context
:returns: CompositionHookContext for this CompositionHookInstance
:rtype: twilio.rest.video.v1.composition_hook.CompositionHookContext
"""
if self._context is None:
self._context = CompositionHookContext(self._version, sid=self._solution['sid'], )
return self._context |
def parse_options(self, options):
"""Parses ssh options string."""
quote_open = False
parsed_options = {}
def parse_add_single_option(opt):
"""Parses and validates a single option, and adds it to parsed_options field."""
if "=" in opt:
opt_name, opt_value = opt.split("=", 1)
opt_value = opt_value.replace('"', '')
else:
opt_name = opt
opt_value = True
if " " in opt_name or not self.OPTION_NAME_RE.match(opt_name):
raise InvalidOptionNameError("%s is not valid option name." % opt_name)
if self.strict_mode:
for valid_opt_name, value_required in self.OPTIONS_SPEC:
if opt_name.lower() == valid_opt_name:
if value_required and opt_value is True:
raise MissingMandatoryOptionValueError("%s is missing mandatory value." % opt_name)
break
else:
raise UnknownOptionNameError("%s is unrecognized option name." % opt_name)
if opt_name not in parsed_options:
parsed_options[opt_name] = []
parsed_options[opt_name].append(opt_value)
start_of_current_opt = 0
i = 1 # Need to be set for empty options strings
for i, character in enumerate(options):
if character == '"': # only double quotes are allowed, no need to care about single quotes
quote_open = not quote_open
if quote_open:
continue
if character == ",":
opt = options[start_of_current_opt:i]
parse_add_single_option(opt)
start_of_current_opt = i + 1
# Data begins after the first space
if start_of_current_opt + 1 != i:
opt = options[start_of_current_opt:]
parse_add_single_option(opt)
if quote_open:
raise InvalidOptionsError("Unbalanced quotes.")
return parsed_options | Parses ssh options string. | Below is the instruction that describes the task:
### Input:
Parses ssh options string.
### Response:
def parse_options(self, options):
"""Parses ssh options string."""
quote_open = False
parsed_options = {}
def parse_add_single_option(opt):
"""Parses and validates a single option, and adds it to parsed_options field."""
if "=" in opt:
opt_name, opt_value = opt.split("=", 1)
opt_value = opt_value.replace('"', '')
else:
opt_name = opt
opt_value = True
if " " in opt_name or not self.OPTION_NAME_RE.match(opt_name):
raise InvalidOptionNameError("%s is not valid option name." % opt_name)
if self.strict_mode:
for valid_opt_name, value_required in self.OPTIONS_SPEC:
if opt_name.lower() == valid_opt_name:
if value_required and opt_value is True:
raise MissingMandatoryOptionValueError("%s is missing mandatory value." % opt_name)
break
else:
raise UnknownOptionNameError("%s is unrecognized option name." % opt_name)
if opt_name not in parsed_options:
parsed_options[opt_name] = []
parsed_options[opt_name].append(opt_value)
start_of_current_opt = 0
i = 1 # Need to be set for empty options strings
for i, character in enumerate(options):
if character == '"': # only double quotes are allowed, no need to care about single quotes
quote_open = not quote_open
if quote_open:
continue
if character == ",":
opt = options[start_of_current_opt:i]
parse_add_single_option(opt)
start_of_current_opt = i + 1
# Data begins after the first space
if start_of_current_opt + 1 != i:
opt = options[start_of_current_opt:]
parse_add_single_option(opt)
if quote_open:
raise InvalidOptionsError("Unbalanced quotes.")
return parsed_options |
def getActionReflexRules(self, analysis, wf_action):
"""
This function returns a list of dictionaries with the rules to be done
for the analysis service.
:analysis: the analysis full object which we want to obtain the
rules for.
:wf_action: it is the workflow action that the analysis is doing, we
have to act in consideration of the action_set 'trigger' variable
:returns: [{'action': 'duplicate', ...}, {,}, ...]
"""
# Setting up the analyses catalog
self.analyses_catalog = getToolByName(self, CATALOG_ANALYSIS_LISTING)
# Getting the action sets, those that contain action rows
action_sets = self.getReflexRules()
rules_list = []
condition = False
for action_set in action_sets:
# Validate the trigger
if action_set.get('trigger', '') == wf_action:
# Getting the conditions resolution
condition = self._areConditionsMet(action_set, analysis)
if condition:
actions = action_set.get('actions', [])
for act in actions:
# Adding the rule number inside each action row because
# need to get the rule number from a row action later.
# we will need to get the rule number from a row
# action later.
act['rulenumber'] = action_set.get('rulenumber', '0')
act['rulename'] = self.Title()
rules_list.append(act)
return rules_list | This function returns a list of dictionaries with the rules to be done
for the analysis service.
:analysis: the analysis full object which we want to obtain the
rules for.
:wf_action: it is the workflow action that the analysis is doing, we
have to act in consideration of the action_set 'trigger' variable
:returns: [{'action': 'duplicate', ...}, {,}, ...] | Below is the instruction that describes the task:
### Input:
This function returns a list of dictionaries with the rules to be done
for the analysis service.
:analysis: the analysis full object which we want to obtain the
rules for.
:wf_action: it is the workflow action that the analysis is doing, we
have to act in consideration of the action_set 'trigger' variable
:returns: [{'action': 'duplicate', ...}, {,}, ...]
### Response:
def getActionReflexRules(self, analysis, wf_action):
"""
This function returns a list of dictionaries with the rules to be done
for the analysis service.
:analysis: the analysis full object which we want to obtain the
rules for.
:wf_action: it is the workflow action that the analysis is doing, we
have to act in consideration of the action_set 'trigger' variable
:returns: [{'action': 'duplicate', ...}, {,}, ...]
"""
# Setting up the analyses catalog
self.analyses_catalog = getToolByName(self, CATALOG_ANALYSIS_LISTING)
# Getting the action sets, those that contain action rows
action_sets = self.getReflexRules()
rules_list = []
condition = False
for action_set in action_sets:
# Validate the trigger
if action_set.get('trigger', '') == wf_action:
# Getting the conditions resolution
condition = self._areConditionsMet(action_set, analysis)
if condition:
actions = action_set.get('actions', [])
for act in actions:
# Adding the rule number inside each action row because
# need to get the rule number from a row action later.
# we will need to get the rule number from a row
# action later.
act['rulenumber'] = action_set.get('rulenumber', '0')
act['rulename'] = self.Title()
rules_list.append(act)
return rules_list |
def roots(self):
"""get the nodes with no children"""
return [x for x in self._nodes.values() if x.id not in self._c2p] | get the nodes with no children | Below is the instruction that describes the task:
### Input:
get the nodes with no children
### Response:
def roots(self):
"""get the nodes with no children"""
return [x for x in self._nodes.values() if x.id not in self._c2p] |
def tileXYZToQuadKey(self, x, y, z):
'''
Computes quadKey value based on tile x, y and z values.
'''
quadKey = ''
for i in range(z, 0, -1):
digit = 0
mask = 1 << (i - 1)
if (x & mask) != 0:
digit += 1
if (y & mask) != 0:
digit += 2
quadKey += str(digit)
return quadKey | Computes quadKey value based on tile x, y and z values. | Below is the instruction that describes the task:
### Input:
Computes quadKey value based on tile x, y and z values.
### Response:
def tileXYZToQuadKey(self, x, y, z):
'''
Computes quadKey value based on tile x, y and z values.
'''
quadKey = ''
for i in range(z, 0, -1):
digit = 0
mask = 1 << (i - 1)
if (x & mask) != 0:
digit += 1
if (y & mask) != 0:
digit += 2
quadKey += str(digit)
return quadKey |
def incidence_matrix(network, branch_components=None, busorder=None):
"""
Construct a sparse incidence matrix (directed)
Parameters
----------
branch_components : iterable sublist of `branch_components`
Buses connected by any of the selected branches are adjacent
(default: branch_components (network) or passive_branch_components (sub_network))
busorder : pd.Index subset of network.buses.index
Basis to use for the matrix representation of the adjacency matrix
(default: buses.index (network) or buses_i() (sub_network))
Returns
-------
incidence_matrix : sp.sparse.csr_matrix
Directed incidence matrix
"""
from . import components
if isinstance(network, components.Network):
if branch_components is None:
branch_components = network.branch_components
if busorder is None:
busorder = network.buses.index
elif isinstance(network, components.SubNetwork):
if branch_components is None:
branch_components = network.network.passive_branch_components
if busorder is None:
busorder = network.buses_i()
else:
raise TypeError(" must be called with a Network or a SubNetwork")
no_buses = len(busorder)
no_branches = 0
bus0_inds = []
bus1_inds = []
for c in network.iterate_components(branch_components):
if c.ind is None:
sel = slice(None)
no_branches += len(c.df)
else:
sel = c.ind
no_branches += len(c.ind)
bus0_inds.append(busorder.get_indexer(c.df.loc[sel, "bus0"]))
bus1_inds.append(busorder.get_indexer(c.df.loc[sel, "bus1"]))
bus0_inds = np.concatenate(bus0_inds)
bus1_inds = np.concatenate(bus1_inds)
return sp.sparse.csr_matrix((np.r_[np.ones(no_branches), -np.ones(no_branches)],
(np.r_[bus0_inds, bus1_inds], np.r_[:no_branches, :no_branches])),
(no_buses, no_branches)) | Construct a sparse incidence matrix (directed)
Parameters
----------
branch_components : iterable sublist of `branch_components`
Buses connected by any of the selected branches are adjacent
(default: branch_components (network) or passive_branch_components (sub_network))
busorder : pd.Index subset of network.buses.index
Basis to use for the matrix representation of the adjacency matrix
(default: buses.index (network) or buses_i() (sub_network))
Returns
-------
incidence_matrix : sp.sparse.csr_matrix
Directed incidence matrix | Below is the instruction that describes the task:
### Input:
Construct a sparse incidence matrix (directed)
Parameters
----------
branch_components : iterable sublist of `branch_components`
Buses connected by any of the selected branches are adjacent
(default: branch_components (network) or passive_branch_components (sub_network))
busorder : pd.Index subset of network.buses.index
Basis to use for the matrix representation of the adjacency matrix
(default: buses.index (network) or buses_i() (sub_network))
Returns
-------
incidence_matrix : sp.sparse.csr_matrix
Directed incidence matrix
### Response:
def incidence_matrix(network, branch_components=None, busorder=None):
"""
Construct a sparse incidence matrix (directed)
Parameters
----------
branch_components : iterable sublist of `branch_components`
Buses connected by any of the selected branches are adjacent
(default: branch_components (network) or passive_branch_components (sub_network))
busorder : pd.Index subset of network.buses.index
Basis to use for the matrix representation of the adjacency matrix
(default: buses.index (network) or buses_i() (sub_network))
Returns
-------
incidence_matrix : sp.sparse.csr_matrix
Directed incidence matrix
"""
from . import components
if isinstance(network, components.Network):
if branch_components is None:
branch_components = network.branch_components
if busorder is None:
busorder = network.buses.index
elif isinstance(network, components.SubNetwork):
if branch_components is None:
branch_components = network.network.passive_branch_components
if busorder is None:
busorder = network.buses_i()
else:
raise TypeError(" must be called with a Network or a SubNetwork")
no_buses = len(busorder)
no_branches = 0
bus0_inds = []
bus1_inds = []
for c in network.iterate_components(branch_components):
if c.ind is None:
sel = slice(None)
no_branches += len(c.df)
else:
sel = c.ind
no_branches += len(c.ind)
bus0_inds.append(busorder.get_indexer(c.df.loc[sel, "bus0"]))
bus1_inds.append(busorder.get_indexer(c.df.loc[sel, "bus1"]))
bus0_inds = np.concatenate(bus0_inds)
bus1_inds = np.concatenate(bus1_inds)
return sp.sparse.csr_matrix((np.r_[np.ones(no_branches), -np.ones(no_branches)],
(np.r_[bus0_inds, bus1_inds], np.r_[:no_branches, :no_branches])),
(no_buses, no_branches)) |
def get_layers(self, Psurf=1013.25, Ptop=0.01, **kwargs):
"""
Compute scalars or coordinates associated to the vertical layers.
Parameters
----------
grid_spec : CTMGrid object
CTMGrid containing the information necessary to re-construct grid
levels for a given model coordinate system.
Returns
-------
dictionary of vertical grid components, including eta (unitless),
sigma (unitless), pressure (hPa), and altitude (km) on both layer centers
and edges, ordered from bottom-to-top.
Notes
-----
For pure sigma grids, sigma coordinates are given by the esig (edges) and
csig (centers).
For both pure sigma and hybrid grids, pressures at layers edges L are
calculated as follows:
.. math:: P_e(L) = A_p(L) + B_p(L) * (P_{surf} - C_p)
where
:math:`P_{surf}`, :math:`P_{top}`
Air pressures at the surface and the top of the modeled atmosphere
(:attr:`Psurf` and :attr:`Ptop` attributes of the :class:`CTMGrid`
instance).
:math:`A_p(L)`, :math:`Bp(L)`
Specified in the grid set-up (`Ap` and `Bp` attributes) for hybrid
grids, or respectively equals :math:`P_{top}` and :attr:`esig`
attribute for pure sigma grids.
:math:`Cp(L)`
equals :math:`P_{top}` for pure sigma grids or equals 0 for hybrid
grids.
Pressures at grid centers are averages of pressures at grid edges:
.. math:: P_c(L) = (P_e(L) + P_e(L+1)) / 2
For hybrid grids, ETA coordinates of grid edges and grid centers are
given by;
.. math:: ETA_{e}(L) = (P_e(L) - P_{top}) / (P_{surf} - P_{top})
.. math:: ETA_{c}(L) = (P_c(L) - P_{top}) / (P_{surf} - P_{top})
Altitude values are fit using a 5th-degree polynomial; see
`gridspec.prof_altitude` for more details.
"""
Psurf = np.asarray(Psurf)
output_ndims = Psurf.ndim + 1
if output_ndims > 3:
raise ValueError("`Psurf` argument must be a float or an array"
" with <= 2 dimensions (or None)")
# Compute all variables: takes not much memory, fast
# and better for code reading
SIGe = None
SIGc = None
ETAe = None
ETAc = None
if self.hybrid:
try:
Ap = broadcast_1d_array(self.Ap, output_ndims)
Bp = broadcast_1d_array(self.Bp, output_ndims)
except KeyError:
raise ValueError("Impossible to compute vertical levels,"
" data is missing (Ap, Bp)")
Cp = 0.
else:
try:
Bp = SIGe = broadcast_1d_array(self.esig, output_ndims)
SIGc = broadcast_1d_array(self.csig, output_ndims)
except KeyError:
raise ValueError("Impossible to compute vertical levels,"
" data is missing (esig, csig)")
Ap = Cp = Ptop
Pe = Ap + Bp * (Psurf - Cp)
Pc = 0.5 * (Pe[0:-1] + Pe[1:])
if self.hybrid:
ETAe = (Pe - Ptop)/(Psurf - Ptop)
ETAc = (Pc - Ptop)/(Psurf - Ptop)
else:
SIGe = SIGe * np.ones_like(Psurf)
SIGc = SIGc * np.ones_like(Psurf)
Ze = prof_altitude(Pe, **kwargs)
Zc = prof_altitude(Pc, **kwargs)
all_vars = {'eta_edges': ETAe,
'eta_centers': ETAc,
'sigma_edges': SIGe,
'sigma_centers': SIGc,
'pressure_edges': Pe,
'pressure_centers': Pc,
'altitude_edges': Ze,
'altitude_centers': Zc}
return all_vars | Compute scalars or coordinates associated to the vertical layers.
Parameters
----------
grid_spec : CTMGrid object
CTMGrid containing the information necessary to re-construct grid
levels for a given model coordinate system.
Returns
-------
dictionary of vertical grid components, including eta (unitless),
sigma (unitless), pressure (hPa), and altitude (km) on both layer centers
and edges, ordered from bottom-to-top.
Notes
-----
For pure sigma grids, sigma coordinates are given by the esig (edges) and
csig (centers).
For both pure sigma and hybrid grids, pressures at layers edges L are
calculated as follows:
.. math:: P_e(L) = A_p(L) + B_p(L) * (P_{surf} - C_p)
where
:math:`P_{surf}`, :math:`P_{top}`
Air pressures at the surface and the top of the modeled atmosphere
(:attr:`Psurf` and :attr:`Ptop` attributes of the :class:`CTMGrid`
instance).
:math:`A_p(L)`, :math:`Bp(L)`
Specified in the grid set-up (`Ap` and `Bp` attributes) for hybrid
grids, or respectively equals :math:`P_{top}` and :attr:`esig`
attribute for pure sigma grids.
:math:`Cp(L)`
equals :math:`P_{top}` for pure sigma grids or equals 0 for hybrid
grids.
Pressures at grid centers are averages of pressures at grid edges:
.. math:: P_c(L) = (P_e(L) + P_e(L+1)) / 2
For hybrid grids, ETA coordinates of grid edges and grid centers are
given by;
.. math:: ETA_{e}(L) = (P_e(L) - P_{top}) / (P_{surf} - P_{top})
.. math:: ETA_{c}(L) = (P_c(L) - P_{top}) / (P_{surf} - P_{top})
Altitude values are fit using a 5th-degree polynomial; see
`gridspec.prof_altitude` for more details. | Below is the instruction that describes the task:
### Input:
Compute scalars or coordinates associated to the vertical layers.
Parameters
----------
grid_spec : CTMGrid object
CTMGrid containing the information necessary to re-construct grid
levels for a given model coordinate system.
Returns
-------
dictionary of vertical grid components, including eta (unitless),
sigma (unitless), pressure (hPa), and altitude (km) on both layer centers
and edges, ordered from bottom-to-top.
Notes
-----
For pure sigma grids, sigma coordinates are given by the esig (edges) and
csig (centers).
For both pure sigma and hybrid grids, pressures at layers edges L are
calculated as follows:
.. math:: P_e(L) = A_p(L) + B_p(L) * (P_{surf} - C_p)
where
:math:`P_{surf}`, :math:`P_{top}`
Air pressures at the surface and the top of the modeled atmosphere
(:attr:`Psurf` and :attr:`Ptop` attributes of the :class:`CTMGrid`
instance).
:math:`A_p(L)`, :math:`Bp(L)`
Specified in the grid set-up (`Ap` and `Bp` attributes) for hybrid
grids, or respectively equals :math:`P_{top}` and :attr:`esig`
attribute for pure sigma grids.
:math:`Cp(L)`
equals :math:`P_{top}` for pure sigma grids or equals 0 for hybrid
grids.
Pressures at grid centers are averages of pressures at grid edges:
.. math:: P_c(L) = (P_e(L) + P_e(L+1)) / 2
For hybrid grids, ETA coordinates of grid edges and grid centers are
given by;
.. math:: ETA_{e}(L) = (P_e(L) - P_{top}) / (P_{surf} - P_{top})
.. math:: ETA_{c}(L) = (P_c(L) - P_{top}) / (P_{surf} - P_{top})
Altitude values are fit using a 5th-degree polynomial; see
`gridspec.prof_altitude` for more details.
### Response:
def get_layers(self, Psurf=1013.25, Ptop=0.01, **kwargs):
"""
Compute scalars or coordinates associated to the vertical layers.
Parameters
----------
grid_spec : CTMGrid object
CTMGrid containing the information necessary to re-construct grid
levels for a given model coordinate system.
Returns
-------
dictionary of vertical grid components, including eta (unitless),
sigma (unitless), pressure (hPa), and altitude (km) on both layer centers
and edges, ordered from bottom-to-top.
Notes
-----
For pure sigma grids, sigma coordinates are given by the esig (edges) and
csig (centers).
For both pure sigma and hybrid grids, pressures at layers edges L are
calculated as follows:
.. math:: P_e(L) = A_p(L) + B_p(L) * (P_{surf} - C_p)
where
:math:`P_{surf}`, :math:`P_{top}`
Air pressures at the surface and the top of the modeled atmosphere
(:attr:`Psurf` and :attr:`Ptop` attributes of the :class:`CTMGrid`
instance).
:math:`A_p(L)`, :math:`Bp(L)`
Specified in the grid set-up (`Ap` and `Bp` attributes) for hybrid
grids, or respectively equals :math:`P_{top}` and :attr:`esig`
attribute for pure sigma grids.
:math:`Cp(L)`
equals :math:`P_{top}` for pure sigma grids or equals 0 for hybrid
grids.
Pressures at grid centers are averages of pressures at grid edges:
.. math:: P_c(L) = (P_e(L) + P_e(L+1)) / 2
For hybrid grids, ETA coordinates of grid edges and grid centers are
given by;
.. math:: ETA_{e}(L) = (P_e(L) - P_{top}) / (P_{surf} - P_{top})
.. math:: ETA_{c}(L) = (P_c(L) - P_{top}) / (P_{surf} - P_{top})
Altitude values are fit using a 5th-degree polynomial; see
`gridspec.prof_altitude` for more details.
"""
Psurf = np.asarray(Psurf)
output_ndims = Psurf.ndim + 1
if output_ndims > 3:
raise ValueError("`Psurf` argument must be a float or an array"
" with <= 2 dimensions (or None)")
# Compute all variables: takes not much memory, fast
# and better for code reading
SIGe = None
SIGc = None
ETAe = None
ETAc = None
if self.hybrid:
try:
Ap = broadcast_1d_array(self.Ap, output_ndims)
Bp = broadcast_1d_array(self.Bp, output_ndims)
except KeyError:
raise ValueError("Impossible to compute vertical levels,"
" data is missing (Ap, Bp)")
Cp = 0.
else:
try:
Bp = SIGe = broadcast_1d_array(self.esig, output_ndims)
SIGc = broadcast_1d_array(self.csig, output_ndims)
except KeyError:
raise ValueError("Impossible to compute vertical levels,"
" data is missing (esig, csig)")
Ap = Cp = Ptop
Pe = Ap + Bp * (Psurf - Cp)
Pc = 0.5 * (Pe[0:-1] + Pe[1:])
if self.hybrid:
ETAe = (Pe - Ptop)/(Psurf - Ptop)
ETAc = (Pc - Ptop)/(Psurf - Ptop)
else:
SIGe = SIGe * np.ones_like(Psurf)
SIGc = SIGc * np.ones_like(Psurf)
Ze = prof_altitude(Pe, **kwargs)
Zc = prof_altitude(Pc, **kwargs)
all_vars = {'eta_edges': ETAe,
'eta_centers': ETAc,
'sigma_edges': SIGe,
'sigma_centers': SIGc,
'pressure_edges': Pe,
'pressure_centers': Pc,
'altitude_edges': Ze,
'altitude_centers': Zc}
return all_vars |
def inspect_last(self, stream, only_allocated=False):
"""Return the last value pushed into a stream.
This function works even if the stream is virtual and no
virtual walker has been created for it. It is primarily
useful to aid in debugging sensor graphs.
Args:
stream (DataStream): The stream to inspect.
only_allocated (bool): Optional parameter to only allow inspection
of allocated virtual streams. This is useful for mimicking the
behavior of an embedded device that does not have a _last_values
array.
Returns:
IOTileReading: The data in the stream
Raises:
StreamEmptyError: if there has never been data written to
the stream.
UnresolvedIdentifierError: if only_allocated is True and there has not
been a virtual stream walker allocated to listen to this stream.
"""
if only_allocated:
found = False
for walker in self._virtual_walkers:
if walker.matches(stream):
found = True
break
if not found:
raise UnresolvedIdentifierError("inspect_last could not find an allocated virtual streamer for the desired stream", stream=stream)
if stream in self._last_values:
return self._last_values[stream]
raise StreamEmptyError(u"inspect_last called on stream that has never been written to", stream=stream) | Return the last value pushed into a stream.
This function works even if the stream is virtual and no
virtual walker has been created for it. It is primarily
useful to aid in debugging sensor graphs.
Args:
stream (DataStream): The stream to inspect.
only_allocated (bool): Optional parameter to only allow inspection
of allocated virtual streams. This is useful for mimicking the
behavior of an embedded device that does not have a _last_values
array.
Returns:
IOTileReading: The data in the stream
Raises:
StreamEmptyError: if there has never been data written to
the stream.
UnresolvedIdentifierError: if only_allocated is True and there has not
been a virtual stream walker allocated to listen to this stream. | Below is the instruction that describes the task:
### Input:
Return the last value pushed into a stream.
This function works even if the stream is virtual and no
virtual walker has been created for it. It is primarily
useful to aid in debugging sensor graphs.
Args:
stream (DataStream): The stream to inspect.
only_allocated (bool): Optional parameter to only allow inspection
of allocated virtual streams. This is useful for mimicking the
behavior of an embedded device that does not have a _last_values
array.
Returns:
IOTileReading: The data in the stream
Raises:
StreamEmptyError: if there has never been data written to
the stream.
UnresolvedIdentifierError: if only_allocated is True and there has not
been a virtual stream walker allocated to listen to this stream.
### Response:
def inspect_last(self, stream, only_allocated=False):
"""Return the last value pushed into a stream.
This function works even if the stream is virtual and no
virtual walker has been created for it. It is primarily
useful to aid in debugging sensor graphs.
Args:
stream (DataStream): The stream to inspect.
only_allocated (bool): Optional parameter to only allow inspection
of allocated virtual streams. This is useful for mimicking the
behavior of an embedded device that does not have a _last_values
array.
Returns:
IOTileReading: The data in the stream
Raises:
StreamEmptyError: if there has never been data written to
the stream.
UnresolvedIdentifierError: if only_allocated is True and there has not
been a virtual stream walker allocated to listen to this stream.
"""
if only_allocated:
found = False
for walker in self._virtual_walkers:
if walker.matches(stream):
found = True
break
if not found:
raise UnresolvedIdentifierError("inspect_last could not find an allocated virtual streamer for the desired stream", stream=stream)
if stream in self._last_values:
return self._last_values[stream]
raise StreamEmptyError(u"inspect_last called on stream that has never been written to", stream=stream) |
async def create_source_event_stream(
schema: GraphQLSchema,
document: DocumentNode,
root_value: Any = None,
context_value: Any = None,
variable_values: Dict[str, Any] = None,
operation_name: str = None,
field_resolver: GraphQLFieldResolver = None,
) -> Union[AsyncIterable[Any], ExecutionResult]:
"""Create source even stream
Implements the "CreateSourceEventStream" algorithm described in the GraphQL
specification, resolving the subscription source event stream.
Returns a coroutine that yields an AsyncIterable.
If the client provided invalid arguments, the source stream could not be created,
or the resolver did not return an AsyncIterable, this function will throw an error,
which should be caught and handled by the caller.
A Source Event Stream represents a sequence of events, each of which triggers a
GraphQL execution for that event.
This may be useful when hosting the stateful subscription service in a different
process or machine than the stateless GraphQL execution engine, or otherwise
separating these two steps. For more on this, see the "Supporting Subscriptions
at Scale" information in the GraphQL spec.
"""
# If arguments are missing or incorrectly typed, this is an internal developer
# mistake which should throw an early error.
assert_valid_execution_arguments(schema, document, variable_values)
# If a valid context cannot be created due to incorrect arguments, this will throw
# an error.
context = ExecutionContext.build(
schema,
document,
root_value,
context_value,
variable_values,
operation_name,
field_resolver,
)
# Return early errors if execution context failed.
if isinstance(context, list):
return ExecutionResult(data=None, errors=context)
type_ = get_operation_root_type(schema, context.operation)
fields = context.collect_fields(type_, context.operation.selection_set, {}, set())
response_names = list(fields)
response_name = response_names[0]
field_nodes = fields[response_name]
field_node = field_nodes[0]
field_name = field_node.name.value
field_def = get_field_def(schema, type_, field_name)
if not field_def:
raise GraphQLError(
f"The subscription field '{field_name}' is not defined.", field_nodes
)
# Call the `subscribe()` resolver or the default resolver to produce an
# AsyncIterable yielding raw payloads.
resolve_fn = field_def.subscribe or context.field_resolver
resolve_fn = cast(GraphQLFieldResolver, resolve_fn) # help mypy
path = add_path(None, response_name)
info = context.build_resolve_info(field_def, field_nodes, type_, path)
# `resolve_field_value_or_error` implements the "ResolveFieldEventStream" algorithm
# from GraphQL specification. It differs from `resolve_field_value` due to
# providing a different `resolve_fn`.
result = context.resolve_field_value_or_error(
field_def, field_nodes, resolve_fn, root_value, info
)
event_stream = await cast(Awaitable, result) if isawaitable(result) else result
# If `event_stream` is an Error, rethrow a located error.
if isinstance(event_stream, Exception):
raise located_error(event_stream, field_nodes, response_path_as_list(path))
# Assert field returned an event stream, otherwise yield an error.
if isinstance(event_stream, AsyncIterable):
return cast(AsyncIterable, event_stream)
raise TypeError(
f"Subscription field must return AsyncIterable. Received: {event_stream!r}"
) | Create source even stream
Implements the "CreateSourceEventStream" algorithm described in the GraphQL
specification, resolving the subscription source event stream.
Returns a coroutine that yields an AsyncIterable.
If the client provided invalid arguments, the source stream could not be created,
or the resolver did not return an AsyncIterable, this function will throw an error,
which should be caught and handled by the caller.
A Source Event Stream represents a sequence of events, each of which triggers a
GraphQL execution for that event.
This may be useful when hosting the stateful subscription service in a different
process or machine than the stateless GraphQL execution engine, or otherwise
separating these two steps. For more on this, see the "Supporting Subscriptions
at Scale" information in the GraphQL spec. | Below is the instruction that describes the task:
### Input:
Create source even stream
Implements the "CreateSourceEventStream" algorithm described in the GraphQL
specification, resolving the subscription source event stream.
Returns a coroutine that yields an AsyncIterable.
If the client provided invalid arguments, the source stream could not be created,
or the resolver did not return an AsyncIterable, this function will throw an error,
which should be caught and handled by the caller.
A Source Event Stream represents a sequence of events, each of which triggers a
GraphQL execution for that event.
This may be useful when hosting the stateful subscription service in a different
process or machine than the stateless GraphQL execution engine, or otherwise
separating these two steps. For more on this, see the "Supporting Subscriptions
at Scale" information in the GraphQL spec.
### Response:
async def create_source_event_stream(
schema: GraphQLSchema,
document: DocumentNode,
root_value: Any = None,
context_value: Any = None,
variable_values: Dict[str, Any] = None,
operation_name: str = None,
field_resolver: GraphQLFieldResolver = None,
) -> Union[AsyncIterable[Any], ExecutionResult]:
"""Create source even stream
Implements the "CreateSourceEventStream" algorithm described in the GraphQL
specification, resolving the subscription source event stream.
Returns a coroutine that yields an AsyncIterable.
If the client provided invalid arguments, the source stream could not be created,
or the resolver did not return an AsyncIterable, this function will throw an error,
which should be caught and handled by the caller.
A Source Event Stream represents a sequence of events, each of which triggers a
GraphQL execution for that event.
This may be useful when hosting the stateful subscription service in a different
process or machine than the stateless GraphQL execution engine, or otherwise
separating these two steps. For more on this, see the "Supporting Subscriptions
at Scale" information in the GraphQL spec.
"""
# If arguments are missing or incorrectly typed, this is an internal developer
# mistake which should throw an early error.
assert_valid_execution_arguments(schema, document, variable_values)
# If a valid context cannot be created due to incorrect arguments, this will throw
# an error.
context = ExecutionContext.build(
schema,
document,
root_value,
context_value,
variable_values,
operation_name,
field_resolver,
)
# Return early errors if execution context failed.
if isinstance(context, list):
return ExecutionResult(data=None, errors=context)
type_ = get_operation_root_type(schema, context.operation)
fields = context.collect_fields(type_, context.operation.selection_set, {}, set())
response_names = list(fields)
response_name = response_names[0]
field_nodes = fields[response_name]
field_node = field_nodes[0]
field_name = field_node.name.value
field_def = get_field_def(schema, type_, field_name)
if not field_def:
raise GraphQLError(
f"The subscription field '{field_name}' is not defined.", field_nodes
)
# Call the `subscribe()` resolver or the default resolver to produce an
# AsyncIterable yielding raw payloads.
resolve_fn = field_def.subscribe or context.field_resolver
resolve_fn = cast(GraphQLFieldResolver, resolve_fn) # help mypy
path = add_path(None, response_name)
info = context.build_resolve_info(field_def, field_nodes, type_, path)
# `resolve_field_value_or_error` implements the "ResolveFieldEventStream" algorithm
# from GraphQL specification. It differs from `resolve_field_value` due to
# providing a different `resolve_fn`.
result = context.resolve_field_value_or_error(
field_def, field_nodes, resolve_fn, root_value, info
)
event_stream = await cast(Awaitable, result) if isawaitable(result) else result
# If `event_stream` is an Error, rethrow a located error.
if isinstance(event_stream, Exception):
raise located_error(event_stream, field_nodes, response_path_as_list(path))
# Assert field returned an event stream, otherwise yield an error.
if isinstance(event_stream, AsyncIterable):
return cast(AsyncIterable, event_stream)
raise TypeError(
f"Subscription field must return AsyncIterable. Received: {event_stream!r}"
) |
def format_number(
x, use_rounding=True, is_population=False, coefficient=1):
"""Format a number according to the standards.
:param x: A number to be formatted in a locale friendly way.
:type x: int
:param use_rounding: Flag to enable a rounding.
:type use_rounding: bool
:param is_population: Flag if the number is population. It needs to be
used with enable_rounding.
:type is_population: bool
:param coefficient: Divide the result after the rounding.
:type coefficient:float
:returns: A locale friendly formatted string e.g. 1,000,0000.00
representing the original x. If a ValueError exception occurs,
x is simply returned.
:rtype: basestring
"""
if use_rounding:
x = rounding(x, is_population)
x //= coefficient
number = add_separators(x)
return number | Format a number according to the standards.
:param x: A number to be formatted in a locale friendly way.
:type x: int
:param use_rounding: Flag to enable a rounding.
:type use_rounding: bool
:param is_population: Flag if the number is population. It needs to be
used with enable_rounding.
:type is_population: bool
:param coefficient: Divide the result after the rounding.
:type coefficient:float
:returns: A locale friendly formatted string e.g. 1,000,0000.00
representing the original x. If a ValueError exception occurs,
x is simply returned.
:rtype: basestring | Below is the instruction that describes the task:
### Input:
Format a number according to the standards.
:param x: A number to be formatted in a locale friendly way.
:type x: int
:param use_rounding: Flag to enable a rounding.
:type use_rounding: bool
:param is_population: Flag if the number is population. It needs to be
used with enable_rounding.
:type is_population: bool
:param coefficient: Divide the result after the rounding.
:type coefficient:float
:returns: A locale friendly formatted string e.g. 1,000,0000.00
representing the original x. If a ValueError exception occurs,
x is simply returned.
:rtype: basestring
### Response:
def format_number(
x, use_rounding=True, is_population=False, coefficient=1):
"""Format a number according to the standards.
:param x: A number to be formatted in a locale friendly way.
:type x: int
:param use_rounding: Flag to enable a rounding.
:type use_rounding: bool
:param is_population: Flag if the number is population. It needs to be
used with enable_rounding.
:type is_population: bool
:param coefficient: Divide the result after the rounding.
:type coefficient:float
:returns: A locale friendly formatted string e.g. 1,000,0000.00
representing the original x. If a ValueError exception occurs,
x is simply returned.
:rtype: basestring
"""
if use_rounding:
x = rounding(x, is_population)
x //= coefficient
number = add_separators(x)
return number |
def read_filterbank(self, filename=None, f_start=None, f_stop=None,
t_start=None, t_stop=None, load_data=True):
""" Populate Filterbank instance with data from Filterbank file
Note:
This is to be deprecated in future, please use Waterfall() to open files.
"""
if filename is None:
filename = self.filename
else:
self.filename = filename
self.header = read_header(filename)
#convert input frequencies into what their corresponding index would be
i_start, i_stop, chan_start_idx, chan_stop_idx = self._setup_freqs(f_start=f_start, f_stop=f_stop)
n_bits = self.header[b'nbits']
n_bytes = int(self.header[b'nbits'] / 8)
n_chans = self.header[b'nchans']
n_chans_selected = self.freqs.shape[0]
n_ifs = self.header[b'nifs']
# Load binary data
self.idx_data = len_header(filename)
f = open(filename, 'rb')
f.seek(self.idx_data)
filesize = os.path.getsize(self.filename)
n_bytes_data = filesize - self.idx_data
# Finally add some other info to the class as objects
self.n_ints_in_file = calc_n_ints_in_file(self.filename)
self.file_size_bytes = filesize
## Setup time axis
ii_start, ii_stop, n_ints = self._setup_time_axis(t_start=t_start, t_stop=t_stop)
# Seek to first integration
f.seek(int(ii_start * n_bits * n_ifs * n_chans / 8), 1)
# Set up indexes used in file read (taken out of loop for speed)
i0 = np.min((chan_start_idx, chan_stop_idx))
i1 = np.max((chan_start_idx, chan_stop_idx))
#Set up the data type (taken out of loop for speed)
if n_bits == 2:
dd_type = b'uint8'
n_chans_selected = int(n_chans_selected/4)
elif n_bytes == 4:
dd_type = b'float32'
elif n_bytes == 2:
dd_type = b'uint16'
elif n_bytes == 1:
dd_type = b'uint8'
if load_data:
if n_ints * n_ifs * n_chans_selected > MAX_DATA_ARRAY_SIZE:
print("[Filterbank] Error: data array is too large to load. Either select fewer points or manually increase MAX_DATA_ARRAY_SIZE. Large files are now handle with Waterfall .")
sys.exit()
if n_bits == 2:
self.data = np.zeros((n_ints, n_ifs, n_chans_selected*4), dtype=dd_type)
else:
self.data = np.zeros((n_ints, n_ifs, n_chans_selected), dtype=dd_type)
for ii in range(n_ints):
"""d = f.read(n_bytes * n_chans * n_ifs)
"""
for jj in range(n_ifs):
f.seek(n_bytes * i0, 1) # 1 = from current location
#d = f.read(n_bytes * n_chans_selected)
#bytes_to_read = n_bytes * n_chans_selected
dd = np.fromfile(f, count=n_chans_selected, dtype=dd_type)
# Reverse array if frequency axis is flipped
# if f_delt < 0:
# dd = dd[::-1]
if n_bits == 2:
dd = unpack_2to8(dd)
self.data[ii, jj] = dd
f.seek(n_bytes * (n_chans - i1), 1) # Seek to start of next block
else:
print("Skipping data load...")
self.data = np.array([0], dtype=dd_type) | Populate Filterbank instance with data from Filterbank file
Note:
This is to be deprecated in future, please use Waterfall() to open files. | Below is the instruction that describes the task:
### Input:
Populate Filterbank instance with data from Filterbank file
Note:
This is to be deprecated in future, please use Waterfall() to open files.
### Response:
def read_filterbank(self, filename=None, f_start=None, f_stop=None,
t_start=None, t_stop=None, load_data=True):
""" Populate Filterbank instance with data from Filterbank file
Note:
This is to be deprecated in future, please use Waterfall() to open files.
"""
if filename is None:
filename = self.filename
else:
self.filename = filename
self.header = read_header(filename)
#convert input frequencies into what their corresponding index would be
i_start, i_stop, chan_start_idx, chan_stop_idx = self._setup_freqs(f_start=f_start, f_stop=f_stop)
n_bits = self.header[b'nbits']
n_bytes = int(self.header[b'nbits'] / 8)
n_chans = self.header[b'nchans']
n_chans_selected = self.freqs.shape[0]
n_ifs = self.header[b'nifs']
# Load binary data
self.idx_data = len_header(filename)
f = open(filename, 'rb')
f.seek(self.idx_data)
filesize = os.path.getsize(self.filename)
n_bytes_data = filesize - self.idx_data
# Finally add some other info to the class as objects
self.n_ints_in_file = calc_n_ints_in_file(self.filename)
self.file_size_bytes = filesize
## Setup time axis
ii_start, ii_stop, n_ints = self._setup_time_axis(t_start=t_start, t_stop=t_stop)
# Seek to first integration
f.seek(int(ii_start * n_bits * n_ifs * n_chans / 8), 1)
# Set up indexes used in file read (taken out of loop for speed)
i0 = np.min((chan_start_idx, chan_stop_idx))
i1 = np.max((chan_start_idx, chan_stop_idx))
#Set up the data type (taken out of loop for speed)
if n_bits == 2:
dd_type = b'uint8'
n_chans_selected = int(n_chans_selected/4)
elif n_bytes == 4:
dd_type = b'float32'
elif n_bytes == 2:
dd_type = b'uint16'
elif n_bytes == 1:
dd_type = b'uint8'
if load_data:
if n_ints * n_ifs * n_chans_selected > MAX_DATA_ARRAY_SIZE:
print("[Filterbank] Error: data array is too large to load. Either select fewer points or manually increase MAX_DATA_ARRAY_SIZE. Large files are now handle with Waterfall .")
sys.exit()
if n_bits == 2:
self.data = np.zeros((n_ints, n_ifs, n_chans_selected*4), dtype=dd_type)
else:
self.data = np.zeros((n_ints, n_ifs, n_chans_selected), dtype=dd_type)
for ii in range(n_ints):
"""d = f.read(n_bytes * n_chans * n_ifs)
"""
for jj in range(n_ifs):
f.seek(n_bytes * i0, 1) # 1 = from current location
#d = f.read(n_bytes * n_chans_selected)
#bytes_to_read = n_bytes * n_chans_selected
dd = np.fromfile(f, count=n_chans_selected, dtype=dd_type)
# Reverse array if frequency axis is flipped
# if f_delt < 0:
# dd = dd[::-1]
if n_bits == 2:
dd = unpack_2to8(dd)
self.data[ii, jj] = dd
f.seek(n_bytes * (n_chans - i1), 1) # Seek to start of next block
else:
print("Skipping data load...")
self.data = np.array([0], dtype=dd_type) |
def cnst_A0(self, X, Xf=None):
r"""Compute :math:`A_0 \mathbf{x}` component of ADMM problem
constraint. In this case :math:`A_0 \mathbf{x} = (\Gamma_0^T \;\;
\Gamma_1^T \;\; \ldots )^T \mathbf{x}`.
"""
if Xf is None:
Xf = sl.rfftn(X, axes=self.cri.axisN)
return self.Wtv[..., np.newaxis] * sl.irfftn(
self.Gf * Xf[..., np.newaxis], self.cri.Nv, axes=self.cri.axisN) | r"""Compute :math:`A_0 \mathbf{x}` component of ADMM problem
constraint. In this case :math:`A_0 \mathbf{x} = (\Gamma_0^T \;\;
\Gamma_1^T \;\; \ldots )^T \mathbf{x}`. | Below is the instruction that describes the task:
### Input:
r"""Compute :math:`A_0 \mathbf{x}` component of ADMM problem
constraint. In this case :math:`A_0 \mathbf{x} = (\Gamma_0^T \;\;
\Gamma_1^T \;\; \ldots )^T \mathbf{x}`.
### Response:
def cnst_A0(self, X, Xf=None):
r"""Compute :math:`A_0 \mathbf{x}` component of ADMM problem
constraint. In this case :math:`A_0 \mathbf{x} = (\Gamma_0^T \;\;
\Gamma_1^T \;\; \ldots )^T \mathbf{x}`.
"""
if Xf is None:
Xf = sl.rfftn(X, axes=self.cri.axisN)
return self.Wtv[..., np.newaxis] * sl.irfftn(
self.Gf * Xf[..., np.newaxis], self.cri.Nv, axes=self.cri.axisN) |
async def download(self, source, destination="", *, write_into=False,
block_size=DEFAULT_BLOCK_SIZE):
"""
:py:func:`asyncio.coroutine`
High level download method for downloading files and directories
recursively and save them to the file system.
:param source: source path of file or directory on server side
:type source: :py:class:`str` or :py:class:`pathlib.PurePosixPath`
:param destination: destination path of file or directory on client
side
:type destination: :py:class:`str` or :py:class:`pathlib.Path`
:param write_into: write source into destination (if you want download
file and change it name, as well with directories)
:type write_into: :py:class:`bool`
:param block_size: block size for transaction
:type block_size: :py:class:`int`
"""
source = pathlib.PurePosixPath(source)
destination = pathlib.Path(destination)
if not write_into:
destination = destination / source.name
if await self.is_file(source):
await self.path_io.mkdir(destination.parent,
parents=True, exist_ok=True)
async with self.path_io.open(destination, mode="wb") as file_out, \
self.download_stream(source) as stream:
async for block in stream.iter_by_block(block_size):
await file_out.write(block)
elif await self.is_dir(source):
await self.path_io.mkdir(destination, parents=True, exist_ok=True)
for name, info in (await self.list(source)):
full = destination / name.relative_to(source)
if info["type"] in ("file", "dir"):
await self.download(name, full, write_into=True,
block_size=block_size) | :py:func:`asyncio.coroutine`
High level download method for downloading files and directories
recursively and save them to the file system.
:param source: source path of file or directory on server side
:type source: :py:class:`str` or :py:class:`pathlib.PurePosixPath`
:param destination: destination path of file or directory on client
side
:type destination: :py:class:`str` or :py:class:`pathlib.Path`
:param write_into: write source into destination (if you want download
file and change it name, as well with directories)
:type write_into: :py:class:`bool`
:param block_size: block size for transaction
:type block_size: :py:class:`int` | Below is the instruction that describes the task:
### Input:
:py:func:`asyncio.coroutine`
High level download method for downloading files and directories
recursively and save them to the file system.
:param source: source path of file or directory on server side
:type source: :py:class:`str` or :py:class:`pathlib.PurePosixPath`
:param destination: destination path of file or directory on client
side
:type destination: :py:class:`str` or :py:class:`pathlib.Path`
:param write_into: write source into destination (if you want download
file and change it name, as well with directories)
:type write_into: :py:class:`bool`
:param block_size: block size for transaction
:type block_size: :py:class:`int`
### Response:
async def download(self, source, destination="", *, write_into=False,
block_size=DEFAULT_BLOCK_SIZE):
"""
:py:func:`asyncio.coroutine`
High level download method for downloading files and directories
recursively and save them to the file system.
:param source: source path of file or directory on server side
:type source: :py:class:`str` or :py:class:`pathlib.PurePosixPath`
:param destination: destination path of file or directory on client
side
:type destination: :py:class:`str` or :py:class:`pathlib.Path`
:param write_into: write source into destination (if you want download
file and change it name, as well with directories)
:type write_into: :py:class:`bool`
:param block_size: block size for transaction
:type block_size: :py:class:`int`
"""
source = pathlib.PurePosixPath(source)
destination = pathlib.Path(destination)
if not write_into:
destination = destination / source.name
if await self.is_file(source):
await self.path_io.mkdir(destination.parent,
parents=True, exist_ok=True)
async with self.path_io.open(destination, mode="wb") as file_out, \
self.download_stream(source) as stream:
async for block in stream.iter_by_block(block_size):
await file_out.write(block)
elif await self.is_dir(source):
await self.path_io.mkdir(destination, parents=True, exist_ok=True)
for name, info in (await self.list(source)):
full = destination / name.relative_to(source)
if info["type"] in ("file", "dir"):
await self.download(name, full, write_into=True,
block_size=block_size) |
def yosys_area_delay(library, abc_cmd=None, block=None):
""" Synthesize with Yosys and return estimate of area and delay.
:param library: stdcell library file to target in liberty format
:param abc_cmd: string of commands for yosys to pass to abc for synthesis
:param block: pyrtl block to analyze
:return: a tuple of numbers: area, delay
The area and delay are returned in units as defined by the stdcell
library. In the standard vsc 130nm library, the area is in a number of
"tracks", each of which is about 1.74 square um (see area estimation
for more details) and the delay is in ps.
http://www.vlsitechnology.org/html/vsc_description.html
May raise `PyrtlError` if yosys is not configured correctly, and
`PyrtlInternalError` if the call to yosys was not able successfully
"""
if abc_cmd is None:
abc_cmd = 'strash;scorr;ifraig;retime;dch,-f;map;print_stats;'
else:
# first, replace whitespace with commas as per yosys requirements
re.sub(r"\s+", ',', abc_cmd)
# then append with "print_stats" to generate the area and delay info
abc_cmd = '%s;print_stats;' % abc_cmd
def extract_area_delay_from_yosys_output(yosys_output):
report_lines = [line for line in yosys_output.split('\n') if 'ABC: netlist' in line]
area = re.match('.*area\s*=\s*([0-9\.]*)', report_lines[0]).group(1)
delay = re.match('.*delay\s*=\s*([0-9\.]*)', report_lines[0]).group(1)
return float(area), float(delay)
yosys_arg_template = """-p
read_verilog %s;
synth -top toplevel;
dfflibmap -liberty %s;
abc -liberty %s -script +%s
"""
temp_d, temp_path = tempfile.mkstemp(suffix='.v')
try:
# write the verilog to a temp
with os.fdopen(temp_d, 'w') as f:
OutputToVerilog(f, block=block)
# call yosys on the temp, and grab the output
yosys_arg = yosys_arg_template % (temp_path, library, library, abc_cmd)
yosys_output = subprocess.check_output(['yosys', yosys_arg])
area, delay = extract_area_delay_from_yosys_output(yosys_output)
except (subprocess.CalledProcessError, ValueError) as e:
print('Error with call to yosys...', file=sys.stderr)
print('---------------------------------------------', file=sys.stderr)
print(e.output, file=sys.stderr)
print('---------------------------------------------', file=sys.stderr)
raise PyrtlError('Yosys callfailed')
except OSError as e:
print('Error with call to yosys...', file=sys.stderr)
raise PyrtlError('Call to yosys failed (not installed or on path?)')
finally:
os.remove(temp_path)
return area, delay | Synthesize with Yosys and return estimate of area and delay.
:param library: stdcell library file to target in liberty format
:param abc_cmd: string of commands for yosys to pass to abc for synthesis
:param block: pyrtl block to analyze
:return: a tuple of numbers: area, delay
The area and delay are returned in units as defined by the stdcell
library. In the standard vsc 130nm library, the area is in a number of
"tracks", each of which is about 1.74 square um (see area estimation
for more details) and the delay is in ps.
http://www.vlsitechnology.org/html/vsc_description.html
May raise `PyrtlError` if yosys is not configured correctly, and
`PyrtlInternalError` if the call to yosys was not able successfully | Below is the instruction that describes the task:
### Input:
Synthesize with Yosys and return estimate of area and delay.
:param library: stdcell library file to target in liberty format
:param abc_cmd: string of commands for yosys to pass to abc for synthesis
:param block: pyrtl block to analyze
:return: a tuple of numbers: area, delay
The area and delay are returned in units as defined by the stdcell
library. In the standard vsc 130nm library, the area is in a number of
"tracks", each of which is about 1.74 square um (see area estimation
for more details) and the delay is in ps.
http://www.vlsitechnology.org/html/vsc_description.html
May raise `PyrtlError` if yosys is not configured correctly, and
`PyrtlInternalError` if the call to yosys was not able successfully
### Response:
def yosys_area_delay(library, abc_cmd=None, block=None):
""" Synthesize with Yosys and return estimate of area and delay.
:param library: stdcell library file to target in liberty format
:param abc_cmd: string of commands for yosys to pass to abc for synthesis
:param block: pyrtl block to analyze
:return: a tuple of numbers: area, delay
The area and delay are returned in units as defined by the stdcell
library. In the standard vsc 130nm library, the area is in a number of
"tracks", each of which is about 1.74 square um (see area estimation
for more details) and the delay is in ps.
http://www.vlsitechnology.org/html/vsc_description.html
May raise `PyrtlError` if yosys is not configured correctly, and
`PyrtlInternalError` if the call to yosys was not able successfully
"""
if abc_cmd is None:
abc_cmd = 'strash;scorr;ifraig;retime;dch,-f;map;print_stats;'
else:
# first, replace whitespace with commas as per yosys requirements
re.sub(r"\s+", ',', abc_cmd)
# then append with "print_stats" to generate the area and delay info
abc_cmd = '%s;print_stats;' % abc_cmd
def extract_area_delay_from_yosys_output(yosys_output):
report_lines = [line for line in yosys_output.split('\n') if 'ABC: netlist' in line]
area = re.match('.*area\s*=\s*([0-9\.]*)', report_lines[0]).group(1)
delay = re.match('.*delay\s*=\s*([0-9\.]*)', report_lines[0]).group(1)
return float(area), float(delay)
yosys_arg_template = """-p
read_verilog %s;
synth -top toplevel;
dfflibmap -liberty %s;
abc -liberty %s -script +%s
"""
temp_d, temp_path = tempfile.mkstemp(suffix='.v')
try:
# write the verilog to a temp
with os.fdopen(temp_d, 'w') as f:
OutputToVerilog(f, block=block)
# call yosys on the temp, and grab the output
yosys_arg = yosys_arg_template % (temp_path, library, library, abc_cmd)
yosys_output = subprocess.check_output(['yosys', yosys_arg])
area, delay = extract_area_delay_from_yosys_output(yosys_output)
except (subprocess.CalledProcessError, ValueError) as e:
print('Error with call to yosys...', file=sys.stderr)
print('---------------------------------------------', file=sys.stderr)
print(e.output, file=sys.stderr)
print('---------------------------------------------', file=sys.stderr)
raise PyrtlError('Yosys callfailed')
except OSError as e:
print('Error with call to yosys...', file=sys.stderr)
raise PyrtlError('Call to yosys failed (not installed or on path?)')
finally:
os.remove(temp_path)
return area, delay |
def run(self, args):
"""
Pass in raw arguments, instantiate Slack API and begin client.
"""
args = self.parser.parse_args(args)
if not args.token:
raise ValueError('Supply the slack token through --token or setting DJANGOBOT_TOKEN')
# Import the channel layer
sys.path.insert(0, ".")
module_path, object_path = args.channel_layer.split(':', 1)
channel_layer = importlib.import_module(module_path)
for part in object_path.split('.'):
channel_layer = getattr(channel_layer, part)
# Boot up the client
Client(
channel_layer=channel_layer,
token=args.token,
).run() | Pass in raw arguments, instantiate Slack API and begin client. | Below is the the instruction that describes the task:
### Input:
Pass in raw arguments, instantiate Slack API and begin client.
### Response:
def run(self, args):
"""
Pass in raw arguments, instantiate Slack API and begin client.
"""
args = self.parser.parse_args(args)
if not args.token:
raise ValueError('Supply the slack token through --token or setting DJANGOBOT_TOKEN')
# Import the channel layer
sys.path.insert(0, ".")
module_path, object_path = args.channel_layer.split(':', 1)
channel_layer = importlib.import_module(module_path)
for part in object_path.split('.'):
channel_layer = getattr(channel_layer, part)
# Boot up the client
Client(
channel_layer=channel_layer,
token=args.token,
).run() |
def minutes_from_utc(self):
"""
The timezone offset of this point in time object as +/- minutes from
UTC.
A positive value of the timezone offset indicates minutes east of UTC,
and a negative value indicates minutes west of UTC.
0, if this object represents a time interval.
"""
offset = 0
if self.__datetime is not None and \
self.__datetime.utcoffset() is not None:
offset = self.__datetime.utcoffset().seconds / 60
if self.__datetime.utcoffset().days == -1:
offset = -((60 * 24) - offset)
return int(offset) | The timezone offset of this point in time object as +/- minutes from
UTC.
A positive value of the timezone offset indicates minutes east of UTC,
and a negative value indicates minutes west of UTC.
0, if this object represents a time interval. | Below is the instruction that describes the task:
### Input:
The timezone offset of this point in time object as +/- minutes from
UTC.
A positive value of the timezone offset indicates minutes east of UTC,
and a negative value indicates minutes west of UTC.
0, if this object represents a time interval.
### Response:
def minutes_from_utc(self):
"""
The timezone offset of this point in time object as +/- minutes from
UTC.
A positive value of the timezone offset indicates minutes east of UTC,
and a negative value indicates minutes west of UTC.
0, if this object represents a time interval.
"""
offset = 0
if self.__datetime is not None and \
self.__datetime.utcoffset() is not None:
offset = self.__datetime.utcoffset().seconds / 60
if self.__datetime.utcoffset().days == -1:
offset = -((60 * 24) - offset)
return int(offset) |
def reset(self):
"""Reset the barrier to the initial state.
Any threads currently waiting will get the BrokenBarrier exception
raised.
"""
with self._cond:
if self._count > 0:
if self._state == 0:
#reset the barrier, waking up threads
self._state = -1
elif self._state == -2:
#was broken, set it to reset state
#which clears when the last thread exits
self._state = -1
else:
self._state = 0
self._cond.notify_all() | Reset the barrier to the initial state.
Any threads currently waiting will get the BrokenBarrier exception
raised. | Below is the the instruction that describes the task:
### Input:
Reset the barrier to the initial state.
Any threads currently waiting will get the BrokenBarrier exception
raised.
### Response:
def reset(self):
"""Reset the barrier to the initial state.
Any threads currently waiting will get the BrokenBarrier exception
raised.
"""
with self._cond:
if self._count > 0:
if self._state == 0:
#reset the barrier, waking up threads
self._state = -1
elif self._state == -2:
#was broken, set it to reset state
#which clears when the last thread exits
self._state = -1
else:
self._state = 0
self._cond.notify_all() |
def _get_per_data_dir(self, dir_base, subpath):
"""Extend the given base directory with a per-data component.
The method creates a private path for the
:class:`~resolwe.flow.models.Data` object, such as::
./test_data/1/
if ``base_dir`` is ``'./test_data'`` and ``subpath`` is ``1``.
:param dir_base: The base path to be extended. This will usually
be one of the directories configured in the
``FLOW_EXECUTOR`` setting.
:param subpath: Objects's subpath used for the extending.
:return: The new path for the :class:`~resolwe.flow.models.Data`
object.
:rtype: str
"""
# Use Django settings here, because the state must be preserved
# across events. This also implies the directory settings can't
# be patched outside the manager and then just sent along in the
# command packets.
result = self.settings_actual.get('FLOW_EXECUTOR', {}).get(dir_base, '')
return os.path.join(result, subpath) | Extend the given base directory with a per-data component.
The method creates a private path for the
:class:`~resolwe.flow.models.Data` object, such as::
./test_data/1/
if ``base_dir`` is ``'./test_data'`` and ``subpath`` is ``1``.
:param dir_base: The base path to be extended. This will usually
be one of the directories configured in the
``FLOW_EXECUTOR`` setting.
:param subpath: Objects's subpath used for the extending.
:return: The new path for the :class:`~resolwe.flow.models.Data`
object.
:rtype: str | Below is the the instruction that describes the task:
### Input:
Extend the given base directory with a per-data component.
The method creates a private path for the
:class:`~resolwe.flow.models.Data` object, such as::
./test_data/1/
if ``base_dir`` is ``'./test_data'`` and ``subpath`` is ``1``.
:param dir_base: The base path to be extended. This will usually
be one of the directories configured in the
``FLOW_EXECUTOR`` setting.
:param subpath: Objects's subpath used for the extending.
:return: The new path for the :class:`~resolwe.flow.models.Data`
object.
:rtype: str
### Response:
def _get_per_data_dir(self, dir_base, subpath):
"""Extend the given base directory with a per-data component.
The method creates a private path for the
:class:`~resolwe.flow.models.Data` object, such as::
./test_data/1/
if ``base_dir`` is ``'./test_data'`` and ``subpath`` is ``1``.
:param dir_base: The base path to be extended. This will usually
be one of the directories configured in the
``FLOW_EXECUTOR`` setting.
:param subpath: Objects's subpath used for the extending.
:return: The new path for the :class:`~resolwe.flow.models.Data`
object.
:rtype: str
"""
# Use Django settings here, because the state must be preserved
# across events. This also implies the directory settings can't
# be patched outside the manager and then just sent along in the
# command packets.
result = self.settings_actual.get('FLOW_EXECUTOR', {}).get(dir_base, '')
return os.path.join(result, subpath) |
def filter_samples(names, seqs, sep='.'):
"""
When there are uncollapsed contigs within the same sample, only retain the
first seq, or the seq that is most abundant (with cluster_size).
"""
seen = set()
filtered_names, filtered_seqs = [], []
for name, seq in zip(names, seqs):
samp = name.split(sep, 1)[0]
if samp in seen:
continue
seen.add(samp)
filtered_names.append(name)
filtered_seqs.append(seq)
nfiltered, nnames = len(filtered_names), len(names)
assert nfiltered == len(seen)
return filtered_names, filtered_seqs, seen | When there are uncollapsed contigs within the same sample, only retain the
first seq, or the seq that is most abundant (with cluster_size). | Below is the the instruction that describes the task:
### Input:
When there are uncollapsed contigs within the same sample, only retain the
first seq, or the seq that is most abundant (with cluster_size).
### Response:
def filter_samples(names, seqs, sep='.'):
"""
When there are uncollapsed contigs within the same sample, only retain the
first seq, or the seq that is most abundant (with cluster_size).
"""
seen = set()
filtered_names, filtered_seqs = [], []
for name, seq in zip(names, seqs):
samp = name.split(sep, 1)[0]
if samp in seen:
continue
seen.add(samp)
filtered_names.append(name)
filtered_seqs.append(seq)
nfiltered, nnames = len(filtered_names), len(names)
assert nfiltered == len(seen)
return filtered_names, filtered_seqs, seen |
def _call(self, x, out=None):
"""Extend ``x`` from the subspace."""
if out is None:
out = self.range.zero()
else:
out.set_zero()
out[self.index] = x
return out | Extend ``x`` from the subspace. | Below is the the instruction that describes the task:
### Input:
Extend ``x`` from the subspace.
### Response:
def _call(self, x, out=None):
"""Extend ``x`` from the subspace."""
if out is None:
out = self.range.zero()
else:
out.set_zero()
out[self.index] = x
return out |
def missing_keyword_message(sender, missing_keyword_exception):
"""Display an error when there is missing keyword.
:param sender: The sender.
:type sender: object
:param missing_keyword_exception: A KeywordNotFoundError exception.
:type missing_keyword_exception: KeywordNotFoundError
"""
warning_heading = m.Heading(
tr('Missing Keyword'), **WARNING_STYLE)
warning_message = tr(
'There is missing keyword that needed for this analysis.')
detail_heading = m.Heading(
tr('Detail'), **DETAILS_STYLE)
suggestion_heading = m.Heading(
tr('Suggestion'), **DETAILS_STYLE)
detail = tr(
'The layer <b>%s</b> is missing the keyword <i>%s</i>.' % (
missing_keyword_exception.layer_name,
missing_keyword_exception.keyword
)
)
suggestion = m.Paragraph(
tr('Please use the keyword wizard to update the keywords. You '
'can open the wizard by clicking on the '),
m.Image(
'file:///%s/img/icons/'
'show-keyword-wizard.svg' % resources_path(),
**SMALL_ICON_STYLE),
tr(' icon in the toolbar.')
)
message = m.Message()
message.add(warning_heading)
message.add(warning_message)
message.add(detail_heading)
message.add(detail)
message.add(suggestion_heading)
message.add(suggestion)
send_static_message(sender, message) | Display an error when there is missing keyword.
:param sender: The sender.
:type sender: object
:param missing_keyword_exception: A KeywordNotFoundError exception.
:type missing_keyword_exception: KeywordNotFoundError | Below is the the instruction that describes the task:
### Input:
Display an error when there is missing keyword.
:param sender: The sender.
:type sender: object
:param missing_keyword_exception: A KeywordNotFoundError exception.
:type missing_keyword_exception: KeywordNotFoundError
### Response:
def missing_keyword_message(sender, missing_keyword_exception):
"""Display an error when there is missing keyword.
:param sender: The sender.
:type sender: object
:param missing_keyword_exception: A KeywordNotFoundError exception.
:type missing_keyword_exception: KeywordNotFoundError
"""
warning_heading = m.Heading(
tr('Missing Keyword'), **WARNING_STYLE)
warning_message = tr(
'There is missing keyword that needed for this analysis.')
detail_heading = m.Heading(
tr('Detail'), **DETAILS_STYLE)
suggestion_heading = m.Heading(
tr('Suggestion'), **DETAILS_STYLE)
detail = tr(
'The layer <b>%s</b> is missing the keyword <i>%s</i>.' % (
missing_keyword_exception.layer_name,
missing_keyword_exception.keyword
)
)
suggestion = m.Paragraph(
tr('Please use the keyword wizard to update the keywords. You '
'can open the wizard by clicking on the '),
m.Image(
'file:///%s/img/icons/'
'show-keyword-wizard.svg' % resources_path(),
**SMALL_ICON_STYLE),
tr(' icon in the toolbar.')
)
message = m.Message()
message.add(warning_heading)
message.add(warning_message)
message.add(detail_heading)
message.add(detail)
message.add(suggestion_heading)
message.add(suggestion)
send_static_message(sender, message) |
def query(self, domain):
"""The actual query happens here. Time from queries is replaced with isoformat.
:param domain: The domain which should gets queried.
:type domain: str
:returns: List of dicts containing the search results.
:rtype: [list, dict]
"""
result = {}
try:
result = self.pdns.query(domain)
except:
self.error('Exception while querying passiveDNS. Check the domain format.')
# Clean the datetime problems in order to correct the json serializability
clean_result = []
for ind, resultset in enumerate(result):
if resultset.get('time_first', None):
resultset['time_first'] = resultset.get('time_first').isoformat(' ')
if resultset.get('time_last', None):
resultset['time_last'] = resultset.get('time_last').isoformat(' ')
clean_result.append(resultset)
return clean_result | The actual query happens here. Time from queries is replaced with isoformat.
:param domain: The domain which should gets queried.
:type domain: str
:returns: List of dicts containing the search results.
:rtype: [list, dict] | Below is the the instruction that describes the task:
### Input:
The actual query happens here. Time from queries is replaced with isoformat.
:param domain: The domain which should gets queried.
:type domain: str
:returns: List of dicts containing the search results.
:rtype: [list, dict]
### Response:
def query(self, domain):
"""The actual query happens here. Time from queries is replaced with isoformat.
:param domain: The domain which should gets queried.
:type domain: str
:returns: List of dicts containing the search results.
:rtype: [list, dict]
"""
result = {}
try:
result = self.pdns.query(domain)
except:
self.error('Exception while querying passiveDNS. Check the domain format.')
# Clean the datetime problems in order to correct the json serializability
clean_result = []
for ind, resultset in enumerate(result):
if resultset.get('time_first', None):
resultset['time_first'] = resultset.get('time_first').isoformat(' ')
if resultset.get('time_last', None):
resultset['time_last'] = resultset.get('time_last').isoformat(' ')
clean_result.append(resultset)
return clean_result |
def _get_samtools0_path(self):
"""Retrieve PATH to the samtools version specific for eriscript.
"""
samtools_path = os.path.realpath(os.path.join(self._get_ericscript_path(),"..", "..", "bin"))
return samtools_path | Retrieve PATH to the samtools version specific for eriscript. | Below is the the instruction that describes the task:
### Input:
Retrieve PATH to the samtools version specific for eriscript.
### Response:
def _get_samtools0_path(self):
"""Retrieve PATH to the samtools version specific for eriscript.
"""
samtools_path = os.path.realpath(os.path.join(self._get_ericscript_path(),"..", "..", "bin"))
return samtools_path |
def get_system_uptime_output_show_system_uptime_minutes(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
get_system_uptime = ET.Element("get_system_uptime")
config = get_system_uptime
output = ET.SubElement(get_system_uptime, "output")
show_system_uptime = ET.SubElement(output, "show-system-uptime")
rbridge_id_key = ET.SubElement(show_system_uptime, "rbridge-id")
rbridge_id_key.text = kwargs.pop('rbridge_id')
minutes = ET.SubElement(show_system_uptime, "minutes")
minutes.text = kwargs.pop('minutes')
callback = kwargs.pop('callback', self._callback)
return callback(config) | Auto Generated Code | Below is the the instruction that describes the task:
### Input:
Auto Generated Code
### Response:
def get_system_uptime_output_show_system_uptime_minutes(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
get_system_uptime = ET.Element("get_system_uptime")
config = get_system_uptime
output = ET.SubElement(get_system_uptime, "output")
show_system_uptime = ET.SubElement(output, "show-system-uptime")
rbridge_id_key = ET.SubElement(show_system_uptime, "rbridge-id")
rbridge_id_key.text = kwargs.pop('rbridge_id')
minutes = ET.SubElement(show_system_uptime, "minutes")
minutes.text = kwargs.pop('minutes')
callback = kwargs.pop('callback', self._callback)
return callback(config) |
def DbUnExportEvent(self, argin):
""" Mark one event channel as non exported in database
:param argin: name of event channel or factory to unexport
:type: tango.DevString
:return: none
:rtype: tango.DevVoid """
self._log.debug("In DbUnExportEvent()")
event_name = argin[0].lower()
self.db.unexport_event(event_name) | Mark one event channel as non exported in database
:param argin: name of event channel or factory to unexport
:type: tango.DevString
:return: none
:rtype: tango.DevVoid | Below is the the instruction that describes the task:
### Input:
Mark one event channel as non exported in database
:param argin: name of event channel or factory to unexport
:type: tango.DevString
:return: none
:rtype: tango.DevVoid
### Response:
def DbUnExportEvent(self, argin):
""" Mark one event channel as non exported in database
:param argin: name of event channel or factory to unexport
:type: tango.DevString
:return: none
:rtype: tango.DevVoid """
self._log.debug("In DbUnExportEvent()")
event_name = argin[0].lower()
self.db.unexport_event(event_name) |
def put(self, url, data):
"""
Make a PUT request to save data.
data should be a dictionary.
"""
response = self._run_method('PUT', url, data=data)
log.debug("OUTPUT: %s" % response.content)
return self._handle_response(url, response) | Make a PUT request to save data.
data should be a dictionary. | Below is the the instruction that describes the task:
### Input:
Make a PUT request to save data.
data should be a dictionary.
### Response:
def put(self, url, data):
"""
Make a PUT request to save data.
data should be a dictionary.
"""
response = self._run_method('PUT', url, data=data)
log.debug("OUTPUT: %s" % response.content)
return self._handle_response(url, response) |
def select(cls, *args, **kwargs):
"""Support read slaves."""
query = super(Model, cls).select(*args, **kwargs)
query.database = cls._get_read_database()
return query | Support read slaves. | Below is the the instruction that describes the task:
### Input:
Support read slaves.
### Response:
def select(cls, *args, **kwargs):
"""Support read slaves."""
query = super(Model, cls).select(*args, **kwargs)
query.database = cls._get_read_database()
return query |
def add(self, values, times, ifos):
"""Add values to the internal buffer
Parameters
----------
values: numpy.ndarray
Array of elements to add to the internal buffer.
times: dict of arrays
The current time to use for each element being added.
ifos: list of strs
The set of timers to be incremented.
"""
for ifo in ifos:
self.time[ifo] += 1
# Resize the internal buffer if we need more space
if self.index + len(values) >= len(self.buffer):
newlen = len(self.buffer) * 2
for ifo in self.ifos:
self.timer[ifo].resize(newlen)
self.buffer.resize(newlen)
self.buffer[self.index:self.index+len(values)] = values
if len(values) > 0:
for ifo in self.ifos:
self.timer[ifo][self.index:self.index+len(values)] = times[ifo]
self.index += len(values)
# Remove the expired old elements
keep = None
for ifo in ifos:
kt = self.timer[ifo][:self.index] >= self.time[ifo] - self.expiration
keep = numpy.logical_and(keep, kt) if keep is not None else kt
self.buffer[:keep.sum()] = self.buffer[:self.index][keep]
for ifo in self.ifos:
self.timer[ifo][:keep.sum()] = self.timer[ifo][:self.index][keep]
self.index = keep.sum() | Add values to the internal buffer
Parameters
----------
values: numpy.ndarray
Array of elements to add to the internal buffer.
times: dict of arrays
The current time to use for each element being added.
ifos: list of strs
The set of timers to be incremented. | Below is the the instruction that describes the task:
### Input:
Add values to the internal buffer
Parameters
----------
values: numpy.ndarray
Array of elements to add to the internal buffer.
times: dict of arrays
The current time to use for each element being added.
ifos: list of strs
The set of timers to be incremented.
### Response:
def add(self, values, times, ifos):
"""Add values to the internal buffer
Parameters
----------
values: numpy.ndarray
Array of elements to add to the internal buffer.
times: dict of arrays
The current time to use for each element being added.
ifos: list of strs
The set of timers to be incremented.
"""
for ifo in ifos:
self.time[ifo] += 1
# Resize the internal buffer if we need more space
if self.index + len(values) >= len(self.buffer):
newlen = len(self.buffer) * 2
for ifo in self.ifos:
self.timer[ifo].resize(newlen)
self.buffer.resize(newlen)
self.buffer[self.index:self.index+len(values)] = values
if len(values) > 0:
for ifo in self.ifos:
self.timer[ifo][self.index:self.index+len(values)] = times[ifo]
self.index += len(values)
# Remove the expired old elements
keep = None
for ifo in ifos:
kt = self.timer[ifo][:self.index] >= self.time[ifo] - self.expiration
keep = numpy.logical_and(keep, kt) if keep is not None else kt
self.buffer[:keep.sum()] = self.buffer[:self.index][keep]
for ifo in self.ifos:
self.timer[ifo][:keep.sum()] = self.timer[ifo][:self.index][keep]
self.index = keep.sum() |
def _check_pkg_version_format(pkg):
'''
Takes a package name and version specification (if any) and checks it using
the pip library.
'''
ret = {'result': False, 'comment': None,
'prefix': None, 'version_spec': None}
if not HAS_PIP:
ret['comment'] = (
'An importable Python 2 pip module is required but could not be '
'found on your system. This usually means that the system\'s pip '
'package is not installed properly.'
)
return ret
from_vcs = False
try:
# Get the requirement object from the pip library
try:
# With pip < 1.2, the __version__ attribute does not exist and
# vcs+URL urls are not properly parsed.
# The next line is meant to trigger an AttributeError and
# handle lower pip versions
log.debug('Installed pip version: %s', pip.__version__)
install_req = _from_line(pkg)
except AttributeError:
log.debug('Installed pip version is lower than 1.2')
supported_vcs = ('git', 'svn', 'hg', 'bzr')
if pkg.startswith(supported_vcs):
for vcs in supported_vcs:
if pkg.startswith(vcs):
from_vcs = True
install_req = _from_line(
pkg.split('{0}+'.format(vcs))[-1]
)
break
else:
install_req = _from_line(pkg)
except (ValueError, InstallationError) as exc:
ret['result'] = False
if not from_vcs and '=' in pkg and '==' not in pkg:
ret['comment'] = (
'Invalid version specification in package {0}. \'=\' is '
'not supported, use \'==\' instead.'.format(pkg)
)
return ret
ret['comment'] = (
'pip raised an exception while parsing \'{0}\': {1}'.format(
pkg, exc
)
)
return ret
if install_req.req is None:
# This is most likely an url and there's no way to know what will
# be installed before actually installing it.
ret['result'] = True
ret['prefix'] = ''
ret['version_spec'] = []
else:
ret['result'] = True
try:
ret['prefix'] = install_req.req.project_name
ret['version_spec'] = install_req.req.specs
except Exception:
ret['prefix'] = re.sub('[^A-Za-z0-9.]+', '-', install_req.name)
if hasattr(install_req, "specifier"):
specifier = install_req.specifier
else:
specifier = install_req.req.specifier
ret['version_spec'] = [(spec.operator, spec.version) for spec in specifier]
return ret | Takes a package name and version specification (if any) and checks it using
the pip library. | Below is the the instruction that describes the task:
### Input:
Takes a package name and version specification (if any) and checks it using
the pip library.
### Response:
def _check_pkg_version_format(pkg):
'''
Takes a package name and version specification (if any) and checks it using
the pip library.
'''
ret = {'result': False, 'comment': None,
'prefix': None, 'version_spec': None}
if not HAS_PIP:
ret['comment'] = (
'An importable Python 2 pip module is required but could not be '
'found on your system. This usually means that the system\'s pip '
'package is not installed properly.'
)
return ret
from_vcs = False
try:
# Get the requirement object from the pip library
try:
# With pip < 1.2, the __version__ attribute does not exist and
# vcs+URL urls are not properly parsed.
# The next line is meant to trigger an AttributeError and
# handle lower pip versions
log.debug('Installed pip version: %s', pip.__version__)
install_req = _from_line(pkg)
except AttributeError:
log.debug('Installed pip version is lower than 1.2')
supported_vcs = ('git', 'svn', 'hg', 'bzr')
if pkg.startswith(supported_vcs):
for vcs in supported_vcs:
if pkg.startswith(vcs):
from_vcs = True
install_req = _from_line(
pkg.split('{0}+'.format(vcs))[-1]
)
break
else:
install_req = _from_line(pkg)
except (ValueError, InstallationError) as exc:
ret['result'] = False
if not from_vcs and '=' in pkg and '==' not in pkg:
ret['comment'] = (
'Invalid version specification in package {0}. \'=\' is '
'not supported, use \'==\' instead.'.format(pkg)
)
return ret
ret['comment'] = (
'pip raised an exception while parsing \'{0}\': {1}'.format(
pkg, exc
)
)
return ret
if install_req.req is None:
# This is most likely an url and there's no way to know what will
# be installed before actually installing it.
ret['result'] = True
ret['prefix'] = ''
ret['version_spec'] = []
else:
ret['result'] = True
try:
ret['prefix'] = install_req.req.project_name
ret['version_spec'] = install_req.req.specs
except Exception:
ret['prefix'] = re.sub('[^A-Za-z0-9.]+', '-', install_req.name)
if hasattr(install_req, "specifier"):
specifier = install_req.specifier
else:
specifier = install_req.req.specifier
ret['version_spec'] = [(spec.operator, spec.version) for spec in specifier]
return ret |
def dp_from_p(p, ps, p_top=0., p_bot=1.1e5):
"""Get level thickness of pressure data, incorporating surface pressure.
Level edges are defined as halfway between the levels, as well as the user-
specified uppermost and lowermost values. The dp of levels whose bottom
pressure is less than the surface pressure is not changed by ps, since they
don't intersect the surface. If ps is in between a level's top and bottom
pressures, then its dp becomes the pressure difference between its top and
ps. If ps is less than a level's top and bottom pressures, then that level
is underground and its values are masked.
Note that postprocessing routines (e.g. at GFDL) typically mask out data
wherever the surface pressure is less than the level's given value, not the
level's upper edge. This masks out more levels than the
"""
p_str = get_dim_name(p, (internal_names.PLEVEL_STR, 'plev'))
p_vals = to_pascal(p.values.copy())
# Layer edges are halfway between the given pressure levels.
p_edges_interior = 0.5*(p_vals[:-1] + p_vals[1:])
p_edges = np.concatenate(([p_bot], p_edges_interior, [p_top]))
p_edge_above = p_edges[1:]
p_edge_below = p_edges[:-1]
dp = p_edge_below - p_edge_above
if not all(np.sign(dp)):
raise ValueError("dp array not all > 0 : {}".format(dp))
# Pressure difference between ps and the upper edge of each pressure level.
p_edge_above_xr = xr.DataArray(p_edge_above, dims=p.dims, coords=p.coords)
dp_to_sfc = ps - p_edge_above_xr
# Find the level adjacent to the masked, under-ground levels.
change = xr.DataArray(np.zeros(dp_to_sfc.shape), dims=dp_to_sfc.dims,
coords=dp_to_sfc.coords)
change[{p_str: slice(1, None)}] = np.diff(
np.sign(ps - to_pascal(p.copy()))
)
dp_combined = xr.DataArray(np.where(change, dp_to_sfc, dp),
dims=dp_to_sfc.dims, coords=dp_to_sfc.coords)
# Mask levels that are under ground.
above_ground = ps > to_pascal(p.copy())
above_ground[p_str] = p[p_str]
dp_with_ps = dp_combined.where(above_ground)
# Revert to original dim order.
possible_dim_orders = [
(internal_names.TIME_STR, p_str, internal_names.LAT_STR,
internal_names.LON_STR),
(internal_names.TIME_STR, p_str, internal_names.LAT_STR),
(internal_names.TIME_STR, p_str, internal_names.LON_STR),
(internal_names.TIME_STR, p_str),
(p_str, internal_names.LAT_STR, internal_names.LON_STR),
(p_str, internal_names.LAT_STR),
(p_str, internal_names.LON_STR),
(p_str,),
]
for dim_order in possible_dim_orders:
try:
return dp_with_ps.transpose(*dim_order)
except ValueError:
logging.debug("Failed transpose to dims: {}".format(dim_order))
else:
logging.debug("No transpose was successful.")
return dp_with_ps | Get level thickness of pressure data, incorporating surface pressure.
Level edges are defined as halfway between the levels, as well as the user-
specified uppermost and lowermost values. The dp of levels whose bottom
pressure is less than the surface pressure is not changed by ps, since they
don't intersect the surface. If ps is in between a level's top and bottom
pressures, then its dp becomes the pressure difference between its top and
ps. If ps is less than a level's top and bottom pressures, then that level
is underground and its values are masked.
Note that postprocessing routines (e.g. at GFDL) typically mask out data
wherever the surface pressure is less than the level's given value, not the
level's upper edge. This masks out more levels than the | Below is the the instruction that describes the task:
### Input:
Get level thickness of pressure data, incorporating surface pressure.
Level edges are defined as halfway between the levels, as well as the user-
specified uppermost and lowermost values. The dp of levels whose bottom
pressure is less than the surface pressure is not changed by ps, since they
don't intersect the surface. If ps is in between a level's top and bottom
pressures, then its dp becomes the pressure difference between its top and
ps. If ps is less than a level's top and bottom pressures, then that level
is underground and its values are masked.
Note that postprocessing routines (e.g. at GFDL) typically mask out data
wherever the surface pressure is less than the level's given value, not the
level's upper edge. This masks out more levels than the
### Response:
def dp_from_p(p, ps, p_top=0., p_bot=1.1e5):
"""Get level thickness of pressure data, incorporating surface pressure.
Level edges are defined as halfway between the levels, as well as the user-
specified uppermost and lowermost values. The dp of levels whose bottom
pressure is less than the surface pressure is not changed by ps, since they
don't intersect the surface. If ps is in between a level's top and bottom
pressures, then its dp becomes the pressure difference between its top and
ps. If ps is less than a level's top and bottom pressures, then that level
is underground and its values are masked.
Note that postprocessing routines (e.g. at GFDL) typically mask out data
wherever the surface pressure is less than the level's given value, not the
level's upper edge. This masks out more levels than the
"""
p_str = get_dim_name(p, (internal_names.PLEVEL_STR, 'plev'))
p_vals = to_pascal(p.values.copy())
# Layer edges are halfway between the given pressure levels.
p_edges_interior = 0.5*(p_vals[:-1] + p_vals[1:])
p_edges = np.concatenate(([p_bot], p_edges_interior, [p_top]))
p_edge_above = p_edges[1:]
p_edge_below = p_edges[:-1]
dp = p_edge_below - p_edge_above
if not all(np.sign(dp)):
raise ValueError("dp array not all > 0 : {}".format(dp))
# Pressure difference between ps and the upper edge of each pressure level.
p_edge_above_xr = xr.DataArray(p_edge_above, dims=p.dims, coords=p.coords)
dp_to_sfc = ps - p_edge_above_xr
# Find the level adjacent to the masked, under-ground levels.
change = xr.DataArray(np.zeros(dp_to_sfc.shape), dims=dp_to_sfc.dims,
coords=dp_to_sfc.coords)
change[{p_str: slice(1, None)}] = np.diff(
np.sign(ps - to_pascal(p.copy()))
)
dp_combined = xr.DataArray(np.where(change, dp_to_sfc, dp),
dims=dp_to_sfc.dims, coords=dp_to_sfc.coords)
# Mask levels that are under ground.
above_ground = ps > to_pascal(p.copy())
above_ground[p_str] = p[p_str]
dp_with_ps = dp_combined.where(above_ground)
# Revert to original dim order.
possible_dim_orders = [
(internal_names.TIME_STR, p_str, internal_names.LAT_STR,
internal_names.LON_STR),
(internal_names.TIME_STR, p_str, internal_names.LAT_STR),
(internal_names.TIME_STR, p_str, internal_names.LON_STR),
(internal_names.TIME_STR, p_str),
(p_str, internal_names.LAT_STR, internal_names.LON_STR),
(p_str, internal_names.LAT_STR),
(p_str, internal_names.LON_STR),
(p_str,),
]
for dim_order in possible_dim_orders:
try:
return dp_with_ps.transpose(*dim_order)
except ValueError:
logging.debug("Failed transpose to dims: {}".format(dim_order))
else:
logging.debug("No transpose was successful.")
return dp_with_ps |
def decode(self, encoding=None):
''' Returns a copy with all keys and values de- or recoded to match
:attr:`input_encoding`. Some libraries (e.g. WTForms) want a
unicode dictionary. '''
copy = FormsDict()
enc = copy.input_encoding = encoding or self.input_encoding
copy.recode_unicode = False
for key, value in self.allitems():
copy.append(self._fix(key, enc), self._fix(value, enc))
return copy | Returns a copy with all keys and values de- or recoded to match
:attr:`input_encoding`. Some libraries (e.g. WTForms) want a
unicode dictionary. | Below is the the instruction that describes the task:
### Input:
Returns a copy with all keys and values de- or recoded to match
:attr:`input_encoding`. Some libraries (e.g. WTForms) want a
unicode dictionary.
### Response:
def decode(self, encoding=None):
''' Returns a copy with all keys and values de- or recoded to match
:attr:`input_encoding`. Some libraries (e.g. WTForms) want a
unicode dictionary. '''
copy = FormsDict()
enc = copy.input_encoding = encoding or self.input_encoding
copy.recode_unicode = False
for key, value in self.allitems():
copy.append(self._fix(key, enc), self._fix(value, enc))
return copy |
def get_outcome_results(self, course_id, include=None, outcome_ids=None, user_ids=None):
"""
Get outcome results.
Gets the outcome results for users and outcomes in the specified context.
"""
path = {}
data = {}
params = {}
# REQUIRED - PATH - course_id
"""ID"""
path["course_id"] = course_id
# OPTIONAL - user_ids
"""If specified, only the users whose ids are given will be included in the
results. SIS ids can be used, prefixed by "sis_user_id:".
It is an error to specify an id for a user who is not a student in
the context."""
if user_ids is not None:
params["user_ids"] = user_ids
# OPTIONAL - outcome_ids
"""If specified, only the outcomes whose ids are given will be included in the
results. it is an error to specify an id for an outcome which is not linked
to the context."""
if outcome_ids is not None:
params["outcome_ids"] = outcome_ids
# OPTIONAL - include
"""[String, "alignments"|"outcomes"|"outcomes.alignments"|"outcome_groups"|"outcome_links"|"outcome_paths"|"users"]
Specify additional collections to be side loaded with the result.
"alignments" includes only the alignments referenced by the returned
results.
"outcomes.alignments" includes all alignments referenced by outcomes in the
context."""
if include is not None:
params["include"] = include
self.logger.debug("GET /api/v1/courses/{course_id}/outcome_results with query params: {params} and form data: {data}".format(params=params, data=data, **path))
return self.generic_request("GET", "/api/v1/courses/{course_id}/outcome_results".format(**path), data=data, params=params, no_data=True) | Get outcome results.
Gets the outcome results for users and outcomes in the specified context. | Below is the the instruction that describes the task:
### Input:
Get outcome results.
Gets the outcome results for users and outcomes in the specified context.
### Response:
def get_outcome_results(self, course_id, include=None, outcome_ids=None, user_ids=None):
"""
Get outcome results.
Gets the outcome results for users and outcomes in the specified context.
"""
path = {}
data = {}
params = {}
# REQUIRED - PATH - course_id
"""ID"""
path["course_id"] = course_id
# OPTIONAL - user_ids
"""If specified, only the users whose ids are given will be included in the
results. SIS ids can be used, prefixed by "sis_user_id:".
It is an error to specify an id for a user who is not a student in
the context."""
if user_ids is not None:
params["user_ids"] = user_ids
# OPTIONAL - outcome_ids
"""If specified, only the outcomes whose ids are given will be included in the
results. it is an error to specify an id for an outcome which is not linked
to the context."""
if outcome_ids is not None:
params["outcome_ids"] = outcome_ids
# OPTIONAL - include
"""[String, "alignments"|"outcomes"|"outcomes.alignments"|"outcome_groups"|"outcome_links"|"outcome_paths"|"users"]
Specify additional collections to be side loaded with the result.
"alignments" includes only the alignments referenced by the returned
results.
"outcomes.alignments" includes all alignments referenced by outcomes in the
context."""
if include is not None:
params["include"] = include
self.logger.debug("GET /api/v1/courses/{course_id}/outcome_results with query params: {params} and form data: {data}".format(params=params, data=data, **path))
return self.generic_request("GET", "/api/v1/courses/{course_id}/outcome_results".format(**path), data=data, params=params, no_data=True) |
def call_servo(examples, serving_bundle):
  """Send an RPC request to the Servomatic prediction service.
  Args:
    examples: A list of examples that matches the model spec.
    serving_bundle: A `ServingBundle` object that contains the information to
      make the serving request.
  Returns:
    A ClassificationResponse or RegressionResponse proto.
  """
  # The scheme is only added so urlparse splits the address into host/port.
  parsed_url = urlparse('http://' + serving_bundle.inference_address)
  channel = implementations.insecure_channel(parsed_url.hostname,
                                             parsed_url.port)
  stub = prediction_service_pb2.beta_create_PredictionService_stub(channel)
  # Pick the request proto matching how the model is served: the generic
  # Predict API, or the typed Classify/Regress APIs.
  if serving_bundle.use_predict:
    request = predict_pb2.PredictRequest()
  elif serving_bundle.model_type == 'classification':
    request = classification_pb2.ClassificationRequest()
  else:
    request = regression_pb2.RegressionRequest()
  request.model_spec.name = serving_bundle.model_name
  # Version and signature are optional; the server defaults them when unset.
  if serving_bundle.model_version is not None:
    request.model_spec.version.value = serving_bundle.model_version
  if serving_bundle.signature is not None:
    request.model_spec.signature_name = serving_bundle.signature
  if serving_bundle.use_predict:
    # tf.compat.v1 API used here to convert tf.example into proto. This
    # utility file is bundled in the witwidget pip package which has a dep
    # on TensorFlow.
    # The serialized examples are sent as a single DT_STRING tensor under the
    # configured input tensor name.
    request.inputs[serving_bundle.predict_input_tensor].CopyFrom(
        tf.compat.v1.make_tensor_proto(
            values=[ex.SerializeToString() for ex in examples],
            dtype=types_pb2.DT_STRING))
  else:
    # Classify/Regress accept the examples directly in the request proto.
    request.input.example_list.examples.extend(examples)
  # All RPCs below use a 30-second deadline; Predict responses are converted
  # to the classification/regression shape the caller expects.
  if serving_bundle.use_predict:
    return common_utils.convert_predict_response(
        stub.Predict(request, 30.0), serving_bundle)  # 30 secs timeout
  elif serving_bundle.model_type == 'classification':
    return stub.Classify(request, 30.0)  # 30 secs timeout
  else:
return stub.Regress(request, 30.0) | Send an RPC request to the Servomatic prediction service.
Args:
examples: A list of examples that matches the model spec.
serving_bundle: A `ServingBundle` object that contains the information to
make the serving request.
Returns:
A ClassificationResponse or RegressionResponse proto. | Below is the the instruction that describes the task:
### Input:
Send an RPC request to the Servomatic prediction service.
Args:
examples: A list of examples that matches the model spec.
serving_bundle: A `ServingBundle` object that contains the information to
make the serving request.
Returns:
A ClassificationResponse or RegressionResponse proto.
### Response:
def call_servo(examples, serving_bundle):
"""Send an RPC request to the Servomatic prediction service.
Args:
examples: A list of examples that matches the model spec.
serving_bundle: A `ServingBundle` object that contains the information to
make the serving request.
Returns:
A ClassificationResponse or RegressionResponse proto.
"""
parsed_url = urlparse('http://' + serving_bundle.inference_address)
channel = implementations.insecure_channel(parsed_url.hostname,
parsed_url.port)
stub = prediction_service_pb2.beta_create_PredictionService_stub(channel)
if serving_bundle.use_predict:
request = predict_pb2.PredictRequest()
elif serving_bundle.model_type == 'classification':
request = classification_pb2.ClassificationRequest()
else:
request = regression_pb2.RegressionRequest()
request.model_spec.name = serving_bundle.model_name
if serving_bundle.model_version is not None:
request.model_spec.version.value = serving_bundle.model_version
if serving_bundle.signature is not None:
request.model_spec.signature_name = serving_bundle.signature
if serving_bundle.use_predict:
# tf.compat.v1 API used here to convert tf.example into proto. This
# utility file is bundled in the witwidget pip package which has a dep
# on TensorFlow.
request.inputs[serving_bundle.predict_input_tensor].CopyFrom(
tf.compat.v1.make_tensor_proto(
values=[ex.SerializeToString() for ex in examples],
dtype=types_pb2.DT_STRING))
else:
request.input.example_list.examples.extend(examples)
if serving_bundle.use_predict:
return common_utils.convert_predict_response(
stub.Predict(request, 30.0), serving_bundle) # 30 secs timeout
elif serving_bundle.model_type == 'classification':
return stub.Classify(request, 30.0) # 30 secs timeout
else:
return stub.Regress(request, 30.0) |
def post_dissect(self, data):
        """dissect the IPv6 package compressed into this IPHC packet.
        The packet payload needs to be decompressed and depending on the
        arguments, several conversions should be done.

        :param data: remaining raw bytes after the IPHC header fields.
        """
        # uncompress payload
        packet = IPv6()
        packet.version = IPHC_DEFAULT_VERSION
        packet.tc, packet.fl = self._getTrafficClassAndFlowLabel()
        # nh == 0 means the next header was carried inline, not NHC-compressed.
        if not self.nh:
            packet.nh = self._nhField
        # HLIM: Hop Limit
        # hlim == 0 means the hop limit was carried inline; 1/2/3 map to the
        # compressed well-known values 1, 64 and 255.
        if self.hlim == 0:
            packet.hlim = self._hopLimit
        elif self.hlim == 0x1:
            packet.hlim = 1
        elif self.hlim == 0x2:
            packet.hlim = 64
        else:
            packet.hlim = 255
        # TODO: Payload length can be inferred from lower layers from either the # noqa: E501
        # 6LoWPAN Fragmentation header or the IEEE802.15.4 header
        packet.src = self.decompressSourceAddr(packet)
        packet.dst = self.decompressDestinyAddr(packet)
        if self.nh == 1:
            # The Next Header field is compressed and the next header is
            # encoded using LOWPAN_NHC
            packet.nh = 0x11 # UDP
            udp = UDP()
            # Checksum bit clear => the UDP checksum was carried inline.
            if self.header_compression and \
               self.header_compression & 0x4 == 0x0:
                udp.chksum = self.udpChecksum
            # s/d are the bit-widths of the compressed source/destination ports.
            s, d = nhc_port(self)
            # NOTE(review): for the 8- and 4-bit cases the code adds the field
            # *width* (8 or 4) to the base, not the port bits decoded from the
            # NHC header — this looks wrong versus RFC 6282 §4.3.3; confirm
            # against upstream before relying on the reconstructed ports.
            if s == 16:
                udp.sport = self.udpSourcePort
            elif s == 8:
                udp.sport = 0xF000 + s
            elif s == 4:
                udp.sport = 0xF0B0 + s
            if d == 16:
                udp.dport = self.udpDestinyPort
            elif d == 8:
                udp.dport = 0xF000 + d
            elif d == 4:
                udp.dport = 0xF0B0 + d
            packet.payload = udp / data
            data = raw(packet)
        # else self.nh == 0 not necessary
        elif self._nhField & 0xE0 == 0xE0:  # IPv6 Extension Header Decompression # noqa: E501
            warning('Unimplemented: IPv6 Extension Header decompression')  # noqa: E501
            packet.payload = conf.raw_layer(data)
            data = raw(packet)
        else:
            # Next header carried inline: attach the payload as raw bytes.
            packet.payload = conf.raw_layer(data)
            data = raw(packet)
return Packet.post_dissect(self, data) | dissect the IPv6 package compressed into this IPHC packet.
The packet payload needs to be decompressed and depending on the
arguments, several conversions should be done. | Below is the the instruction that describes the task:
### Input:
dissect the IPv6 package compressed into this IPHC packet.
The packet payload needs to be decompressed and depending on the
arguments, several conversions should be done.
### Response:
def post_dissect(self, data):
"""dissect the IPv6 package compressed into this IPHC packet.
The packet payload needs to be decompressed and depending on the
arguments, several conversions should be done.
"""
# uncompress payload
packet = IPv6()
packet.version = IPHC_DEFAULT_VERSION
packet.tc, packet.fl = self._getTrafficClassAndFlowLabel()
if not self.nh:
packet.nh = self._nhField
# HLIM: Hop Limit
if self.hlim == 0:
packet.hlim = self._hopLimit
elif self.hlim == 0x1:
packet.hlim = 1
elif self.hlim == 0x2:
packet.hlim = 64
else:
packet.hlim = 255
# TODO: Payload length can be inferred from lower layers from either the # noqa: E501
# 6LoWPAN Fragmentation header or the IEEE802.15.4 header
packet.src = self.decompressSourceAddr(packet)
packet.dst = self.decompressDestinyAddr(packet)
if self.nh == 1:
# The Next Header field is compressed and the next header is
# encoded using LOWPAN_NHC
packet.nh = 0x11 # UDP
udp = UDP()
if self.header_compression and \
self.header_compression & 0x4 == 0x0:
udp.chksum = self.udpChecksum
s, d = nhc_port(self)
if s == 16:
udp.sport = self.udpSourcePort
elif s == 8:
udp.sport = 0xF000 + s
elif s == 4:
udp.sport = 0xF0B0 + s
if d == 16:
udp.dport = self.udpDestinyPort
elif d == 8:
udp.dport = 0xF000 + d
elif d == 4:
udp.dport = 0xF0B0 + d
packet.payload = udp / data
data = raw(packet)
# else self.nh == 0 not necessary
elif self._nhField & 0xE0 == 0xE0: # IPv6 Extension Header Decompression # noqa: E501
warning('Unimplemented: IPv6 Extension Header decompression') # noqa: E501
packet.payload = conf.raw_layer(data)
data = raw(packet)
else:
packet.payload = conf.raw_layer(data)
data = raw(packet)
return Packet.post_dissect(self, data) |
def do_next(self, line):
        """Jump to the next entities (ontology, class or property) depending on context"""
        # `line` is the argument string supplied by the cmd.Cmd-style REPL
        # dispatcher — presumably unused here; confirm against the framework.
        if not self.current:
            # No ontology loaded yet: nothing to iterate over.
            print("Please select an ontology first. E.g. use the 'ls ontologies' or 'get ontology <name>' commands.")
        elif self.currentEntity:
            # An entity is selected: advance to the next entity of the same
            # kind within the current ontology graph.
            g = self.current['graph']
            if self.currentEntity['type'] == 'class':
                nextentity = g.nextClass(self.currentEntity['object'].uri)
                self._select_class(str(nextentity.uri))
            elif self.currentEntity['type'] == 'property':
                nextentity = g.nextProperty(self.currentEntity['object'].uri)
                self._select_property(str(nextentity.uri))
            elif self.currentEntity['type'] == 'concept':
                nextentity = g.nextConcept(self.currentEntity['object'].uri)
                self._select_concept(str(nextentity.uri))
            else:
                print("Not implemented")
        else:
            # Ontology selected but no entity: advance to the next ontology.
            # NOTE(review): this branch uses self._print while the ones above
            # use bare print — inconsistent output channels; confirm intent.
            if len(self.all_ontologies) > 1:
                nextonto = self._next_ontology()
                self._load_ontology(nextonto)
            else:
self._print("Only one ontology available in repository.") | Jump to the next entities (ontology, class or property) depending on context | Below is the the instruction that describes the task:
### Input:
Jump to the next entities (ontology, class or property) depending on context
### Response:
def do_next(self, line):
"""Jump to the next entities (ontology, class or property) depending on context"""
if not self.current:
print("Please select an ontology first. E.g. use the 'ls ontologies' or 'get ontology <name>' commands.")
elif self.currentEntity:
g = self.current['graph']
if self.currentEntity['type'] == 'class':
nextentity = g.nextClass(self.currentEntity['object'].uri)
self._select_class(str(nextentity.uri))
elif self.currentEntity['type'] == 'property':
nextentity = g.nextProperty(self.currentEntity['object'].uri)
self._select_property(str(nextentity.uri))
elif self.currentEntity['type'] == 'concept':
nextentity = g.nextConcept(self.currentEntity['object'].uri)
self._select_concept(str(nextentity.uri))
else:
print("Not implemented")
else:
if len(self.all_ontologies) > 1:
nextonto = self._next_ontology()
self._load_ontology(nextonto)
else:
self._print("Only one ontology available in repository.") |
def find_mean_vector(*args, **kwargs):
    """
    Returns the mean vector for a set of measurements. By default, this expects
    the input to be plunges and bearings, but the type of input can be
    controlled through the ``measurement`` kwarg.
    Parameters
    ----------
    *args : 2 or 3 sequences of measurements
        By default, this will be expected to be ``plunge`` & ``bearing``, both
        array-like sequences representing linear features. (Rake measurements
        require three parameters, thus the variable number of arguments.) The
        *measurement* kwarg controls how these arguments are interpreted.
    measurement : string, optional
        Controls how the input arguments are interpreted. Defaults to
        ``"lines"``. May be one of the following:
            ``"poles"`` : strikes, dips
                Arguments are assumed to be sequences of strikes and dips of
                planes. Poles to these planes are used for analysis.
            ``"lines"`` : plunges, bearings
                Arguments are assumed to be sequences of plunges and bearings
                of linear features.
            ``"rakes"`` : strikes, dips, rakes
                Arguments are assumed to be sequences of strikes, dips, and
                rakes along the plane.
            ``"radians"`` : lon, lat
                Arguments are assumed to be "raw" longitudes and latitudes in
                the stereonet's underlying coordinate system.
    Returns
    -------
    mean_vector : tuple of two floats
        The plunge and bearing of the mean vector (in degrees).
    r_value : float
        The length of the mean vector (a value between 0 and 1).
    """
    # Convert the raw arguments to stereonet lon/lat per the requested style,
    # compute the resultant vector, and express it as plunge/bearing.
    measurement = kwargs.get('measurement', 'lines')
    lon, lat = _convert_measurements(args, measurement)
    vector, r_value = stereonet_math.mean_vector(lon, lat)
    plunge, bearing = stereonet_math.geographic2plunge_bearing(*vector)
return (plunge[0], bearing[0]), r_value | Returns the mean vector for a set of measurments. By default, this expects
the input to be plunges and bearings, but the type of input can be
controlled through the ``measurement`` kwarg.
Parameters
----------
*args : 2 or 3 sequences of measurements
By default, this will be expected to be ``plunge`` & ``bearing``, both
array-like sequences representing linear features. (Rake measurements
require three parameters, thus the variable number of arguments.) The
*measurement* kwarg controls how these arguments are interpreted.
measurement : string, optional
Controls how the input arguments are interpreted. Defaults to
``"lines"``. May be one of the following:
``"poles"`` : strikes, dips
Arguments are assumed to be sequences of strikes and dips of
planes. Poles to these planes are used for analysis.
``"lines"`` : plunges, bearings
Arguments are assumed to be sequences of plunges and bearings
of linear features.
``"rakes"`` : strikes, dips, rakes
Arguments are assumed to be sequences of strikes, dips, and
rakes along the plane.
``"radians"`` : lon, lat
Arguments are assumed to be "raw" longitudes and latitudes in
the stereonet's underlying coordinate system.
Returns
-------
mean_vector : tuple of two floats
The plunge and bearing of the mean vector (in degrees).
r_value : float
The length of the mean vector (a value between 0 and 1). | Below is the the instruction that describes the task:
### Input:
Returns the mean vector for a set of measurments. By default, this expects
the input to be plunges and bearings, but the type of input can be
controlled through the ``measurement`` kwarg.
Parameters
----------
*args : 2 or 3 sequences of measurements
By default, this will be expected to be ``plunge`` & ``bearing``, both
array-like sequences representing linear features. (Rake measurements
require three parameters, thus the variable number of arguments.) The
*measurement* kwarg controls how these arguments are interpreted.
measurement : string, optional
Controls how the input arguments are interpreted. Defaults to
``"lines"``. May be one of the following:
``"poles"`` : strikes, dips
Arguments are assumed to be sequences of strikes and dips of
planes. Poles to these planes are used for analysis.
``"lines"`` : plunges, bearings
Arguments are assumed to be sequences of plunges and bearings
of linear features.
``"rakes"`` : strikes, dips, rakes
Arguments are assumed to be sequences of strikes, dips, and
rakes along the plane.
``"radians"`` : lon, lat
Arguments are assumed to be "raw" longitudes and latitudes in
the stereonet's underlying coordinate system.
Returns
-------
mean_vector : tuple of two floats
The plunge and bearing of the mean vector (in degrees).
r_value : float
The length of the mean vector (a value between 0 and 1).
### Response:
def find_mean_vector(*args, **kwargs):
"""
Returns the mean vector for a set of measurments. By default, this expects
the input to be plunges and bearings, but the type of input can be
controlled through the ``measurement`` kwarg.
Parameters
----------
*args : 2 or 3 sequences of measurements
By default, this will be expected to be ``plunge`` & ``bearing``, both
array-like sequences representing linear features. (Rake measurements
require three parameters, thus the variable number of arguments.) The
*measurement* kwarg controls how these arguments are interpreted.
measurement : string, optional
Controls how the input arguments are interpreted. Defaults to
``"lines"``. May be one of the following:
``"poles"`` : strikes, dips
Arguments are assumed to be sequences of strikes and dips of
planes. Poles to these planes are used for analysis.
``"lines"`` : plunges, bearings
Arguments are assumed to be sequences of plunges and bearings
of linear features.
``"rakes"`` : strikes, dips, rakes
Arguments are assumed to be sequences of strikes, dips, and
rakes along the plane.
``"radians"`` : lon, lat
Arguments are assumed to be "raw" longitudes and latitudes in
the stereonet's underlying coordinate system.
Returns
-------
mean_vector : tuple of two floats
The plunge and bearing of the mean vector (in degrees).
r_value : float
The length of the mean vector (a value between 0 and 1).
"""
lon, lat = _convert_measurements(args, kwargs.get('measurement', 'lines'))
vector, r_value = stereonet_math.mean_vector(lon, lat)
plunge, bearing = stereonet_math.geographic2plunge_bearing(*vector)
return (plunge[0], bearing[0]), r_value |
def capabilities(self, width, height, rotate, mode="1"):
        """
        Assigns attributes such as ``width``, ``height``, ``size`` and
        ``bounding_box`` correctly oriented from the supplied parameters.
        :param width: The device width.
        :type width: int
        :param height: The device height.
        :type height: int
        :param rotate: An integer value of 0 (default), 1, 2 or 3 only, where 0 is
            no rotation, 1 is rotate 90° clockwise, 2 is 180° rotation and 3
            represents 270° rotation.
        :type rotate: int
        :param mode: The supported color model, one of ``"1"``, ``"RGB"`` or
            ``"RGBA"`` only.
        :type mode: str
        """
        assert mode in ("1", "RGB", "RGBA")
        assert rotate in (0, 1, 2, 3)
        # Remember the native (unrotated) panel dimensions.
        self._w = width
        self._h = height
        # For 90°/270° rotations the reported width/height are swapped.
        if rotate % 2 == 0:
            self.width, self.height = width, height
        else:
            self.width, self.height = height, width
        self.size = (self.width, self.height)
        self.bounding_box = (0, 0, self.width - 1, self.height - 1)
        self.rotate = rotate
        self.mode = mode
self.persist = False | Assigns attributes such as ``width``, ``height``, ``size`` and
``bounding_box`` correctly oriented from the supplied parameters.
:param width: The device width.
:type width: int
:param height: The device height.
:type height: int
:param rotate: An integer value of 0 (default), 1, 2 or 3 only, where 0 is
no rotation, 1 is rotate 90° clockwise, 2 is 180° rotation and 3
represents 270° rotation.
:type rotate: int
:param mode: The supported color model, one of ``"1"``, ``"RGB"`` or
``"RGBA"`` only.
:type mode: str | Below is the the instruction that describes the task:
### Input:
Assigns attributes such as ``width``, ``height``, ``size`` and
``bounding_box`` correctly oriented from the supplied parameters.
:param width: The device width.
:type width: int
:param height: The device height.
:type height: int
:param rotate: An integer value of 0 (default), 1, 2 or 3 only, where 0 is
no rotation, 1 is rotate 90° clockwise, 2 is 180° rotation and 3
represents 270° rotation.
:type rotate: int
:param mode: The supported color model, one of ``"1"``, ``"RGB"`` or
``"RGBA"`` only.
:type mode: str
### Response:
def capabilities(self, width, height, rotate, mode="1"):
"""
Assigns attributes such as ``width``, ``height``, ``size`` and
``bounding_box`` correctly oriented from the supplied parameters.
:param width: The device width.
:type width: int
:param height: The device height.
:type height: int
:param rotate: An integer value of 0 (default), 1, 2 or 3 only, where 0 is
no rotation, 1 is rotate 90° clockwise, 2 is 180° rotation and 3
represents 270° rotation.
:type rotate: int
:param mode: The supported color model, one of ``"1"``, ``"RGB"`` or
``"RGBA"`` only.
:type mode: str
"""
assert mode in ("1", "RGB", "RGBA")
assert rotate in (0, 1, 2, 3)
self._w = width
self._h = height
self.width = width if rotate % 2 == 0 else height
self.height = height if rotate % 2 == 0 else width
self.size = (self.width, self.height)
self.bounding_box = (0, 0, self.width - 1, self.height - 1)
self.rotate = rotate
self.mode = mode
self.persist = False |
def list_external_tools_courses(self, course_id, include_parents=None, search_term=None, selectable=None):
        """
        List external tools.
        Returns the paginated list of external tools for the current context.
        See the get request docs for a single tool for a list of properties on an external tool.

        :param course_id: (required, path) the course ID.
        :param search_term: optional — partial tool name to match and return.
        :param selectable: optional — if true, only tools meant to be
            selectable are returned.
        :param include_parents: optional — if true, also include tools
            installed in all accounts above the current context.
        """
        path = {"course_id": course_id}
        data = {}
        params = {}
        # Only transmit the optional query parameters the caller supplied.
        for name, value in (
            ("search_term", search_term),
            ("selectable", selectable),
            ("include_parents", include_parents),
        ):
            if value is not None:
                params[name] = value
        self.logger.debug("GET /api/v1/courses/{course_id}/external_tools with query params: {params} and form data: {data}".format(params=params, data=data, **path))
return self.generic_request("GET", "/api/v1/courses/{course_id}/external_tools".format(**path), data=data, params=params, no_data=True) | List external tools.
Returns the paginated list of external tools for the current context.
See the get request docs for a single tool for a list of properties on an external tool. | Below is the instruction that describes the task:
### Input:
List external tools.
Returns the paginated list of external tools for the current context.
See the get request docs for a single tool for a list of properties on an external tool.
### Response:
def list_external_tools_courses(self, course_id, include_parents=None, search_term=None, selectable=None):
"""
List external tools.
Returns the paginated list of external tools for the current context.
See the get request docs for a single tool for a list of properties on an external tool.
"""
path = {}
data = {}
params = {}
# REQUIRED - PATH - course_id
"""ID"""
path["course_id"] = course_id
# OPTIONAL - search_term
"""The partial name of the tools to match and return."""
if search_term is not None:
params["search_term"] = search_term
# OPTIONAL - selectable
"""If true, then only tools that are meant to be selectable are returned"""
if selectable is not None:
params["selectable"] = selectable
# OPTIONAL - include_parents
"""If true, then include tools installed in all accounts above the current context"""
if include_parents is not None:
params["include_parents"] = include_parents
self.logger.debug("GET /api/v1/courses/{course_id}/external_tools with query params: {params} and form data: {data}".format(params=params, data=data, **path))
return self.generic_request("GET", "/api/v1/courses/{course_id}/external_tools".format(**path), data=data, params=params, no_data=True) |
def smart_str(value, encoding='utf-8', errors='strict'):
    """Convert a Python object to a string.

    On Python 2, ``unicode`` input is encoded to bytes with *encoding* and
    *errors*; any other input (and all input on Python 3) is passed through
    ``str``.

    :param value: Python object to convert.
    :param encoding: Encoding to use if in Python 2 given object is unicode.
    :param errors: Errors mode to use if in Python 2 given object is unicode.
    """
    is_py2_unicode = not IS_PY3 and isinstance(value, unicode)  # noqa
    if is_py2_unicode:
        return value.encode(encoding, errors)
return str(value) | Convert Python object to string.
:param value: Python object to convert.
:param encoding: Encoding to use if in Python 2 given object is unicode.
:param errors: Errors mode to use if in Python 2 given object is unicode. | Below is the instruction that describes the task:
### Input:
Convert Python object to string.
:param value: Python object to convert.
:param encoding: Encoding to use if in Python 2 given object is unicode.
:param errors: Errors mode to use if in Python 2 given object is unicode.
### Response:
def smart_str(value, encoding='utf-8', errors='strict'):
"""Convert Python object to string.
:param value: Python object to convert.
:param encoding: Encoding to use if in Python 2 given object is unicode.
:param errors: Errors mode to use if in Python 2 given object is unicode.
"""
if not IS_PY3 and isinstance(value, unicode): # noqa
return value.encode(encoding, errors)
return str(value) |
def create(cls, community_id, user_id, **data):
        """Create a new record tying *user_id* to *community_id*.

        NOTE(review): the original docstring said "Get a community", but this
        method constructs and adds a new row — it does not query. Extra
        column values may be passed via **data. The insert happens inside a
        nested transaction (savepoint); the caller is responsible for the
        outer commit.
        """
        with db.session.begin_nested():
            obj = cls(id=community_id, id_user=user_id, **data)
            db.session.add(obj)
return obj | Get a community. | Below is the the instruction that describes the task:
### Input:
Get a community.
### Response:
def create(cls, community_id, user_id, **data):
"""Get a community."""
with db.session.begin_nested():
obj = cls(id=community_id, id_user=user_id, **data)
db.session.add(obj)
return obj |
def handle(self, *args, **options):
        """Main command method.

        Imports mailing-list email archives, guarded by a filesystem lock
        file so only one instance runs at a time, then refreshes each
        mailing list's privacy flags.
        """
        # Already running, so quit
        # NOTE(review): the log message below contains typos in its runtime
        # string ("your are sure", stray "')") — left untouched here since
        # changing it would alter program output.
        if os.path.exists(self.lock_file):
            self.log(("This script is already running. "
                      "(If your are sure it's not please "
                      "delete the lock file in {}')").format(self.lock_file))
            sys.exit(0)
        # NOTE(review): `0755` is a Python-2-only octal literal and `file()`
        # below is a Python-2-only builtin — this command cannot run on
        # Python 3 as written.
        if not os.path.exists(os.path.dirname(self.lock_file)):
            os.mkdir(os.path.dirname(self.lock_file), 0755)
        archives_path = options.get('archives_path')
        # NOTE(review): logs settings.SUPER_ARCHIVES_PATH but actually uses
        # the archives_path option below — confirm these are meant to match.
        self.log('Using archives_path `%s`' % settings.SUPER_ARCHIVES_PATH)
        if not os.path.exists(archives_path):
            msg = 'archives_path ({}) does not exist'.format(archives_path)
            raise CommandError(msg)
        # Create the lock file; the finally-block below guarantees removal.
        run_lock = file(self.lock_file, 'w')
        run_lock.close()
        try:
            self.import_emails(
                archives_path,
                options.get('all'),
                options.get('exclude_lists'),
            )
        except Exception as e:
            logging.exception(e)
            raise
        finally:
            os.remove(self.lock_file)
        # Recompute privacy for every list after the import.
        for mlist in MailingList.objects.all():
            mlist.update_privacy()
mlist.save() | Main command method. | Below is the the instruction that describes the task:
### Input:
Main command method.
### Response:
def handle(self, *args, **options):
"""Main command method."""
# Already running, so quit
if os.path.exists(self.lock_file):
self.log(("This script is already running. "
"(If your are sure it's not please "
"delete the lock file in {}')").format(self.lock_file))
sys.exit(0)
if not os.path.exists(os.path.dirname(self.lock_file)):
os.mkdir(os.path.dirname(self.lock_file), 0755)
archives_path = options.get('archives_path')
self.log('Using archives_path `%s`' % settings.SUPER_ARCHIVES_PATH)
if not os.path.exists(archives_path):
msg = 'archives_path ({}) does not exist'.format(archives_path)
raise CommandError(msg)
run_lock = file(self.lock_file, 'w')
run_lock.close()
try:
self.import_emails(
archives_path,
options.get('all'),
options.get('exclude_lists'),
)
except Exception as e:
logging.exception(e)
raise
finally:
os.remove(self.lock_file)
for mlist in MailingList.objects.all():
mlist.update_privacy()
mlist.save() |
def fees(ctx):
    """ List fees
    """
    from peerplaysbase.operationids import getOperationNameForId
    chain = Blockchain(peerplays_instance=ctx.peerplays)
    # Current fee schedule: a list of (operation_id, {fee_type: amount}) pairs.
    fee_schedule = chain.chainParameters().get("current_fees")["parameters"]
    t = PrettyTable(["Operation", "Type", "Fee"])
    t.align = "l"
    t.align["Fee"] = "r"
    for entry in fee_schedule:
        op_name = getOperationNameForId(entry[0])
        per_type = entry[1]
        for fee_type in per_type:
            amount = Amount({"amount": per_type.get(fee_type, 0), "asset_id": "1.3.0"})
            t.add_row([op_name, fee_type, str(amount)])
click.echo(t) | List fees | Below is the the instruction that describes the task:
### Input:
List fees
### Response:
def fees(ctx):
""" List fees
"""
from peerplaysbase.operationids import getOperationNameForId
chain = Blockchain(peerplays_instance=ctx.peerplays)
feesObj = chain.chainParameters().get("current_fees")
fees = feesObj["parameters"]
t = PrettyTable(["Operation", "Type", "Fee"])
t.align = "l"
t.align["Fee"] = "r"
for fee in fees:
for f in fee[1]:
t.add_row(
[
getOperationNameForId(fee[0]),
f,
str(Amount({"amount": fee[1].get(f, 0), "asset_id": "1.3.0"})),
]
)
click.echo(t) |
def load(cls,
         config: Params,
         serialization_dir: str,
         weights_file: str = None,
         cuda_device: int = -1) -> 'Model':
        """
        Instantiates an already-trained model, based on the experiment
        configuration and some optional overrides.
        Parameters
        ----------
        config: Params
            The configuration that was used to train the model. It should definitely
            have a `model` section, and should probably have a `trainer` section
            as well.
        serialization_dir: str = None
            The directory containing the serialized weights, parameters, and vocabulary
            of the model.
        weights_file: str = None
            By default we load the weights from `best.th` in the serialization
            directory, but you can override that value here.
        cuda_device: int = -1
            By default we load the model on the CPU, but if you want to load it
            for GPU usage you can specify the id of your GPU here
        Returns
        -------
        model: Model
            The model specified in the configuration, loaded with the serialized
            vocabulary and the trained weights.
        """
        # Peek at the class of the model.
        model_type = config["model"]["type"]
        # Load using an overridable _load method.
        # This allows subclasses of Model to override _load.
        # pylint: disable=protected-access
return cls.by_name(model_type)._load(config, serialization_dir, weights_file, cuda_device) | Instantiates an already-trained model, based on the experiment
configuration and some optional overrides.
Parameters
----------
config: Params
The configuration that was used to train the model. It should definitely
have a `model` section, and should probably have a `trainer` section
as well.
serialization_dir: str = None
The directory containing the serialized weights, parameters, and vocabulary
of the model.
weights_file: str = None
By default we load the weights from `best.th` in the serialization
directory, but you can override that value here.
cuda_device: int = -1
By default we load the model on the CPU, but if you want to load it
for GPU usage you can specify the id of your GPU here
Returns
-------
model: Model
The model specified in the configuration, loaded with the serialized
vocabulary and the trained weights. | Below is the the instruction that describes the task:
### Input:
Instantiates an already-trained model, based on the experiment
configuration and some optional overrides.
Parameters
----------
config: Params
The configuration that was used to train the model. It should definitely
have a `model` section, and should probably have a `trainer` section
as well.
serialization_dir: str = None
The directory containing the serialized weights, parameters, and vocabulary
of the model.
weights_file: str = None
By default we load the weights from `best.th` in the serialization
directory, but you can override that value here.
cuda_device: int = -1
By default we load the model on the CPU, but if you want to load it
for GPU usage you can specify the id of your GPU here
Returns
-------
model: Model
The model specified in the configuration, loaded with the serialized
vocabulary and the trained weights.
### Response:
def load(cls,
config: Params,
serialization_dir: str,
weights_file: str = None,
cuda_device: int = -1) -> 'Model':
"""
Instantiates an already-trained model, based on the experiment
configuration and some optional overrides.
Parameters
----------
config: Params
The configuration that was used to train the model. It should definitely
have a `model` section, and should probably have a `trainer` section
as well.
serialization_dir: str = None
The directory containing the serialized weights, parameters, and vocabulary
of the model.
weights_file: str = None
By default we load the weights from `best.th` in the serialization
directory, but you can override that value here.
cuda_device: int = -1
By default we load the model on the CPU, but if you want to load it
for GPU usage you can specify the id of your GPU here
Returns
-------
model: Model
The model specified in the configuration, loaded with the serialized
vocabulary and the trained weights.
"""
# Peak at the class of the model.
model_type = config["model"]["type"]
# Load using an overridable _load method.
# This allows subclasses of Model to override _load.
# pylint: disable=protected-access
return cls.by_name(model_type)._load(config, serialization_dir, weights_file, cuda_device) |
def get_source(
self, environment: Environment, template: str,
) -> Tuple[str, Optional[str], Callable]:
"""Returns the template source from the environment.
This considers the loaders on the :attr:`app` and blueprints.
"""
for loader in self._loaders():
try:
return loader.get_source(environment, template)
except TemplateNotFound:
continue
raise TemplateNotFound(template) | Returns the template source from the environment.
This considers the loaders on the :attr:`app` and blueprints. | Below is the the instruction that describes the task:
### Input:
Returns the template source from the environment.
This considers the loaders on the :attr:`app` and blueprints.
### Response:
def get_source(
        self, environment: Environment, template: str,
) -> Tuple[str, Optional[str], Callable]:
    """Look up *template* across all registered loaders.

    Each loader yielded by :meth:`_loaders` (the app's and the
    blueprints') is tried in order; the first one that knows the
    template wins.  If none do, ``TemplateNotFound`` is raised.
    """
    for candidate in self._loaders():
        try:
            return candidate.get_source(environment, template)
        except TemplateNotFound:
            pass
    raise TemplateNotFound(template)
def buy_margin(self):
"""
[float] 买方向保证金
"""
return sum(position.buy_margin for position in six.itervalues(self._positions)) | [float] 买方向保证金 | Below is the the instruction that describes the task:
### Input:
[float] 买方向保证金
### Response:
def buy_margin(self):
    """
    [float] Total buy-side (long) margin, summed over all open positions.

    Iterates ``self._positions`` (a mapping of position objects) and adds
    up each position's ``buy_margin``.
    """
    # Modernized: dict.values() replaces the six.itervalues() py2/py3 shim;
    # the summed result is identical on both interpreters.
    return sum(position.buy_margin for position in self._positions.values())
def _kde_support(bin_range, bw, gridsize, cut, clip):
"""Establish support for a kernel density estimate."""
kmin, kmax = bin_range[0] - bw * cut, bin_range[1] + bw * cut
if isfinite(clip[0]):
kmin = max(kmin, clip[0])
if isfinite(clip[1]):
kmax = min(kmax, clip[1])
return np.linspace(kmin, kmax, gridsize) | Establish support for a kernel density estimate. | Below is the the instruction that describes the task:
### Input:
Establish support for a kernel density estimate.
### Response:
def _kde_support(bin_range, bw, gridsize, cut, clip):
"""Establish support for a kernel density estimate."""
kmin, kmax = bin_range[0] - bw * cut, bin_range[1] + bw * cut
if isfinite(clip[0]):
kmin = max(kmin, clip[0])
if isfinite(clip[1]):
kmax = min(kmax, clip[1])
return np.linspace(kmin, kmax, gridsize) |
def set_dict_key_value(
in_dict,
keys,
value,
delimiter=DEFAULT_TARGET_DELIM,
ordered_dict=False):
'''
Ensures that in_dict contains the series of recursive keys defined in keys.
Also sets whatever is at the end of `in_dict` traversed with `keys` to `value`.
:param dict in_dict: The dictionary to work with
:param str keys: The delimited string with one or more keys.
:param any value: The value to assign to the nested dict-key.
:param str delimiter: The delimiter to use in `keys`. Defaults to ':'.
:param bool ordered_dict: Create OrderedDicts if keys are missing.
Default: create regular dicts.
:return dict: Though it updates in_dict in-place.
'''
dict_pointer, last_key = _dict_rpartition(in_dict,
keys,
delimiter=delimiter,
ordered_dict=ordered_dict)
dict_pointer[last_key] = value
return in_dict | Ensures that in_dict contains the series of recursive keys defined in keys.
Also sets whatever is at the end of `in_dict` traversed with `keys` to `value`.
:param dict in_dict: The dictionary to work with
:param str keys: The delimited string with one or more keys.
:param any value: The value to assign to the nested dict-key.
:param str delimiter: The delimiter to use in `keys`. Defaults to ':'.
:param bool ordered_dict: Create OrderedDicts if keys are missing.
Default: create regular dicts.
:return dict: Though it updates in_dict in-place. | Below is the the instruction that describes the task:
### Input:
Ensures that in_dict contains the series of recursive keys defined in keys.
Also sets whatever is at the end of `in_dict` traversed with `keys` to `value`.
:param dict in_dict: The dictionary to work with
:param str keys: The delimited string with one or more keys.
:param any value: The value to assign to the nested dict-key.
:param str delimiter: The delimiter to use in `keys`. Defaults to ':'.
:param bool ordered_dict: Create OrderedDicts if keys are missing.
Default: create regular dicts.
:return dict: Though it updates in_dict in-place.
### Response:
def set_dict_key_value(
        in_dict,
        keys,
        value,
        delimiter=DEFAULT_TARGET_DELIM,
        ordered_dict=False):
    '''
    Ensures that in_dict contains the series of recursive keys defined in keys.
    Also sets whatever is at the end of `in_dict` traversed with `keys` to `value`.
    :param dict in_dict: The dictionary to work with
    :param str keys: The delimited string with one or more keys.
    :param any value: The value to assign to the nested dict-key.
    :param str delimiter: The delimiter to use in `keys`. Defaults to ':'.
    :param bool ordered_dict: Create OrderedDicts if keys are missing.
        Default: create regular dicts.
    :return dict: Though it updates in_dict in-place.
    '''
    # _dict_rpartition resolves the nested dicts down to the parent of the
    # final key (presumably creating intermediate levels -- confirm in its
    # definition), returning that parent plus the last key segment.
    dict_pointer, last_key = _dict_rpartition(in_dict,
                                              keys,
                                              delimiter=delimiter,
                                              ordered_dict=ordered_dict)
    dict_pointer[last_key] = value
    return in_dict
def _update_fitness(self, action_set):
"""Update the fitness values of the rules belonging to this action
set."""
# Compute the accuracy of each rule. Accuracy is inversely
# proportional to error. Below a certain error threshold, accuracy
# becomes constant. Accuracy values range over (0, 1].
total_accuracy = 0
accuracies = {}
for rule in action_set:
if rule.error < self.error_threshold:
accuracy = 1
else:
accuracy = (
self.accuracy_coefficient *
(rule.error / self.error_threshold) **
-self.accuracy_power
)
accuracies[rule] = accuracy
total_accuracy += accuracy * rule.numerosity
# On rare occasions we have zero total accuracy. This avoids a div
# by zero
total_accuracy = total_accuracy or 1
# Use the relative accuracies of the rules to update their fitness
for rule in action_set:
accuracy = accuracies[rule]
rule.fitness += (
self.learning_rate *
(accuracy * rule.numerosity / total_accuracy -
rule.fitness)
) | Update the fitness values of the rules belonging to this action
set. | Below is the the instruction that describes the task:
### Input:
Update the fitness values of the rules belonging to this action
set.
### Response:
def _update_fitness(self, action_set):
"""Update the fitness values of the rules belonging to this action
set."""
# Compute the accuracy of each rule. Accuracy is inversely
# proportional to error. Below a certain error threshold, accuracy
# becomes constant. Accuracy values range over (0, 1].
total_accuracy = 0
accuracies = {}
for rule in action_set:
if rule.error < self.error_threshold:
accuracy = 1
else:
accuracy = (
self.accuracy_coefficient *
(rule.error / self.error_threshold) **
-self.accuracy_power
)
accuracies[rule] = accuracy
total_accuracy += accuracy * rule.numerosity
# On rare occasions we have zero total accuracy. This avoids a div
# by zero
total_accuracy = total_accuracy or 1
# Use the relative accuracies of the rules to update their fitness
for rule in action_set:
accuracy = accuracies[rule]
rule.fitness += (
self.learning_rate *
(accuracy * rule.numerosity / total_accuracy -
rule.fitness)
) |
def merge_dicts(d1, d2):
"""合并两个无限深度的 dict
会自动合并 list 格式
:param dict d1: 被合并的 dict
:param dict d2: 待合并的 dict
:returns: 一个新的生成器对象
:rtype: generator
"""
for k in set(d1.keys()).union(d2.keys()):
if k in d1 and k in d2:
if isinstance(d1[k], dict) and isinstance(d2[k], dict):
yield (k, dict(merge_dicts(d1[k], d2[k])))
elif isinstance(d1[k], list):
if isinstance(d2[k], list):
d1[k].extend(d2[k])
else:
d1[k].append(d2[k])
yield(k, d1)
else:
# If one of the values is not a dict, you can't continue merging it.
# Value from second dict overrides one in first and we move on.
yield (k, d2[k])
# Alternatively, replace this with exception raiser to alert you of value conflicts
elif k in d1:
yield (k, d1[k])
else:
yield (k, d2[k]) | 合并两个无限深度的 dict
会自动合并 list 格式
:param dict d1: 被合并的 dict
:param dict d2: 待合并的 dict
:returns: 一个新的生成器对象
:rtype: generator | Below is the the instruction that describes the task:
### Input:
合并两个无限深度的 dict
会自动合并 list 格式
:param dict d1: 被合并的 dict
:param dict d2: 待合并的 dict
:returns: 一个新的生成器对象
:rtype: generator
### Response:
def merge_dicts(d1, d2):
    """Recursively merge two dicts of arbitrary depth.

    List values are merged automatically: when both sides are lists they
    are concatenated; when only ``d1``'s value is a list, ``d2``'s value is
    appended to it.  Nested dicts are merged recursively.  For any other
    conflicting value, ``d2`` wins.

    :param dict d1: dict being merged into (its lists are mutated in place)
    :param dict d2: dict whose entries are merged in
    :returns: a generator of ``(key, merged_value)`` pairs
    :rtype: generator
    """
    for k in set(d1.keys()).union(d2.keys()):
        if k in d1 and k in d2:
            if isinstance(d1[k], dict) and isinstance(d2[k], dict):
                yield (k, dict(merge_dicts(d1[k], d2[k])))
            elif isinstance(d1[k], list):
                if isinstance(d2[k], list):
                    d1[k].extend(d2[k])
                else:
                    d1[k].append(d2[k])
                # BUG FIX: previously `yield (k, d1)` emitted the entire
                # d1 mapping as the merged value; the merged list for this
                # key is what callers expect.
                yield (k, d1[k])
            else:
                # If one of the values is not a dict, you can't continue merging it.
                # Value from second dict overrides one in first and we move on.
                yield (k, d2[k])
                # Alternatively, replace this with exception raiser to alert you of value conflicts
        elif k in d1:
            yield (k, d1[k])
        else:
            yield (k, d2[k])
def _handle_start_area(self, attrs):
"""
Handle opening area element
:param attrs: Attributes of the element
:type attrs: Dict
"""
self._curr = {
'attributes': dict(attrs),
'tags': {},
'area_id': None
}
if attrs.get('id', None) is not None:
self._curr['area_id'] = int(attrs['id'])
del self._curr['attributes']['id'] | Handle opening area element
:param attrs: Attributes of the element
:type attrs: Dict | Below is the the instruction that describes the task:
### Input:
Handle opening area element
:param attrs: Attributes of the element
:type attrs: Dict
### Response:
def _handle_start_area(self, attrs):
"""
Handle opening area element
:param attrs: Attributes of the element
:type attrs: Dict
"""
self._curr = {
'attributes': dict(attrs),
'tags': {},
'area_id': None
}
if attrs.get('id', None) is not None:
self._curr['area_id'] = int(attrs['id'])
del self._curr['attributes']['id'] |
def dispatch(self, event: Any) -> None:
"""Send an event to an `ev_*` method.
`*` will be the events type converted to lower-case.
If `event.type` is an empty string or None then it will be ignored.
"""
if event.type:
getattr(self, "ev_%s" % (event.type.lower(),))(event) | Send an event to an `ev_*` method.
`*` will be the events type converted to lower-case.
If `event.type` is an empty string or None then it will be ignored. | Below is the the instruction that describes the task:
### Input:
Send an event to an `ev_*` method.
`*` will be the events type converted to lower-case.
If `event.type` is an empty string or None then it will be ignored.
### Response:
def dispatch(self, event: Any) -> None:
    """Route *event* to the matching ``ev_*`` handler method.

    The handler name is ``"ev_"`` plus the event's type lower-cased.
    Events whose ``type`` is falsy (``None`` or an empty string) are
    silently ignored.
    """
    if not event.type:
        return
    handler_name = "ev_%s" % (event.type.lower(),)
    getattr(self, handler_name)(event)
def fetch(self):
"""Samples up to current difficulty."""
length = np.random.randint(1, self._curr_length + 1)
nesting = np.random.randint(1, self._curr_nesting + 1)
return length, nesting | Samples up to current difficulty. | Below is the the instruction that describes the task:
### Input:
Samples up to current difficulty.
### Response:
def fetch(self):
    """Draw a (length, nesting) pair uniformly up to current difficulty."""
    # randint's upper bound is exclusive, hence the +1 on each cap.
    upper_bounds = (self._curr_length, self._curr_nesting)
    return tuple(np.random.randint(1, bound + 1) for bound in upper_bounds)
def set(self, name, value):
"""
Set an attribute's value.
@param name: The name of the attribute.
@type name: basestring
@param value: The attribute value.
@type value: basestring
@see: __setitem__()
"""
attr = self.getAttribute(name)
if attr is None:
attr = Attribute(name, value)
self.append(attr)
else:
attr.setValue(value) | Set an attribute's value.
@param name: The name of the attribute.
@type name: basestring
@param value: The attribute value.
@type value: basestring
@see: __setitem__() | Below is the the instruction that describes the task:
### Input:
Set an attribute's value.
@param name: The name of the attribute.
@type name: basestring
@param value: The attribute value.
@type value: basestring
@see: __setitem__()
### Response:
def set(self, name, value):
    """
    Set an attribute's value, creating the attribute when it does not
    already exist.
    @param name: The name of the attribute.
    @type name: basestring
    @param value: The attribute value.
    @type value: basestring
    @see: __setitem__()
    """
    existing = self.getAttribute(name)
    if existing is not None:
        existing.setValue(value)
    else:
        self.append(Attribute(name, value))
def river_flow(self, source, world, river_list, lake_list):
"""simulate fluid dynamics by using starting point and flowing to the
lowest available point"""
current_location = source
path = [source]
# start the flow
while True:
x, y = current_location
# is there a river nearby, flow into it
for dx, dy in DIR_NEIGHBORS:
ax, ay = x + dx, y + dy
if self.wrap:
ax, ay = overflow(ax, world.width), overflow(ay,
world.height)
for river in river_list:
if [ax, ay] in river:
merge = False
for rx, ry in river:
if [ax, ay] == [rx, ry]:
merge = True
path.append([rx, ry])
elif merge:
path.append([rx, ry])
return path # skip the rest, return path
# found a sea?
if world.is_ocean((x, y)):
break
# find our immediate lowest elevation and flow there
quick_section = self.find_quick_path(current_location, world)
if quick_section:
path.append(quick_section)
current_location = quick_section
continue # stop here and enter back into loop
is_wrapped, lower_elevation = self.findLowerElevation(
current_location, world)
if lower_elevation and not is_wrapped:
lower_path = worldengine.astar.PathFinder().find(
world.layers['elevation'].data, current_location, lower_elevation)
if lower_path:
path += lower_path
current_location = path[-1]
else:
break
elif lower_elevation and is_wrapped:
# TODO: make this more natural
max_radius = 40
cx, cy = current_location
lx, ly = lower_elevation
if x < 0 or y < 0 or x > world.width or y > world.height:
raise Exception(
"BUG: fix me... we shouldn't be here: %s %s" % (
current_location, lower_elevation))
if not in_circle(max_radius, cx, cy, lx, cy):
# are we wrapping on x axis?
if cx - lx < 0:
lx = 0 # move to left edge
nx = world.width - 1 # next step is wrapped around
else:
lx = world.width - 1 # move to right edge
nx = 0 # next step is wrapped around
ly = ny = int((cy + ly) / 2) # move halfway
elif not in_circle(max_radius, cx, cy, cx, ly):
# are we wrapping on y axis?
if cy - ly < 0:
ly = 0 # move to top edge
ny = world.height - 1 # next step is wrapped around
else:
ly = world.height - 1 # move to bottom edge
ny = 0 # next step is wrapped around
lx = nx = int((cx + lx) / 2) # move halfway
else:
raise Exception(
"BUG: fix me... we are not in circle: %s %s" % (
current_location, lower_elevation))
# find our way to the edge
edge_path = worldengine.astar.PathFinder().find(
world.layers['elevation'].data, [cx, cy], [lx, ly])
if not edge_path:
# can't find another other path, make it a lake
lake_list.append(current_location)
break
path += edge_path # add our newly found path
path.append([nx, ny]) # finally add our overflow to other side
current_location = path[-1]
# find our way to lowest position original found
lower_path = worldengine.astar.PathFinder().find(
world.layers['elevation'].data, current_location, lower_elevation)
path += lower_path
current_location = path[-1]
else: # can't find any other path, make it a lake
lake_list.append(current_location)
break # end of river
if not world.contains(current_location):
print("Why are we here:", current_location)
return path | simulate fluid dynamics by using starting point and flowing to the
lowest available point | Below is the the instruction that describes the task:
### Input:
simulate fluid dynamics by using starting point and flowing to the
lowest available point
### Response:
def river_flow(self, source, world, river_list, lake_list):
    """simulate fluid dynamics by using starting point and flowing to the
    lowest available point"""
    # `path` accumulates the [x, y] steps of this river.  The walk ends by
    # reaching the ocean, merging into an existing river (early return), or
    # recording a lake in `lake_list` when no lower ground exists.
    current_location = source
    path = [source]
    # start the flow
    while True:
        x, y = current_location
        # is there a river nearby, flow into it
        for dx, dy in DIR_NEIGHBORS:
            ax, ay = x + dx, y + dy
            if self.wrap:
                ax, ay = overflow(ax, world.width), overflow(ay,
                                                             world.height)
            for river in river_list:
                if [ax, ay] in river:
                    merge = False
                    # Append the downstream remainder of the existing river
                    # (everything from the merge point onward) to our path.
                    for rx, ry in river:
                        if [ax, ay] == [rx, ry]:
                            merge = True
                            path.append([rx, ry])
                        elif merge:
                            path.append([rx, ry])
                    return path # skip the rest, return path
        # found a sea?
        if world.is_ocean((x, y)):
            break
        # find our immediate lowest elevation and flow there
        quick_section = self.find_quick_path(current_location, world)
        if quick_section:
            path.append(quick_section)
            current_location = quick_section
            continue # stop here and enter back into loop
        # No easy downhill step nearby; search farther afield for lower
        # ground (the helper also reports whether the hit wraps the map).
        is_wrapped, lower_elevation = self.findLowerElevation(
            current_location, world)
        if lower_elevation and not is_wrapped:
            lower_path = worldengine.astar.PathFinder().find(
                world.layers['elevation'].data, current_location, lower_elevation)
            if lower_path:
                path += lower_path
                current_location = path[-1]
            else:
                break
        elif lower_elevation and is_wrapped:
            # TODO: make this more natural
            max_radius = 40
            cx, cy = current_location
            lx, ly = lower_elevation
            if x < 0 or y < 0 or x > world.width or y > world.height:
                raise Exception(
                    "BUG: fix me... we shouldn't be here: %s %s" % (
                        current_location, lower_elevation))
            if not in_circle(max_radius, cx, cy, lx, cy):
                # are we wrapping on x axis?
                if cx - lx < 0:
                    lx = 0 # move to left edge
                    nx = world.width - 1 # next step is wrapped around
                else:
                    lx = world.width - 1 # move to right edge
                    nx = 0 # next step is wrapped around
                ly = ny = int((cy + ly) / 2) # move halfway
            elif not in_circle(max_radius, cx, cy, cx, ly):
                # are we wrapping on y axis?
                if cy - ly < 0:
                    ly = 0 # move to top edge
                    ny = world.height - 1 # next step is wrapped around
                else:
                    ly = world.height - 1 # move to bottom edge
                    ny = 0 # next step is wrapped around
                lx = nx = int((cx + lx) / 2) # move halfway
            else:
                raise Exception(
                    "BUG: fix me... we are not in circle: %s %s" % (
                        current_location, lower_elevation))
            # find our way to the edge
            edge_path = worldengine.astar.PathFinder().find(
                world.layers['elevation'].data, [cx, cy], [lx, ly])
            if not edge_path:
                # can't find another other path, make it a lake
                lake_list.append(current_location)
                break
            path += edge_path # add our newly found path
            path.append([nx, ny]) # finally add our overflow to other side
            current_location = path[-1]
            # find our way to lowest position original found
            lower_path = worldengine.astar.PathFinder().find(
                world.layers['elevation'].data, current_location, lower_elevation)
            path += lower_path
            current_location = path[-1]
        else: # can't find any other path, make it a lake
            lake_list.append(current_location)
            break # end of river
    # NOTE(review): ending outside the world indicates an upstream bug;
    # it is only logged here, never raised.
    if not world.contains(current_location):
        print("Why are we here:", current_location)
    return path
def dollarfy(x):
"""Replaces Math elements in element list 'x' with a $-enclosed string.
stringify() passes through TeX math. Use dollarfy(x) first to replace
Math elements with math strings set in dollars. 'x' should be a deep copy
so that the underlying document is left untouched.
Returns 'x'."""
def _dollarfy(key, value, fmt, meta): # pylint: disable=unused-argument
"""Replaces Math elements"""
if key == 'Math':
return Str('$' + value[1] + '$')
return None
return walk(x, _dollarfy, '', {}) | Replaces Math elements in element list 'x' with a $-enclosed string.
stringify() passes through TeX math. Use dollarfy(x) first to replace
Math elements with math strings set in dollars. 'x' should be a deep copy
so that the underlying document is left untouched.
Returns 'x'. | Below is the the instruction that describes the task:
### Input:
Replaces Math elements in element list 'x' with a $-enclosed string.
stringify() passes through TeX math. Use dollarfy(x) first to replace
Math elements with math strings set in dollars. 'x' should be a deep copy
so that the underlying document is left untouched.
Returns 'x'.
### Response:
def dollarfy(x):
    """Replaces Math elements in element list 'x' with a $-enclosed string.
    stringify() passes through TeX math. Use dollarfy(x) first to replace
    Math elements with math strings set in dollars. 'x' should be a deep copy
    so that the underlying document is left untouched.
    Returns 'x'."""
    def _to_dollar_str(key, value, fmt, meta):  # pylint: disable=unused-argument
        """Turn a Math element into a dollar-delimited Str element."""
        if key != 'Math':
            return None
        return Str('$' + value[1] + '$')
    return walk(x, _to_dollar_str, '', {})
def post_process(self, dir_name, d):
"""
Simple post-processing for various files other than the vasprun.xml.
Called by generate_task_doc. Modify this if your runs have other
kinds of processing requirements.
Args:
dir_name:
The dir_name.
d:
Current doc generated.
"""
logger.info("Post-processing dir:{}".format(dir_name))
fullpath = os.path.abspath(dir_name)
# VASP input generated by pymatgen's alchemy has a
# transformations.json file that keeps track of the origin of a
# particular structure. This is extremely useful for tracing back a
# result. If such a file is found, it is inserted into the task doc
# as d["transformations"]
transformations = {}
filenames = glob.glob(os.path.join(fullpath, "transformations.json*"))
if len(filenames) >= 1:
with zopen(filenames[0], "rt") as f:
transformations = json.load(f)
try:
m = re.match("(\d+)-ICSD",
transformations["history"][0]["source"])
if m:
d["icsd_id"] = int(m.group(1))
except Exception as ex:
logger.warning("Cannot parse ICSD from transformations "
"file.")
pass
else:
logger.warning("Transformations file does not exist.")
other_parameters = transformations.get("other_parameters")
new_tags = None
if other_parameters:
# We don't want to leave tags or authors in the
# transformations file because they'd be copied into
# every structure generated after this one.
new_tags = other_parameters.pop("tags", None)
new_author = other_parameters.pop("author", None)
if new_author:
d["author"] = new_author
if not other_parameters: # if dict is now empty remove it
transformations.pop("other_parameters")
d["transformations"] = transformations
# Calculations done using custodian has a custodian.json,
# which tracks the jobs performed and any errors detected and fixed.
# This is useful for tracking what has actually be done to get a
# result. If such a file is found, it is inserted into the task doc
# as d["custodian"]
filenames = glob.glob(os.path.join(fullpath, "custodian.json*"))
if len(filenames) >= 1:
with zopen(filenames[0], "rt") as f:
d["custodian"] = json.load(f)
# Parse OUTCAR for additional information and run stats that are
# generally not in vasprun.xml.
try:
run_stats = {}
for filename in glob.glob(os.path.join(fullpath, "OUTCAR*")):
outcar = Outcar(filename)
i = 1 if re.search("relax2", filename) else 0
taskname = "relax2" if re.search("relax2", filename) else \
"relax1"
d["calculations"][i]["output"]["outcar"] = outcar.as_dict()
run_stats[taskname] = outcar.run_stats
except:
logger.error("Bad OUTCAR for {}.".format(fullpath))
try:
overall_run_stats = {}
for key in ["Total CPU time used (sec)", "User time (sec)",
"System time (sec)", "Elapsed time (sec)"]:
overall_run_stats[key] = sum([v[key]
for v in run_stats.values()])
run_stats["overall"] = overall_run_stats
except:
logger.error("Bad run stats for {}.".format(fullpath))
d["run_stats"] = run_stats
#Convert to full uri path.
if self.use_full_uri:
d["dir_name"] = get_uri(dir_name)
if new_tags:
d["tags"] = new_tags
logger.info("Post-processed " + fullpath) | Simple post-processing for various files other than the vasprun.xml.
Called by generate_task_doc. Modify this if your runs have other
kinds of processing requirements.
Args:
dir_name:
The dir_name.
d:
Current doc generated. | Below is the the instruction that describes the task:
### Input:
Simple post-processing for various files other than the vasprun.xml.
Called by generate_task_doc. Modify this if your runs have other
kinds of processing requirements.
Args:
dir_name:
The dir_name.
d:
Current doc generated.
### Response:
def post_process(self, dir_name, d):
    """
    Simple post-processing for various files other than the vasprun.xml.
    Called by generate_task_doc. Modify this if your runs have other
    kinds of processing requirements.
    Args:
        dir_name:
            The directory containing the run's auxiliary files
            (transformations.json, custodian.json, OUTCAR*).
        d:
            Current doc generated; mutated in place with the extra keys
            ("transformations", "custodian", "run_stats", ...).
    """
    logger.info("Post-processing dir:{}".format(dir_name))
    fullpath = os.path.abspath(dir_name)
    # VASP input generated by pymatgen's alchemy has a
    # transformations.json file that keeps track of the origin of a
    # particular structure. This is extremely useful for tracing back a
    # result. If such a file is found, it is inserted into the task doc
    # as d["transformations"]
    transformations = {}
    filenames = glob.glob(os.path.join(fullpath, "transformations.json*"))
    if len(filenames) >= 1:
        with zopen(filenames[0], "rt") as f:
            transformations = json.load(f)
            try:
                # The history source may encode an ICSD id as "<id>-ICSD".
                m = re.match("(\d+)-ICSD",
                             transformations["history"][0]["source"])
                if m:
                    d["icsd_id"] = int(m.group(1))
            except Exception as ex:
                logger.warning("Cannot parse ICSD from transformations "
                               "file.")
                pass
    else:
        logger.warning("Transformations file does not exist.")
    other_parameters = transformations.get("other_parameters")
    new_tags = None
    if other_parameters:
        # We don't want to leave tags or authors in the
        # transformations file because they'd be copied into
        # every structure generated after this one.
        new_tags = other_parameters.pop("tags", None)
        new_author = other_parameters.pop("author", None)
        if new_author:
            d["author"] = new_author
        if not other_parameters: # if dict is now empty remove it
            transformations.pop("other_parameters")
    d["transformations"] = transformations
    # Calculations done using custodian has a custodian.json,
    # which tracks the jobs performed and any errors detected and fixed.
    # This is useful for tracking what has actually be done to get a
    # result. If such a file is found, it is inserted into the task doc
    # as d["custodian"]
    filenames = glob.glob(os.path.join(fullpath, "custodian.json*"))
    if len(filenames) >= 1:
        with zopen(filenames[0], "rt") as f:
            d["custodian"] = json.load(f)
    # Parse OUTCAR for additional information and run stats that are
    # generally not in vasprun.xml.
    try:
        run_stats = {}
        for filename in glob.glob(os.path.join(fullpath, "OUTCAR*")):
            outcar = Outcar(filename)
            i = 1 if re.search("relax2", filename) else 0
            taskname = "relax2" if re.search("relax2", filename) else \
                "relax1"
            d["calculations"][i]["output"]["outcar"] = outcar.as_dict()
            run_stats[taskname] = outcar.run_stats
    # NOTE(review): bare except swallows everything (incl. KeyboardInterrupt);
    # consider narrowing to `except Exception:`.
    except:
        logger.error("Bad OUTCAR for {}.".format(fullpath))
    try:
        overall_run_stats = {}
        for key in ["Total CPU time used (sec)", "User time (sec)",
                    "System time (sec)", "Elapsed time (sec)"]:
            overall_run_stats[key] = sum([v[key]
                                          for v in run_stats.values()])
        run_stats["overall"] = overall_run_stats
    # NOTE(review): same bare-except concern as above.
    except:
        logger.error("Bad run stats for {}.".format(fullpath))
    d["run_stats"] = run_stats
    #Convert to full uri path.
    if self.use_full_uri:
        d["dir_name"] = get_uri(dir_name)
    if new_tags:
        d["tags"] = new_tags
    logger.info("Post-processed " + fullpath)
def remove_interface_router(self, router, body=None):
"""Removes an internal network interface from the specified router."""
return self.put((self.router_path % router) +
"/remove_router_interface", body=body) | Removes an internal network interface from the specified router. | Below is the the instruction that describes the task:
### Input:
Removes an internal network interface from the specified router.
### Response:
def remove_interface_router(self, router, body=None):
    """Removes an internal network interface from the specified router."""
    # Build the per-router action URL, then issue the PUT.
    action_url = (self.router_path % router) + "/remove_router_interface"
    return self.put(action_url, body=body)
def unescape(s, quote=False):
"""
The opposite of the cgi.escape function.
Replace escaped characters '&', '<' and '>' with the corresponding
regular characters. If the optional flag quote is true, the escaped quotation
mark character ('"') is also translated.
"""
s = s.replace('<', '<')
s = s.replace('>', '>')
if quote:
s = s.replace('"', '"')
s = s.replace('&', '&')
return s | The opposite of the cgi.escape function.
Replace escaped characters '&', '<' and '>' with the corresponding
regular characters. If the optional flag quote is true, the escaped quotation
mark character ('"') is also translated. | Below is the the instruction that describes the task:
### Input:
The opposite of the cgi.escape function.
Replace escaped characters '&', '<' and '>' with the corresponding
regular characters. If the optional flag quote is true, the escaped quotation
mark character ('"') is also translated.
### Response:
def unescape(s, quote=False):
    """
    The opposite of the cgi.escape function: turn '&lt;', '&gt;' (and,
    when *quote* is true, '&quot;') back into '<', '>' and '"'.
    '&amp;' is restored last so freshly produced '&' characters are
    never re-expanded into other entities.
    """
    replacements = [('&lt;', '<'), ('&gt;', '>')]
    if quote:
        replacements.append(('&quot;', '"'))
    replacements.append(('&amp;', '&'))
    for entity, char in replacements:
        s = s.replace(entity, char)
    return s
def _create_endpoints(self):
"""Create all api endpoints using self.endpoint and partial from functools"""
for k, v in self.endpoints.items():
_repr = '%s.%s' % (self.__class__.__name__, k)
self.__dict__[k] = EndPointPartial(self._make_request, v, _repr) | Create all api endpoints using self.endpoint and partial from functools | Below is the the instruction that describes the task:
### Input:
Create all api endpoints using self.endpoint and partial from functools
### Response:
def _create_endpoints(self):
    """Attach one EndPointPartial callable per entry in ``self.endpoints``,
    each named after its key and labelled ``ClassName.key``."""
    class_name = self.__class__.__name__
    for attr_name, endpoint in self.endpoints.items():
        endpoint_repr = '{}.{}'.format(class_name, attr_name)
        self.__dict__[attr_name] = EndPointPartial(
            self._make_request, endpoint, endpoint_repr)
def _handle_exclude(self, state, sls, saltenv, errors):
'''
Take the exclude dec out of the state and apply it to the highstate
global dec
'''
if 'exclude' in state:
exc = state.pop('exclude')
if not isinstance(exc, list):
err = ('Exclude Declaration in SLS {0} is not formed '
'as a list'.format(sls))
errors.append(err)
state.setdefault('__exclude__', []).extend(exc) | Take the exclude dec out of the state and apply it to the highstate
global dec | Below is the the instruction that describes the task:
### Input:
Take the exclude dec out of the state and apply it to the highstate
global dec
### Response:
def _handle_exclude(self, state, sls, saltenv, errors):
'''
Take the exclude dec out of the state and apply it to the highstate
global dec
'''
if 'exclude' in state:
exc = state.pop('exclude')
if not isinstance(exc, list):
err = ('Exclude Declaration in SLS {0} is not formed '
'as a list'.format(sls))
errors.append(err)
state.setdefault('__exclude__', []).extend(exc) |
def log_start(task, logger="TaskLogger"):
"""Begin logging of a task
Convenience function to log a task in the default
TaskLogger
Parameters
----------
task : str
Name of the task to be started
logger : str, optional (default: "TaskLogger")
Unique name of the logger to retrieve
Returns
-------
logger : TaskLogger
"""
tasklogger = get_tasklogger(logger)
tasklogger.start_task(task)
return tasklogger | Begin logging of a task
Convenience function to log a task in the default
TaskLogger
Parameters
----------
task : str
Name of the task to be started
logger : str, optional (default: "TaskLogger")
Unique name of the logger to retrieve
Returns
-------
logger : TaskLogger | Below is the the instruction that describes the task:
### Input:
Begin logging of a task
Convenience function to log a task in the default
TaskLogger
Parameters
----------
task : str
Name of the task to be started
logger : str, optional (default: "TaskLogger")
Unique name of the logger to retrieve
Returns
-------
logger : TaskLogger
### Response:
def log_start(task, logger="TaskLogger"):
"""Begin logging of a task
Convenience function to log a task in the default
TaskLogger
Parameters
----------
task : str
Name of the task to be started
logger : str, optional (default: "TaskLogger")
Unique name of the logger to retrieve
Returns
-------
logger : TaskLogger
"""
tasklogger = get_tasklogger(logger)
tasklogger.start_task(task)
return tasklogger |
def query(self,sql):
"""
Execute an SQL query on the server and fetch the resulting XML file
back.
@return: message received (may be empty) from LDBD Server as a string
"""
msg = "QUERY\0" + sql + "\0"
self.sfile.write(msg)
ret, output = self.__response__()
reply = str(output[0])
if ret:
msg = "Error executing query on server %d:%s" % (ret, reply)
raise LDBDClientException, msg
return reply | Execute an SQL query on the server and fetch the resulting XML file
back.
@return: message received (may be empty) from LDBD Server as a string | Below is the the instruction that describes the task:
### Input:
Execute an SQL query on the server and fetch the resulting XML file
back.
@return: message received (may be empty) from LDBD Server as a string
### Response:
def query(self,sql):
"""
Execute an SQL query on the server and fetch the resulting XML file
back.
@return: message received (may be empty) from LDBD Server as a string
"""
msg = "QUERY\0" + sql + "\0"
self.sfile.write(msg)
ret, output = self.__response__()
reply = str(output[0])
if ret:
msg = "Error executing query on server %d:%s" % (ret, reply)
raise LDBDClientException, msg
return reply |
def bug_suggestions_line(err, term_cache=None):
"""
Get Bug suggestions for a given TextLogError (err).
Tries to extract a search term from a clean version of the given error's
line. We build a search term from the cleaned line and use that to search
for bugs. Returns a dictionary with the cleaned line, the generated search
term, and any bugs found with said search term.
"""
if term_cache is None:
term_cache = {}
# remove the mozharness prefix
clean_line = get_mozharness_substring(err.line)
# get a meaningful search term out of the error line
search_term = get_error_search_term(clean_line)
bugs = dict(open_recent=[], all_others=[])
# collect open recent and all other bugs suggestions
search_terms = []
if search_term:
search_terms.append(search_term)
if search_term not in term_cache:
term_cache[search_term] = Bugscache.search(search_term)
bugs = term_cache[search_term]
if not bugs or not (bugs['open_recent'] or
bugs['all_others']):
# no suggestions, try to use
# the crash signature as search term
crash_signature = get_crash_signature(clean_line)
if crash_signature:
search_terms.append(crash_signature)
if crash_signature not in term_cache:
term_cache[crash_signature] = Bugscache.search(crash_signature)
bugs = term_cache[crash_signature]
# TODO: Rename 'search' to 'error_text' or similar, since that's
# closer to what it actually represents (bug 1091060).
return {
"search": clean_line,
"search_terms": search_terms,
"bugs": bugs,
} | Get Bug suggestions for a given TextLogError (err).
Tries to extract a search term from a clean version of the given error's
line. We build a search term from the cleaned line and use that to search
for bugs. Returns a dictionary with the cleaned line, the generated search
term, and any bugs found with said search term. | Below is the the instruction that describes the task:
### Input:
Get Bug suggestions for a given TextLogError (err).
Tries to extract a search term from a clean version of the given error's
line. We build a search term from the cleaned line and use that to search
for bugs. Returns a dictionary with the cleaned line, the generated search
term, and any bugs found with said search term.
### Response:
def bug_suggestions_line(err, term_cache=None):
"""
Get Bug suggestions for a given TextLogError (err).
Tries to extract a search term from a clean version of the given error's
line. We build a search term from the cleaned line and use that to search
for bugs. Returns a dictionary with the cleaned line, the generated search
term, and any bugs found with said search term.
"""
if term_cache is None:
term_cache = {}
# remove the mozharness prefix
clean_line = get_mozharness_substring(err.line)
# get a meaningful search term out of the error line
search_term = get_error_search_term(clean_line)
bugs = dict(open_recent=[], all_others=[])
# collect open recent and all other bugs suggestions
search_terms = []
if search_term:
search_terms.append(search_term)
if search_term not in term_cache:
term_cache[search_term] = Bugscache.search(search_term)
bugs = term_cache[search_term]
if not bugs or not (bugs['open_recent'] or
bugs['all_others']):
# no suggestions, try to use
# the crash signature as search term
crash_signature = get_crash_signature(clean_line)
if crash_signature:
search_terms.append(crash_signature)
if crash_signature not in term_cache:
term_cache[crash_signature] = Bugscache.search(crash_signature)
bugs = term_cache[crash_signature]
# TODO: Rename 'search' to 'error_text' or similar, since that's
# closer to what it actually represents (bug 1091060).
return {
"search": clean_line,
"search_terms": search_terms,
"bugs": bugs,
} |
def set_style(key):
"""
Select a style by name, e.g. set_style('default'). To revert to the
previous style use the key 'unset' or False.
"""
if key is None:
return
elif not key or key in ['unset', 'backup']:
if 'backup' in styles:
plt.rcParams.update(styles['backup'])
else:
raise Exception('No style backed up to restore')
elif key not in styles:
raise KeyError('%r not in available styles.')
else:
path = os.path.join(os.path.dirname(__file__), styles[key])
new_style = rc_params_from_file(path, use_default_template=False)
styles['backup'] = dict(plt.rcParams)
plt.rcParams.update(new_style) | Select a style by name, e.g. set_style('default'). To revert to the
previous style use the key 'unset' or False. | Below is the the instruction that describes the task:
### Input:
Select a style by name, e.g. set_style('default'). To revert to the
previous style use the key 'unset' or False.
### Response:
def set_style(key):
"""
Select a style by name, e.g. set_style('default'). To revert to the
previous style use the key 'unset' or False.
"""
if key is None:
return
elif not key or key in ['unset', 'backup']:
if 'backup' in styles:
plt.rcParams.update(styles['backup'])
else:
raise Exception('No style backed up to restore')
elif key not in styles:
raise KeyError('%r not in available styles.')
else:
path = os.path.join(os.path.dirname(__file__), styles[key])
new_style = rc_params_from_file(path, use_default_template=False)
styles['backup'] = dict(plt.rcParams)
plt.rcParams.update(new_style) |
def error(self, line_number, offset, text, check):
"""Report an error, according to options."""
code = super(StandardReport, self).error(line_number, offset,
text, check)
if code and (self.counters[code] == 1 or self._repeat):
self._deferred_print.append(
(line_number, offset, code, text[5:], check.__doc__))
return code | Report an error, according to options. | Below is the the instruction that describes the task:
### Input:
Report an error, according to options.
### Response:
def error(self, line_number, offset, text, check):
"""Report an error, according to options."""
code = super(StandardReport, self).error(line_number, offset,
text, check)
if code and (self.counters[code] == 1 or self._repeat):
self._deferred_print.append(
(line_number, offset, code, text[5:], check.__doc__))
return code |
def mask_and_mean_loss(input_tensor, binary_tensor, axis=None):
"""
Mask a loss by using a tensor filled with 0 or 1 and average correctly.
:param input_tensor: A float tensor of shape [batch_size, ...] representing the loss/cross_entropy
:param binary_tensor: A float tensor of shape [batch_size, ...] representing the mask.
:return: A float tensor of shape [batch_size, ...] representing the masked loss.
:param axis: The dimensions to reduce. If None (the default), reduces all dimensions.
Must be in the range [-rank(input_tensor), rank(input_tensor)).
"""
return mean_on_masked(mask_loss(input_tensor, binary_tensor), binary_tensor, axis=axis) | Mask a loss by using a tensor filled with 0 or 1 and average correctly.
:param input_tensor: A float tensor of shape [batch_size, ...] representing the loss/cross_entropy
:param binary_tensor: A float tensor of shape [batch_size, ...] representing the mask.
:return: A float tensor of shape [batch_size, ...] representing the masked loss.
:param axis: The dimensions to reduce. If None (the default), reduces all dimensions.
Must be in the range [-rank(input_tensor), rank(input_tensor)). | Below is the the instruction that describes the task:
### Input:
Mask a loss by using a tensor filled with 0 or 1 and average correctly.
:param input_tensor: A float tensor of shape [batch_size, ...] representing the loss/cross_entropy
:param binary_tensor: A float tensor of shape [batch_size, ...] representing the mask.
:return: A float tensor of shape [batch_size, ...] representing the masked loss.
:param axis: The dimensions to reduce. If None (the default), reduces all dimensions.
Must be in the range [-rank(input_tensor), rank(input_tensor)).
### Response:
def mask_and_mean_loss(input_tensor, binary_tensor, axis=None):
"""
Mask a loss by using a tensor filled with 0 or 1 and average correctly.
:param input_tensor: A float tensor of shape [batch_size, ...] representing the loss/cross_entropy
:param binary_tensor: A float tensor of shape [batch_size, ...] representing the mask.
:return: A float tensor of shape [batch_size, ...] representing the masked loss.
:param axis: The dimensions to reduce. If None (the default), reduces all dimensions.
Must be in the range [-rank(input_tensor), rank(input_tensor)).
"""
return mean_on_masked(mask_loss(input_tensor, binary_tensor), binary_tensor, axis=axis) |
def main():
"""
Loads the config and handles the workers.
"""
config = Config()
pipes_dir = config.get('pipes', 'directory')
pipes_config = config.get('pipes', 'config_file')
pipes_config_path = os.path.join(config.config_dir, pipes_config)
if not os.path.exists(pipes_config_path):
print_error("Please configure the named pipes first")
return
workers = create_pipe_workers(pipes_config_path, pipes_dir)
if workers:
for worker in workers:
worker.start()
try:
for worker in workers:
worker.join()
except KeyboardInterrupt:
print_notification("Shutting down")
for worker in workers:
worker.terminate()
worker.join() | Loads the config and handles the workers. | Below is the the instruction that describes the task:
### Input:
Loads the config and handles the workers.
### Response:
def main():
"""
Loads the config and handles the workers.
"""
config = Config()
pipes_dir = config.get('pipes', 'directory')
pipes_config = config.get('pipes', 'config_file')
pipes_config_path = os.path.join(config.config_dir, pipes_config)
if not os.path.exists(pipes_config_path):
print_error("Please configure the named pipes first")
return
workers = create_pipe_workers(pipes_config_path, pipes_dir)
if workers:
for worker in workers:
worker.start()
try:
for worker in workers:
worker.join()
except KeyboardInterrupt:
print_notification("Shutting down")
for worker in workers:
worker.terminate()
worker.join() |
def to_json_format(conf):
'''Convert fields of a python dictionary to be dumped in json format'''
if 'fmode' in conf:
conf['fmode'] = oct(conf['fmode'])[-3:]
if 'dmode' in conf:
conf['dmode'] = oct(conf['dmode'])[-3:] | Convert fields of a python dictionary to be dumped in json format | Below is the the instruction that describes the task:
### Input:
Convert fields of a python dictionary to be dumped in json format
### Response:
def to_json_format(conf):
'''Convert fields of a python dictionary to be dumped in json format'''
if 'fmode' in conf:
conf['fmode'] = oct(conf['fmode'])[-3:]
if 'dmode' in conf:
conf['dmode'] = oct(conf['dmode'])[-3:] |
def _make_blocks(records): # @NoSelf
'''
Organizes the physical records into blocks in a list by
placing consecutive physical records into a single block, so
lesser VXRs will be created.
[[start_rec1,end_rec1,data_1], [start_rec2,enc_rec2,data_2], ...]
Parameters:
records: list
A list of records that there is data for
Returns:
sparse_blocks: list of list
A list of ranges we have physical values for.
Example:
Input: [1,2,3,4,10,11,12,13,50,51,52,53]
Output: [[1,4],[10,13],[50,53]]
'''
sparse_blocks = []
total = len(records)
if (total == 0):
return []
x = 0
while (x < total):
recstart = records[x]
y = x
recnum = recstart
# Find the location in the records before the next gap
# Call this value "y"
while ((y+1) < total):
y = y + 1
nextnum = records[y]
diff = nextnum - recnum
if (diff == 1):
recnum = nextnum
else:
y = y - 1
break
# Put the values of the records into "ablock", append to sparse_blocks
ablock = []
ablock.append(recstart)
if ((y+1) == total):
recend = records[total-1]
else:
recend = records[y]
x = y + 1
ablock.append(recend)
sparse_blocks.append(ablock)
return sparse_blocks | Organizes the physical records into blocks in a list by
placing consecutive physical records into a single block, so
lesser VXRs will be created.
[[start_rec1,end_rec1,data_1], [start_rec2,enc_rec2,data_2], ...]
Parameters:
records: list
A list of records that there is data for
Returns:
sparse_blocks: list of list
A list of ranges we have physical values for.
Example:
Input: [1,2,3,4,10,11,12,13,50,51,52,53]
Output: [[1,4],[10,13],[50,53]] | Below is the the instruction that describes the task:
### Input:
Organizes the physical records into blocks in a list by
placing consecutive physical records into a single block, so
lesser VXRs will be created.
[[start_rec1,end_rec1,data_1], [start_rec2,enc_rec2,data_2], ...]
Parameters:
records: list
A list of records that there is data for
Returns:
sparse_blocks: list of list
A list of ranges we have physical values for.
Example:
Input: [1,2,3,4,10,11,12,13,50,51,52,53]
Output: [[1,4],[10,13],[50,53]]
### Response:
def _make_blocks(records): # @NoSelf
'''
Organizes the physical records into blocks in a list by
placing consecutive physical records into a single block, so
lesser VXRs will be created.
[[start_rec1,end_rec1,data_1], [start_rec2,enc_rec2,data_2], ...]
Parameters:
records: list
A list of records that there is data for
Returns:
sparse_blocks: list of list
A list of ranges we have physical values for.
Example:
Input: [1,2,3,4,10,11,12,13,50,51,52,53]
Output: [[1,4],[10,13],[50,53]]
'''
sparse_blocks = []
total = len(records)
if (total == 0):
return []
x = 0
while (x < total):
recstart = records[x]
y = x
recnum = recstart
# Find the location in the records before the next gap
# Call this value "y"
while ((y+1) < total):
y = y + 1
nextnum = records[y]
diff = nextnum - recnum
if (diff == 1):
recnum = nextnum
else:
y = y - 1
break
# Put the values of the records into "ablock", append to sparse_blocks
ablock = []
ablock.append(recstart)
if ((y+1) == total):
recend = records[total-1]
else:
recend = records[y]
x = y + 1
ablock.append(recend)
sparse_blocks.append(ablock)
return sparse_blocks |
def dpi(self):
"""
A (horz_dpi, vert_dpi) 2-tuple specifying the dots-per-inch
resolution of this image. A default value of (72, 72) is used if the
dpi is not specified in the image file.
"""
def int_dpi(dpi):
"""
Return an integer dots-per-inch value corresponding to *dpi*. If
*dpi* is |None|, a non-numeric type, less than 1 or greater than
2048, 72 is returned.
"""
try:
int_dpi = int(round(float(dpi)))
if int_dpi < 1 or int_dpi > 2048:
int_dpi = 72
except (TypeError, ValueError):
int_dpi = 72
return int_dpi
def normalize_pil_dpi(pil_dpi):
"""
Return a (horz_dpi, vert_dpi) 2-tuple corresponding to *pil_dpi*,
the value for the 'dpi' key in the ``info`` dict of a PIL image.
If the 'dpi' key is not present or contains an invalid value,
``(72, 72)`` is returned.
"""
if isinstance(pil_dpi, tuple):
return (int_dpi(pil_dpi[0]), int_dpi(pil_dpi[1]))
return (72, 72)
return normalize_pil_dpi(self._pil_props[2]) | A (horz_dpi, vert_dpi) 2-tuple specifying the dots-per-inch
resolution of this image. A default value of (72, 72) is used if the
dpi is not specified in the image file. | Below is the the instruction that describes the task:
### Input:
A (horz_dpi, vert_dpi) 2-tuple specifying the dots-per-inch
resolution of this image. A default value of (72, 72) is used if the
dpi is not specified in the image file.
### Response:
def dpi(self):
"""
A (horz_dpi, vert_dpi) 2-tuple specifying the dots-per-inch
resolution of this image. A default value of (72, 72) is used if the
dpi is not specified in the image file.
"""
def int_dpi(dpi):
"""
Return an integer dots-per-inch value corresponding to *dpi*. If
*dpi* is |None|, a non-numeric type, less than 1 or greater than
2048, 72 is returned.
"""
try:
int_dpi = int(round(float(dpi)))
if int_dpi < 1 or int_dpi > 2048:
int_dpi = 72
except (TypeError, ValueError):
int_dpi = 72
return int_dpi
def normalize_pil_dpi(pil_dpi):
"""
Return a (horz_dpi, vert_dpi) 2-tuple corresponding to *pil_dpi*,
the value for the 'dpi' key in the ``info`` dict of a PIL image.
If the 'dpi' key is not present or contains an invalid value,
``(72, 72)`` is returned.
"""
if isinstance(pil_dpi, tuple):
return (int_dpi(pil_dpi[0]), int_dpi(pil_dpi[1]))
return (72, 72)
return normalize_pil_dpi(self._pil_props[2]) |
def store_sample(self, sample_bytes, filename, type_tag):
"""Store a sample into the datastore.
Args:
filename: Name of the file.
sample_bytes: Actual bytes of sample.
type_tag: Type of sample ('exe','pcap','pdf','json','swf', or ...)
Returns:
md5 digest of the sample.
"""
# Temp sanity check for old clients
if len(filename) > 1000:
print 'switched bytes/filename... %s %s' % (sample_bytes[:100], filename[:100])
exit(1)
sample_info = {}
# Compute the MD5 hash
sample_info['md5'] = hashlib.md5(sample_bytes).hexdigest()
# Check if sample already exists
if self.has_sample(sample_info['md5']):
return sample_info['md5']
# Run the periodic operations
self.periodic_ops()
# Check if we need to expire anything
self.expire_data()
# Okay start populating the sample for adding to the data store
# Filename, length, import time and type_tag
sample_info['filename'] = filename
sample_info['length'] = len(sample_bytes)
sample_info['import_time'] = datetime.datetime.utcnow()
sample_info['type_tag'] = type_tag
# Random customer for now
import random
sample_info['customer'] = random.choice(['Mega Corp', 'Huge Inc', 'BearTron', 'Dorseys Mom'])
# Push the file into the MongoDB GridFS
sample_info['__grid_fs'] = self.gridfs_handle.put(sample_bytes)
self.database[self.sample_collection].insert(sample_info)
# Print info
print 'Sample Storage: %.2f out of %.2f MB' % (self.sample_storage_size(), self.samples_cap)
# Return the sample md5
return sample_info['md5'] | Store a sample into the datastore.
Args:
filename: Name of the file.
sample_bytes: Actual bytes of sample.
type_tag: Type of sample ('exe','pcap','pdf','json','swf', or ...)
Returns:
md5 digest of the sample. | Below is the the instruction that describes the task:
### Input:
Store a sample into the datastore.
Args:
filename: Name of the file.
sample_bytes: Actual bytes of sample.
type_tag: Type of sample ('exe','pcap','pdf','json','swf', or ...)
Returns:
md5 digest of the sample.
### Response:
def store_sample(self, sample_bytes, filename, type_tag):
"""Store a sample into the datastore.
Args:
filename: Name of the file.
sample_bytes: Actual bytes of sample.
type_tag: Type of sample ('exe','pcap','pdf','json','swf', or ...)
Returns:
md5 digest of the sample.
"""
# Temp sanity check for old clients
if len(filename) > 1000:
print 'switched bytes/filename... %s %s' % (sample_bytes[:100], filename[:100])
exit(1)
sample_info = {}
# Compute the MD5 hash
sample_info['md5'] = hashlib.md5(sample_bytes).hexdigest()
# Check if sample already exists
if self.has_sample(sample_info['md5']):
return sample_info['md5']
# Run the periodic operations
self.periodic_ops()
# Check if we need to expire anything
self.expire_data()
# Okay start populating the sample for adding to the data store
# Filename, length, import time and type_tag
sample_info['filename'] = filename
sample_info['length'] = len(sample_bytes)
sample_info['import_time'] = datetime.datetime.utcnow()
sample_info['type_tag'] = type_tag
# Random customer for now
import random
sample_info['customer'] = random.choice(['Mega Corp', 'Huge Inc', 'BearTron', 'Dorseys Mom'])
# Push the file into the MongoDB GridFS
sample_info['__grid_fs'] = self.gridfs_handle.put(sample_bytes)
self.database[self.sample_collection].insert(sample_info)
# Print info
print 'Sample Storage: %.2f out of %.2f MB' % (self.sample_storage_size(), self.samples_cap)
# Return the sample md5
return sample_info['md5'] |
def Lastovka_solid_integral(T, similarity_variable):
r'''Integrates solid constant-pressure heat capacitiy with the similarity
variable concept and method as shown in [1]_.
Uses a explicit form as derived with Sympy.
Parameters
----------
T : float
Temperature of solid [K]
similarity_variable : float
similarity variable as defined in [1]_, [mol/g]
Returns
-------
H : float
Difference in enthalpy from 0 K, [J/kg]
Notes
-----
Original model is in terms of J/g/K. Note that the model is for predicting
mass heat capacity, not molar heat capacity like most other methods!
See Also
--------
Lastovka_solid
Examples
--------
>>> Lastovka_solid_integral(300, 0.2139)
283246.1242170376
References
----------
.. [1] Laštovka, Václav, Michal Fulem, Mildred Becerra, and John M. Shaw.
"A Similarity Variable for Estimating the Heat Capacity of Solid Organic
Compounds: Part II. Application: Heat Capacity Calculation for
Ill-Defined Organic Solids." Fluid Phase Equilibria 268, no. 1-2
(June 25, 2008): 134-41. doi:10.1016/j.fluid.2008.03.018.
'''
A1 = 0.013183
A2 = 0.249381
theta = 151.8675
C1 = 0.026526
C2 = -0.024942
D1 = 0.000025
D2 = -0.000123
similarity_variable2 = similarity_variable*similarity_variable
return (T*T*T*(1000.*D1*similarity_variable/3.
+ 1000.*D2*similarity_variable2/3.) + T*T*(500.*C1*similarity_variable
+ 500.*C2*similarity_variable2)
+ (3000.*A1*R*similarity_variable*theta
+ 3000.*A2*R*similarity_variable2*theta)/(exp(theta/T) - 1.)) | r'''Integrates solid constant-pressure heat capacitiy with the similarity
variable concept and method as shown in [1]_.
Uses a explicit form as derived with Sympy.
Parameters
----------
T : float
Temperature of solid [K]
similarity_variable : float
similarity variable as defined in [1]_, [mol/g]
Returns
-------
H : float
Difference in enthalpy from 0 K, [J/kg]
Notes
-----
Original model is in terms of J/g/K. Note that the model is for predicting
mass heat capacity, not molar heat capacity like most other methods!
See Also
--------
Lastovka_solid
Examples
--------
>>> Lastovka_solid_integral(300, 0.2139)
283246.1242170376
References
----------
.. [1] Laštovka, Václav, Michal Fulem, Mildred Becerra, and John M. Shaw.
"A Similarity Variable for Estimating the Heat Capacity of Solid Organic
Compounds: Part II. Application: Heat Capacity Calculation for
Ill-Defined Organic Solids." Fluid Phase Equilibria 268, no. 1-2
(June 25, 2008): 134-41. doi:10.1016/j.fluid.2008.03.018. | Below is the the instruction that describes the task:
### Input:
r'''Integrates solid constant-pressure heat capacitiy with the similarity
variable concept and method as shown in [1]_.
Uses a explicit form as derived with Sympy.
Parameters
----------
T : float
Temperature of solid [K]
similarity_variable : float
similarity variable as defined in [1]_, [mol/g]
Returns
-------
H : float
Difference in enthalpy from 0 K, [J/kg]
Notes
-----
Original model is in terms of J/g/K. Note that the model is for predicting
mass heat capacity, not molar heat capacity like most other methods!
See Also
--------
Lastovka_solid
Examples
--------
>>> Lastovka_solid_integral(300, 0.2139)
283246.1242170376
References
----------
.. [1] Laštovka, Václav, Michal Fulem, Mildred Becerra, and John M. Shaw.
"A Similarity Variable for Estimating the Heat Capacity of Solid Organic
Compounds: Part II. Application: Heat Capacity Calculation for
Ill-Defined Organic Solids." Fluid Phase Equilibria 268, no. 1-2
(June 25, 2008): 134-41. doi:10.1016/j.fluid.2008.03.018.
### Response:
def Lastovka_solid_integral(T, similarity_variable):
r'''Integrates solid constant-pressure heat capacitiy with the similarity
variable concept and method as shown in [1]_.
Uses a explicit form as derived with Sympy.
Parameters
----------
T : float
Temperature of solid [K]
similarity_variable : float
similarity variable as defined in [1]_, [mol/g]
Returns
-------
H : float
Difference in enthalpy from 0 K, [J/kg]
Notes
-----
Original model is in terms of J/g/K. Note that the model is for predicting
mass heat capacity, not molar heat capacity like most other methods!
See Also
--------
Lastovka_solid
Examples
--------
>>> Lastovka_solid_integral(300, 0.2139)
283246.1242170376
References
----------
.. [1] Laštovka, Václav, Michal Fulem, Mildred Becerra, and John M. Shaw.
"A Similarity Variable for Estimating the Heat Capacity of Solid Organic
Compounds: Part II. Application: Heat Capacity Calculation for
Ill-Defined Organic Solids." Fluid Phase Equilibria 268, no. 1-2
(June 25, 2008): 134-41. doi:10.1016/j.fluid.2008.03.018.
'''
A1 = 0.013183
A2 = 0.249381
theta = 151.8675
C1 = 0.026526
C2 = -0.024942
D1 = 0.000025
D2 = -0.000123
similarity_variable2 = similarity_variable*similarity_variable
return (T*T*T*(1000.*D1*similarity_variable/3.
+ 1000.*D2*similarity_variable2/3.) + T*T*(500.*C1*similarity_variable
+ 500.*C2*similarity_variable2)
+ (3000.*A1*R*similarity_variable*theta
+ 3000.*A2*R*similarity_variable2*theta)/(exp(theta/T) - 1.)) |
def merge_pex_path(self, pex_path):
"""Merges a new PEX_PATH definition into the existing one (if any).
:param string pex_path: The PEX_PATH to merge.
"""
if not pex_path:
return
self.pex_path = ':'.join(merge_split(self.pex_path, pex_path)) | Merges a new PEX_PATH definition into the existing one (if any).
:param string pex_path: The PEX_PATH to merge. | Below is the the instruction that describes the task:
### Input:
Merges a new PEX_PATH definition into the existing one (if any).
:param string pex_path: The PEX_PATH to merge.
### Response:
def merge_pex_path(self, pex_path):
"""Merges a new PEX_PATH definition into the existing one (if any).
:param string pex_path: The PEX_PATH to merge.
"""
if not pex_path:
return
self.pex_path = ':'.join(merge_split(self.pex_path, pex_path)) |
def Uniform(low, high, tag=None):
"""
A Uniform random variate
Parameters
----------
low : scalar
Lower bound of the distribution support.
high : scalar
Upper bound of the distribution support.
"""
assert low < high, 'Uniform "low" must be less than "high"'
return uv(ss.uniform(loc=low, scale=high - low), tag=tag) | A Uniform random variate
Parameters
----------
low : scalar
Lower bound of the distribution support.
high : scalar
Upper bound of the distribution support. | Below is the the instruction that describes the task:
### Input:
A Uniform random variate
Parameters
----------
low : scalar
Lower bound of the distribution support.
high : scalar
Upper bound of the distribution support.
### Response:
def Uniform(low, high, tag=None):
"""
A Uniform random variate
Parameters
----------
low : scalar
Lower bound of the distribution support.
high : scalar
Upper bound of the distribution support.
"""
assert low < high, 'Uniform "low" must be less than "high"'
return uv(ss.uniform(loc=low, scale=high - low), tag=tag) |
def init_flatpak():
"""
If we are in Flatpak, we must build a tessdata/ directory using the
.traineddata files from each locale directory
"""
tessdata_files = glob.glob("/app/share/locale/*/*.traineddata")
if len(tessdata_files) <= 0:
return os.path.exists("/app")
localdir = os.path.expanduser("~/.local")
base_data_dir = os.getenv(
"XDG_DATA_HOME",
os.path.join(localdir, "share")
)
tessdatadir = os.path.join(base_data_dir, "paperwork", "tessdata")
logger.info("Assuming we are running in Flatpak."
" Building tessdata directory {} ...".format(tessdatadir))
util.rm_rf(tessdatadir)
util.mkdir_p(tessdatadir)
os.symlink("/app/share/tessdata/eng.traineddata",
os.path.join(tessdatadir, "eng.traineddata"))
os.symlink("/app/share/tessdata/osd.traineddata",
os.path.join(tessdatadir, "osd.traineddata"))
os.symlink("/app/share/tessdata/configs",
os.path.join(tessdatadir, "configs"))
os.symlink("/app/share/tessdata/tessconfigs",
os.path.join(tessdatadir, "tessconfigs"))
for tessdata in tessdata_files:
logger.info("{} found".format(tessdata))
os.symlink(tessdata, os.path.join(tessdatadir,
os.path.basename(tessdata)))
os.environ['TESSDATA_PREFIX'] = os.path.dirname(tessdatadir)
logger.info("Tessdata directory ready")
return True | If we are in Flatpak, we must build a tessdata/ directory using the
.traineddata files from each locale directory | Below is the the instruction that describes the task:
### Input:
If we are in Flatpak, we must build a tessdata/ directory using the
.traineddata files from each locale directory
### Response:
def init_flatpak():
"""
If we are in Flatpak, we must build a tessdata/ directory using the
.traineddata files from each locale directory
"""
tessdata_files = glob.glob("/app/share/locale/*/*.traineddata")
if len(tessdata_files) <= 0:
return os.path.exists("/app")
localdir = os.path.expanduser("~/.local")
base_data_dir = os.getenv(
"XDG_DATA_HOME",
os.path.join(localdir, "share")
)
tessdatadir = os.path.join(base_data_dir, "paperwork", "tessdata")
logger.info("Assuming we are running in Flatpak."
" Building tessdata directory {} ...".format(tessdatadir))
util.rm_rf(tessdatadir)
util.mkdir_p(tessdatadir)
os.symlink("/app/share/tessdata/eng.traineddata",
os.path.join(tessdatadir, "eng.traineddata"))
os.symlink("/app/share/tessdata/osd.traineddata",
os.path.join(tessdatadir, "osd.traineddata"))
os.symlink("/app/share/tessdata/configs",
os.path.join(tessdatadir, "configs"))
os.symlink("/app/share/tessdata/tessconfigs",
os.path.join(tessdatadir, "tessconfigs"))
for tessdata in tessdata_files:
logger.info("{} found".format(tessdata))
os.symlink(tessdata, os.path.join(tessdatadir,
os.path.basename(tessdata)))
os.environ['TESSDATA_PREFIX'] = os.path.dirname(tessdatadir)
logger.info("Tessdata directory ready")
return True |
def write_file(self, filename):
"""Write the coverage data to `filename`."""
# Create the file data.
data = {}
data['lines'] = self.line_data()
arcs = self.arc_data()
if arcs:
data['arcs'] = arcs
if self.collector:
data['collector'] = self.collector
if self.debug and self.debug.should('dataio'):
self.debug.write("Writing data to %r" % (filename,))
# Write the pickle to the file.
fdata = open(filename, 'wb')
try:
pickle.dump(data, fdata, 2)
finally:
fdata.close() | Write the coverage data to `filename`. | Below is the the instruction that describes the task:
### Input:
Write the coverage data to `filename`.
### Response:
def write_file(self, filename):
"""Write the coverage data to `filename`."""
# Create the file data.
data = {}
data['lines'] = self.line_data()
arcs = self.arc_data()
if arcs:
data['arcs'] = arcs
if self.collector:
data['collector'] = self.collector
if self.debug and self.debug.should('dataio'):
self.debug.write("Writing data to %r" % (filename,))
# Write the pickle to the file.
fdata = open(filename, 'wb')
try:
pickle.dump(data, fdata, 2)
finally:
fdata.close() |
def drop_privileges():
"""
Set settings.DROPLET_USER UID for the current process
After calling this, root operation will be impossible to execute
See root context manager
"""
uid = int(pwd.getpwnam(settings.DROPLET_USER).pw_uid)
os.setuid(uid) | Set settings.DROPLET_USER UID for the current process
After calling this, root operation will be impossible to execute
See root context manager | Below is the the instruction that describes the task:
### Input:
Set settings.DROPLET_USER UID for the current process
After calling this, root operation will be impossible to execute
See root context manager
### Response:
def drop_privileges():
"""
Set settings.DROPLET_USER UID for the current process
After calling this, root operation will be impossible to execute
See root context manager
"""
uid = int(pwd.getpwnam(settings.DROPLET_USER).pw_uid)
os.setuid(uid) |
def TAPQuery(RAdeg=180.0, DECdeg=0.0, width=1, height=1):
"""Do a query of the CADC Megacam table. Get all observations insize the box. Returns a file-like object"""
QUERY =( """ SELECT """
""" COORD1(CENTROID(Plane.position_bounds)) AS "RAJ2000", COORD2(CENTROID(Plane.position_bounds)) AS "DEJ2000", Plane.time_bounds_lower as "MJDATE" """
""" FROM """
""" caom2.Observation as o JOIN caom2.Plane as Plane on o.obsID=Plane.obsID """
""" WHERE """
""" o.collection = 'CFHT' """
""" AND o.instrument_name = 'MegaPrime' """
""" AND INTERSECTS( BOX('ICRS', {}, {}, {}, {}), Plane.position_bounds ) = 1 """
""" AND ( o.proposal_id LIKE '%P05') """)
# """ AND ( o.proposal_id LIKE '%P05' OR o.proposal_id LIKE '%L03' or o.proposal_id LIKE '%L06' or o.proposal_id
# in ( '06AF33', '06BF98' ) ) """ )
QUERY = QUERY.format( RAdeg, DECdeg, width, height)
data={"QUERY": QUERY,
"REQUEST": "doQuery",
"LANG": "ADQL",
"FORMAT": "votable"}
url="http://www.cadc.hia.nrc.gc.ca/tap/sync"
print url, data
    return urllib.urlopen(url,urllib.urlencode(data)) | Do a query of the CADC Megacam table. Get all observations inside the box. Returns a file-like object | Below is the instruction that describes the task:
### Input:
Do a query of the CADC Megacam table. Get all observations insize the box. Returns a file-like object
### Response:
def TAPQuery(RAdeg=180.0, DECdeg=0.0, width=1, height=1):
    """Do a query of the CADC Megacam table.

    Gets all CFHT/MegaPrime observations inside the box of the given
    ``width`` x ``height`` (degrees) centred on (``RAdeg``, ``DECdeg``),
    restricted to proposal ids ending in 'P05'.

    Returns a file-like object (the open HTTP response) containing a
    VOTable with RAJ2000, DEJ2000 and MJDATE columns.
    """
    # ADQL query; the INTERSECTS/BOX predicate filters spatially on the
    # server side, so only matching rows come back.
    QUERY =( """ SELECT """
             """ COORD1(CENTROID(Plane.position_bounds)) AS "RAJ2000", COORD2(CENTROID(Plane.position_bounds)) AS "DEJ2000", Plane.time_bounds_lower as "MJDATE" """
             """ FROM """
             """ caom2.Observation as o JOIN caom2.Plane as Plane on o.obsID=Plane.obsID """
             """ WHERE """
             """ o.collection = 'CFHT' """
             """ AND o.instrument_name = 'MegaPrime' """
             """ AND INTERSECTS( BOX('ICRS', {}, {}, {}, {}), Plane.position_bounds ) = 1 """
             """ AND ( o.proposal_id LIKE '%P05') """)
    # """ AND ( o.proposal_id LIKE '%P05' OR o.proposal_id LIKE '%L03' or o.proposal_id LIKE '%L06' or o.proposal_id
    # in ( '06AF33', '06BF98' ) ) """ )
    QUERY = QUERY.format( RAdeg, DECdeg, width, height)
    # Standard TAP sync-query parameters; results come back as a VOTable.
    data={"QUERY": QUERY,
          "REQUEST": "doQuery",
          "LANG": "ADQL",
          "FORMAT": "votable"}
    url="http://www.cadc.hia.nrc.gc.ca/tap/sync"
    # NOTE(review): leftover debug output to stdout (Python 2 print statement).
    print url, data
    # POST the query synchronously; the caller reads the VOTable from the response.
    return urllib.urlopen(url,urllib.urlencode(data))
def mutate_node(node, context):
"""
:type context: Context
"""
context.stack.append(node)
try:
if node.type in ('tfpdef', 'import_from', 'import_name'):
return
if node.start_pos[0] - 1 != context.current_line_index:
context.current_line_index = node.start_pos[0] - 1
context.index = 0 # indexes are unique per line, so start over here!
if hasattr(node, 'children'):
mutate_list_of_nodes(node, context=context)
# this is just an optimization to stop early
if context.number_of_performed_mutations and context.mutation_id != ALL:
return
mutation = mutations_by_type.get(node.type)
if mutation is None:
return
for key, value in sorted(mutation.items()):
old = getattr(node, key)
if context.exclude_line():
continue
new = evaluate(
value,
context=context,
node=node,
value=getattr(node, 'value', None),
children=getattr(node, 'children', None),
)
assert not callable(new)
if new is not None and new != old:
if context.should_mutate():
context.number_of_performed_mutations += 1
context.performed_mutation_ids.append(context.mutation_id_of_current_index)
setattr(node, key, new)
context.index += 1
# this is just an optimization to stop early
if context.number_of_performed_mutations and context.mutation_id != ALL:
return
finally:
        context.stack.pop() | :type context: Context | Below is the instruction that describes the task:
### Input:
:type context: Context
### Response:
def mutate_node(node, context):
    """
    Recursively apply mutations to ``node`` (and its children), recording
    every mutation performed in ``context``.

    :type context: Context
    """
    # Track the current path through the tree so callers can inspect it.
    context.stack.append(node)
    try:
        # These node types are never mutated (imports, parameter defs).
        if node.type in ('tfpdef', 'import_from', 'import_name'):
            return
        if node.start_pos[0] - 1 != context.current_line_index:
            context.current_line_index = node.start_pos[0] - 1
            context.index = 0 # indexes are unique per line, so start over here!
        # Recurse into child nodes first, depth-first.
        if hasattr(node, 'children'):
            mutate_list_of_nodes(node, context=context)
            # this is just an optimization to stop early
            if context.number_of_performed_mutations and context.mutation_id != ALL:
                return
        # Look up the mutation table entry for this node type, if any.
        mutation = mutations_by_type.get(node.type)
        if mutation is None:
            return
        # sorted() makes mutation ordering (and thus mutation ids) deterministic.
        for key, value in sorted(mutation.items()):
            old = getattr(node, key)
            if context.exclude_line():
                continue
            # ``value`` may be a callable or a constant; evaluate() resolves it
            # against the node's current value/children.
            new = evaluate(
                value,
                context=context,
                node=node,
                value=getattr(node, 'value', None),
                children=getattr(node, 'children', None),
            )
            assert not callable(new)
            # Only count it as a mutation when it actually changes something.
            if new is not None and new != old:
                if context.should_mutate():
                    context.number_of_performed_mutations += 1
                    context.performed_mutation_ids.append(context.mutation_id_of_current_index)
                    setattr(node, key, new)
                context.index += 1
                # this is just an optimization to stop early
                if context.number_of_performed_mutations and context.mutation_id != ALL:
                    return
    finally:
        # Always unwind the stack, even on early return.
        context.stack.pop()
def analisar_retorno(retorno,
classe_resposta=RespostaSAT, campos=RespostaSAT.CAMPOS,
campos_alternativos=[], funcao=None, manter_verbatim=True):
"""Analisa o retorno (supostamente um retorno de uma função do SAT) conforme
o padrão e campos esperados. O retorno deverá possuir dados separados entre
si através de pipes e o número de campos deverá coincidir com os campos
especificados.
O campos devem ser especificados como uma tupla onde cada elemento da tupla
deverá ser uma tupla contendo dois elementos: o nome do campo e uma função
de conversão a partir de uma string unicode. Por exemplo:
.. sourcecode:: python
>>> retorno = '123456|08000|SAT em operacao||'
>>> resposta = analisar_retorno(retorno, funcao='ConsultarSAT')
>>> resposta.numeroSessao
123456
>>> resposta.EEEEE
u'08000'
>>> resposta.mensagem
u'SAT em operacao'
>>> resposta.cod
u''
>>> resposta.mensagemSEFAZ
u''
>>> resposta.atributos.funcao
'ConsultarSAT'
>>> resposta.atributos.verbatim
'123456|08000|SAT em operacao||'
:param unicode retorno: O conteúdo **unicode** da resposta retornada pela
função da DLL SAT.
:param type classe_resposta: O tipo :class:`RespostaSAT` ou especialização
que irá representar o retorno, após sua decomposição em campos.
:param tuple campos: Especificação dos campos (nomes) e seus conversores a
a partir do tipo ``unicode``.
:param list campos_alternativos: Especifica conjuntos de campos alternativos
que serão considerados caso o número de campos encontrados na resposta
não coincida com o número de campos especificados em ``campos``.
Para que a relação alternativa de campos funcione, é importante que
cada relação de campos alternativos tenha um número diferente de campos.
:param str funcao: Nome da função da DLL SAT que gerou o retorno, que
estará disponível nos atributos adicionais à resposta.
:param bool manter_verbatim: Se uma cópia verbatim da resposta deverá ser
mantida nos atributos adicionais à resposta.
:raises ErroRespostaSATInvalida: Se o retorno não estiver em conformidade
com o padrão esperado ou se não possuir os campos especificados.
:return: Uma instância de :class:`RespostaSAT` ou especialização.
:rtype: satcfe.resposta.padrao.RespostaSAT
"""
if '|' not in retorno:
raise ErroRespostaSATInvalida('Resposta nao possui pipes separando os '
'campos: "%s"' % as_ascii(retorno))
partes = retorno.split('|')
if len(partes) != len(campos):
# procura por uma relação alternativa de campos do retorno
for relacao_alternativa in campos_alternativos:
if len(partes) == len(relacao_alternativa):
relacao_campos = relacao_alternativa
break
else:
raise ErroRespostaSATInvalida('Resposta nao possui o numero '
'esperado de campos. Esperados %d campos, mas '
'contem %d: "%s"' % (
len(campos), len(partes), as_ascii(retorno),))
else:
relacao_campos = campos
resultado = {}
def _enumerate(sequence):
for index, value in enumerate(sequence):
yield index, value[0], value[1]
for indice, campo, conversor in _enumerate(relacao_campos):
resultado[campo] = conversor(partes[indice])
resposta = classe_resposta(**resultado)
resposta.atributos.funcao = funcao
resposta.atributos.verbatim = retorno if manter_verbatim else None
return resposta | Analisa o retorno (supostamente um retorno de uma função do SAT) conforme
o padrão e campos esperados. O retorno deverá possuir dados separados entre
si através de pipes e o número de campos deverá coincidir com os campos
especificados.
O campos devem ser especificados como uma tupla onde cada elemento da tupla
deverá ser uma tupla contendo dois elementos: o nome do campo e uma função
de conversão a partir de uma string unicode. Por exemplo:
.. sourcecode:: python
>>> retorno = '123456|08000|SAT em operacao||'
>>> resposta = analisar_retorno(retorno, funcao='ConsultarSAT')
>>> resposta.numeroSessao
123456
>>> resposta.EEEEE
u'08000'
>>> resposta.mensagem
u'SAT em operacao'
>>> resposta.cod
u''
>>> resposta.mensagemSEFAZ
u''
>>> resposta.atributos.funcao
'ConsultarSAT'
>>> resposta.atributos.verbatim
'123456|08000|SAT em operacao||'
:param unicode retorno: O conteúdo **unicode** da resposta retornada pela
função da DLL SAT.
:param type classe_resposta: O tipo :class:`RespostaSAT` ou especialização
que irá representar o retorno, após sua decomposição em campos.
:param tuple campos: Especificação dos campos (nomes) e seus conversores a
a partir do tipo ``unicode``.
:param list campos_alternativos: Especifica conjuntos de campos alternativos
que serão considerados caso o número de campos encontrados na resposta
não coincida com o número de campos especificados em ``campos``.
Para que a relação alternativa de campos funcione, é importante que
cada relação de campos alternativos tenha um número diferente de campos.
:param str funcao: Nome da função da DLL SAT que gerou o retorno, que
estará disponível nos atributos adicionais à resposta.
:param bool manter_verbatim: Se uma cópia verbatim da resposta deverá ser
mantida nos atributos adicionais à resposta.
:raises ErroRespostaSATInvalida: Se o retorno não estiver em conformidade
com o padrão esperado ou se não possuir os campos especificados.
:return: Uma instância de :class:`RespostaSAT` ou especialização.
:rtype: satcfe.resposta.padrao.RespostaSAT | Below is the the instruction that describes the task:
### Input:
Analisa o retorno (supostamente um retorno de uma função do SAT) conforme
o padrão e campos esperados. O retorno deverá possuir dados separados entre
si através de pipes e o número de campos deverá coincidir com os campos
especificados.
O campos devem ser especificados como uma tupla onde cada elemento da tupla
deverá ser uma tupla contendo dois elementos: o nome do campo e uma função
de conversão a partir de uma string unicode. Por exemplo:
.. sourcecode:: python
>>> retorno = '123456|08000|SAT em operacao||'
>>> resposta = analisar_retorno(retorno, funcao='ConsultarSAT')
>>> resposta.numeroSessao
123456
>>> resposta.EEEEE
u'08000'
>>> resposta.mensagem
u'SAT em operacao'
>>> resposta.cod
u''
>>> resposta.mensagemSEFAZ
u''
>>> resposta.atributos.funcao
'ConsultarSAT'
>>> resposta.atributos.verbatim
'123456|08000|SAT em operacao||'
:param unicode retorno: O conteúdo **unicode** da resposta retornada pela
função da DLL SAT.
:param type classe_resposta: O tipo :class:`RespostaSAT` ou especialização
que irá representar o retorno, após sua decomposição em campos.
:param tuple campos: Especificação dos campos (nomes) e seus conversores a
a partir do tipo ``unicode``.
:param list campos_alternativos: Especifica conjuntos de campos alternativos
que serão considerados caso o número de campos encontrados na resposta
não coincida com o número de campos especificados em ``campos``.
Para que a relação alternativa de campos funcione, é importante que
cada relação de campos alternativos tenha um número diferente de campos.
:param str funcao: Nome da função da DLL SAT que gerou o retorno, que
estará disponível nos atributos adicionais à resposta.
:param bool manter_verbatim: Se uma cópia verbatim da resposta deverá ser
mantida nos atributos adicionais à resposta.
:raises ErroRespostaSATInvalida: Se o retorno não estiver em conformidade
com o padrão esperado ou se não possuir os campos especificados.
:return: Uma instância de :class:`RespostaSAT` ou especialização.
:rtype: satcfe.resposta.padrao.RespostaSAT
### Response:
def analisar_retorno(retorno,
        classe_resposta=RespostaSAT, campos=RespostaSAT.CAMPOS,
        campos_alternativos=(), funcao=None, manter_verbatim=True):
    """Parse a pipe-delimited response returned by a SAT DLL function.

    ``campos`` is a tuple of ``(field_name, converter)`` pairs, where each
    converter builds the field value from a unicode string. The number of
    pipe-separated fields in ``retorno`` must match ``campos``; otherwise
    each alternative layout in ``campos_alternativos`` is tried. For the
    alternative layouts to work, each one must have a distinct field count.

    .. sourcecode:: python

        >>> retorno = '123456|08000|SAT em operacao||'
        >>> resposta = analisar_retorno(retorno, funcao='ConsultarSAT')
        >>> resposta.numeroSessao
        123456
        >>> resposta.EEEEE
        u'08000'
        >>> resposta.mensagem
        u'SAT em operacao'
        >>> resposta.cod
        u''
        >>> resposta.mensagemSEFAZ
        u''
        >>> resposta.atributos.funcao
        'ConsultarSAT'
        >>> resposta.atributos.verbatim
        '123456|08000|SAT em operacao||'

    :param unicode retorno: Raw unicode content returned by the SAT DLL.
    :param type classe_resposta: :class:`RespostaSAT` (or subclass) used to
        represent the decomposed response.
    :param tuple campos: Field names and their converters from ``unicode``.
    :param campos_alternativos: Alternative field layouts tried when the
        field count does not match ``campos`` (see above).
    :param str funcao: Name of the SAT DLL function that produced the
        response; stored in the response's extra attributes.
    :param bool manter_verbatim: Whether to keep a verbatim copy of the
        raw response in its extra attributes.
    :raises ErroRespostaSATInvalida: If the response does not follow the
        expected layout or does not contain the specified fields.
    :return: An instance of :class:`RespostaSAT` or subclass.
    :rtype: satcfe.resposta.padrao.RespostaSAT
    """
    if '|' not in retorno:
        raise ErroRespostaSATInvalida('Resposta nao possui pipes separando os '
                'campos: "%s"' % as_ascii(retorno))
    partes = retorno.split('|')
    if len(partes) != len(campos):
        # Fall back to an alternative layout whose field count matches.
        for relacao_alternativa in campos_alternativos:
            if len(partes) == len(relacao_alternativa):
                relacao_campos = relacao_alternativa
                break
        else:
            raise ErroRespostaSATInvalida('Resposta nao possui o numero '
                    'esperado de campos. Esperados %d campos, mas '
                    'contem %d: "%s"' % (
                            len(campos), len(partes), as_ascii(retorno),))
    else:
        relacao_campos = campos
    # Run each raw field through its converter, keyed by field name.
    resultado = {}
    for indice, (campo, conversor) in enumerate(relacao_campos):
        resultado[campo] = conversor(partes[indice])
    resposta = classe_resposta(**resultado)
    resposta.atributos.funcao = funcao
    resposta.atributos.verbatim = retorno if manter_verbatim else None
    return resposta
def wait_until_element_clickable(self, element, timeout=None):
"""Search element and wait until it is clickable
:param element: PageElement or element locator as a tuple (locator_type, locator_value) to be found
:param timeout: max time to wait
:returns: the web element if it is clickable
:rtype: selenium.webdriver.remote.webelement.WebElement or appium.webdriver.webelement.WebElement
:raises TimeoutException: If the element is not clickable after the timeout
"""
return self._wait_until(self._expected_condition_find_element_clickable, element, timeout) | Search element and wait until it is clickable
:param element: PageElement or element locator as a tuple (locator_type, locator_value) to be found
:param timeout: max time to wait
:returns: the web element if it is clickable
:rtype: selenium.webdriver.remote.webelement.WebElement or appium.webdriver.webelement.WebElement
:raises TimeoutException: If the element is not clickable after the timeout | Below is the the instruction that describes the task:
### Input:
Search element and wait until it is clickable
:param element: PageElement or element locator as a tuple (locator_type, locator_value) to be found
:param timeout: max time to wait
:returns: the web element if it is clickable
:rtype: selenium.webdriver.remote.webelement.WebElement or appium.webdriver.webelement.WebElement
:raises TimeoutException: If the element is not clickable after the timeout
### Response:
def wait_until_element_clickable(self, element, timeout=None):
    """Search for an element and wait until it becomes clickable.

    :param element: PageElement or element locator as a tuple
                    (locator_type, locator_value) to be found
    :param timeout: max time to wait
    :returns: the web element once it is clickable
    :rtype: selenium.webdriver.remote.webelement.WebElement
            or appium.webdriver.webelement.WebElement
    :raises TimeoutException: If the element is not clickable after the timeout
    """
    condition = self._expected_condition_find_element_clickable
    return self._wait_until(condition, element, timeout)
def reftrack_type_data(rt, role):
"""Return the data for the type (e.g. Asset, Alembic, Camera etc)
:param rt: the :class:`jukeboxcore.reftrack.Reftrack` holds the data
:type rt: :class:`jukeboxcore.reftrack.Reftrack`
:param role: item data role
:type role: QtCore.Qt.ItemDataRole
:returns: data for the type
:rtype: depending on role
:raises: None
"""
if role == QtCore.Qt.DisplayRole or role == QtCore.Qt.EditRole:
return rt.get_typ()
elif role == QtCore.Qt.DecorationRole:
return rt.get_typ_icon() | Return the data for the type (e.g. Asset, Alembic, Camera etc)
:param rt: the :class:`jukeboxcore.reftrack.Reftrack` holds the data
:type rt: :class:`jukeboxcore.reftrack.Reftrack`
:param role: item data role
:type role: QtCore.Qt.ItemDataRole
:returns: data for the type
:rtype: depending on role
:raises: None | Below is the the instruction that describes the task:
### Input:
Return the data for the type (e.g. Asset, Alembic, Camera etc)
:param rt: the :class:`jukeboxcore.reftrack.Reftrack` holds the data
:type rt: :class:`jukeboxcore.reftrack.Reftrack`
:param role: item data role
:type role: QtCore.Qt.ItemDataRole
:returns: data for the type
:rtype: depending on role
:raises: None
### Response:
def reftrack_type_data(rt, role):
    """Return the data for the type (e.g. Asset, Alembic, Camera etc).

    Returns ``None`` implicitly for any other role.

    :param rt: the :class:`jukeboxcore.reftrack.Reftrack` holds the data
    :type rt: :class:`jukeboxcore.reftrack.Reftrack`
    :param role: item data role
    :type role: QtCore.Qt.ItemDataRole
    :returns: data for the type
    :rtype: depending on role
    :raises: None
    """
    # Display and edit both show the plain type name.
    if role in (QtCore.Qt.DisplayRole, QtCore.Qt.EditRole):
        return rt.get_typ()
    if role == QtCore.Qt.DecorationRole:
        return rt.get_typ_icon()
def add_priority(self, name, **attrs):
"""
Add a Priority to the project and returns a :class:`Priority` object.
:param name: name of the :class:`Priority`
:param attrs: optional attributes for :class:`Priority`
"""
return Priorities(self.requester).create(self.id, name, **attrs) | Add a Priority to the project and returns a :class:`Priority` object.
:param name: name of the :class:`Priority`
:param attrs: optional attributes for :class:`Priority` | Below is the the instruction that describes the task:
### Input:
Add a Priority to the project and returns a :class:`Priority` object.
:param name: name of the :class:`Priority`
:param attrs: optional attributes for :class:`Priority`
### Response:
def add_priority(self, name, **attrs):
    """
    Create a :class:`Priority` in this project and return it.

    :param name: name of the :class:`Priority`
    :param attrs: optional attributes for :class:`Priority`
    """
    priorities = Priorities(self.requester)
    return priorities.create(self.id, name, **attrs)
def _init_field(self, setting, field_class, name, code=None):
"""
Initialize a field whether it is built with a custom name for a
specific translation language or not.
"""
kwargs = {
"label": setting["label"] + ":",
"required": setting["type"] in (int, float),
"initial": getattr(settings, name),
"help_text": self.format_help(setting["description"]),
}
if setting["choices"]:
field_class = forms.ChoiceField
kwargs["choices"] = setting["choices"]
field_instance = field_class(**kwargs)
code_name = ('_modeltranslation_' + code if code else '')
self.fields[name + code_name] = field_instance
css_class = field_class.__name__.lower()
field_instance.widget.attrs["class"] = css_class
if code:
field_instance.widget.attrs["class"] += " modeltranslation" | Initialize a field whether it is built with a custom name for a
specific translation language or not. | Below is the the instruction that describes the task:
### Input:
Initialize a field whether it is built with a custom name for a
specific translation language or not.
### Response:
def _init_field(self, setting, field_class, name, code=None):
    """
    Initialize a settings form field, optionally registered under a
    language-specific name for a modeltranslation language ``code``.
    """
    field_kwargs = {
        "label": setting["label"] + ":",
        "required": setting["type"] in (int, float),
        "initial": getattr(settings, name),
        "help_text": self.format_help(setting["description"]),
    }
    if setting["choices"]:
        # Settings with a fixed set of choices always render as a select.
        field_class = forms.ChoiceField
        field_kwargs["choices"] = setting["choices"]
    field = field_class(**field_kwargs)
    suffix = "_modeltranslation_" + code if code else ""
    self.fields[name + suffix] = field
    # CSS class mirrors the (possibly overridden) field class name.
    field.widget.attrs["class"] = field_class.__name__.lower()
    if code:
        field.widget.attrs["class"] += " modeltranslation"
def production_envelope(model, reactions, objective=None, carbon_sources=None,
                        points=20, threshold=None):
    """Calculate the objective value conditioned on all combinations of
    fluxes for a set of chosen reactions
    The production envelope can be used to analyze a model's ability to
    produce a given compound conditional on the fluxes for another set of
    reactions, such as the uptake rates. The model is alternately optimized
    with respect to minimizing and maximizing the objective and the
    obtained fluxes are recorded. Ranges to compute production is set to the
    effective
    bounds, i.e., the minimum / maximum fluxes that can be obtained given
    current reaction bounds.
    Parameters
    ----------
    model : cobra.Model
        The model to compute the production envelope for.
    reactions : list or string
        A list of reactions, reaction identifiers or a single reaction.
    objective : string, dict, model.solver.interface.Objective, optional
        The objective (reaction) to use for the production envelope. Use the
        model's current objective if left missing.
    carbon_sources : list or string, optional
        One or more reactions or reaction identifiers that are the source of
        carbon for computing carbon (mol carbon in output over mol carbon in
        input) and mass yield (gram product over gram output). Only objectives
        with a carbon containing input and output metabolite is supported.
        Will identify active carbon sources in the medium if none are specified.
    points : int, optional
        The number of points to calculate production for.
    threshold : float, optional
        A cut-off under which flux values will be considered to be zero
        (default model.tolerance).
    Returns
    -------
    pandas.DataFrame
        A data frame with one row per evaluated point and
        - reaction id : one column per input reaction indicating the flux at
          each given point,
        - carbon_source: identifiers of carbon exchange reactions
        A column for the maximum and minimum each for the following types:
        - flux: the objective flux
        - carbon_yield: if carbon source is defined and the product is a
          single metabolite (mol carbon product per mol carbon feeding source)
        - mass_yield: if carbon source is defined and the product is a
          single metabolite (gram product per 1 g of feeding source)
    Examples
    --------
    >>> import cobra.test
    >>> from cobra.flux_analysis import production_envelope
    >>> model = cobra.test.create_test_model("textbook")
    >>> production_envelope(model, ["EX_glc__D_e", "EX_o2_e"])
    """
    # Normalize inputs: ids or single reactions become Reaction objects.
    reactions = model.reactions.get_by_any(reactions)
    objective = model.solver.objective if objective is None else objective
    data = dict()
    # Determine the carbon input reactions; auto-detect from the medium
    # when the caller did not specify any.
    if carbon_sources is None:
        c_input = find_carbon_sources(model)
    else:
        c_input = model.reactions.get_by_any(carbon_sources)
    if c_input is None:
        data['carbon_source'] = None
    elif hasattr(c_input, 'id'):
        # A single reaction object rather than a list.
        data['carbon_source'] = c_input.id
    else:
        data['carbon_source'] = ', '.join(rxn.id for rxn in c_input)
    threshold = normalize_cutoff(model, threshold)
    # One row per point of the Cartesian grid over all varied reactions.
    size = points ** len(reactions)
    # Pre-allocate result columns as NaN; they are filled in by add_envelope.
    for direction in ('minimum', 'maximum'):
        data['flux_{}'.format(direction)] = full(size, nan, dtype=float)
        data['carbon_yield_{}'.format(direction)] = full(
            size, nan, dtype=float)
        data['mass_yield_{}'.format(direction)] = full(
            size, nan, dtype=float)
    grid = pd.DataFrame(data)
    # The context reverts objective/bound changes to the model on exit.
    with model:
        model.objective = objective
        objective_reactions = list(sutil.linear_reaction_coefficients(model))
        if len(objective_reactions) != 1:
            raise ValueError('cannot calculate yields for objectives with '
                             'multiple reactions')
        c_output = objective_reactions[0]
        # Effective flux ranges for each varied reaction (FVA at optimum 0).
        min_max = fva(model, reactions, fraction_of_optimum=0)
        # Snap near-zero fluxes to exactly zero.
        min_max[min_max.abs() < threshold] = 0.0
        # NOTE: ``points`` is rebound here from the int to the grid tuples.
        points = list(product(*[
            linspace(min_max.at[rxn.id, "minimum"],
                     min_max.at[rxn.id, "maximum"],
                     points, endpoint=True) for rxn in reactions]))
        tmp = pd.DataFrame(points, columns=[rxn.id for rxn in reactions])
        grid = pd.concat([grid, tmp], axis=1, copy=False)
        add_envelope(model, reactions, grid, c_input, c_output, threshold)
return grid | Calculate the objective value conditioned on all combinations of
fluxes for a set of chosen reactions
The production envelope can be used to analyze a model's ability to
produce a given compound conditional on the fluxes for another set of
reactions, such as the uptake rates. The model is alternately optimized
with respect to minimizing and maximizing the objective and the
obtained fluxes are recorded. Ranges to compute production is set to the
effective
bounds, i.e., the minimum / maximum fluxes that can be obtained given
current reaction bounds.
Parameters
----------
model : cobra.Model
The model to compute the production envelope for.
reactions : list or string
A list of reactions, reaction identifiers or a single reaction.
objective : string, dict, model.solver.interface.Objective, optional
The objective (reaction) to use for the production envelope. Use the
model's current objective if left missing.
carbon_sources : list or string, optional
One or more reactions or reaction identifiers that are the source of
carbon for computing carbon (mol carbon in output over mol carbon in
input) and mass yield (gram product over gram output). Only objectives
with a carbon containing input and output metabolite is supported.
Will identify active carbon sources in the medium if none are specified.
points : int, optional
The number of points to calculate production for.
threshold : float, optional
A cut-off under which flux values will be considered to be zero
(default model.tolerance).
Returns
-------
pandas.DataFrame
A data frame with one row per evaluated point and
- reaction id : one column per input reaction indicating the flux at
each given point,
- carbon_source: identifiers of carbon exchange reactions
A column for the maximum and minimum each for the following types:
- flux: the objective flux
- carbon_yield: if carbon source is defined and the product is a
single metabolite (mol carbon product per mol carbon feeding source)
- mass_yield: if carbon source is defined and the product is a
single metabolite (gram product per 1 g of feeding source)
Examples
--------
>>> import cobra.test
>>> from cobra.flux_analysis import production_envelope
>>> model = cobra.test.create_test_model("textbook")
>>> production_envelope(model, ["EX_glc__D_e", "EX_o2_e"]) | Below is the the instruction that describes the task:
### Input:
Calculate the objective value conditioned on all combinations of
fluxes for a set of chosen reactions
The production envelope can be used to analyze a model's ability to
produce a given compound conditional on the fluxes for another set of
reactions, such as the uptake rates. The model is alternately optimized
with respect to minimizing and maximizing the objective and the
obtained fluxes are recorded. Ranges to compute production is set to the
effective
bounds, i.e., the minimum / maximum fluxes that can be obtained given
current reaction bounds.
Parameters
----------
model : cobra.Model
The model to compute the production envelope for.
reactions : list or string
A list of reactions, reaction identifiers or a single reaction.
objective : string, dict, model.solver.interface.Objective, optional
The objective (reaction) to use for the production envelope. Use the
model's current objective if left missing.
carbon_sources : list or string, optional
One or more reactions or reaction identifiers that are the source of
carbon for computing carbon (mol carbon in output over mol carbon in
input) and mass yield (gram product over gram output). Only objectives
with a carbon containing input and output metabolite is supported.
Will identify active carbon sources in the medium if none are specified.
points : int, optional
The number of points to calculate production for.
threshold : float, optional
A cut-off under which flux values will be considered to be zero
(default model.tolerance).
Returns
-------
pandas.DataFrame
A data frame with one row per evaluated point and
- reaction id : one column per input reaction indicating the flux at
each given point,
- carbon_source: identifiers of carbon exchange reactions
A column for the maximum and minimum each for the following types:
- flux: the objective flux
- carbon_yield: if carbon source is defined and the product is a
single metabolite (mol carbon product per mol carbon feeding source)
- mass_yield: if carbon source is defined and the product is a
single metabolite (gram product per 1 g of feeding source)
Examples
--------
>>> import cobra.test
>>> from cobra.flux_analysis import production_envelope
>>> model = cobra.test.create_test_model("textbook")
>>> production_envelope(model, ["EX_glc__D_e", "EX_o2_e"])
### Response:
def production_envelope(model, reactions, objective=None, carbon_sources=None,
points=20, threshold=None):
"""Calculate the objective value conditioned on all combinations of
fluxes for a set of chosen reactions
The production envelope can be used to analyze a model's ability to
produce a given compound conditional on the fluxes for another set of
reactions, such as the uptake rates. The model is alternately optimized
with respect to minimizing and maximizing the objective and the
obtained fluxes are recorded. Ranges to compute production is set to the
effective
bounds, i.e., the minimum / maximum fluxes that can be obtained given
current reaction bounds.
Parameters
----------
model : cobra.Model
The model to compute the production envelope for.
reactions : list or string
A list of reactions, reaction identifiers or a single reaction.
objective : string, dict, model.solver.interface.Objective, optional
The objective (reaction) to use for the production envelope. Use the
model's current objective if left missing.
carbon_sources : list or string, optional
One or more reactions or reaction identifiers that are the source of
carbon for computing carbon (mol carbon in output over mol carbon in
input) and mass yield (gram product over gram output). Only objectives
with a carbon containing input and output metabolite is supported.
Will identify active carbon sources in the medium if none are specified.
points : int, optional
The number of points to calculate production for.
threshold : float, optional
A cut-off under which flux values will be considered to be zero
(default model.tolerance).
Returns
-------
pandas.DataFrame
A data frame with one row per evaluated point and
- reaction id : one column per input reaction indicating the flux at
each given point,
- carbon_source: identifiers of carbon exchange reactions
A column for the maximum and minimum each for the following types:
- flux: the objective flux
- carbon_yield: if carbon source is defined and the product is a
single metabolite (mol carbon product per mol carbon feeding source)
- mass_yield: if carbon source is defined and the product is a
single metabolite (gram product per 1 g of feeding source)
Examples
--------
>>> import cobra.test
>>> from cobra.flux_analysis import production_envelope
>>> model = cobra.test.create_test_model("textbook")
>>> production_envelope(model, ["EX_glc__D_e", "EX_o2_e"])
"""
reactions = model.reactions.get_by_any(reactions)
objective = model.solver.objective if objective is None else objective
data = dict()
if carbon_sources is None:
c_input = find_carbon_sources(model)
else:
c_input = model.reactions.get_by_any(carbon_sources)
if c_input is None:
data['carbon_source'] = None
elif hasattr(c_input, 'id'):
data['carbon_source'] = c_input.id
else:
data['carbon_source'] = ', '.join(rxn.id for rxn in c_input)
threshold = normalize_cutoff(model, threshold)
size = points ** len(reactions)
for direction in ('minimum', 'maximum'):
data['flux_{}'.format(direction)] = full(size, nan, dtype=float)
data['carbon_yield_{}'.format(direction)] = full(
size, nan, dtype=float)
data['mass_yield_{}'.format(direction)] = full(
size, nan, dtype=float)
grid = pd.DataFrame(data)
with model:
model.objective = objective
objective_reactions = list(sutil.linear_reaction_coefficients(model))
if len(objective_reactions) != 1:
raise ValueError('cannot calculate yields for objectives with '
'multiple reactions')
c_output = objective_reactions[0]
min_max = fva(model, reactions, fraction_of_optimum=0)
min_max[min_max.abs() < threshold] = 0.0
points = list(product(*[
linspace(min_max.at[rxn.id, "minimum"],
min_max.at[rxn.id, "maximum"],
points, endpoint=True) for rxn in reactions]))
tmp = pd.DataFrame(points, columns=[rxn.id for rxn in reactions])
grid = pd.concat([grid, tmp], axis=1, copy=False)
add_envelope(model, reactions, grid, c_input, c_output, threshold)
return grid |
def cat(expr, others=None, sep=None, na_rep=None):
"""
Concatenate strings in sequence with given separator
:param expr:
:param others: other sequences
:param sep: string or None, default None
:param na_rep: string or None default None, if None, NA in the sequence are ignored
:return:
"""
if others is not None:
from .strings import _cat as cat_str
return cat_str(expr, others, sep=sep, na_rep=na_rep)
return _cat(expr, sep=sep, na_rep=na_rep) | Concatenate strings in sequence with given separator
:param expr:
:param others: other sequences
:param sep: string or None, default None
:param na_rep: string or None default None, if None, NA in the sequence are ignored
:return: | Below is the instruction that describes the task:
### Input:
Concatenate strings in sequence with given separator
:param expr:
:param others: other sequences
:param sep: string or None, default None
:param na_rep: string or None default None, if None, NA in the sequence are ignored
:return:
### Response:
def cat(expr, others=None, sep=None, na_rep=None):
"""
Concatenate strings in sequence with given separator
:param expr:
:param others: other sequences
:param sep: string or None, default None
:param na_rep: string or None default None, if None, NA in the sequence are ignored
:return:
"""
if others is not None:
from .strings import _cat as cat_str
return cat_str(expr, others, sep=sep, na_rep=na_rep)
return _cat(expr, sep=sep, na_rep=na_rep) |
def get_stabilized_reciprocal_mesh(mesh,
rotations,
is_shift=None,
is_time_reversal=True,
qpoints=None,
is_dense=False):
"""Return k-point map to the irreducible k-points and k-point grid points.
The symmetry is searched from the input rotation matrices in real space.
Parameters
----------
mesh : array_like
Uniform sampling mesh numbers.
dtype='intc', shape=(3,)
rotations : array_like
Rotation matrices with respect to real space basis vectors.
dtype='intc', shape=(rotations, 3)
is_shift : array_like
[0, 0, 0] gives Gamma center mesh and value 1 gives half mesh shift.
dtype='intc', shape=(3,)
is_time_reversal : bool
Time reversal symmetry is included or not.
qpoints : array_like
q-points used as stabilizer(s) given in reciprocal space with respect
to reciprocal basis vectors.
dtype='double', shape=(qpoints ,3) or (3,)
is_dense : bool, optional
grid_mapping_table is returned with dtype='uintp' if True. Otherwise
its dtype='intc'. Default is False.
Returns
-------
grid_mapping_table : ndarray
Grid point mapping table to ir-gird-points.
dtype='intc', shape=(prod(mesh),)
grid_address : ndarray
Address of all grid points. Each address is given by three unsigned
integers.
dtype='intc', shape=(prod(mesh), 3)
"""
_set_no_error()
if is_dense:
dtype = 'uintp'
else:
dtype = 'intc'
mapping_table = np.zeros(np.prod(mesh), dtype=dtype)
grid_address = np.zeros((np.prod(mesh), 3), dtype='intc')
if is_shift is None:
is_shift = [0, 0, 0]
if qpoints is None:
qpoints = np.array([[0, 0, 0]], dtype='double', order='C')
else:
qpoints = np.array(qpoints, dtype='double', order='C')
if qpoints.shape == (3,):
qpoints = np.array([qpoints], dtype='double', order='C')
if spg.stabilized_reciprocal_mesh(
grid_address,
mapping_table,
np.array(mesh, dtype='intc'),
np.array(is_shift, dtype='intc'),
is_time_reversal * 1,
np.array(rotations, dtype='intc', order='C'),
qpoints) > 0:
return mapping_table, grid_address
else:
return None | Return k-point map to the irreducible k-points and k-point grid points.
The symmetry is searched from the input rotation matrices in real space.
Parameters
----------
mesh : array_like
Uniform sampling mesh numbers.
dtype='intc', shape=(3,)
rotations : array_like
Rotation matrices with respect to real space basis vectors.
dtype='intc', shape=(rotations, 3)
is_shift : array_like
[0, 0, 0] gives Gamma center mesh and value 1 gives half mesh shift.
dtype='intc', shape=(3,)
is_time_reversal : bool
Time reversal symmetry is included or not.
qpoints : array_like
q-points used as stabilizer(s) given in reciprocal space with respect
to reciprocal basis vectors.
dtype='double', shape=(qpoints ,3) or (3,)
is_dense : bool, optional
grid_mapping_table is returned with dtype='uintp' if True. Otherwise
its dtype='intc'. Default is False.
Returns
-------
grid_mapping_table : ndarray
Grid point mapping table to ir-gird-points.
dtype='intc', shape=(prod(mesh),)
grid_address : ndarray
Address of all grid points. Each address is given by three unsigned
integers.
dtype='intc', shape=(prod(mesh), 3) | Below is the instruction that describes the task:
### Input:
Return k-point map to the irreducible k-points and k-point grid points.
The symmetry is searched from the input rotation matrices in real space.
Parameters
----------
mesh : array_like
Uniform sampling mesh numbers.
dtype='intc', shape=(3,)
rotations : array_like
Rotation matrices with respect to real space basis vectors.
dtype='intc', shape=(rotations, 3)
is_shift : array_like
[0, 0, 0] gives Gamma center mesh and value 1 gives half mesh shift.
dtype='intc', shape=(3,)
is_time_reversal : bool
Time reversal symmetry is included or not.
qpoints : array_like
q-points used as stabilizer(s) given in reciprocal space with respect
to reciprocal basis vectors.
dtype='double', shape=(qpoints ,3) or (3,)
is_dense : bool, optional
grid_mapping_table is returned with dtype='uintp' if True. Otherwise
its dtype='intc'. Default is False.
Returns
-------
grid_mapping_table : ndarray
Grid point mapping table to ir-gird-points.
dtype='intc', shape=(prod(mesh),)
grid_address : ndarray
Address of all grid points. Each address is given by three unsigned
integers.
dtype='intc', shape=(prod(mesh), 3)
### Response:
def get_stabilized_reciprocal_mesh(mesh,
rotations,
is_shift=None,
is_time_reversal=True,
qpoints=None,
is_dense=False):
"""Return k-point map to the irreducible k-points and k-point grid points.
The symmetry is searched from the input rotation matrices in real space.
Parameters
----------
mesh : array_like
Uniform sampling mesh numbers.
dtype='intc', shape=(3,)
rotations : array_like
Rotation matrices with respect to real space basis vectors.
dtype='intc', shape=(rotations, 3)
is_shift : array_like
[0, 0, 0] gives Gamma center mesh and value 1 gives half mesh shift.
dtype='intc', shape=(3,)
is_time_reversal : bool
Time reversal symmetry is included or not.
qpoints : array_like
q-points used as stabilizer(s) given in reciprocal space with respect
to reciprocal basis vectors.
dtype='double', shape=(qpoints ,3) or (3,)
is_dense : bool, optional
grid_mapping_table is returned with dtype='uintp' if True. Otherwise
its dtype='intc'. Default is False.
Returns
-------
grid_mapping_table : ndarray
Grid point mapping table to ir-gird-points.
dtype='intc', shape=(prod(mesh),)
grid_address : ndarray
Address of all grid points. Each address is given by three unsigned
integers.
dtype='intc', shape=(prod(mesh), 3)
"""
_set_no_error()
if is_dense:
dtype = 'uintp'
else:
dtype = 'intc'
mapping_table = np.zeros(np.prod(mesh), dtype=dtype)
grid_address = np.zeros((np.prod(mesh), 3), dtype='intc')
if is_shift is None:
is_shift = [0, 0, 0]
if qpoints is None:
qpoints = np.array([[0, 0, 0]], dtype='double', order='C')
else:
qpoints = np.array(qpoints, dtype='double', order='C')
if qpoints.shape == (3,):
qpoints = np.array([qpoints], dtype='double', order='C')
if spg.stabilized_reciprocal_mesh(
grid_address,
mapping_table,
np.array(mesh, dtype='intc'),
np.array(is_shift, dtype='intc'),
is_time_reversal * 1,
np.array(rotations, dtype='intc', order='C'),
qpoints) > 0:
return mapping_table, grid_address
else:
return None |
def create_scene():
"""
Create a scene with a Fuze bottle, some cubes, and an axis.
Returns
----------
scene : trimesh.Scene
Object with geometry
"""
scene = trimesh.Scene()
# plane
geom = trimesh.creation.box((0.5, 0.5, 0.01))
geom.apply_translation((0, 0, -0.005))
geom.visual.face_colors = (.6, .6, .6)
scene.add_geometry(geom)
# axis
geom = trimesh.creation.axis(0.02)
scene.add_geometry(geom)
box_size = 0.1
# box1
geom = trimesh.creation.box((box_size,) * 3)
geom.visual.face_colors = np.random.uniform(
0, 1, (len(geom.faces), 3))
transform = tf.translation_matrix([0.1, 0.1, box_size / 2])
scene.add_geometry(geom, transform=transform)
# box2
geom = trimesh.creation.box((box_size,) * 3)
geom.visual.face_colors = np.random.uniform(
0, 1, (len(geom.faces), 3))
transform = tf.translation_matrix([-0.1, 0.1, box_size / 2])
scene.add_geometry(geom, transform=transform)
# fuze
geom = trimesh.load(str(here / '../models/fuze.obj'))
transform = tf.translation_matrix([-0.1, -0.1, 0])
scene.add_geometry(geom, transform=transform)
# sphere
geom = trimesh.creation.icosphere(radius=0.05)
geom.visual.face_colors = np.random.uniform(
0, 1, (len(geom.faces), 3))
transform = tf.translation_matrix([0.1, -0.1, box_size / 2])
scene.add_geometry(geom, transform=transform)
return scene | Create a scene with a Fuze bottle, some cubes, and an axis.
Returns
----------
scene : trimesh.Scene
Object with geometry | Below is the instruction that describes the task:
### Input:
Create a scene with a Fuze bottle, some cubes, and an axis.
Returns
----------
scene : trimesh.Scene
Object with geometry
### Response:
def create_scene():
"""
Create a scene with a Fuze bottle, some cubes, and an axis.
Returns
----------
scene : trimesh.Scene
Object with geometry
"""
scene = trimesh.Scene()
# plane
geom = trimesh.creation.box((0.5, 0.5, 0.01))
geom.apply_translation((0, 0, -0.005))
geom.visual.face_colors = (.6, .6, .6)
scene.add_geometry(geom)
# axis
geom = trimesh.creation.axis(0.02)
scene.add_geometry(geom)
box_size = 0.1
# box1
geom = trimesh.creation.box((box_size,) * 3)
geom.visual.face_colors = np.random.uniform(
0, 1, (len(geom.faces), 3))
transform = tf.translation_matrix([0.1, 0.1, box_size / 2])
scene.add_geometry(geom, transform=transform)
# box2
geom = trimesh.creation.box((box_size,) * 3)
geom.visual.face_colors = np.random.uniform(
0, 1, (len(geom.faces), 3))
transform = tf.translation_matrix([-0.1, 0.1, box_size / 2])
scene.add_geometry(geom, transform=transform)
# fuze
geom = trimesh.load(str(here / '../models/fuze.obj'))
transform = tf.translation_matrix([-0.1, -0.1, 0])
scene.add_geometry(geom, transform=transform)
# sphere
geom = trimesh.creation.icosphere(radius=0.05)
geom.visual.face_colors = np.random.uniform(
0, 1, (len(geom.faces), 3))
transform = tf.translation_matrix([0.1, -0.1, box_size / 2])
scene.add_geometry(geom, transform=transform)
return scene |
def read_xml(self, file_configuration):
"""parses C++ code, defined on the file_configurations and returns
GCCXML generated file content"""
xml_file_path = None
delete_xml_file = True
fc = file_configuration
reader = source_reader.source_reader_t(
self.__config,
None,
self.__decl_factory)
try:
if fc.content_type == fc.CONTENT_TYPE.STANDARD_SOURCE_FILE:
self.logger.info('Parsing source file "%s" ... ', fc.data)
xml_file_path = reader.create_xml_file(fc.data)
elif fc.content_type == \
file_configuration_t.CONTENT_TYPE.GCCXML_GENERATED_FILE:
self.logger.info('Parsing xml file "%s" ... ', fc.data)
xml_file_path = fc.data
delete_xml_file = False
elif fc.content_type == fc.CONTENT_TYPE.CACHED_SOURCE_FILE:
# TODO: raise error when header file does not exist
if not os.path.exists(fc.cached_source_file):
dir_ = os.path.split(fc.cached_source_file)[0]
if dir_ and not os.path.exists(dir_):
os.makedirs(dir_)
self.logger.info(
'Creating xml file "%s" from source file "%s" ... ',
fc.cached_source_file, fc.data)
xml_file_path = reader.create_xml_file(
fc.data,
fc.cached_source_file)
else:
xml_file_path = fc.cached_source_file
else:
xml_file_path = reader.create_xml_file_from_string(fc.data)
with open(xml_file_path, "r") as xml_file:
xml = xml_file.read()
utils.remove_file_no_raise(xml_file_path, self.__config)
self.__xml_generator_from_xml_file = \
reader.xml_generator_from_xml_file
return xml
finally:
if xml_file_path and delete_xml_file:
utils.remove_file_no_raise(xml_file_path, self.__config) | parses C++ code, defined on the file_configurations and returns
GCCXML generated file content | Below is the instruction that describes the task:
### Input:
parses C++ code, defined on the file_configurations and returns
GCCXML generated file content
### Response:
def read_xml(self, file_configuration):
"""parses C++ code, defined on the file_configurations and returns
GCCXML generated file content"""
xml_file_path = None
delete_xml_file = True
fc = file_configuration
reader = source_reader.source_reader_t(
self.__config,
None,
self.__decl_factory)
try:
if fc.content_type == fc.CONTENT_TYPE.STANDARD_SOURCE_FILE:
self.logger.info('Parsing source file "%s" ... ', fc.data)
xml_file_path = reader.create_xml_file(fc.data)
elif fc.content_type == \
file_configuration_t.CONTENT_TYPE.GCCXML_GENERATED_FILE:
self.logger.info('Parsing xml file "%s" ... ', fc.data)
xml_file_path = fc.data
delete_xml_file = False
elif fc.content_type == fc.CONTENT_TYPE.CACHED_SOURCE_FILE:
# TODO: raise error when header file does not exist
if not os.path.exists(fc.cached_source_file):
dir_ = os.path.split(fc.cached_source_file)[0]
if dir_ and not os.path.exists(dir_):
os.makedirs(dir_)
self.logger.info(
'Creating xml file "%s" from source file "%s" ... ',
fc.cached_source_file, fc.data)
xml_file_path = reader.create_xml_file(
fc.data,
fc.cached_source_file)
else:
xml_file_path = fc.cached_source_file
else:
xml_file_path = reader.create_xml_file_from_string(fc.data)
with open(xml_file_path, "r") as xml_file:
xml = xml_file.read()
utils.remove_file_no_raise(xml_file_path, self.__config)
self.__xml_generator_from_xml_file = \
reader.xml_generator_from_xml_file
return xml
finally:
if xml_file_path and delete_xml_file:
utils.remove_file_no_raise(xml_file_path, self.__config) |
def repo_data(PACKAGES_TXT, repo, flag):
"""Grap data packages
"""
(name, location, size, unsize,
rname, rlocation, rsize, runsize) = ([] for i in range(8))
for line in PACKAGES_TXT.splitlines():
if _meta_.rsl_deps in ["on", "ON"] and "--resolve-off" not in flag:
status(0.000005)
if line.startswith("PACKAGE NAME:"):
name.append(line[15:].strip())
if line.startswith("PACKAGE LOCATION:"):
location.append(line[21:].strip())
if line.startswith("PACKAGE SIZE (compressed):"):
size.append(line[28:-2].strip())
if line.startswith("PACKAGE SIZE (uncompressed):"):
unsize.append(line[30:-2].strip())
if repo == "rlw":
(rname,
rlocation,
rsize,
runsize
) = rlw_filter(name, location, size, unsize)
elif repo == "alien":
(rname,
rlocation,
rsize,
runsize
) = alien_filter(name, location, size, unsize)
elif repo == "rested":
(rname,
rlocation,
rsize,
runsize
) = rested_filter(name, location, size, unsize)
elif repo == "ktown":
(rname,
rlocation,
rsize,
runsize
) = ktown_filter(name, location, size, unsize)
else:
rname, rlocation, rsize, runsize = name, location, size, unsize
return [rname, rlocation, rsize, runsize] | Grap data packages | Below is the instruction that describes the task:
### Input:
Grap data packages
### Response:
def repo_data(PACKAGES_TXT, repo, flag):
"""Grap data packages
"""
(name, location, size, unsize,
rname, rlocation, rsize, runsize) = ([] for i in range(8))
for line in PACKAGES_TXT.splitlines():
if _meta_.rsl_deps in ["on", "ON"] and "--resolve-off" not in flag:
status(0.000005)
if line.startswith("PACKAGE NAME:"):
name.append(line[15:].strip())
if line.startswith("PACKAGE LOCATION:"):
location.append(line[21:].strip())
if line.startswith("PACKAGE SIZE (compressed):"):
size.append(line[28:-2].strip())
if line.startswith("PACKAGE SIZE (uncompressed):"):
unsize.append(line[30:-2].strip())
if repo == "rlw":
(rname,
rlocation,
rsize,
runsize
) = rlw_filter(name, location, size, unsize)
elif repo == "alien":
(rname,
rlocation,
rsize,
runsize
) = alien_filter(name, location, size, unsize)
elif repo == "rested":
(rname,
rlocation,
rsize,
runsize
) = rested_filter(name, location, size, unsize)
elif repo == "ktown":
(rname,
rlocation,
rsize,
runsize
) = ktown_filter(name, location, size, unsize)
else:
rname, rlocation, rsize, runsize = name, location, size, unsize
return [rname, rlocation, rsize, runsize] |
def parse_departures(self, xml):
"""
Parse the NS API xml result into Departure objects
@param xml: raw XML result from the NS API
"""
obj = xmltodict.parse(xml)
departures = []
for departure in obj['ActueleVertrekTijden']['VertrekkendeTrein']:
newdep = Departure(departure)
departures.append(newdep)
#print('-- dep --')
#print(newdep.__dict__)
#print(newdep.to_json())
print(newdep.delay)
return departures | Parse the NS API xml result into Departure objects
@param xml: raw XML result from the NS API | Below is the instruction that describes the task:
### Input:
Parse the NS API xml result into Departure objects
@param xml: raw XML result from the NS API
### Response:
def parse_departures(self, xml):
"""
Parse the NS API xml result into Departure objects
@param xml: raw XML result from the NS API
"""
obj = xmltodict.parse(xml)
departures = []
for departure in obj['ActueleVertrekTijden']['VertrekkendeTrein']:
newdep = Departure(departure)
departures.append(newdep)
#print('-- dep --')
#print(newdep.__dict__)
#print(newdep.to_json())
print(newdep.delay)
return departures |
def GenFwdState(self):
"""Generates forwarding state rules.
The lexer will fast forward until there is string content. The
string content will be returned to the string processor.
"""
for c in self.cont:
self._AddToken("FWD", c, None, None)
for s in self.sep:
self._AddToken("FWD", s, None, None)
self._AddToken("FWD", ".", "PushBack,PopState", None) | Generates forwarding state rules.
The lexer will fast forward until there is string content. The
string content will be returned to the string processor. | Below is the instruction that describes the task:
### Input:
Generates forwarding state rules.
The lexer will fast forward until there is string content. The
string content will be returned to the string processor.
### Response:
def GenFwdState(self):
"""Generates forwarding state rules.
The lexer will fast forward until there is string content. The
string content will be returned to the string processor.
"""
for c in self.cont:
self._AddToken("FWD", c, None, None)
for s in self.sep:
self._AddToken("FWD", s, None, None)
self._AddToken("FWD", ".", "PushBack,PopState", None) |
def read(self, entity=None, attrs=None, ignore=None, params=None):
"""Do extra work to fetch a complete set of attributes for this entity.
For more information, see `Bugzilla #1223540
<https://bugzilla.redhat.com/show_bug.cgi?id=1223540>`_.
Also, do not try to read the "password" field. No value is returned for
the field, for obvious reasons.
"""
if attrs is None:
attrs = self.read_json()
if ignore is None:
ignore = set()
ignore.add('password')
if 'email' not in attrs and 'email' not in ignore:
response = client.put(
self.path('self'),
{},
**self._server_config.get_client_kwargs()
)
response.raise_for_status()
attrs['email'] = response.json().get('email')
return super(DockerComputeResource, self).read(
entity, attrs, ignore, params) | Do extra work to fetch a complete set of attributes for this entity.
For more information, see `Bugzilla #1223540
<https://bugzilla.redhat.com/show_bug.cgi?id=1223540>`_.
Also, do not try to read the "password" field. No value is returned for
the field, for obvious reasons. | Below is the instruction that describes the task:
### Input:
Do extra work to fetch a complete set of attributes for this entity.
For more information, see `Bugzilla #1223540
<https://bugzilla.redhat.com/show_bug.cgi?id=1223540>`_.
Also, do not try to read the "password" field. No value is returned for
the field, for obvious reasons.
### Response:
def read(self, entity=None, attrs=None, ignore=None, params=None):
"""Do extra work to fetch a complete set of attributes for this entity.
For more information, see `Bugzilla #1223540
<https://bugzilla.redhat.com/show_bug.cgi?id=1223540>`_.
Also, do not try to read the "password" field. No value is returned for
the field, for obvious reasons.
"""
if attrs is None:
attrs = self.read_json()
if ignore is None:
ignore = set()
ignore.add('password')
if 'email' not in attrs and 'email' not in ignore:
response = client.put(
self.path('self'),
{},
**self._server_config.get_client_kwargs()
)
response.raise_for_status()
attrs['email'] = response.json().get('email')
return super(DockerComputeResource, self).read(
entity, attrs, ignore, params) |
def _StopStatusUpdateThread(self):
"""Stops the status update thread."""
self._status_update_active = False
if self._status_update_thread.isAlive():
self._status_update_thread.join()
self._status_update_thread = None | Stops the status update thread. | Below is the the instruction that describes the task:
### Input:
Stops the status update thread.
### Response:
def _StopStatusUpdateThread(self):
"""Stops the status update thread."""
self._status_update_active = False
if self._status_update_thread.isAlive():
self._status_update_thread.join()
self._status_update_thread = None |
def get_resolved_jars_for_coordinates(self, coordinates, memo=None):
"""Collects jars for the passed coordinates.
Because artifacts are only fetched for the "winning" version of a module, the artifacts
will not always represent the version originally declared by the library.
This method is transitive within the passed coordinates dependencies.
:param coordinates collections.Iterable: Collection of coordinates to collect transitive
resolved jars for.
:param memo: See `traverse_dependency_graph`.
:returns: All the artifacts for all of the jars for the provided coordinates,
including transitive dependencies.
:rtype: list of :class:`pants.java.jar.ResolvedJar`
"""
def to_resolved_jar(jar_ref, jar_path):
return ResolvedJar(coordinate=M2Coordinate(org=jar_ref.org,
name=jar_ref.name,
rev=jar_ref.rev,
classifier=jar_ref.classifier,
ext=jar_ref.ext),
cache_path=jar_path)
resolved_jars = OrderedSet()
def create_collection(dep):
return OrderedSet([dep])
for jar in coordinates:
classifier = jar.classifier if self._conf == 'default' else self._conf
jar_module_ref = IvyModuleRef(jar.org, jar.name, jar.rev, classifier, jar.ext)
for module_ref in self.traverse_dependency_graph(jar_module_ref, create_collection, memo):
for artifact_path in self._artifacts_by_ref[module_ref.unversioned]:
resolved_jars.add(to_resolved_jar(module_ref, artifact_path))
return resolved_jars | Collects jars for the passed coordinates.
Because artifacts are only fetched for the "winning" version of a module, the artifacts
will not always represent the version originally declared by the library.
This method is transitive within the passed coordinates dependencies.
:param coordinates collections.Iterable: Collection of coordinates to collect transitive
resolved jars for.
:param memo: See `traverse_dependency_graph`.
:returns: All the artifacts for all of the jars for the provided coordinates,
including transitive dependencies.
:rtype: list of :class:`pants.java.jar.ResolvedJar` | Below is the instruction that describes the task:
### Input:
Collects jars for the passed coordinates.
Because artifacts are only fetched for the "winning" version of a module, the artifacts
will not always represent the version originally declared by the library.
This method is transitive within the passed coordinates dependencies.
:param coordinates collections.Iterable: Collection of coordinates to collect transitive
resolved jars for.
:param memo: See `traverse_dependency_graph`.
:returns: All the artifacts for all of the jars for the provided coordinates,
including transitive dependencies.
:rtype: list of :class:`pants.java.jar.ResolvedJar`
### Response:
def get_resolved_jars_for_coordinates(self, coordinates, memo=None):
"""Collects jars for the passed coordinates.
Because artifacts are only fetched for the "winning" version of a module, the artifacts
will not always represent the version originally declared by the library.
This method is transitive within the passed coordinates dependencies.
:param coordinates collections.Iterable: Collection of coordinates to collect transitive
resolved jars for.
:param memo: See `traverse_dependency_graph`.
:returns: All the artifacts for all of the jars for the provided coordinates,
including transitive dependencies.
:rtype: list of :class:`pants.java.jar.ResolvedJar`
"""
def to_resolved_jar(jar_ref, jar_path):
return ResolvedJar(coordinate=M2Coordinate(org=jar_ref.org,
name=jar_ref.name,
rev=jar_ref.rev,
classifier=jar_ref.classifier,
ext=jar_ref.ext),
cache_path=jar_path)
resolved_jars = OrderedSet()
def create_collection(dep):
return OrderedSet([dep])
for jar in coordinates:
classifier = jar.classifier if self._conf == 'default' else self._conf
jar_module_ref = IvyModuleRef(jar.org, jar.name, jar.rev, classifier, jar.ext)
for module_ref in self.traverse_dependency_graph(jar_module_ref, create_collection, memo):
for artifact_path in self._artifacts_by_ref[module_ref.unversioned]:
resolved_jars.add(to_resolved_jar(module_ref, artifact_path))
return resolved_jars |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.