code stringlengths 75 104k | docstring stringlengths 1 46.9k | text stringlengths 164 112k |
|---|---|---|
def queryByPortSensor(portiaConfig, edgeId, port, sensor, last=False, params={ 'from': None, 'to': None, 'order': None, 'precision': 'ms', 'limit': None }):
"""Returns a pandas data frame with the portia select resultset"""
header = {'Accept': 'text/csv'}
if last == False:
endpoint = '/select/device/{0}/port/{1}/sensor/{2}{3}'.format( edgeId, port, sensor, utils.buildGetParams(params) )
else:
endpoint = '/select/device/{0}/port/{1}/sensor/{2}/last{3}'.format( edgeId, port, sensor, utils.buildGetParams(params) )
response = utils.httpGetRequest(portiaConfig, endpoint, header)
if response.status_code == 200:
try:
dimensionSeries = pandas.read_csv( StringIO(response.text), sep=';' )
if portiaConfig['debug']:
print( '[portia-debug]: {0} rows'.format( len(dimensionSeries.index) ) )
return dimensionSeries
except:
raise Exception('couldn\'t create pandas data frame')
else:
        raise Exception('couldn\'t retrieve data') | Returns a pandas data frame with the portia select resultset | Below is the instruction that describes the task:
### Input:
Returns a pandas data frame with the portia select resultset
### Response:
def queryByPortSensor(portiaConfig, edgeId, port, sensor, last=False, params={ 'from': None, 'to': None, 'order': None, 'precision': 'ms', 'limit': None }):
"""Returns a pandas data frame with the portia select resultset"""
header = {'Accept': 'text/csv'}
if last == False:
endpoint = '/select/device/{0}/port/{1}/sensor/{2}{3}'.format( edgeId, port, sensor, utils.buildGetParams(params) )
else:
endpoint = '/select/device/{0}/port/{1}/sensor/{2}/last{3}'.format( edgeId, port, sensor, utils.buildGetParams(params) )
response = utils.httpGetRequest(portiaConfig, endpoint, header)
if response.status_code == 200:
try:
dimensionSeries = pandas.read_csv( StringIO(response.text), sep=';' )
if portiaConfig['debug']:
print( '[portia-debug]: {0} rows'.format( len(dimensionSeries.index) ) )
return dimensionSeries
except:
raise Exception('couldn\'t create pandas data frame')
else:
raise Exception('couldn\'t retrieve data') |
def get_xml_root(xml_path):
"""Load and parse an xml by given xml_path and return its root.
:param xml_path: URL to a xml file
:type xml_path: str
:return: xml root
"""
r = requests.get(xml_path)
root = ET.fromstring(r.content)
return root | Load and parse an xml by given xml_path and return its root.
:param xml_path: URL to a xml file
:type xml_path: str
:return: xml root | Below is the instruction that describes the task:
### Input:
Load and parse an xml by given xml_path and return its root.
:param xml_path: URL to a xml file
:type xml_path: str
:return: xml root
### Response:
def get_xml_root(xml_path):
"""Load and parse an xml by given xml_path and return its root.
:param xml_path: URL to a xml file
:type xml_path: str
:return: xml root
"""
r = requests.get(xml_path)
root = ET.fromstring(r.content)
return root |
def chain_check(cls, timestamp: int) -> bool:
"""
Given a record timestamp, verify the chain integrity.
:param timestamp: UNIX time / POSIX time / Epoch time
:return: 'True' if the timestamp fits the chain. 'False' otherwise.
"""
# Creation is messy.
# You want genius, you get madness; two sides of the same coin.
# ... I'm sure this can be cleaned up. However, let's test it first.
record = cls.get_record(timestamp)
if isinstance(record, NistBeaconValue) is False:
# Don't you dare try to play me
return False
prev_record = cls.get_previous(record.timestamp)
next_record = cls.get_next(record.timestamp)
if prev_record is None and next_record is None:
# Uh, how did you manage to do this?
# I'm not even mad, that's amazing.
return False
if (
isinstance(prev_record, NistBeaconValue) and
isinstance(next_record, NistBeaconValue)
):
# Majority case, somewhere in the middle of the chain
# True if:
# - All three records have proper signatures
# - The requested record's previous output equals previous
# - The next possible record's previous output equals the record
return (
record.valid_signature and
prev_record.valid_signature and
next_record.valid_signature and
record.previous_output_value == prev_record.output_value and
next_record.previous_output_value == record.output_value
)
if (
prev_record is None and
isinstance(next_record, NistBeaconValue)
):
# Edge case, this was potentially the first record of all time
return (
record.valid_signature and
next_record.valid_signature and
cls._INIT_RECORD == record and
next_record.previous_output_value == record.output_value
)
if (
isinstance(prev_record, NistBeaconValue) and
next_record is None
):
# Edge case, this was potentially the latest and greatest
return (
record.valid_signature and
prev_record.valid_signature and
record.previous_output_value == prev_record.output_value
) | Given a record timestamp, verify the chain integrity.
:param timestamp: UNIX time / POSIX time / Epoch time
:return: 'True' if the timestamp fits the chain. 'False' otherwise. | Below is the instruction that describes the task:
### Input:
Given a record timestamp, verify the chain integrity.
:param timestamp: UNIX time / POSIX time / Epoch time
:return: 'True' if the timestamp fits the chain. 'False' otherwise.
### Response:
def chain_check(cls, timestamp: int) -> bool:
"""
Given a record timestamp, verify the chain integrity.
:param timestamp: UNIX time / POSIX time / Epoch time
:return: 'True' if the timestamp fits the chain. 'False' otherwise.
"""
# Creation is messy.
# You want genius, you get madness; two sides of the same coin.
# ... I'm sure this can be cleaned up. However, let's test it first.
record = cls.get_record(timestamp)
if isinstance(record, NistBeaconValue) is False:
# Don't you dare try to play me
return False
prev_record = cls.get_previous(record.timestamp)
next_record = cls.get_next(record.timestamp)
if prev_record is None and next_record is None:
# Uh, how did you manage to do this?
# I'm not even mad, that's amazing.
return False
if (
isinstance(prev_record, NistBeaconValue) and
isinstance(next_record, NistBeaconValue)
):
# Majority case, somewhere in the middle of the chain
# True if:
# - All three records have proper signatures
# - The requested record's previous output equals previous
# - The next possible record's previous output equals the record
return (
record.valid_signature and
prev_record.valid_signature and
next_record.valid_signature and
record.previous_output_value == prev_record.output_value and
next_record.previous_output_value == record.output_value
)
if (
prev_record is None and
isinstance(next_record, NistBeaconValue)
):
# Edge case, this was potentially the first record of all time
return (
record.valid_signature and
next_record.valid_signature and
cls._INIT_RECORD == record and
next_record.previous_output_value == record.output_value
)
if (
isinstance(prev_record, NistBeaconValue) and
next_record is None
):
# Edge case, this was potentially the latest and greatest
return (
record.valid_signature and
prev_record.valid_signature and
record.previous_output_value == prev_record.output_value
) |
def unload_plugin(name, category=None):
""" remove single plugin
Parameters
----------
name : str
plugin name
category : str
plugin category
Examples
--------
>>> from pprint import pprint
>>> pprint(view_plugins())
{'decoders': {}, 'encoders': {}, 'parsers': {}}
>>> class DecoderPlugin(object):
... plugin_name = 'example'
... plugin_descript = 'a decoder for dicts containing _example_ key'
... dict_signature = ('_example_',)
...
>>> errors = load_plugin_classes([DecoderPlugin],category='decoders')
>>> pprint(view_plugins())
{'decoders': {'example': 'a decoder for dicts containing _example_ key'},
'encoders': {},
'parsers': {}}
>>> unload_plugin('example','decoders')
>>> pprint(view_plugins())
{'decoders': {}, 'encoders': {}, 'parsers': {}}
"""
if category is not None:
_all_plugins[category].pop(name)
else:
for cat in _all_plugins:
if name in _all_plugins[cat]:
_all_plugins[cat].pop(name) | remove single plugin
Parameters
----------
name : str
plugin name
category : str
plugin category
Examples
--------
>>> from pprint import pprint
>>> pprint(view_plugins())
{'decoders': {}, 'encoders': {}, 'parsers': {}}
>>> class DecoderPlugin(object):
... plugin_name = 'example'
... plugin_descript = 'a decoder for dicts containing _example_ key'
... dict_signature = ('_example_',)
...
>>> errors = load_plugin_classes([DecoderPlugin],category='decoders')
>>> pprint(view_plugins())
{'decoders': {'example': 'a decoder for dicts containing _example_ key'},
'encoders': {},
'parsers': {}}
>>> unload_plugin('example','decoders')
>>> pprint(view_plugins())
{'decoders': {}, 'encoders': {}, 'parsers': {}} | Below is the instruction that describes the task:
### Input:
remove single plugin
Parameters
----------
name : str
plugin name
category : str
plugin category
Examples
--------
>>> from pprint import pprint
>>> pprint(view_plugins())
{'decoders': {}, 'encoders': {}, 'parsers': {}}
>>> class DecoderPlugin(object):
... plugin_name = 'example'
... plugin_descript = 'a decoder for dicts containing _example_ key'
... dict_signature = ('_example_',)
...
>>> errors = load_plugin_classes([DecoderPlugin],category='decoders')
>>> pprint(view_plugins())
{'decoders': {'example': 'a decoder for dicts containing _example_ key'},
'encoders': {},
'parsers': {}}
>>> unload_plugin('example','decoders')
>>> pprint(view_plugins())
{'decoders': {}, 'encoders': {}, 'parsers': {}}
### Response:
def unload_plugin(name, category=None):
""" remove single plugin
Parameters
----------
name : str
plugin name
category : str
plugin category
Examples
--------
>>> from pprint import pprint
>>> pprint(view_plugins())
{'decoders': {}, 'encoders': {}, 'parsers': {}}
>>> class DecoderPlugin(object):
... plugin_name = 'example'
... plugin_descript = 'a decoder for dicts containing _example_ key'
... dict_signature = ('_example_',)
...
>>> errors = load_plugin_classes([DecoderPlugin],category='decoders')
>>> pprint(view_plugins())
{'decoders': {'example': 'a decoder for dicts containing _example_ key'},
'encoders': {},
'parsers': {}}
>>> unload_plugin('example','decoders')
>>> pprint(view_plugins())
{'decoders': {}, 'encoders': {}, 'parsers': {}}
"""
if category is not None:
_all_plugins[category].pop(name)
else:
for cat in _all_plugins:
if name in _all_plugins[cat]:
_all_plugins[cat].pop(name) |
def panic(self, *args):
"""
Creates a fatal error and exit
"""
self._err("fatal", *args)
if self.test_errs_mode is False: # pragma: no cover
        sys.exit(1) | Creates a fatal error and exit | Below is the instruction that describes the task:
### Input:
Creates a fatal error and exit
### Response:
def panic(self, *args):
"""
Creates a fatal error and exit
"""
self._err("fatal", *args)
if self.test_errs_mode is False: # pragma: no cover
sys.exit(1) |
def save(self, ds_name, data, dtype=None):
"""
See create_dataset.
"""
    return self.create_dataset(ds_name, data, dtype) | See create_dataset. | Below is the instruction that describes the task:
### Input:
See create_dataset.
### Response:
def save(self, ds_name, data, dtype=None):
"""
See create_dataset.
"""
return self.create_dataset(ds_name, data, dtype) |
def findFirst(self, tableClass, comparison=None,
offset=None, sort=None, default=None):
"""
Usage::
s.findFirst(tableClass [, query arguments except 'limit'])
Example::
class YourItemType(Item):
a = integer()
b = text()
c = integer()
...
it = s.findFirst(YourItemType,
AND(YourItemType.a == 1,
YourItemType.b == u'2'),
sort=YourItemType.c.descending)
Search for an item with columns in the database that match the passed
comparison, offset and sort, returning the first match if one is found,
or the passed default (None if none is passed) if one is not found.
"""
limit = 1
for item in self.query(tableClass, comparison, limit, offset, sort):
return item
return default | Usage::
s.findFirst(tableClass [, query arguments except 'limit'])
Example::
class YourItemType(Item):
a = integer()
b = text()
c = integer()
...
it = s.findFirst(YourItemType,
AND(YourItemType.a == 1,
YourItemType.b == u'2'),
sort=YourItemType.c.descending)
Search for an item with columns in the database that match the passed
comparison, offset and sort, returning the first match if one is found,
or the passed default (None if none is passed) if one is not found. | Below is the instruction that describes the task:
### Input:
Usage::
s.findFirst(tableClass [, query arguments except 'limit'])
Example::
class YourItemType(Item):
a = integer()
b = text()
c = integer()
...
it = s.findFirst(YourItemType,
AND(YourItemType.a == 1,
YourItemType.b == u'2'),
sort=YourItemType.c.descending)
Search for an item with columns in the database that match the passed
comparison, offset and sort, returning the first match if one is found,
or the passed default (None if none is passed) if one is not found.
### Response:
def findFirst(self, tableClass, comparison=None,
offset=None, sort=None, default=None):
"""
Usage::
s.findFirst(tableClass [, query arguments except 'limit'])
Example::
class YourItemType(Item):
a = integer()
b = text()
c = integer()
...
it = s.findFirst(YourItemType,
AND(YourItemType.a == 1,
YourItemType.b == u'2'),
sort=YourItemType.c.descending)
Search for an item with columns in the database that match the passed
comparison, offset and sort, returning the first match if one is found,
or the passed default (None if none is passed) if one is not found.
"""
limit = 1
for item in self.query(tableClass, comparison, limit, offset, sort):
return item
return default |
def _build_preconditions_table(self):
'''Builds the local action precondition expressions.'''
self.local_action_preconditions = dict()
self.global_action_preconditions = []
action_fluents = self.action_fluents
for precond in self.preconds:
scope = precond.scope
action_scope = [action for action in scope if action in action_fluents]
if len(action_scope) == 1:
name = action_scope[0]
self.local_action_preconditions[name] = self.local_action_preconditions.get(name, [])
self.local_action_preconditions[name].append(precond)
else:
        self.global_action_preconditions.append(precond) | Builds the local action precondition expressions. | Below is the instruction that describes the task:
### Input:
Builds the local action precondition expressions.
### Response:
def _build_preconditions_table(self):
'''Builds the local action precondition expressions.'''
self.local_action_preconditions = dict()
self.global_action_preconditions = []
action_fluents = self.action_fluents
for precond in self.preconds:
scope = precond.scope
action_scope = [action for action in scope if action in action_fluents]
if len(action_scope) == 1:
name = action_scope[0]
self.local_action_preconditions[name] = self.local_action_preconditions.get(name, [])
self.local_action_preconditions[name].append(precond)
else:
self.global_action_preconditions.append(precond) |
def drop_table(model, keyspaces=None, connections=None):
"""
Drops the table indicated by the model, if it exists.
If `keyspaces` is specified, the table will be dropped for all specified keyspaces. Note that the `Model.__keyspace__` is ignored in that case.
If `connections` is specified, the table will be synched for all specified connections. Note that the `Model.__connection__` is ignored in that case.
If not specified, it will try to get the connection from the Model.
**This function should be used with caution, especially in production environments.
Take care to execute schema modifications in a single context (i.e. not concurrently with other clients).**
*There are plans to guard schema-modifying functions with an environment-driven conditional.*
"""
context = _get_context(keyspaces, connections)
for connection, keyspace in context:
with query.ContextQuery(model, keyspace=keyspace) as m:
_drop_table(m, connection=connection) | Drops the table indicated by the model, if it exists.
If `keyspaces` is specified, the table will be dropped for all specified keyspaces. Note that the `Model.__keyspace__` is ignored in that case.
If `connections` is specified, the table will be synched for all specified connections. Note that the `Model.__connection__` is ignored in that case.
If not specified, it will try to get the connection from the Model.
**This function should be used with caution, especially in production environments.
Take care to execute schema modifications in a single context (i.e. not concurrently with other clients).**
*There are plans to guard schema-modifying functions with an environment-driven conditional.* | Below is the instruction that describes the task:
### Input:
Drops the table indicated by the model, if it exists.
If `keyspaces` is specified, the table will be dropped for all specified keyspaces. Note that the `Model.__keyspace__` is ignored in that case.
If `connections` is specified, the table will be synched for all specified connections. Note that the `Model.__connection__` is ignored in that case.
If not specified, it will try to get the connection from the Model.
**This function should be used with caution, especially in production environments.
Take care to execute schema modifications in a single context (i.e. not concurrently with other clients).**
*There are plans to guard schema-modifying functions with an environment-driven conditional.*
### Response:
def drop_table(model, keyspaces=None, connections=None):
"""
Drops the table indicated by the model, if it exists.
If `keyspaces` is specified, the table will be dropped for all specified keyspaces. Note that the `Model.__keyspace__` is ignored in that case.
If `connections` is specified, the table will be synched for all specified connections. Note that the `Model.__connection__` is ignored in that case.
If not specified, it will try to get the connection from the Model.
**This function should be used with caution, especially in production environments.
Take care to execute schema modifications in a single context (i.e. not concurrently with other clients).**
*There are plans to guard schema-modifying functions with an environment-driven conditional.*
"""
context = _get_context(keyspaces, connections)
for connection, keyspace in context:
with query.ContextQuery(model, keyspace=keyspace) as m:
_drop_table(m, connection=connection) |
def _get_by_id(collection, id):
'''
Get item from a list by the id field
'''
matches = [item for item in collection if item.id == id]
if not matches:
raise ValueError('Could not find a matching item')
elif len(matches) > 1:
raise ValueError('The id matched {0} items, not 1'.format(len(matches)))
    return matches[0] | Get item from a list by the id field | Below is the instruction that describes the task:
### Input:
Get item from a list by the id field
### Response:
def _get_by_id(collection, id):
'''
Get item from a list by the id field
'''
matches = [item for item in collection if item.id == id]
if not matches:
raise ValueError('Could not find a matching item')
elif len(matches) > 1:
raise ValueError('The id matched {0} items, not 1'.format(len(matches)))
return matches[0] |
def finite_difference(self, *args, **kwargs):
"""
Calculates a numerical approximation of the Jacobian of the model using
the sixth order central finite difference method. Accepts a `dx`
keyword to tune the relative stepsize used.
Makes 6*n_params calls to the model.
:return: A numerical approximation of the Jacobian of the model as a
list with length n_components containing numpy arrays of shape
(n_params, n_datapoints)
"""
# See also: scipy.misc.derivative. It might be convinced to work, but
# it will make way too many function evaluations
dx = kwargs.pop('dx')
bound_arguments = self.__signature__.bind(*args, **kwargs)
var_vals = [bound_arguments.arguments[var.name] for var in self.independent_vars]
param_vals = [bound_arguments.arguments[param.name] for param in self.params]
param_vals = np.array(param_vals, dtype=float)
f = partial(self, *var_vals)
# See also: scipy.misc.central_diff_weights
factors = np.array((3/2., -3/5., 1/10.))
orders = np.arange(1, len(factors) + 1)
out = []
# TODO: Dark numpy magic. Needs an extra dimension in out, and a sum
# over the right axis at the end.
# We can't make the output arrays yet, since we don't know the size of
# the components. So put a sentinel value.
out = None
for param_idx, param_val in enumerate(param_vals):
for order, factor in zip(orders, factors):
h = np.zeros(len(self.params))
# Note: stepsize (h) depends on the parameter values...
h[param_idx] = dx * order
if abs(param_val) >= 1e-7:
# ...but it'd better not be (too close to) 0.
h[param_idx] *= param_val
up = f(*(param_vals + h))
down = f(*(param_vals - h))
if out is None:
# Initialize output arrays. Now that we evaluated f, we
# know the size of our data.
out = []
# out is a list of length Ncomponents with numpy arrays of
# shape (Nparams, Ndata). Part of our misery comes from the
# fact that the length of the data may be different for all
# the components. Numpy doesn't like ragged arrays, so make
# a list of arrays.
for comp_idx in range(len(self)):
try:
len(up[comp_idx])
except TypeError: # output[comp_idx] is a number
data_shape = (1,)
else:
data_shape = up[comp_idx].shape
# Initialize at 0 so we can += all the contributions
param_grad = np.zeros([len(self.params)] + list(data_shape), dtype=float)
out.append(param_grad)
for comp_idx in range(len(self)):
diff = up[comp_idx] - down[comp_idx]
out[comp_idx][param_idx, :] += factor * diff / (2 * h[param_idx])
return out | Calculates a numerical approximation of the Jacobian of the model using
the sixth order central finite difference method. Accepts a `dx`
keyword to tune the relative stepsize used.
Makes 6*n_params calls to the model.
:return: A numerical approximation of the Jacobian of the model as a
list with length n_components containing numpy arrays of shape
(n_params, n_datapoints) | Below is the instruction that describes the task:
### Input:
Calculates a numerical approximation of the Jacobian of the model using
the sixth order central finite difference method. Accepts a `dx`
keyword to tune the relative stepsize used.
Makes 6*n_params calls to the model.
:return: A numerical approximation of the Jacobian of the model as a
list with length n_components containing numpy arrays of shape
(n_params, n_datapoints)
### Response:
def finite_difference(self, *args, **kwargs):
"""
Calculates a numerical approximation of the Jacobian of the model using
the sixth order central finite difference method. Accepts a `dx`
keyword to tune the relative stepsize used.
Makes 6*n_params calls to the model.
:return: A numerical approximation of the Jacobian of the model as a
list with length n_components containing numpy arrays of shape
(n_params, n_datapoints)
"""
# See also: scipy.misc.derivative. It might be convinced to work, but
# it will make way too many function evaluations
dx = kwargs.pop('dx')
bound_arguments = self.__signature__.bind(*args, **kwargs)
var_vals = [bound_arguments.arguments[var.name] for var in self.independent_vars]
param_vals = [bound_arguments.arguments[param.name] for param in self.params]
param_vals = np.array(param_vals, dtype=float)
f = partial(self, *var_vals)
# See also: scipy.misc.central_diff_weights
factors = np.array((3/2., -3/5., 1/10.))
orders = np.arange(1, len(factors) + 1)
out = []
# TODO: Dark numpy magic. Needs an extra dimension in out, and a sum
# over the right axis at the end.
# We can't make the output arrays yet, since we don't know the size of
# the components. So put a sentinel value.
out = None
for param_idx, param_val in enumerate(param_vals):
for order, factor in zip(orders, factors):
h = np.zeros(len(self.params))
# Note: stepsize (h) depends on the parameter values...
h[param_idx] = dx * order
if abs(param_val) >= 1e-7:
# ...but it'd better not be (too close to) 0.
h[param_idx] *= param_val
up = f(*(param_vals + h))
down = f(*(param_vals - h))
if out is None:
# Initialize output arrays. Now that we evaluated f, we
# know the size of our data.
out = []
# out is a list of length Ncomponents with numpy arrays of
# shape (Nparams, Ndata). Part of our misery comes from the
# fact that the length of the data may be different for all
# the components. Numpy doesn't like ragged arrays, so make
# a list of arrays.
for comp_idx in range(len(self)):
try:
len(up[comp_idx])
except TypeError: # output[comp_idx] is a number
data_shape = (1,)
else:
data_shape = up[comp_idx].shape
# Initialize at 0 so we can += all the contributions
param_grad = np.zeros([len(self.params)] + list(data_shape), dtype=float)
out.append(param_grad)
for comp_idx in range(len(self)):
diff = up[comp_idx] - down[comp_idx]
out[comp_idx][param_idx, :] += factor * diff / (2 * h[param_idx])
return out |
def get_fmt_section(self):
"""Grey if printing header GOs and plain if not printing header GOs."""
if self.b_format_txt:
return self.fmtname2wbfmtobj.get("light grey")
    return self.fmtname2wbfmtobj.get("plain bold") | Grey if printing header GOs and plain if not printing header GOs. | Below is the instruction that describes the task:
### Input:
Grey if printing header GOs and plain if not printing header GOs.
### Response:
def get_fmt_section(self):
"""Grey if printing header GOs and plain if not printing header GOs."""
if self.b_format_txt:
return self.fmtname2wbfmtobj.get("light grey")
return self.fmtname2wbfmtobj.get("plain bold") |
def save_catalog(self):
"""
Saves the catalog data to given key
Cancels if the cmd is cancel
Notifies user with the process.
"""
if self.input["cmd"] == 'save_catalog':
try:
edited_object = dict()
for i in self.input["form"]["CatalogDatas"]:
edited_object[i["catalog_key"]] = {"en": i["en"], "tr": i["tr"]}
newobj = fixture_bucket.get(self.input["object_key"])
newobj.data = edited_object
newobj.store()
# notify user by passing notify in output object
self.output["notify"] = "catalog: %s successfully updated." % self.input[
"object_key"]
except:
raise HTTPError(500, "Form object could not be saved")
if self.input["cmd"] == 'cancel':
self.output["notify"] = "catalog: %s canceled." % self.input["object_key"] | Saves the catalog data to given key
Cancels if the cmd is cancel
Notifies user with the process. | Below is the instruction that describes the task:
### Input:
Saves the catalog data to given key
Cancels if the cmd is cancel
Notifies user with the process.
### Response:
def save_catalog(self):
"""
Saves the catalog data to given key
Cancels if the cmd is cancel
Notifies user with the process.
"""
if self.input["cmd"] == 'save_catalog':
try:
edited_object = dict()
for i in self.input["form"]["CatalogDatas"]:
edited_object[i["catalog_key"]] = {"en": i["en"], "tr": i["tr"]}
newobj = fixture_bucket.get(self.input["object_key"])
newobj.data = edited_object
newobj.store()
# notify user by passing notify in output object
self.output["notify"] = "catalog: %s successfully updated." % self.input[
"object_key"]
except:
raise HTTPError(500, "Form object could not be saved")
if self.input["cmd"] == 'cancel':
self.output["notify"] = "catalog: %s canceled." % self.input["object_key"] |
def old_changelist_view(self, request, extra_context=None):
"The 'change list' admin view for this model."
from django.contrib.admin.views.main import ERROR_FLAG
from django.core.exceptions import PermissionDenied
from django.utils.encoding import force_text
from django.utils.translation import ungettext
opts = self.model._meta
app_label = opts.app_label
if not self.has_change_permission(request, None):
raise PermissionDenied
# Check actions to see if any are available on this changelist
actions = self.get_actions(request)
# Remove action checkboxes if there aren't any actions available.
list_display = list(self.list_display)
if not actions:
try:
list_display.remove('action_checkbox')
except ValueError:
pass
try:
if django.VERSION[0] == 1 and django.VERSION[1] < 4:
params = (
request, self.model, list_display,
self.list_display_links, self.list_filter, self.date_hierarchy,
self.search_fields, self.list_select_related,
self.list_per_page, self.list_editable, self)
elif django.VERSION[0] == 1 or (django.VERSION[0] == 2 and django.VERSION[1] < 1):
params = (
request, self.model, list_display,
self.list_display_links, self.list_filter, self.date_hierarchy,
self.search_fields, self.list_select_related,
self.list_per_page, self.list_max_show_all,
self.list_editable, self)
else:
params = (
request, self.model, list_display,
self.list_display_links, self.list_filter, self.date_hierarchy,
self.search_fields, self.list_select_related,
self.list_per_page, self.list_max_show_all,
self.list_editable, self, self.sortable_by)
cl = TreeChangeList(*params)
except IncorrectLookupParameters:
# Wacky lookup parameters were given, so redirect to the main
# changelist page, without parameters, and pass an 'invalid=1'
# parameter via the query string. If wacky parameters were given and
# the 'invalid=1' parameter was already in the query string, something
# is screwed up with the database, so display an error page.
if ERROR_FLAG in list(request.GET.keys()):
return render_to_response(
'admin/invalid_setup.html', {'title': _('Database error')})
return HttpResponseRedirect(request.path + '?' + ERROR_FLAG + '=1')
# If the request was POSTed, this might be a bulk action or a bulk edit.
# Try to look up an action first, but if this isn't an action the POST
# will fall through to the bulk edit check, below.
if actions and request.method == 'POST':
response = self.response_action(request, queryset=cl.get_queryset())
if response:
return response
# If we're allowing changelist editing, we need to construct a formset
# for the changelist given all the fields to be edited. Then we'll
# use the formset to validate/process POSTed data.
formset = cl.formset = None
# Handle POSTed bulk-edit data.
if request.method == "POST" and self.list_editable:
FormSet = self.get_changelist_formset(request)
formset = cl.formset = FormSet(
request.POST, request.FILES, queryset=cl.result_list
)
if formset.is_valid():
changecount = 0
for form in formset.forms:
if form.has_changed():
obj = self.save_form(request, form, change=True)
self.save_model(request, obj, form, change=True)
form.save_m2m()
change_msg = self.construct_change_message(request, form, None)
self.log_change(request, obj, change_msg)
changecount += 1
if changecount:
if changecount == 1:
name = force_text(opts.verbose_name)
else:
name = force_text(opts.verbose_name_plural)
msg = ungettext(
"%(count)s %(name)s was changed successfully.",
"%(count)s %(name)s were changed successfully.",
changecount) % {'count': changecount,
'name': name,
'obj': force_text(obj)}
self.message_user(request, msg)
return HttpResponseRedirect(request.get_full_path())
# Handle GET -- construct a formset for display.
elif self.list_editable:
FormSet = self.get_changelist_formset(request)
formset = cl.formset = FormSet(queryset=cl.result_list)
# Build the list of media to be used by the formset.
if formset:
media = self.media + formset.media
else:
media = self.media
# Build the action form and populate it with available actions.
if actions:
action_form = self.action_form(auto_id=None)
action_form.fields['action'].choices = self.get_action_choices(request)
else:
action_form = None
context = {
'title': cl.title,
'is_popup': cl.is_popup,
'cl': cl,
'media': media,
'has_add_permission': self.has_add_permission(request),
'app_label': app_label,
'action_form': action_form,
'actions_on_top': self.actions_on_top,
'actions_on_bottom': self.actions_on_bottom,
}
if django.VERSION[0] == 1 and django.VERSION[1] < 4:
context['root_path'] = self.admin_site.root_path
elif django.VERSION[0] == 1 or (django.VERSION[0] == 2 and django.VERSION[1] < 1):
selection_note_all = ungettext('%(total_count)s selected', 'All %(total_count)s selected', cl.result_count)
context.update({
'module_name': force_text(opts.verbose_name_plural),
'selection_note': _('0 of %(cnt)s selected') % {'cnt': len(cl.result_list)},
'selection_note_all': selection_note_all % {'total_count': cl.result_count},
})
else:
context['opts'] = self.model._meta
context.update(extra_context or {})
return render_to_response(self.change_list_template or [
'admin/%s/%s/change_list.html' % (app_label, opts.object_name.lower()),
'admin/%s/change_list.html' % app_label,
'admin/change_list.html'
    ], context=context) | The 'change list' admin view for this model. | Below is the instruction that describes the task:
### Input:
The 'change list' admin view for this model.
### Response:
def old_changelist_view(self, request, extra_context=None):
"The 'change list' admin view for this model."
from django.contrib.admin.views.main import ERROR_FLAG
from django.core.exceptions import PermissionDenied
from django.utils.encoding import force_text
from django.utils.translation import ungettext
opts = self.model._meta
app_label = opts.app_label
if not self.has_change_permission(request, None):
raise PermissionDenied
# Check actions to see if any are available on this changelist
actions = self.get_actions(request)
# Remove action checkboxes if there aren't any actions available.
list_display = list(self.list_display)
if not actions:
try:
list_display.remove('action_checkbox')
except ValueError:
pass
try:
if django.VERSION[0] == 1 and django.VERSION[1] < 4:
params = (
request, self.model, list_display,
self.list_display_links, self.list_filter, self.date_hierarchy,
self.search_fields, self.list_select_related,
self.list_per_page, self.list_editable, self)
elif django.VERSION[0] == 1 or (django.VERSION[0] == 2 and django.VERSION[1] < 1):
params = (
request, self.model, list_display,
self.list_display_links, self.list_filter, self.date_hierarchy,
self.search_fields, self.list_select_related,
self.list_per_page, self.list_max_show_all,
self.list_editable, self)
else:
params = (
request, self.model, list_display,
self.list_display_links, self.list_filter, self.date_hierarchy,
self.search_fields, self.list_select_related,
self.list_per_page, self.list_max_show_all,
self.list_editable, self, self.sortable_by)
cl = TreeChangeList(*params)
except IncorrectLookupParameters:
# Wacky lookup parameters were given, so redirect to the main
# changelist page, without parameters, and pass an 'invalid=1'
# parameter via the query string. If wacky parameters were given and
# the 'invalid=1' parameter was already in the query string, something
# is screwed up with the database, so display an error page.
if ERROR_FLAG in list(request.GET.keys()):
return render_to_response(
'admin/invalid_setup.html', {'title': _('Database error')})
return HttpResponseRedirect(request.path + '?' + ERROR_FLAG + '=1')
# If the request was POSTed, this might be a bulk action or a bulk edit.
# Try to look up an action first, but if this isn't an action the POST
# will fall through to the bulk edit check, below.
if actions and request.method == 'POST':
response = self.response_action(request, queryset=cl.get_queryset())
if response:
return response
# If we're allowing changelist editing, we need to construct a formset
# for the changelist given all the fields to be edited. Then we'll
# use the formset to validate/process POSTed data.
formset = cl.formset = None
# Handle POSTed bulk-edit data.
if request.method == "POST" and self.list_editable:
FormSet = self.get_changelist_formset(request)
formset = cl.formset = FormSet(
request.POST, request.FILES, queryset=cl.result_list
)
if formset.is_valid():
changecount = 0
for form in formset.forms:
if form.has_changed():
obj = self.save_form(request, form, change=True)
self.save_model(request, obj, form, change=True)
form.save_m2m()
change_msg = self.construct_change_message(request, form, None)
self.log_change(request, obj, change_msg)
changecount += 1
if changecount:
if changecount == 1:
name = force_text(opts.verbose_name)
else:
name = force_text(opts.verbose_name_plural)
msg = ungettext(
"%(count)s %(name)s was changed successfully.",
"%(count)s %(name)s were changed successfully.",
changecount) % {'count': changecount,
'name': name,
'obj': force_text(obj)}
self.message_user(request, msg)
return HttpResponseRedirect(request.get_full_path())
# Handle GET -- construct a formset for display.
elif self.list_editable:
FormSet = self.get_changelist_formset(request)
formset = cl.formset = FormSet(queryset=cl.result_list)
# Build the list of media to be used by the formset.
if formset:
media = self.media + formset.media
else:
media = self.media
# Build the action form and populate it with available actions.
if actions:
action_form = self.action_form(auto_id=None)
action_form.fields['action'].choices = self.get_action_choices(request)
else:
action_form = None
context = {
'title': cl.title,
'is_popup': cl.is_popup,
'cl': cl,
'media': media,
'has_add_permission': self.has_add_permission(request),
'app_label': app_label,
'action_form': action_form,
'actions_on_top': self.actions_on_top,
'actions_on_bottom': self.actions_on_bottom,
}
if django.VERSION[0] == 1 and django.VERSION[1] < 4:
context['root_path'] = self.admin_site.root_path
elif django.VERSION[0] == 1 or (django.VERSION[0] == 2 and django.VERSION[1] < 1):
selection_note_all = ungettext('%(total_count)s selected', 'All %(total_count)s selected', cl.result_count)
context.update({
'module_name': force_text(opts.verbose_name_plural),
'selection_note': _('0 of %(cnt)s selected') % {'cnt': len(cl.result_list)},
'selection_note_all': selection_note_all % {'total_count': cl.result_count},
})
else:
context['opts'] = self.model._meta
context.update(extra_context or {})
return render_to_response(self.change_list_template or [
'admin/%s/%s/change_list.html' % (app_label, opts.object_name.lower()),
'admin/%s/change_list.html' % app_label,
'admin/change_list.html'
], context=context) |
def __generic_save(self):
"""Returns False if user has cancelled a "save as" operation, otherwise True."""
page = self._get_page()
f = page.editor.f
if not f:
return True
if not page.editor.flag_valid:
a99.show_error("Cannot save, {0!s} has error(s)!".format(f.description))
return True
if f.filename:
f.save_as()
self.add_log("Saved '{}'".format(f.filename))
page.flag_changed = False
self._update_gui_text_tabs()
if hasattr(page.editor, "update_gui_label_fn"):
page.editor.update_gui_label_fn() # duck typing
return True
else:
return self.__generic_save_as() | Returns False if user has cancelled a "save as" operation, otherwise True. | Below is the the instruction that describes the task:
### Input:
Returns False if user has cancelled a "save as" operation, otherwise True.
### Response:
def __generic_save(self):
"""Returns False if user has cancelled a "save as" operation, otherwise True."""
page = self._get_page()
f = page.editor.f
if not f:
return True
if not page.editor.flag_valid:
a99.show_error("Cannot save, {0!s} has error(s)!".format(f.description))
return True
if f.filename:
f.save_as()
self.add_log("Saved '{}'".format(f.filename))
page.flag_changed = False
self._update_gui_text_tabs()
if hasattr(page.editor, "update_gui_label_fn"):
page.editor.update_gui_label_fn() # duck typing
return True
else:
return self.__generic_save_as() |
def offsets(self):
""" Returns the offsets values of x, y, z as a numpy array
"""
return np.array([self.x_offset, self.y_offset, self.z_offset]) | Returns the offsets values of x, y, z as a numpy array | Below is the the instruction that describes the task:
### Input:
Returns the offsets values of x, y, z as a numpy array
### Response:
def offsets(self):
""" Returns the offsets values of x, y, z as a numpy array
"""
return np.array([self.x_offset, self.y_offset, self.z_offset]) |
def packing_job_ext_info(job_lsit_DO):
"""
Packing additional information of the job into the job_list_DO(JobListDO)
"""
ext_info = sqllite_agent.execute(ScrapydJobExtInfoSQLSet.SELECT_BY_ID, (job_lsit_DO.job_id,))
if ext_info is None or len(ext_info) <= 0: return
ext_info = ext_info[0]
job_lsit_DO.args = ext_info[1]
job_lsit_DO.priority = ext_info[2]
job_lsit_DO.creation_time = ext_info[3]
job_lsit_DO.logs_name = str_to_list(ext_info[4], ',')
job_lsit_DO.logs_url = str_to_list(ext_info[5], ',') | Packing additional information of the job into the job_list_DO(JobListDO) | Below is the the instruction that describes the task:
### Input:
Packing additional information of the job into the job_list_DO(JobListDO)
### Response:
def packing_job_ext_info(job_lsit_DO):
"""
Packing additional information of the job into the job_list_DO(JobListDO)
"""
ext_info = sqllite_agent.execute(ScrapydJobExtInfoSQLSet.SELECT_BY_ID, (job_lsit_DO.job_id,))
if ext_info is None or len(ext_info) <= 0: return
ext_info = ext_info[0]
job_lsit_DO.args = ext_info[1]
job_lsit_DO.priority = ext_info[2]
job_lsit_DO.creation_time = ext_info[3]
job_lsit_DO.logs_name = str_to_list(ext_info[4], ',')
job_lsit_DO.logs_url = str_to_list(ext_info[5], ',') |
def gather_data(registry):
"""Gathers the metrics"""
# Get the host name of the machine
host = socket.gethostname()
# Create our collectors
io_metric = Summary("write_file_io_example",
"Writing io file in disk example.",
{'host': host})
# register the metric collectors
registry.register(io_metric)
chunk = b'\xff'*4000 # 4000 bytes
filename_path = "/tmp/prometheus_test"
blocksizes = (100, 10000, 1000000, 100000000)
# Start gathering metrics every 0.7 seconds
while True:
time.sleep(0.7)
for i in blocksizes:
time_start = time.time()
# Action
with open(filename_path, "wb") as f:
for _ in range(i // 10000):
f.write(chunk)
io_metric.add({"file": filename_path, "block": i},
time.time() - time_start) | Gathers the metrics | Below is the the instruction that describes the task:
### Input:
Gathers the metrics
### Response:
def gather_data(registry):
"""Gathers the metrics"""
# Get the host name of the machine
host = socket.gethostname()
# Create our collectors
io_metric = Summary("write_file_io_example",
"Writing io file in disk example.",
{'host': host})
# register the metric collectors
registry.register(io_metric)
chunk = b'\xff'*4000 # 4000 bytes
filename_path = "/tmp/prometheus_test"
blocksizes = (100, 10000, 1000000, 100000000)
# Start gathering metrics every 0.7 seconds
while True:
time.sleep(0.7)
for i in blocksizes:
time_start = time.time()
# Action
with open(filename_path, "wb") as f:
for _ in range(i // 10000):
f.write(chunk)
io_metric.add({"file": filename_path, "block": i},
time.time() - time_start) |
def area_estimation(tech_in_nm=130, block=None):
""" Estimates the total area of the block.
:param tech_in_nm: the size of the circuit technology to be estimated
(for example, 65 is 65nm and 250 is 0.25um)
:return: tuple of estimated areas (logic, mem) in terms of mm^2
The estimations are based off of 130nm stdcell designs for the logic, and
custom memory blocks from the literature. The results are not fully validated
and we do not recommend that this function be used in carrying out science for
publication.
"""
def mem_area_estimate(tech_in_nm, bits, ports, is_rom):
# http://www.cs.ucsb.edu/~sherwood/pubs/ICCD-srammodel.pdf
# ROM is assumed to be 1/10th of area of SRAM
tech_in_um = tech_in_nm / 1000.0
area_estimate = 0.001 * tech_in_um**2.07 * bits**0.9 * ports**0.7 + 0.0048
return area_estimate if not is_rom else area_estimate / 10.0
# Subset of the raw data gathered from yosys, mapping to vsclib 130nm library
# Width Adder_Area Mult_Area (area in "tracks" as discussed below)
# 8 211 2684
# 16 495 12742
# 32 1110 49319
# 64 2397 199175
# 128 4966 749828
def adder_stdcell_estimate(width):
return width * 34.4 - 25.8
def multiplier_stdcell_estimate(width):
if width == 1:
return 5
elif width == 2:
return 39
elif width == 3:
return 219
else:
return -958 + (150 * width) + (45 * width**2)
def stdcell_estimate(net):
if net.op in 'w~sc':
return 0
elif net.op in '&|n':
return 40/8.0 * len(net.args[0]) # 40 lambda
elif net.op in '^=<>x':
return 80/8.0 * len(net.args[0]) # 80 lambda
elif net.op == 'r':
return 144/8.0 * len(net.args[0]) # 144 lambda
elif net.op in '+-':
return adder_stdcell_estimate(len(net.args[0]))
elif net.op == '*':
return multiplier_stdcell_estimate(len(net.args[0]))
elif net.op in 'm@':
return 0 # memories handled elsewhere
else:
raise PyrtlInternalError('Unable to estimate the following net '
'due to unimplemented op :\n%s' % str(net))
block = working_block(block)
# The functions above were gathered and calibrated by mapping
# reference designs to an openly available 130nm stdcell library.
# http://www.vlsitechnology.org/html/vsc_description.html
# http://www.vlsitechnology.org/html/cells/vsclib013/lib_gif_index.html
# In a standard cell design, each gate takes up a length of standard "track"
# in the chip. The functions above return that length for each of the different
# types of functions in the units of "tracks". In the 130nm process used,
# 1 lambda is 55nm, and 1 track is 8 lambda.
# first, sum up the area of all of the logic elements (including registers)
total_tracks = sum(stdcell_estimate(a_net) for a_net in block.logic)
total_length_in_nm = total_tracks * 8 * 55
# each track is then 72 lambda tall, and converted from nm2 to mm2
area_in_mm2_for_130nm = (total_length_in_nm * (72 * 55)) / 1e12
# scaling from 130nm to the target tech
logic_area = area_in_mm2_for_130nm / (130.0/tech_in_nm)**2
# now sum up the area of the memories
mem_area = 0
for mem in set(net.op_param[1] for net in block.logic_subset('@m')):
bits, ports, is_rom = _bits_ports_and_isrom_from_memory(mem)
mem_area += mem_area_estimate(tech_in_nm, bits, ports, is_rom)
return logic_area, mem_area | Estimates the total area of the block.
:param tech_in_nm: the size of the circuit technology to be estimated
(for example, 65 is 65nm and 250 is 0.25um)
:return: tuple of estimated areas (logic, mem) in terms of mm^2
The estimations are based off of 130nm stdcell designs for the logic, and
custom memory blocks from the literature. The results are not fully validated
and we do not recommend that this function be used in carrying out science for
publication. | Below is the the instruction that describes the task:
### Input:
Estimates the total area of the block.
:param tech_in_nm: the size of the circuit technology to be estimated
(for example, 65 is 65nm and 250 is 0.25um)
:return: tuple of estimated areas (logic, mem) in terms of mm^2
The estimations are based off of 130nm stdcell designs for the logic, and
custom memory blocks from the literature. The results are not fully validated
and we do not recommend that this function be used in carrying out science for
publication.
### Response:
def area_estimation(tech_in_nm=130, block=None):
""" Estimates the total area of the block.
:param tech_in_nm: the size of the circuit technology to be estimated
(for example, 65 is 65nm and 250 is 0.25um)
:return: tuple of estimated areas (logic, mem) in terms of mm^2
The estimations are based off of 130nm stdcell designs for the logic, and
custom memory blocks from the literature. The results are not fully validated
and we do not recommend that this function be used in carrying out science for
publication.
"""
def mem_area_estimate(tech_in_nm, bits, ports, is_rom):
# http://www.cs.ucsb.edu/~sherwood/pubs/ICCD-srammodel.pdf
# ROM is assumed to be 1/10th of area of SRAM
tech_in_um = tech_in_nm / 1000.0
area_estimate = 0.001 * tech_in_um**2.07 * bits**0.9 * ports**0.7 + 0.0048
return area_estimate if not is_rom else area_estimate / 10.0
# Subset of the raw data gathered from yosys, mapping to vsclib 130nm library
# Width Adder_Area Mult_Area (area in "tracks" as discussed below)
# 8 211 2684
# 16 495 12742
# 32 1110 49319
# 64 2397 199175
# 128 4966 749828
def adder_stdcell_estimate(width):
return width * 34.4 - 25.8
def multiplier_stdcell_estimate(width):
if width == 1:
return 5
elif width == 2:
return 39
elif width == 3:
return 219
else:
return -958 + (150 * width) + (45 * width**2)
def stdcell_estimate(net):
if net.op in 'w~sc':
return 0
elif net.op in '&|n':
return 40/8.0 * len(net.args[0]) # 40 lambda
elif net.op in '^=<>x':
return 80/8.0 * len(net.args[0]) # 80 lambda
elif net.op == 'r':
return 144/8.0 * len(net.args[0]) # 144 lambda
elif net.op in '+-':
return adder_stdcell_estimate(len(net.args[0]))
elif net.op == '*':
return multiplier_stdcell_estimate(len(net.args[0]))
elif net.op in 'm@':
return 0 # memories handled elsewhere
else:
raise PyrtlInternalError('Unable to estimate the following net '
'due to unimplemented op :\n%s' % str(net))
block = working_block(block)
# The functions above were gathered and calibrated by mapping
# reference designs to an openly available 130nm stdcell library.
# http://www.vlsitechnology.org/html/vsc_description.html
# http://www.vlsitechnology.org/html/cells/vsclib013/lib_gif_index.html
# In a standard cell design, each gate takes up a length of standard "track"
# in the chip. The functions above return that length for each of the different
# types of functions in the units of "tracks". In the 130nm process used,
# 1 lambda is 55nm, and 1 track is 8 lambda.
# first, sum up the area of all of the logic elements (including registers)
total_tracks = sum(stdcell_estimate(a_net) for a_net in block.logic)
total_length_in_nm = total_tracks * 8 * 55
# each track is then 72 lambda tall, and converted from nm2 to mm2
area_in_mm2_for_130nm = (total_length_in_nm * (72 * 55)) / 1e12
# scaling from 130nm to the target tech
logic_area = area_in_mm2_for_130nm / (130.0/tech_in_nm)**2
# now sum up the area of the memories
mem_area = 0
for mem in set(net.op_param[1] for net in block.logic_subset('@m')):
bits, ports, is_rom = _bits_ports_and_isrom_from_memory(mem)
mem_area += mem_area_estimate(tech_in_nm, bits, ports, is_rom)
return logic_area, mem_area |
def server_systems(self):
"""
Retrieve a list of available systems.
"""
response = self._post(self.apiurl + "/v2/server/systems", data={'apikey': self.apikey})
return self._raise_or_extract(response) | Retrieve a list of available systems. | Below is the the instruction that describes the task:
### Input:
Retrieve a list of available systems.
### Response:
def server_systems(self):
"""
Retrieve a list of available systems.
"""
response = self._post(self.apiurl + "/v2/server/systems", data={'apikey': self.apikey})
return self._raise_or_extract(response) |
def get_url(path, host, port, method="http"):
"""
make url from path, host and port
:param method: str
:param path: str, path within the request, e.g. "/api/version"
:param host: str
:param port: str or int
:return: str
"""
return urlunsplit(
(method, "%s:%s" % (host, port), path, "", "")
) | make url from path, host and port
:param method: str
:param path: str, path within the request, e.g. "/api/version"
:param host: str
:param port: str or int
:return: str | Below is the the instruction that describes the task:
### Input:
make url from path, host and port
:param method: str
:param path: str, path within the request, e.g. "/api/version"
:param host: str
:param port: str or int
:return: str
### Response:
def get_url(path, host, port, method="http"):
"""
make url from path, host and port
:param method: str
:param path: str, path within the request, e.g. "/api/version"
:param host: str
:param port: str or int
:return: str
"""
return urlunsplit(
(method, "%s:%s" % (host, port), path, "", "")
) |
def _GetServerCipher(self):
"""Returns the cipher for self.server_name."""
if self.server_cipher is not None:
expiry = self.server_cipher_age + rdfvalue.Duration("1d")
if expiry > rdfvalue.RDFDatetime.Now():
return self.server_cipher
remote_public_key = self._GetRemotePublicKey(self.server_name)
self.server_cipher = Cipher(self.common_name, self.private_key,
remote_public_key)
self.server_cipher_age = rdfvalue.RDFDatetime.Now()
return self.server_cipher | Returns the cipher for self.server_name. | Below is the the instruction that describes the task:
### Input:
Returns the cipher for self.server_name.
### Response:
def _GetServerCipher(self):
"""Returns the cipher for self.server_name."""
if self.server_cipher is not None:
expiry = self.server_cipher_age + rdfvalue.Duration("1d")
if expiry > rdfvalue.RDFDatetime.Now():
return self.server_cipher
remote_public_key = self._GetRemotePublicKey(self.server_name)
self.server_cipher = Cipher(self.common_name, self.private_key,
remote_public_key)
self.server_cipher_age = rdfvalue.RDFDatetime.Now()
return self.server_cipher |
def create(self):
"""
Create the node on the compute server
"""
data = self._node_data()
data["node_id"] = self._id
if self._node_type == "docker":
timeout = None
else:
timeout = 1200
trial = 0
while trial != 6:
try:
response = yield from self._compute.post("/projects/{}/{}/nodes".format(self._project.id, self._node_type), data=data, timeout=timeout)
except ComputeConflict as e:
if e.response.get("exception") == "ImageMissingError":
res = yield from self._upload_missing_image(self._node_type, e.response["image"])
if not res:
raise e
else:
raise e
else:
yield from self.parse_node_response(response.json)
return True
trial += 1 | Create the node on the compute server | Below is the the instruction that describes the task:
### Input:
Create the node on the compute server
### Response:
def create(self):
"""
Create the node on the compute server
"""
data = self._node_data()
data["node_id"] = self._id
if self._node_type == "docker":
timeout = None
else:
timeout = 1200
trial = 0
while trial != 6:
try:
response = yield from self._compute.post("/projects/{}/{}/nodes".format(self._project.id, self._node_type), data=data, timeout=timeout)
except ComputeConflict as e:
if e.response.get("exception") == "ImageMissingError":
res = yield from self._upload_missing_image(self._node_type, e.response["image"])
if not res:
raise e
else:
raise e
else:
yield from self.parse_node_response(response.json)
return True
trial += 1 |
def ip4_address(self):
"""Returns the IPv4 address of the network interface.
If multiple interfaces are provided,
the address of the first found is returned.
"""
if self._ip4_address is None and self.network is not None:
self._ip4_address = self._get_ip_address(
libvirt.VIR_IP_ADDR_TYPE_IPV4)
return self._ip4_address | Returns the IPv4 address of the network interface.
If multiple interfaces are provided,
the address of the first found is returned. | Below is the the instruction that describes the task:
### Input:
Returns the IPv4 address of the network interface.
If multiple interfaces are provided,
the address of the first found is returned.
### Response:
def ip4_address(self):
"""Returns the IPv4 address of the network interface.
If multiple interfaces are provided,
the address of the first found is returned.
"""
if self._ip4_address is None and self.network is not None:
self._ip4_address = self._get_ip_address(
libvirt.VIR_IP_ADDR_TYPE_IPV4)
return self._ip4_address |
def from_response(cls, response, attrs):
""" Create an index from returned Dynamo data """
proj = response['Projection']
index = cls(proj['ProjectionType'], response['IndexName'],
attrs[response['KeySchema'][1]['AttributeName']],
proj.get('NonKeyAttributes'))
index.response = response
return index | Create an index from returned Dynamo data | Below is the the instruction that describes the task:
### Input:
Create an index from returned Dynamo data
### Response:
def from_response(cls, response, attrs):
""" Create an index from returned Dynamo data """
proj = response['Projection']
index = cls(proj['ProjectionType'], response['IndexName'],
attrs[response['KeySchema'][1]['AttributeName']],
proj.get('NonKeyAttributes'))
index.response = response
return index |
def stop(self, now=False):
"""Stop the server gracefully. Do not take any new transfers,
but complete the existing ones. If force is True, drop everything
and stop. Note, immediately will not interrupt the select loop, it
will happen when the server returns on ready data, or a timeout.
ie. SOCK_TIMEOUT"""
if now:
self.shutdown_immediately = True
else:
self.shutdown_gracefully = True | Stop the server gracefully. Do not take any new transfers,
but complete the existing ones. If force is True, drop everything
and stop. Note, immediately will not interrupt the select loop, it
will happen when the server returns on ready data, or a timeout.
ie. SOCK_TIMEOUT | Below is the the instruction that describes the task:
### Input:
Stop the server gracefully. Do not take any new transfers,
but complete the existing ones. If force is True, drop everything
and stop. Note, immediately will not interrupt the select loop, it
will happen when the server returns on ready data, or a timeout.
ie. SOCK_TIMEOUT
### Response:
def stop(self, now=False):
"""Stop the server gracefully. Do not take any new transfers,
but complete the existing ones. If force is True, drop everything
and stop. Note, immediately will not interrupt the select loop, it
will happen when the server returns on ready data, or a timeout.
ie. SOCK_TIMEOUT"""
if now:
self.shutdown_immediately = True
else:
self.shutdown_gracefully = True |
def report(data):
"""Create a Rmd report for small RNAseq analysis"""
work_dir = dd.get_work_dir(data[0][0])
out_dir = op.join(work_dir, "report")
safe_makedir(out_dir)
summary_file = op.join(out_dir, "summary.csv")
with file_transaction(summary_file) as out_tx:
with open(out_tx, 'w') as out_handle:
out_handle.write("sample_id,%s\n" % _guess_header(data[0][0]))
for sample in data:
info = sample[0]
group = _guess_group(info)
files = info["seqbuster"] if "seqbuster" in info else "None"
out_handle.write(",".join([dd.get_sample_name(info),
group]) + "\n")
_modify_report(work_dir, out_dir)
return summary_file | Create a Rmd report for small RNAseq analysis | Below is the the instruction that describes the task:
### Input:
Create a Rmd report for small RNAseq analysis
### Response:
def report(data):
"""Create a Rmd report for small RNAseq analysis"""
work_dir = dd.get_work_dir(data[0][0])
out_dir = op.join(work_dir, "report")
safe_makedir(out_dir)
summary_file = op.join(out_dir, "summary.csv")
with file_transaction(summary_file) as out_tx:
with open(out_tx, 'w') as out_handle:
out_handle.write("sample_id,%s\n" % _guess_header(data[0][0]))
for sample in data:
info = sample[0]
group = _guess_group(info)
files = info["seqbuster"] if "seqbuster" in info else "None"
out_handle.write(",".join([dd.get_sample_name(info),
group]) + "\n")
_modify_report(work_dir, out_dir)
return summary_file |
def add_mag_drift_unit_vectors_ecef(inst, steps=None, max_steps=40000, step_size=10.,
ref_height=120.):
"""Adds unit vectors expressing the ion drift coordinate system
organized by the geomagnetic field. Unit vectors are expressed
in ECEF coordinates.
Parameters
----------
inst : pysat.Instrument
Instrument object that will get unit vectors
max_steps : int
Maximum number of steps allowed for field line tracing
step_size : float
Maximum step size (km) allowed when field line tracing
ref_height : float
Altitude used as cutoff for labeling a field line location a footpoint
Returns
-------
None
unit vectors are added to the passed Instrument object with a naming
scheme:
'unit_zon_ecef_*' : unit zonal vector, component along ECEF-(X,Y,or Z)
'unit_fa_ecef_*' : unit field-aligned vector, component along ECEF-(X,Y,or Z)
'unit_mer_ecef_*' : unit meridional vector, component along ECEF-(X,Y,or Z)
"""
# add unit vectors for magnetic drifts in ecef coordinates
zvx, zvy, zvz, bx, by, bz, mx, my, mz = calculate_mag_drift_unit_vectors_ecef(inst['latitude'],
inst['longitude'], inst['altitude'], inst.data.index,
steps=steps, max_steps=max_steps, step_size=step_size, ref_height=ref_height)
inst['unit_zon_ecef_x'] = zvx
inst['unit_zon_ecef_y'] = zvy
inst['unit_zon_ecef_z'] = zvz
inst['unit_fa_ecef_x'] = bx
inst['unit_fa_ecef_y'] = by
inst['unit_fa_ecef_z'] = bz
inst['unit_mer_ecef_x'] = mx
inst['unit_mer_ecef_y'] = my
inst['unit_mer_ecef_z'] = mz
inst.meta['unit_zon_ecef_x'] = {'long_name': 'Zonal unit vector along ECEF-x',
'desc': 'Zonal unit vector along ECEF-x',
'label': 'Zonal unit vector along ECEF-x',
'notes': ('Unit vector expressed using Earth Centered Earth Fixed (ECEF) frame. '
'Vector system is calcluated by field-line tracing along IGRF values '
'down to reference altitudes of 120 km in both the Northern and Southern '
'hemispheres. These two points, along with the satellite position, are '
'used to define the magnetic meridian. Vector math from here generates '
'the orthogonal system.'),
'axis': 'Zonal unit vector along ECEF-x',
'value_min': -1.,
'value_max': 1.,
}
inst.meta['unit_zon_ecef_y'] = {'long_name': 'Zonal unit vector along ECEF-y',
'desc': 'Zonal unit vector along ECEF-y',
'label': 'Zonal unit vector along ECEF-y',
'notes': ('Unit vector expressed using Earth Centered Earth Fixed (ECEF) frame. '
'Vector system is calcluated by field-line tracing along IGRF values '
'down to reference altitudes of 120 km in both the Northern and Southern '
'hemispheres. These two points, along with the satellite position, are '
'used to define the magnetic meridian. Vector math from here generates '
'the orthogonal system.'),
'axis': 'Zonal unit vector along ECEF-y',
'value_min': -1.,
'value_max': 1.,
}
inst.meta['unit_zon_ecef_z'] = {'long_name': 'Zonal unit vector along ECEF-z',
'desc': 'Zonal unit vector along ECEF-z',
'label': 'Zonal unit vector along ECEF-z',
'notes': ('Unit vector expressed using Earth Centered Earth Fixed (ECEF) frame. '
'Vector system is calcluated by field-line tracing along IGRF values '
'down to reference altitudes of 120 km in both the Northern and Southern '
'hemispheres. These two points, along with the satellite position, are '
'used to define the magnetic meridian. Vector math from here generates '
'the orthogonal system.'),
'axis': 'Zonal unit vector along ECEF-z',
'value_min': -1.,
'value_max': 1.,
}
inst.meta['unit_fa_ecef_x'] = {'long_name': 'Field-aligned unit vector along ECEF-x',
'desc': 'Field-aligned unit vector along ECEF-x',
'label': 'Field-aligned unit vector along ECEF-x',
'notes': ('Unit vector expressed using Earth Centered Earth Fixed (ECEF) frame. '
'Vector system is calcluated by field-line tracing along IGRF values '
'down to reference altitudes of 120 km in both the Northern and Southern '
'hemispheres. These two points, along with the satellite position, are '
'used to define the magnetic meridian. Vector math from here generates '
'the orthogonal system.'),
'axis': 'Field-aligned unit vector along ECEF-x',
'value_min': -1.,
'value_max': 1.,
}
inst.meta['unit_fa_ecef_y'] = {'long_name': 'Field-aligned unit vector along ECEF-y',
'desc': 'Field-aligned unit vector along ECEF-y',
'label': 'Field-aligned unit vector along ECEF-y',
'notes': ('Unit vector expressed using Earth Centered Earth Fixed (ECEF) frame. '
'Vector system is calcluated by field-line tracing along IGRF values '
'down to reference altitudes of 120 km in both the Northern and Southern '
'hemispheres. These two points, along with the satellite position, are '
'used to define the magnetic meridian. Vector math from here generates '
'the orthogonal system.'),
'axis': 'Field-aligned unit vector along ECEF-y',
'value_min': -1.,
'value_max': 1.,
}
inst.meta['unit_fa_ecef_z'] = {'long_name': 'Field-aligned unit vector along ECEF-z',
'desc': 'Field-aligned unit vector along ECEF-z',
'label': 'Field-aligned unit vector along ECEF-z',
'notes': ('Unit vector expressed using Earth Centered Earth Fixed (ECEF) frame. '
'Vector system is calcluated by field-line tracing along IGRF values '
'down to reference altitudes of 120 km in both the Northern and Southern '
'hemispheres. These two points, along with the satellite position, are '
'used to define the magnetic meridian. Vector math from here generates '
'the orthogonal system.'),
'axis': 'Field-aligned unit vector along ECEF-z',
'value_min': -1.,
'value_max': 1.,
}
inst.meta['unit_mer_ecef_x'] = {'long_name': 'Meridional unit vector along ECEF-x',
'desc': 'Meridional unit vector along ECEF-x',
'label': 'Meridional unit vector along ECEF-x',
'notes': ('Unit vector expressed using Earth Centered Earth Fixed (ECEF) frame. '
'Vector system is calcluated by field-line tracing along IGRF values '
'down to reference altitudes of 120 km in both the Northern and Southern '
'hemispheres. These two points, along with the satellite position, are '
'used to define the magnetic meridian. Vector math from here generates '
'the orthogonal system.'),
'axis': 'Meridional unit vector along ECEF-x',
'value_min': -1.,
'value_max': 1.,
}
inst.meta['unit_mer_ecef_y'] = {'long_name': 'Meridional unit vector along ECEF-y',
'desc': 'Meridional unit vector along ECEF-y',
'label': 'Meridional unit vector along ECEF-y',
'notes': ('Unit vector expressed using Earth Centered Earth Fixed (ECEF) frame. '
'Vector system is calcluated by field-line tracing along IGRF values '
'down to reference altitudes of 120 km in both the Northern and Southern '
'hemispheres. These two points, along with the satellite position, are '
'used to define the magnetic meridian. Vector math from here generates '
'the orthogonal system.'),
'axis': 'Meridional unit vector along ECEF-y',
'value_min': -1.,
'value_max': 1.,
}
inst.meta['unit_mer_ecef_z'] = {'long_name': 'Meridional unit vector along ECEF-z',
'desc': 'Meridional unit vector along ECEF-z',
'label': 'Meridional unit vector along ECEF-z',
'notes': ('Unit vector expressed using Earth Centered Earth Fixed (ECEF) frame. '
'Vector system is calcluated by field-line tracing along IGRF values '
'down to reference altitudes of 120 km in both the Northern and Southern '
'hemispheres. These two points, along with the satellite position, are '
'used to define the magnetic meridian. Vector math from here generates '
'the orthogonal system.'),
'axis': 'Meridional unit vector along ECEF-z',
'value_min': -1.,
'value_max': 1.,
}
return | Adds unit vectors expressing the ion drift coordinate system
organized by the geomagnetic field. Unit vectors are expressed
in ECEF coordinates.
Parameters
----------
inst : pysat.Instrument
Instrument object that will get unit vectors
max_steps : int
Maximum number of steps allowed for field line tracing
step_size : float
Maximum step size (km) allowed when field line tracing
ref_height : float
Altitude used as cutoff for labeling a field line location a footpoint
Returns
-------
None
unit vectors are added to the passed Instrument object with a naming
scheme:
'unit_zon_ecef_*' : unit zonal vector, component along ECEF-(X,Y,or Z)
'unit_fa_ecef_*' : unit field-aligned vector, component along ECEF-(X,Y,or Z)
'unit_mer_ecef_*' : unit meridional vector, component along ECEF-(X,Y,or Z) | Below is the the instruction that describes the task:
### Input:
Adds unit vectors expressing the ion drift coordinate system
organized by the geomagnetic field. Unit vectors are expressed
in ECEF coordinates.
Parameters
----------
inst : pysat.Instrument
Instrument object that will get unit vectors
max_steps : int
Maximum number of steps allowed for field line tracing
step_size : float
Maximum step size (km) allowed when field line tracing
ref_height : float
Altitude used as cutoff for labeling a field line location a footpoint
Returns
-------
None
unit vectors are added to the passed Instrument object with a naming
scheme:
'unit_zon_ecef_*' : unit zonal vector, component along ECEF-(X,Y,or Z)
'unit_fa_ecef_*' : unit field-aligned vector, component along ECEF-(X,Y,or Z)
'unit_mer_ecef_*' : unit meridional vector, component along ECEF-(X,Y,or Z)
### Response:
def add_mag_drift_unit_vectors_ecef(inst, steps=None, max_steps=40000, step_size=10.,
                                    ref_height=120.):
    """Adds unit vectors expressing the ion drift coordinate system
    organized by the geomagnetic field. Unit vectors are expressed
    in ECEF coordinates.

    Parameters
    ----------
    inst : pysat.Instrument
        Instrument object that will get unit vectors
    steps : array-like or None
        Forwarded to calculate_mag_drift_unit_vectors_ecef
        (presumably per-step sizes overriding step_size -- confirm
        against that function's signature)
    max_steps : int
        Maximum number of steps allowed for field line tracing
    step_size : float
        Maximum step size (km) allowed when field line tracing
    ref_height : float
        Altitude used as cutoff for labeling a field line location a footpoint

    Returns
    -------
    None
        Unit vectors are added to the passed Instrument object with a naming
        scheme:
            'unit_zon_ecef_*' : unit zonal vector, component along ECEF-(X,Y,or Z)
            'unit_fa_ecef_*' : unit field-aligned vector, component along ECEF-(X,Y,or Z)
            'unit_mer_ecef_*' : unit meridional vector, component along ECEF-(X,Y,or Z)
    """
    # Compute the zonal (zv*), field-aligned (b*), and meridional (m*)
    # unit vectors, each returned as ECEF x/y/z components.
    zvx, zvy, zvz, bx, by, bz, mx, my, mz = calculate_mag_drift_unit_vectors_ecef(
        inst['latitude'], inst['longitude'], inst['altitude'], inst.data.index,
        steps=steps, max_steps=max_steps, step_size=step_size,
        ref_height=ref_height)

    # Identical explanatory notes are attached to every component's metadata.
    # (Typo "calcluated" in the original metadata corrected here.)
    notes = ('Unit vector expressed using Earth Centered Earth Fixed (ECEF) frame. '
             'Vector system is calculated by field-line tracing along IGRF values '
             'down to reference altitudes of 120 km in both the Northern and Southern '
             'hemispheres. These two points, along with the satellite position, are '
             'used to define the magnetic meridian. Vector math from here generates '
             'the orthogonal system.')

    # Assign data and metadata in a single pass; the original repeated the
    # same metadata dict nine times, which invites copy/paste drift.
    components = [
        ('unit_zon_ecef_x', zvx, 'Zonal unit vector along ECEF-x'),
        ('unit_zon_ecef_y', zvy, 'Zonal unit vector along ECEF-y'),
        ('unit_zon_ecef_z', zvz, 'Zonal unit vector along ECEF-z'),
        ('unit_fa_ecef_x', bx, 'Field-aligned unit vector along ECEF-x'),
        ('unit_fa_ecef_y', by, 'Field-aligned unit vector along ECEF-y'),
        ('unit_fa_ecef_z', bz, 'Field-aligned unit vector along ECEF-z'),
        ('unit_mer_ecef_x', mx, 'Meridional unit vector along ECEF-x'),
        ('unit_mer_ecef_y', my, 'Meridional unit vector along ECEF-y'),
        ('unit_mer_ecef_z', mz, 'Meridional unit vector along ECEF-z'),
    ]
    for name, values, label in components:
        inst[name] = values
        # Unit vector components are dimensionless and bounded by [-1, 1].
        inst.meta[name] = {'long_name': label,
                           'desc': label,
                           'label': label,
                           'notes': notes,
                           'axis': label,
                           'value_min': -1.,
                           'value_max': 1.,
                           }
    return
def denoise_image(image, mask=None, shrink_factor=1, p=1, r=3, noise_model='Rician', v=0 ):
"""
Denoise an image using a spatially adaptive filter originally described in
J. V. Manjon, P. Coupe, Luis Marti-Bonmati, D. L. Collins, and M. Robles.
Adaptive Non-Local Means Denoising of MR Images With Spatially Varying
Noise Levels, Journal of Magnetic Resonance Imaging, 31:192-203, June 2010.
ANTsR function: `denoiseImage`
Arguments
---------
image : ANTsImage
scalar image to denoise.
mask : ANTsImage
to limit the denoise region.
shrink_factor : scalar
downsampling level performed within the algorithm.
p : integer
patch radius for local sample.
r : integer
search radius from which to choose extra local samples.
noise_model : string
'Rician' or 'Gaussian'
Returns
-------
ANTsImage
Example
-------
>>> import ants
>>> import numpy as np
>>> image = ants.image_read(ants.get_ants_data('r16'))
>>> # add fairly large salt and pepper noise
>>> imagenoise = image + np.random.randn(*image.shape).astype('float32')*5
>>> imagedenoise = ants.denoise_image(imagenoise, ants.get_mask(image))
"""
inpixeltype = image.pixeltype
outimage = image.clone('float')
mydim = image.dimension
if mask is None:
myargs = {
'd': mydim,
'i': image,
'n': noise_model,
's': int(shrink_factor),
'p': p,
'r': r,
'o': outimage,
'v': v
}
else :
myargs = {
'd': mydim,
'i': image,
'n': noise_model,
'x': mask.clone('unsigned char'),
's': int(shrink_factor),
'p': p,
'r': r,
'o': outimage,
'v': v
}
processed_args = pargs._int_antsProcessArguments(myargs)
libfn = utils.get_lib_fn('DenoiseImage')
libfn(processed_args)
return outimage.clone(inpixeltype) | Denoise an image using a spatially adaptive filter originally described in
J. V. Manjon, P. Coupe, Luis Marti-Bonmati, D. L. Collins, and M. Robles.
Adaptive Non-Local Means Denoising of MR Images With Spatially Varying
Noise Levels, Journal of Magnetic Resonance Imaging, 31:192-203, June 2010.
ANTsR function: `denoiseImage`
Arguments
---------
image : ANTsImage
scalar image to denoise.
mask : ANTsImage
to limit the denoise region.
shrink_factor : scalar
downsampling level performed within the algorithm.
p : integer
patch radius for local sample.
r : integer
search radius from which to choose extra local samples.
noise_model : string
'Rician' or 'Gaussian'
Returns
-------
ANTsImage
Example
-------
>>> import ants
>>> import numpy as np
>>> image = ants.image_read(ants.get_ants_data('r16'))
>>> # add fairly large salt and pepper noise
>>> imagenoise = image + np.random.randn(*image.shape).astype('float32')*5
>>> imagedenoise = ants.denoise_image(imagenoise, ants.get_mask(image)) | Below is the instruction that describes the task:
### Input:
Denoise an image using a spatially adaptive filter originally described in
J. V. Manjon, P. Coupe, Luis Marti-Bonmati, D. L. Collins, and M. Robles.
Adaptive Non-Local Means Denoising of MR Images With Spatially Varying
Noise Levels, Journal of Magnetic Resonance Imaging, 31:192-203, June 2010.
ANTsR function: `denoiseImage`
Arguments
---------
image : ANTsImage
scalar image to denoise.
mask : ANTsImage
to limit the denoise region.
shrink_factor : scalar
downsampling level performed within the algorithm.
p : integer
patch radius for local sample.
r : integer
search radius from which to choose extra local samples.
noise_model : string
'Rician' or 'Gaussian'
Returns
-------
ANTsImage
Example
-------
>>> import ants
>>> import numpy as np
>>> image = ants.image_read(ants.get_ants_data('r16'))
>>> # add fairly large salt and pepper noise
>>> imagenoise = image + np.random.randn(*image.shape).astype('float32')*5
>>> imagedenoise = ants.denoise_image(imagenoise, ants.get_mask(image))
### Response:
def denoise_image(image, mask=None, shrink_factor=1, p=1, r=3, noise_model='Rician', v=0):
    """
    Denoise an image using a spatially adaptive filter originally described in
    J. V. Manjon, P. Coupe, Luis Marti-Bonmati, D. L. Collins, and M. Robles.
    Adaptive Non-Local Means Denoising of MR Images With Spatially Varying
    Noise Levels, Journal of Magnetic Resonance Imaging, 31:192-203, June 2010.

    ANTsR function: `denoiseImage`

    Arguments
    ---------
    image : ANTsImage
        scalar image to denoise.
    mask : ANTsImage
        to limit the denoise region.
    shrink_factor : scalar
        downsampling level performed within the algorithm.
    p : integer
        patch radius for local sample.
    r : integer
        search radius from which to choose extra local samples.
    noise_model : string
        'Rician' or 'Gaussian'

    Returns
    -------
    ANTsImage

    Example
    -------
    >>> import ants
    >>> import numpy as np
    >>> image = ants.image_read(ants.get_ants_data('r16'))
    >>> # add fairly large salt and pepper noise
    >>> imagenoise = image + np.random.randn(*image.shape).astype('float32')*5
    >>> imagedenoise = ants.denoise_image(imagenoise, ants.get_mask(image))
    """
    # Remember the caller's pixel type; the filter runs in float precision
    # and the result is cast back on return.
    inpixeltype = image.pixeltype
    outimage = image.clone('float')

    # Single-letter keys mirror the DenoiseImage command-line flags.
    # The original duplicated this entire dict for the masked/unmasked
    # cases; the mask flag is simply optional.
    myargs = {
        'd': image.dimension,
        'i': image,
        'n': noise_model,
        's': int(shrink_factor),
        'p': p,
        'r': r,
        'o': outimage,
        'v': v
    }
    if mask is not None:
        # Restrict denoising to the mask region; ANTs expects uchar masks.
        myargs['x'] = mask.clone('unsigned char')

    processed_args = pargs._int_antsProcessArguments(myargs)
    libfn = utils.get_lib_fn('DenoiseImage')
    # The library call writes its result into outimage in place.
    libfn(processed_args)
    return outimage.clone(inpixeltype)
def param_show(param=None):
'''
Show params of varnish cache
CLI Example:
.. code-block:: bash
salt '*' varnish.param_show param
'''
ret = _run_varnishadm('param.show', [param])
if ret['retcode']:
return False
else:
result = {}
for line in ret['stdout'].split('\n'):
m = re.search(r'^(\w+)\s+(.*)$', line)
result[m.group(1)] = m.group(2)
if param:
# When we ask to varnishadm for a specific param, it gives full
# info on what that parameter is, so we just process the first
# line and we get out of the loop
break
return result | Show params of varnish cache
CLI Example:
.. code-block:: bash
salt '*' varnish.param_show param | Below is the instruction that describes the task:
### Input:
Show params of varnish cache
CLI Example:
.. code-block:: bash
salt '*' varnish.param_show param
### Response:
def param_show(param=None):
    '''
    Show params of varnish cache

    CLI Example:

    .. code-block:: bash

        salt '*' varnish.param_show param
    '''
    ret = _run_varnishadm('param.show', [param])
    if ret['retcode']:
        # Non-zero retcode from varnishadm means the command failed.
        return False
    result = {}
    for line in ret['stdout'].split('\n'):
        m = re.search(r'^(\w+)\s+(.*)$', line)
        # Bug fix: skip lines that do not look like "name value".
        # split('\n') yields a trailing empty string, and varnishadm's
        # per-param output contains description lines; the original code
        # called m.group() on None and raised AttributeError.
        if not m:
            continue
        result[m.group(1)] = m.group(2)
        if param:
            # When we ask varnishadm for a specific param, it gives full
            # info on what that parameter is, so we just process the first
            # line and we get out of the loop
            break
    return result
def _replace_placeholder(sql_statement, variable):
"""
Return the string obtained by replacing the specified placeholders by
its corresponding value.
@param sql_statement: the string expression of a SQL statement to
replace placeholders with their corresponding values.
@param variable: the variable to use to replace the corresponding
placeholder(s) in the SQL statement.
* ``name``: name of the variable.
* ``type``: an instance of ``PlaceholderType``.
* ``value``: the value of this variable to replace the corresponding
placeholder(s) of this variable in the SQL statement.
@return: a string expression of the SQL statement where the
paceholders of the specified variable have been replace by the
value of this variable, depending on the type of this varialble.
"""
(variable_name, variable_type, variable_value) = variable
sql_value = RdbmsConnection._expand_placeholder_value(variable_value) if variable_type == PlaceholderType.simple_list \
else ','.join([ '(%s)' % RdbmsConnection._expand_placeholder_value(v) for v in variable_value ])
return re.sub(PATTERN_SQL_PLACEHOLDER_EXPRESSIONS[variable_type] % variable_name, sql_value, sql_statement) | Return the string obtained by replacing the specified placeholders by
its corresponding value.
@param sql_statement: the string expression of a SQL statement to
replace placeholders with their corresponding values.
@param variable: the variable to use to replace the corresponding
placeholder(s) in the SQL statement.
* ``name``: name of the variable.
* ``type``: an instance of ``PlaceholderType``.
* ``value``: the value of this variable to replace the corresponding
placeholder(s) of this variable in the SQL statement.
@return: a string expression of the SQL statement where the
paceholders of the specified variable have been replace by the
value of this variable, depending on the type of this variable. | Below is the instruction that describes the task:
### Input:
Return the string obtained by replacing the specified placeholders by
its corresponding value.
@param sql_statement: the string expression of a SQL statement to
replace placeholders with their corresponding values.
@param variable: the variable to use to replace the corresponding
placeholder(s) in the SQL statement.
* ``name``: name of the variable.
* ``type``: an instance of ``PlaceholderType``.
* ``value``: the value of this variable to replace the corresponding
placeholder(s) of this variable in the SQL statement.
@return: a string expression of the SQL statement where the
paceholders of the specified variable have been replace by the
value of this variable, depending on the type of this varialble.
### Response:
def _replace_placeholder(sql_statement, variable):
    """
    Return the string obtained by replacing the specified placeholders by
    their corresponding value.

    @param sql_statement: the string expression of a SQL statement to
        replace placeholders with their corresponding values.
    @param variable: the variable to use to replace the corresponding
        placeholder(s) in the SQL statement.
        * ``name``: name of the variable.
        * ``type``: an instance of ``PlaceholderType``.
        * ``value``: the value of this variable to replace the corresponding
          placeholder(s) of this variable in the SQL statement.

    @return: a string expression of the SQL statement where the
        placeholders of the specified variable have been replaced by the
        value of this variable, depending on the type of this variable.
    """
    (variable_name, variable_type, variable_value) = variable
    # Render the value according to its placeholder type: a simple list
    # expands to "v1,v2,...", anything else expands to "(v1,...),(v2,...)"
    # suitable for a multi-row VALUES clause.
    if variable_type == PlaceholderType.simple_list:
        sql_value = RdbmsConnection._expand_placeholder_value(variable_value)
    else:
        sql_value = ','.join(['(%s)' % RdbmsConnection._expand_placeholder_value(v)
                              for v in variable_value])
    # Bug fix: use a callable replacement so backslashes and group
    # references (e.g. "\1") inside the rendered SQL value are inserted
    # literally instead of being interpreted by re.sub's repl-string
    # escape processing (which can raise re.error or corrupt the SQL).
    return re.sub(PATTERN_SQL_PLACEHOLDER_EXPRESSIONS[variable_type] % variable_name,
                  lambda match: sql_value,
                  sql_statement)
def _choose_port(self):
"""
Return a port number from 5000-5999 based on the environment name
to be used as a default when the user hasn't selected one.
"""
# instead of random let's base it on the name chosen (and the site name)
return 5000 + unpack('Q',
sha((self.name + self.site_name)
.decode('ascii')).digest()[:8])[0] % 1000 | Return a port number from 5000-5999 based on the environment name
to be used as a default when the user hasn't selected one. | Below is the instruction that describes the task:
### Input:
Return a port number from 5000-5999 based on the environment name
to be used as a default when the user hasn't selected one.
### Response:
def _choose_port(self):
"""
Return a port number from 5000-5999 based on the environment name
to be used as a default when the user hasn't selected one.
"""
# instead of random let's base it on the name chosen (and the site name)
return 5000 + unpack('Q',
sha((self.name + self.site_name)
.decode('ascii')).digest()[:8])[0] % 1000 |
def add_notes(self, notes):
"""Feed notes to self.add_note.
The notes can either be an other NoteContainer, a list of Note
objects or strings or a list of lists formatted like this:
>>> notes = [['C', 5], ['E', 5], ['G', 6]]
or even:
>>> notes = [['C', 5, {'volume': 20}], ['E', 6, {'volume': 20}]]
"""
if hasattr(notes, 'notes'):
for x in notes.notes:
self.add_note(x)
return self.notes
elif hasattr(notes, 'name'):
self.add_note(notes)
return self.notes
elif type(notes) == str:
self.add_note(notes)
return self.notes
for x in notes:
if type(x) == list and len(x) != 1:
if len(x) == 2:
self.add_note(x[0], x[1])
else:
self.add_note(x[0], x[1], x[2])
else:
self.add_note(x)
return self.notes | Feed notes to self.add_note.
The notes can either be an other NoteContainer, a list of Note
objects or strings or a list of lists formatted like this:
>>> notes = [['C', 5], ['E', 5], ['G', 6]]
or even:
>>> notes = [['C', 5, {'volume': 20}], ['E', 6, {'volume': 20}]] | Below is the instruction that describes the task:
### Input:
Feed notes to self.add_note.
The notes can either be an other NoteContainer, a list of Note
objects or strings or a list of lists formatted like this:
>>> notes = [['C', 5], ['E', 5], ['G', 6]]
or even:
>>> notes = [['C', 5, {'volume': 20}], ['E', 6, {'volume': 20}]]
### Response:
def add_notes(self, notes):
    """Feed notes to self.add_note.

    The notes can either be an other NoteContainer, a list of Note
    objects or strings or a list of lists formatted like this:
    >>> notes = [['C', 5], ['E', 5], ['G', 6]]
    or even:
    >>> notes = [['C', 5, {'volume': 20}], ['E', 6, {'volume': 20}]]
    """
    # Another NoteContainer: feed each of its Note objects through.
    if hasattr(notes, 'notes'):
        for note in notes.notes:
            self.add_note(note)
        return self.notes
    # A single Note object (detected by its 'name' attribute).
    if hasattr(notes, 'name'):
        self.add_note(notes)
        return self.notes
    # A single note name such as 'C'.
    # (isinstance instead of type() == str: idiomatic and also accepts
    # str subclasses.)
    if isinstance(notes, str):
        self.add_note(notes)
        return self.notes
    # Otherwise a list; each entry may be a string, a Note, or a
    # [name, octave] / [name, octave, dynamics] list.
    for entry in notes:
        if isinstance(entry, list) and len(entry) != 1:
            if len(entry) == 2:
                self.add_note(entry[0], entry[1])
            else:
                self.add_note(entry[0], entry[1], entry[2])
        else:
            self.add_note(entry)
    return self.notes
def rime_solver(slvr_cfg):
""" Factory function that produces a RIME solver """
from montblanc.impl.rime.tensorflow.RimeSolver import RimeSolver
return RimeSolver(slvr_cfg) | Factory function that produces a RIME solver | Below is the instruction that describes the task:
### Input:
Factory function that produces a RIME solver
### Response:
def rime_solver(slvr_cfg):
    """Factory function that produces a RIME solver."""
    # Deferred import: the tensorflow-backed solver module is only loaded
    # when a solver is actually constructed.
    from montblanc.impl.rime.tensorflow.RimeSolver import RimeSolver
    solver = RimeSolver(slvr_cfg)
    return solver
def _select(self, tree, allow_select_scan):
""" Run a SELECT statement """
tablename = tree.table
desc = self.describe(tablename, require=True)
kwargs = {}
if tree.consistent:
kwargs["consistent"] = True
visitor = Visitor(self.reserved_words)
selection = SelectionExpression.from_selection(tree.attrs)
if selection.is_count:
kwargs["select"] = "COUNT"
if tree.keys_in:
if tree.limit:
raise SyntaxError("Cannot use LIMIT with KEYS IN")
elif tree.using:
raise SyntaxError("Cannot use USING with KEYS IN")
elif tree.order:
raise SyntaxError("Cannot use DESC/ASC with KEYS IN")
elif tree.where:
raise SyntaxError("Cannot use WHERE with KEYS IN")
keys = list(self._iter_where_in(tree))
kwargs["attributes"] = selection.build(visitor)
kwargs["alias"] = visitor.attribute_names
return self.connection.batch_get(tablename, keys=keys, **kwargs)
if tree.limit:
if tree.scan_limit:
kwargs["limit"] = Limit(
scan_limit=resolve(tree.scan_limit[2]),
item_limit=resolve(tree.limit[1]),
strict=True,
)
else:
kwargs["limit"] = Limit(item_limit=resolve(tree.limit[1]), strict=True)
elif tree.scan_limit:
kwargs["limit"] = Limit(scan_limit=resolve(tree.scan_limit[2]))
(action, query_kwargs, index) = self._build_query(desc, tree, visitor)
if action == "scan" and not allow_select_scan:
raise SyntaxError(
"No index found for query. Please use a SCAN query, or "
"set allow_select_scan=True\nopt allow_select_scan true"
)
order_by = None
if tree.order_by:
order_by = tree.order_by[0]
reverse = tree.order == "DESC"
if tree.order:
if action == "scan" and not tree.order_by:
raise SyntaxError(
"No index found for query, "
"cannot use ASC or DESC without "
"ORDER BY <field>"
)
if action == "query":
if order_by is None or order_by == index.range_key:
kwargs["desc"] = reverse
kwargs.update(query_kwargs)
# This is a special case for when we're querying an index and selecting
# fields that aren't projected into the index.
# We will change the query to only fetch the primary keys, and then
# fill in the selected attributes after the fact.
fetch_attrs_after = False
if index is not None and not index.projects_all_attributes(
selection.all_fields
):
kwargs["attributes"] = [
visitor.get_field(a) for a in desc.primary_key_attributes
]
fetch_attrs_after = True
else:
kwargs["attributes"] = selection.build(visitor)
kwargs["expr_values"] = visitor.expression_values
kwargs["alias"] = visitor.attribute_names
method = getattr(self.connection, action + "2")
result = method(tablename, **kwargs)
# If the queried index didn't project the selected attributes, we need
# to do a BatchGetItem to fetch all the data.
if fetch_attrs_after:
if not isinstance(result, list):
result = list(result)
# If no results, no need to batch_get
if not result:
return result
visitor = Visitor(self.reserved_words)
kwargs = {"keys": [desc.primary_key(item) for item in result]}
kwargs["attributes"] = selection.build(visitor)
kwargs["alias"] = visitor.attribute_names
result = self.connection.batch_get(tablename, **kwargs)
def order(items):
""" Sort the items by the specified keys """
if order_by is None:
return items
if index is None or order_by != index.range_key:
if not isinstance(items, list):
items = list(items)
items.sort(key=lambda x: x.get(order_by), reverse=reverse)
return items
# Save the data to a file
if tree.save_file:
if selection.is_count:
raise Exception("Cannot use count(*) with SAVE")
count = 0
result = order(selection.convert(item, True) for item in result)
filename = tree.save_file[0]
if filename[0] in ['"', "'"]:
filename = unwrap(filename)
# If it's still an iterator, convert to a list so we can iterate
# multiple times.
if not isinstance(result, list):
result = list(result)
remainder, ext = os.path.splitext(filename)
if ext.lower() in [".gz", ".gzip"]:
ext = os.path.splitext(remainder)[1]
opened = gzip.open(filename, "wb")
else:
opened = open(filename, "wb")
if ext.lower() == ".csv":
if selection.all_keys:
headers = selection.all_keys
else:
# Have to do this to get all the headers :(
result = list(result)
all_headers = set()
for item in result:
all_headers.update(item.keys())
headers = list(all_headers)
with opened as ofile:
writer = csv.DictWriter(
ofile, fieldnames=headers, extrasaction="ignore"
)
writer.writeheader()
for item in result:
count += 1
writer.writerow(item)
elif ext.lower() == ".json":
with opened as ofile:
for item in result:
count += 1
ofile.write(self._encoder.encode(item))
ofile.write("\n")
else:
with opened as ofile:
for item in result:
count += 1
pickle.dump(item, ofile)
return count
elif not selection.is_count:
result = order(selection.convert(item) for item in result)
return result | Run a SELECT statement | Below is the instruction that describes the task:
### Input:
Run a SELECT statement
### Response:
def _select(self, tree, allow_select_scan):
    """ Run a SELECT statement

    Builds and dispatches a DynamoDB query, scan, or batch_get from the
    parsed statement, optionally re-ordering client-side and optionally
    writing results to a file (SAVE clause).

    Parameters
    ----------
    tree : parse-tree node for the SELECT statement (fields such as
        ``table``, ``attrs``, ``keys_in``, ``limit`` come from the grammar)
    allow_select_scan : bool
        When False, a SELECT that cannot be served by an index raises
        instead of silently falling back to a full table scan.

    Returns
    -------
    The query results, a COUNT result, or -- when a SAVE clause is
    present -- the number of records written to the file.
    """
    tablename = tree.table
    desc = self.describe(tablename, require=True)
    kwargs = {}
    if tree.consistent:
        kwargs["consistent"] = True
    visitor = Visitor(self.reserved_words)
    selection = SelectionExpression.from_selection(tree.attrs)
    if selection.is_count:
        kwargs["select"] = "COUNT"
    # KEYS IN becomes a BatchGetItem, which supports none of the other
    # query modifiers, so reject them explicitly.
    if tree.keys_in:
        if tree.limit:
            raise SyntaxError("Cannot use LIMIT with KEYS IN")
        elif tree.using:
            raise SyntaxError("Cannot use USING with KEYS IN")
        elif tree.order:
            raise SyntaxError("Cannot use DESC/ASC with KEYS IN")
        elif tree.where:
            raise SyntaxError("Cannot use WHERE with KEYS IN")
        keys = list(self._iter_where_in(tree))
        kwargs["attributes"] = selection.build(visitor)
        kwargs["alias"] = visitor.attribute_names
        return self.connection.batch_get(tablename, keys=keys, **kwargs)
    # LIMIT caps returned items; SCAN LIMIT caps items examined. They may
    # be combined; ``strict`` is only set when an item limit is present.
    if tree.limit:
        if tree.scan_limit:
            kwargs["limit"] = Limit(
                scan_limit=resolve(tree.scan_limit[2]),
                item_limit=resolve(tree.limit[1]),
                strict=True,
            )
        else:
            kwargs["limit"] = Limit(item_limit=resolve(tree.limit[1]), strict=True)
    elif tree.scan_limit:
        kwargs["limit"] = Limit(scan_limit=resolve(tree.scan_limit[2]))
    # Decide whether this SELECT can be served as an index query ("query")
    # or must fall back to a table scan ("scan").
    (action, query_kwargs, index) = self._build_query(desc, tree, visitor)
    if action == "scan" and not allow_select_scan:
        raise SyntaxError(
            "No index found for query. Please use a SCAN query, or "
            "set allow_select_scan=True\nopt allow_select_scan true"
        )
    order_by = None
    if tree.order_by:
        order_by = tree.order_by[0]
    reverse = tree.order == "DESC"
    if tree.order:
        # A bare ASC/DESC is meaningless for a scan unless the user also
        # named the field to sort on.
        if action == "scan" and not tree.order_by:
            raise SyntaxError(
                "No index found for query, "
                "cannot use ASC or DESC without "
                "ORDER BY <field>"
            )
    if action == "query":
        # Let DynamoDB do the ordering when sorting on the index range key.
        if order_by is None or order_by == index.range_key:
            kwargs["desc"] = reverse
    kwargs.update(query_kwargs)
    # This is a special case for when we're querying an index and selecting
    # fields that aren't projected into the index.
    # We will change the query to only fetch the primary keys, and then
    # fill in the selected attributes after the fact.
    fetch_attrs_after = False
    if index is not None and not index.projects_all_attributes(
        selection.all_fields
    ):
        kwargs["attributes"] = [
            visitor.get_field(a) for a in desc.primary_key_attributes
        ]
        fetch_attrs_after = True
    else:
        kwargs["attributes"] = selection.build(visitor)
    kwargs["expr_values"] = visitor.expression_values
    kwargs["alias"] = visitor.attribute_names
    # Dispatch to connection.query2 or connection.scan2.
    method = getattr(self.connection, action + "2")
    result = method(tablename, **kwargs)
    # If the queried index didn't project the selected attributes, we need
    # to do a BatchGetItem to fetch all the data.
    if fetch_attrs_after:
        if not isinstance(result, list):
            result = list(result)
        # If no results, no need to batch_get
        if not result:
            return result
        # Fresh visitor: aliases from the first pass don't apply to the
        # batch_get request.
        visitor = Visitor(self.reserved_words)
        kwargs = {"keys": [desc.primary_key(item) for item in result]}
        kwargs["attributes"] = selection.build(visitor)
        kwargs["alias"] = visitor.attribute_names
        result = self.connection.batch_get(tablename, **kwargs)
    def order(items):
        """ Sort the items by the specified keys """
        if order_by is None:
            return items
        # When DynamoDB already sorted by the index range key there is
        # nothing to do; otherwise sort client-side.
        if index is None or order_by != index.range_key:
            if not isinstance(items, list):
                items = list(items)
            items.sort(key=lambda x: x.get(order_by), reverse=reverse)
        return items
    # Save the data to a file
    if tree.save_file:
        if selection.is_count:
            raise Exception("Cannot use count(*) with SAVE")
        count = 0
        result = order(selection.convert(item, True) for item in result)
        filename = tree.save_file[0]
        if filename[0] in ['"', "'"]:
            filename = unwrap(filename)
        # If it's still an iterator, convert to a list so we can iterate
        # multiple times.
        if not isinstance(result, list):
            result = list(result)
        # A ".gz"/".gzip" suffix means gzip-compress; the inner extension
        # (e.g. data.csv.gz) then selects the output format.
        remainder, ext = os.path.splitext(filename)
        if ext.lower() in [".gz", ".gzip"]:
            ext = os.path.splitext(remainder)[1]
            opened = gzip.open(filename, "wb")
        else:
            opened = open(filename, "wb")
        if ext.lower() == ".csv":
            if selection.all_keys:
                headers = selection.all_keys
            else:
                # Have to do this to get all the headers :(
                result = list(result)
                all_headers = set()
                for item in result:
                    all_headers.update(item.keys())
                headers = list(all_headers)
            with opened as ofile:
                writer = csv.DictWriter(
                    ofile, fieldnames=headers, extrasaction="ignore"
                )
                writer.writeheader()
                for item in result:
                    count += 1
                    writer.writerow(item)
        elif ext.lower() == ".json":
            # Newline-delimited JSON, one record per line.
            with opened as ofile:
                for item in result:
                    count += 1
                    ofile.write(self._encoder.encode(item))
                    ofile.write("\n")
        else:
            # Default format: a stream of pickled records.
            with opened as ofile:
                for item in result:
                    count += 1
                    pickle.dump(item, ofile)
        return count
    elif not selection.is_count:
        result = order(selection.convert(item) for item in result)
    return result
def strip_secrets(qp, matcher, kwlist):
"""
This function will scrub the secrets from a query param string based on the passed in matcher and kwlist.
blah=1&secret=password&valid=true will result in blah=1&secret=<redacted>&valid=true
You can even pass in path query combinations:
/signup?blah=1&secret=password&valid=true will result in /signup?blah=1&secret=<redacted>&valid=true
:param qp: a string representing the query params in URL form (unencoded)
:param matcher: the matcher to use
:param kwlist: the list of keywords to match
:return: a scrubbed query param string
"""
path = None
try:
if qp is None:
return ''
if type(kwlist) is not list:
logger.debug("strip_secrets: bad keyword list")
return qp
# If there are no key=values, then just return
if not '=' in qp:
return qp
if '?' in qp:
path, query = qp.split('?')
else:
query = qp
params = parse.parse_qsl(query, keep_blank_values=True)
redacted = ['<redacted>']
if matcher == 'equals-ignore-case':
for keyword in kwlist:
for index, kv in enumerate(params):
if kv[0].lower() == keyword.lower():
params[index] = (kv[0], redacted)
elif matcher == 'equals':
for keyword in kwlist:
for index, kv in enumerate(params):
if kv[0] == keyword:
params[index] = (kv[0], redacted)
elif matcher == 'contains-ignore-case':
for keyword in kwlist:
for index, kv in enumerate(params):
if keyword.lower() in kv[0].lower():
params[index] = (kv[0], redacted)
elif matcher == 'contains':
for keyword in kwlist:
for index, kv in enumerate(params):
if keyword in kv[0]:
params[index] = (kv[0], redacted)
elif matcher == 'regex':
for regexp in kwlist:
for index, kv in enumerate(params):
if re.match(regexp, kv[0]):
params[index] = (kv[0], redacted)
else:
logger.debug("strip_secrets: unknown matcher")
return qp
if sys.version_info < (3, 0):
result = urllib.urlencode(params, doseq=True)
else:
result = parse.urlencode(params, doseq=True)
query = parse.unquote(result)
if path:
query = path + '?' + query
return query
except:
logger.debug("strip_secrets", exc_info=True) | This function will scrub the secrets from a query param string based on the passed in matcher and kwlist.
blah=1&secret=password&valid=true will result in blah=1&secret=<redacted>&valid=true
You can even pass in path query combinations:
/signup?blah=1&secret=password&valid=true will result in /signup?blah=1&secret=<redacted>&valid=true
:param qp: a string representing the query params in URL form (unencoded)
:param matcher: the matcher to use
:param kwlist: the list of keywords to match
:return: a scrubbed query param string | Below is the the instruction that describes the task:
### Input:
This function will scrub the secrets from a query param string based on the passed in matcher and kwlist.
blah=1&secret=password&valid=true will result in blah=1&secret=<redacted>&valid=true
You can even pass in path query combinations:
/signup?blah=1&secret=password&valid=true will result in /signup?blah=1&secret=<redacted>&valid=true
:param qp: a string representing the query params in URL form (unencoded)
:param matcher: the matcher to use
:param kwlist: the list of keywords to match
:return: a scrubbed query param string
### Response:
def strip_secrets(qp, matcher, kwlist):
"""
This function will scrub the secrets from a query param string based on the passed in matcher and kwlist.
blah=1&secret=password&valid=true will result in blah=1&secret=<redacted>&valid=true
You can even pass in path query combinations:
/signup?blah=1&secret=password&valid=true will result in /signup?blah=1&secret=<redacted>&valid=true
:param qp: a string representing the query params in URL form (unencoded)
:param matcher: the matcher to use
:param kwlist: the list of keywords to match
:return: a scrubbed query param string
"""
path = None
try:
if qp is None:
return ''
if type(kwlist) is not list:
logger.debug("strip_secrets: bad keyword list")
return qp
# If there are no key=values, then just return
if not '=' in qp:
return qp
if '?' in qp:
path, query = qp.split('?')
else:
query = qp
params = parse.parse_qsl(query, keep_blank_values=True)
redacted = ['<redacted>']
if matcher == 'equals-ignore-case':
for keyword in kwlist:
for index, kv in enumerate(params):
if kv[0].lower() == keyword.lower():
params[index] = (kv[0], redacted)
elif matcher == 'equals':
for keyword in kwlist:
for index, kv in enumerate(params):
if kv[0] == keyword:
params[index] = (kv[0], redacted)
elif matcher == 'contains-ignore-case':
for keyword in kwlist:
for index, kv in enumerate(params):
if keyword.lower() in kv[0].lower():
params[index] = (kv[0], redacted)
elif matcher == 'contains':
for keyword in kwlist:
for index, kv in enumerate(params):
if keyword in kv[0]:
params[index] = (kv[0], redacted)
elif matcher == 'regex':
for regexp in kwlist:
for index, kv in enumerate(params):
if re.match(regexp, kv[0]):
params[index] = (kv[0], redacted)
else:
logger.debug("strip_secrets: unknown matcher")
return qp
if sys.version_info < (3, 0):
result = urllib.urlencode(params, doseq=True)
else:
result = parse.urlencode(params, doseq=True)
query = parse.unquote(result)
if path:
query = path + '?' + query
return query
except:
logger.debug("strip_secrets", exc_info=True) |
def start_server(self, datacenter_id, server_id):
"""
Starts the server.
:param datacenter_id: The unique ID of the data center.
:type datacenter_id: ``str``
:param server_id: The unique ID of the server.
:type server_id: ``str``
"""
response = self._perform_request(
url='/datacenters/%s/servers/%s/start' % (
datacenter_id,
server_id),
method='POST-ACTION')
return response | Starts the server.
:param datacenter_id: The unique ID of the data center.
:type datacenter_id: ``str``
:param server_id: The unique ID of the server.
:type server_id: ``str`` | Below is the the instruction that describes the task:
### Input:
Starts the server.
:param datacenter_id: The unique ID of the data center.
:type datacenter_id: ``str``
:param server_id: The unique ID of the server.
:type server_id: ``str``
### Response:
def start_server(self, datacenter_id, server_id):
"""
Starts the server.
:param datacenter_id: The unique ID of the data center.
:type datacenter_id: ``str``
:param server_id: The unique ID of the server.
:type server_id: ``str``
"""
response = self._perform_request(
url='/datacenters/%s/servers/%s/start' % (
datacenter_id,
server_id),
method='POST-ACTION')
return response |
def get_instance(self, payload):
"""
Build an instance of TriggerInstance
:param dict payload: Payload response from the API
:returns: twilio.rest.api.v2010.account.usage.trigger.TriggerInstance
:rtype: twilio.rest.api.v2010.account.usage.trigger.TriggerInstance
"""
return TriggerInstance(self._version, payload, account_sid=self._solution['account_sid'], ) | Build an instance of TriggerInstance
:param dict payload: Payload response from the API
:returns: twilio.rest.api.v2010.account.usage.trigger.TriggerInstance
:rtype: twilio.rest.api.v2010.account.usage.trigger.TriggerInstance | Below is the the instruction that describes the task:
### Input:
Build an instance of TriggerInstance
:param dict payload: Payload response from the API
:returns: twilio.rest.api.v2010.account.usage.trigger.TriggerInstance
:rtype: twilio.rest.api.v2010.account.usage.trigger.TriggerInstance
### Response:
def get_instance(self, payload):
"""
Build an instance of TriggerInstance
:param dict payload: Payload response from the API
:returns: twilio.rest.api.v2010.account.usage.trigger.TriggerInstance
:rtype: twilio.rest.api.v2010.account.usage.trigger.TriggerInstance
"""
return TriggerInstance(self._version, payload, account_sid=self._solution['account_sid'], ) |
def parse(self):
"""Process file."""
self._fd = None
try:
self._parse_real()
finally:
if self._fd:
self._fd.close()
self._fd = None | Process file. | Below is the the instruction that describes the task:
### Input:
Process file.
### Response:
def parse(self):
"""Process file."""
self._fd = None
try:
self._parse_real()
finally:
if self._fd:
self._fd.close()
self._fd = None |
def hexdigest(self):
"""Terminate and return digest in HEX form.
Like digest() except the digest is returned as a string of
length 32, containing only hexadecimal digits. This may be
used to exchange the value safely in email or other non-
binary environments.
"""
d = map(None, self.digest())
d = map(ord, d)
d = map(lambda x:"%02x" % x, d)
d = string.join(d, '')
return d | Terminate and return digest in HEX form.
Like digest() except the digest is returned as a string of
length 32, containing only hexadecimal digits. This may be
used to exchange the value safely in email or other non-
binary environments. | Below is the the instruction that describes the task:
### Input:
Terminate and return digest in HEX form.
Like digest() except the digest is returned as a string of
length 32, containing only hexadecimal digits. This may be
used to exchange the value safely in email or other non-
binary environments.
### Response:
def hexdigest(self):
"""Terminate and return digest in HEX form.
Like digest() except the digest is returned as a string of
length 32, containing only hexadecimal digits. This may be
used to exchange the value safely in email or other non-
binary environments.
"""
d = map(None, self.digest())
d = map(ord, d)
d = map(lambda x:"%02x" % x, d)
d = string.join(d, '')
return d |
def _create_cached_db(
db_path,
tables,
version=1):
"""
Either create or retrieve sqlite database.
Parameters
--------
db_path : str
Path to sqlite3 database file
tables : dict
Dictionary mapping table names to datacache.DatabaseTable objects
version : int, optional
Version acceptable as cached data.
Returns sqlite3 connection
"""
require_string(db_path, "db_path")
require_iterable_of(tables, DatabaseTable)
require_integer(version, "version")
# if the database file doesn't already exist and we encounter an error
# later, delete the file before raising an exception
delete_on_error = not exists(db_path)
# if the database already exists, contains all the table
# names and has the right version, then just return it
db = Database(db_path)
# make sure to delete the database file in case anything goes wrong
# to avoid leaving behind an empty DB
table_names = [table.name for table in tables]
try:
if db.has_tables(table_names) and \
db.has_version() and \
db.version() == version:
logger.info("Found existing table in database %s", db_path)
else:
if len(db.table_names()) > 0:
logger.info(
"Dropping tables from database %s: %s",
db_path,
", ".join(db.table_names()))
db.drop_all_tables()
logger.info(
"Creating database %s containing: %s",
db_path,
", ".join(table_names))
db.create(tables, version)
except:
logger.warning(
"Failed to create tables %s in database %s",
table_names,
db_path)
db.close()
if delete_on_error:
remove(db_path)
raise
return db.connection | Either create or retrieve sqlite database.
Parameters
--------
db_path : str
Path to sqlite3 database file
tables : dict
Dictionary mapping table names to datacache.DatabaseTable objects
version : int, optional
Version acceptable as cached data.
Returns sqlite3 connection | Below is the the instruction that describes the task:
### Input:
Either create or retrieve sqlite database.
Parameters
--------
db_path : str
Path to sqlite3 database file
tables : dict
Dictionary mapping table names to datacache.DatabaseTable objects
version : int, optional
Version acceptable as cached data.
Returns sqlite3 connection
### Response:
def _create_cached_db(
db_path,
tables,
version=1):
"""
Either create or retrieve sqlite database.
Parameters
--------
db_path : str
Path to sqlite3 database file
tables : dict
Dictionary mapping table names to datacache.DatabaseTable objects
version : int, optional
Version acceptable as cached data.
Returns sqlite3 connection
"""
require_string(db_path, "db_path")
require_iterable_of(tables, DatabaseTable)
require_integer(version, "version")
# if the database file doesn't already exist and we encounter an error
# later, delete the file before raising an exception
delete_on_error = not exists(db_path)
# if the database already exists, contains all the table
# names and has the right version, then just return it
db = Database(db_path)
# make sure to delete the database file in case anything goes wrong
# to avoid leaving behind an empty DB
table_names = [table.name for table in tables]
try:
if db.has_tables(table_names) and \
db.has_version() and \
db.version() == version:
logger.info("Found existing table in database %s", db_path)
else:
if len(db.table_names()) > 0:
logger.info(
"Dropping tables from database %s: %s",
db_path,
", ".join(db.table_names()))
db.drop_all_tables()
logger.info(
"Creating database %s containing: %s",
db_path,
", ".join(table_names))
db.create(tables, version)
except:
logger.warning(
"Failed to create tables %s in database %s",
table_names,
db_path)
db.close()
if delete_on_error:
remove(db_path)
raise
return db.connection |
def _compute_closed_central_moments(self, central_from_raw_exprs, n_counter, k_counter):
"""
Computes parametric expressions (e.g. in terms of mean, variance, covariances) for all central moments
up to max_order + 1 order.
:param central_from_raw_exprs:
:param n_counter: a list of :class:`~means.core.descriptors.Moment`\s representing central moments
:type n_counter: list[:class:`~means.core.descriptors.Moment`]
:param k_counter: a list of :class:`~means.core.descriptors.Moment`\s representing raw moments
:type k_counter: list[:class:`~means.core.descriptors.Moment`]
:return: a vector of parametric expression for central moments
"""
n_species = len([None for pm in k_counter if pm.order == 1])
covariance_matrix = sp.Matrix(n_species, n_species, lambda x,y: self._get_covariance_symbol(n_counter,x,y))
positive_n_counter = [n for n in n_counter if n.order > 1]
out_mat = [self._compute_one_closed_central_moment(n, covariance_matrix) for n in positive_n_counter ]
return sp.Matrix(out_mat) | Computes parametric expressions (e.g. in terms of mean, variance, covariances) for all central moments
up to max_order + 1 order.
:param central_from_raw_exprs:
:param n_counter: a list of :class:`~means.core.descriptors.Moment`\s representing central moments
:type n_counter: list[:class:`~means.core.descriptors.Moment`]
:param k_counter: a list of :class:`~means.core.descriptors.Moment`\s representing raw moments
:type k_counter: list[:class:`~means.core.descriptors.Moment`]
:return: a vector of parametric expression for central moments | Below is the the instruction that describes the task:
### Input:
Computes parametric expressions (e.g. in terms of mean, variance, covariances) for all central moments
up to max_order + 1 order.
:param central_from_raw_exprs:
:param n_counter: a list of :class:`~means.core.descriptors.Moment`\s representing central moments
:type n_counter: list[:class:`~means.core.descriptors.Moment`]
:param k_counter: a list of :class:`~means.core.descriptors.Moment`\s representing raw moments
:type k_counter: list[:class:`~means.core.descriptors.Moment`]
:return: a vector of parametric expression for central moments
### Response:
def _compute_closed_central_moments(self, central_from_raw_exprs, n_counter, k_counter):
"""
Computes parametric expressions (e.g. in terms of mean, variance, covariances) for all central moments
up to max_order + 1 order.
:param central_from_raw_exprs:
:param n_counter: a list of :class:`~means.core.descriptors.Moment`\s representing central moments
:type n_counter: list[:class:`~means.core.descriptors.Moment`]
:param k_counter: a list of :class:`~means.core.descriptors.Moment`\s representing raw moments
:type k_counter: list[:class:`~means.core.descriptors.Moment`]
:return: a vector of parametric expression for central moments
"""
n_species = len([None for pm in k_counter if pm.order == 1])
covariance_matrix = sp.Matrix(n_species, n_species, lambda x,y: self._get_covariance_symbol(n_counter,x,y))
positive_n_counter = [n for n in n_counter if n.order > 1]
out_mat = [self._compute_one_closed_central_moment(n, covariance_matrix) for n in positive_n_counter ]
return sp.Matrix(out_mat) |
def detail_participants(
self,
participant1: Address,
participant2: Address,
block_identifier: BlockSpecification,
channel_identifier: ChannelID = None,
) -> ParticipantsDetails:
""" Returns a ParticipantsDetails instance with the participants'
channel information.
Note:
For now one of the participants has to be the node_address
"""
if self.node_address not in (participant1, participant2):
raise ValueError('One participant must be the node address')
if self.node_address == participant2:
participant1, participant2 = participant2, participant1
channel_identifier = self._inspect_channel_identifier(
participant1=participant1,
participant2=participant2,
called_by_fn='details_participants',
block_identifier=block_identifier,
channel_identifier=channel_identifier,
)
our_data = self._detail_participant(
channel_identifier=channel_identifier,
participant=participant1,
partner=participant2,
block_identifier=block_identifier,
)
partner_data = self._detail_participant(
channel_identifier=channel_identifier,
participant=participant2,
partner=participant1,
block_identifier=block_identifier,
)
return ParticipantsDetails(our_details=our_data, partner_details=partner_data) | Returns a ParticipantsDetails instance with the participants'
channel information.
Note:
For now one of the participants has to be the node_address | Below is the the instruction that describes the task:
### Input:
Returns a ParticipantsDetails instance with the participants'
channel information.
Note:
For now one of the participants has to be the node_address
### Response:
def detail_participants(
self,
participant1: Address,
participant2: Address,
block_identifier: BlockSpecification,
channel_identifier: ChannelID = None,
) -> ParticipantsDetails:
""" Returns a ParticipantsDetails instance with the participants'
channel information.
Note:
For now one of the participants has to be the node_address
"""
if self.node_address not in (participant1, participant2):
raise ValueError('One participant must be the node address')
if self.node_address == participant2:
participant1, participant2 = participant2, participant1
channel_identifier = self._inspect_channel_identifier(
participant1=participant1,
participant2=participant2,
called_by_fn='details_participants',
block_identifier=block_identifier,
channel_identifier=channel_identifier,
)
our_data = self._detail_participant(
channel_identifier=channel_identifier,
participant=participant1,
partner=participant2,
block_identifier=block_identifier,
)
partner_data = self._detail_participant(
channel_identifier=channel_identifier,
participant=participant2,
partner=participant1,
block_identifier=block_identifier,
)
return ParticipantsDetails(our_details=our_data, partner_details=partner_data) |
def get_mpl_heatmap_axes(dfr, fig, heatmap_gs):
"""Return axis for Matplotlib heatmap."""
# Create heatmap axis
heatmap_axes = fig.add_subplot(heatmap_gs[1, 1])
heatmap_axes.set_xticks(np.linspace(0, dfr.shape[0] - 1, dfr.shape[0]))
heatmap_axes.set_yticks(np.linspace(0, dfr.shape[0] - 1, dfr.shape[0]))
heatmap_axes.grid(False)
heatmap_axes.xaxis.tick_bottom()
heatmap_axes.yaxis.tick_right()
return heatmap_axes | Return axis for Matplotlib heatmap. | Below is the the instruction that describes the task:
### Input:
Return axis for Matplotlib heatmap.
### Response:
def get_mpl_heatmap_axes(dfr, fig, heatmap_gs):
"""Return axis for Matplotlib heatmap."""
# Create heatmap axis
heatmap_axes = fig.add_subplot(heatmap_gs[1, 1])
heatmap_axes.set_xticks(np.linspace(0, dfr.shape[0] - 1, dfr.shape[0]))
heatmap_axes.set_yticks(np.linspace(0, dfr.shape[0] - 1, dfr.shape[0]))
heatmap_axes.grid(False)
heatmap_axes.xaxis.tick_bottom()
heatmap_axes.yaxis.tick_right()
return heatmap_axes |
def halt(self, subid, params=None):
''' /v1/server/halt
POST - account
Halt a virtual machine. This is a hard power off (basically, unplugging
the machine). The data on the machine will not be modified, and you
will still be billed for the machine. To completely delete a
machine, see v1/server/destroy
Link: https://www.vultr.com/api/#server_halt
'''
params = update_params(params, {'SUBID': subid})
return self.request('/v1/server/halt', params, 'POST') | /v1/server/halt
POST - account
Halt a virtual machine. This is a hard power off (basically, unplugging
the machine). The data on the machine will not be modified, and you
will still be billed for the machine. To completely delete a
machine, see v1/server/destroy
Link: https://www.vultr.com/api/#server_halt | Below is the the instruction that describes the task:
### Input:
/v1/server/halt
POST - account
Halt a virtual machine. This is a hard power off (basically, unplugging
the machine). The data on the machine will not be modified, and you
will still be billed for the machine. To completely delete a
machine, see v1/server/destroy
Link: https://www.vultr.com/api/#server_halt
### Response:
def halt(self, subid, params=None):
''' /v1/server/halt
POST - account
Halt a virtual machine. This is a hard power off (basically, unplugging
the machine). The data on the machine will not be modified, and you
will still be billed for the machine. To completely delete a
machine, see v1/server/destroy
Link: https://www.vultr.com/api/#server_halt
'''
params = update_params(params, {'SUBID': subid})
return self.request('/v1/server/halt', params, 'POST') |
def MEASURE(qubit, classical_reg):
"""
Produce a MEASURE instruction.
:param qubit: The qubit to measure.
:param classical_reg: The classical register to measure into, or None.
:return: A Measurement instance.
"""
qubit = unpack_qubit(qubit)
if classical_reg is None:
address = None
elif isinstance(classical_reg, int):
warn("Indexing measurement addresses by integers is deprecated. "
+ "Replacing this with the MemoryReference ro[i] instead.")
address = MemoryReference("ro", classical_reg)
else:
address = unpack_classical_reg(classical_reg)
return Measurement(qubit, address) | Produce a MEASURE instruction.
:param qubit: The qubit to measure.
:param classical_reg: The classical register to measure into, or None.
:return: A Measurement instance. | Below is the the instruction that describes the task:
### Input:
Produce a MEASURE instruction.
:param qubit: The qubit to measure.
:param classical_reg: The classical register to measure into, or None.
:return: A Measurement instance.
### Response:
def MEASURE(qubit, classical_reg):
"""
Produce a MEASURE instruction.
:param qubit: The qubit to measure.
:param classical_reg: The classical register to measure into, or None.
:return: A Measurement instance.
"""
qubit = unpack_qubit(qubit)
if classical_reg is None:
address = None
elif isinstance(classical_reg, int):
warn("Indexing measurement addresses by integers is deprecated. "
+ "Replacing this with the MemoryReference ro[i] instead.")
address = MemoryReference("ro", classical_reg)
else:
address = unpack_classical_reg(classical_reg)
return Measurement(qubit, address) |
def append(self):
"""Index documents onto an existing index"""
target_index = get_index_from_alias(self.alias_name)
if not target_index:
self.replace()
else:
self.index_all(target_index) | Index documents onto an existing index | Below is the the instruction that describes the task:
### Input:
Index documents onto an existing index
### Response:
def append(self):
"""Index documents onto an existing index"""
target_index = get_index_from_alias(self.alias_name)
if not target_index:
self.replace()
else:
self.index_all(target_index) |
def _add_record(table, data, buffer_size):
"""
Prepare and append a Record into its Table; flush to disk if necessary.
"""
fields = table.fields
# remove any keys that aren't relation fields
for invalid_key in set(data).difference([f.name for f in fields]):
del data[invalid_key]
table.append(Record.from_dict(fields, data))
# write if requested and possible
if buffer_size is not None and table.is_attached():
# for now there isn't a public method to get the number of new
# records, so use private members
if (len(table) - 1) - table._last_synced_index > buffer_size:
table.commit() | Prepare and append a Record into its Table; flush to disk if necessary. | Below is the the instruction that describes the task:
### Input:
Prepare and append a Record into its Table; flush to disk if necessary.
### Response:
def _add_record(table, data, buffer_size):
"""
Prepare and append a Record into its Table; flush to disk if necessary.
"""
fields = table.fields
# remove any keys that aren't relation fields
for invalid_key in set(data).difference([f.name for f in fields]):
del data[invalid_key]
table.append(Record.from_dict(fields, data))
# write if requested and possible
if buffer_size is not None and table.is_attached():
# for now there isn't a public method to get the number of new
# records, so use private members
if (len(table) - 1) - table._last_synced_index > buffer_size:
table.commit() |
def get_public_events(self):
"""
:calls: `GET /users/:user/events/public <http://developer.github.com/v3/activity/events>`_
:rtype: :class:`github.PaginatedList.PaginatedList` of :class:`github.Event.Event`
"""
return github.PaginatedList.PaginatedList(
github.Event.Event,
self._requester,
self.url + "/events/public",
None
) | :calls: `GET /users/:user/events/public <http://developer.github.com/v3/activity/events>`_
:rtype: :class:`github.PaginatedList.PaginatedList` of :class:`github.Event.Event` | Below is the the instruction that describes the task:
### Input:
:calls: `GET /users/:user/events/public <http://developer.github.com/v3/activity/events>`_
:rtype: :class:`github.PaginatedList.PaginatedList` of :class:`github.Event.Event`
### Response:
def get_public_events(self):
"""
:calls: `GET /users/:user/events/public <http://developer.github.com/v3/activity/events>`_
:rtype: :class:`github.PaginatedList.PaginatedList` of :class:`github.Event.Event`
"""
return github.PaginatedList.PaginatedList(
github.Event.Event,
self._requester,
self.url + "/events/public",
None
) |
def parse_file(cls, ctxt, fname, key=None, step_addr=None):
"""
Parse a YAML file containing test steps.
:param ctxt: The context object.
:param fname: The name of the file to parse.
:param key: An optional dictionary key. If specified, the
file must be a YAML dictionary, and the referenced
value will be interpreted as a list of steps. If
not provided, the file must be a YAML list, which
will be interpreted as the list of steps.
:param step_addr: The address of the step in the test
configuration. This may be used in the case
of includes, for instance.
:returns: A list of ``Step`` objects.
"""
# Load the YAML file
try:
with open(fname) as f:
step_data = yaml.load(f)
except Exception as exc:
raise ConfigError(
'Failed to read file "%s": %s' % (fname, exc),
step_addr,
)
# Do we have a key?
if key is not None:
if (not isinstance(step_data, collections.Mapping) or
key not in step_data):
raise ConfigError(
'Bad step configuration file "%s": expecting dictionary '
'with key "%s"' % (fname, key),
step_addr,
)
# Extract just the step data
step_data = step_data[key]
# Validate that it's a sequence
if not isinstance(step_data, collections.Sequence):
addr = ('%s[%s]' % (fname, key)) if key is not None else fname
raise ConfigError(
'Bad step configuration sequence at %s: expecting list, '
'not "%s"' % (addr, step_data.__class__.__name__),
step_addr,
)
# OK, assemble the step list and return it
steps = []
for idx, step_conf in enumerate(step_data):
steps.extend(cls.parse_step(
ctxt, StepAddress(fname, idx, key), step_conf))
return steps | Parse a YAML file containing test steps.
:param ctxt: The context object.
:param fname: The name of the file to parse.
:param key: An optional dictionary key. If specified, the
file must be a YAML dictionary, and the referenced
value will be interpreted as a list of steps. If
not provided, the file must be a YAML list, which
will be interpreted as the list of steps.
:param step_addr: The address of the step in the test
configuration. This may be used in the case
of includes, for instance.
:returns: A list of ``Step`` objects. | Below is the the instruction that describes the task:
### Input:
Parse a YAML file containing test steps.
:param ctxt: The context object.
:param fname: The name of the file to parse.
:param key: An optional dictionary key. If specified, the
file must be a YAML dictionary, and the referenced
value will be interpreted as a list of steps. If
not provided, the file must be a YAML list, which
will be interpreted as the list of steps.
:param step_addr: The address of the step in the test
configuration. This may be used in the case
of includes, for instance.
:returns: A list of ``Step`` objects.
### Response:
def parse_file(cls, ctxt, fname, key=None, step_addr=None):
"""
Parse a YAML file containing test steps.
:param ctxt: The context object.
:param fname: The name of the file to parse.
:param key: An optional dictionary key. If specified, the
file must be a YAML dictionary, and the referenced
value will be interpreted as a list of steps. If
not provided, the file must be a YAML list, which
will be interpreted as the list of steps.
:param step_addr: The address of the step in the test
configuration. This may be used in the case
of includes, for instance.
:returns: A list of ``Step`` objects.
"""
# Load the YAML file
try:
with open(fname) as f:
step_data = yaml.load(f)
except Exception as exc:
raise ConfigError(
'Failed to read file "%s": %s' % (fname, exc),
step_addr,
)
# Do we have a key?
if key is not None:
if (not isinstance(step_data, collections.Mapping) or
key not in step_data):
raise ConfigError(
'Bad step configuration file "%s": expecting dictionary '
'with key "%s"' % (fname, key),
step_addr,
)
# Extract just the step data
step_data = step_data[key]
# Validate that it's a sequence
if not isinstance(step_data, collections.Sequence):
addr = ('%s[%s]' % (fname, key)) if key is not None else fname
raise ConfigError(
'Bad step configuration sequence at %s: expecting list, '
'not "%s"' % (addr, step_data.__class__.__name__),
step_addr,
)
# OK, assemble the step list and return it
steps = []
for idx, step_conf in enumerate(step_data):
steps.extend(cls.parse_step(
ctxt, StepAddress(fname, idx, key), step_conf))
return steps |
def get_topology(self, topologyName, callback=None):
"""get topology"""
if callback:
self.topology_watchers[topologyName].append(callback)
else:
topology_path = self.get_topology_path(topologyName)
with open(topology_path) as f:
data = f.read()
topology = Topology()
topology.ParseFromString(data)
return topology | get topology | Below is the the instruction that describes the task:
### Input:
get topology
### Response:
def get_topology(self, topologyName, callback=None):
"""get topology"""
if callback:
self.topology_watchers[topologyName].append(callback)
else:
topology_path = self.get_topology_path(topologyName)
with open(topology_path) as f:
data = f.read()
topology = Topology()
topology.ParseFromString(data)
return topology |
def make_confidence_report_bundled(filepath, train_start=TRAIN_START,
train_end=TRAIN_END, test_start=TEST_START,
test_end=TEST_END, which_set=WHICH_SET,
recipe=RECIPE, report_path=REPORT_PATH,
nb_iter=NB_ITER, base_eps=None,
base_eps_iter=None, base_eps_iter_small=None,
batch_size=BATCH_SIZE):
"""
Load a saved model, gather its predictions, and save a confidence report.
:param filepath: path to model to evaluate
:param train_start: index of first training set example to use
:param train_end: index of last training set example to use
:param test_start: index of first test set example to use
:param test_end: index of last test set example to use
:param which_set: 'train' or 'test'
:param nb_iter: int, number of iterations of attack algorithm
(note that different recipes will use this differently,
for example many will run two attacks, one with nb_iter
iterations and one with 25X more)
:param base_eps: float, epsilon parameter for threat model, on a scale of [0, 1].
Inferred from the dataset if not specified.
:param base_eps_iter: float, a step size used in different ways by different recipes.
Typically the step size for a PGD attack.
Inferred from the dataset if not specified.
:param base_eps_iter_small: float, a second step size for a more fine-grained attack.
Inferred from the dataset if not specified.
:param batch_size: int, batch size
"""
# Avoid circular import
from cleverhans import attack_bundling
if callable(recipe):
run_recipe = recipe
else:
run_recipe = getattr(attack_bundling, recipe)
# Set logging level to see debug information
set_log_level(logging.INFO)
# Create TF session
sess = tf.Session()
assert filepath.endswith('.joblib')
if report_path is None:
report_path = filepath[:-len('.joblib')] + "_bundled_report.joblib"
with sess.as_default():
model = load(filepath)
assert len(model.get_params()) > 0
factory = model.dataset_factory
factory.kwargs['train_start'] = train_start
factory.kwargs['train_end'] = train_end
factory.kwargs['test_start'] = test_start
factory.kwargs['test_end'] = test_end
dataset = factory()
center = dataset.kwargs['center']
if 'max_val' in factory.kwargs:
max_value = factory.kwargs['max_val']
elif hasattr(dataset, 'max_val'):
max_value = dataset.max_val
else:
raise AttributeError("Can't find max_value specification")
min_value = 0. - center * max_value
value_range = max_value - min_value
if 'CIFAR' in str(factory.cls):
if base_eps is None:
base_eps = 8. / 255.
if base_eps_iter is None:
base_eps_iter = 2. / 255.
if base_eps_iter_small is None:
base_eps_iter_small = 1. / 255.
elif 'MNIST' in str(factory.cls):
if base_eps is None:
base_eps = .3
if base_eps_iter is None:
base_eps_iter = .1
base_eps_iter_small = None
else:
# Note that it is not required to specify base_eps_iter_small
if base_eps is None or base_eps_iter is None:
raise NotImplementedError("Not able to infer threat model from " + str(factory.cls))
eps = base_eps * value_range
eps_iter = base_eps_iter * value_range
if base_eps_iter_small is None:
eps_iter_small = None
else:
eps_iter_small = base_eps_iter_small * value_range
clip_min = min_value
clip_max = max_value
x_data, y_data = dataset.get_set(which_set)
assert x_data.max() <= max_value
assert x_data.min() >= min_value
assert eps_iter <= eps
assert eps_iter_small is None or eps_iter_small <= eps
# Different recipes take different arguments.
# For now I don't have an idea for a beautiful unifying framework, so
# we get an if statement.
if recipe == 'random_search_max_confidence_recipe':
# pylint always checks against the default recipe here
# pylint: disable=no-value-for-parameter
run_recipe(sess=sess, model=model, x=x_data, y=y_data, eps=eps,
clip_min=clip_min, clip_max=clip_max, report_path=report_path)
else:
run_recipe(sess=sess, model=model, x=x_data, y=y_data,
nb_classes=dataset.NB_CLASSES, eps=eps, clip_min=clip_min,
clip_max=clip_max, eps_iter=eps_iter, nb_iter=nb_iter,
report_path=report_path, eps_iter_small=eps_iter_small, batch_size=batch_size) | Load a saved model, gather its predictions, and save a confidence report.
:param filepath: path to model to evaluate
:param train_start: index of first training set example to use
:param train_end: index of last training set example to use
:param test_start: index of first test set example to use
:param test_end: index of last test set example to use
:param which_set: 'train' or 'test'
:param nb_iter: int, number of iterations of attack algorithm
(note that different recipes will use this differently,
for example many will run two attacks, one with nb_iter
iterations and one with 25X more)
:param base_eps: float, epsilon parameter for threat model, on a scale of [0, 1].
Inferred from the dataset if not specified.
:param base_eps_iter: float, a step size used in different ways by different recipes.
Typically the step size for a PGD attack.
Inferred from the dataset if not specified.
:param base_eps_iter_small: float, a second step size for a more fine-grained attack.
Inferred from the dataset if not specified.
:param batch_size: int, batch size | Below is the instruction that describes the task:
### Input:
Load a saved model, gather its predictions, and save a confidence report.
:param filepath: path to model to evaluate
:param train_start: index of first training set example to use
:param train_end: index of last training set example to use
:param test_start: index of first test set example to use
:param test_end: index of last test set example to use
:param which_set: 'train' or 'test'
:param nb_iter: int, number of iterations of attack algorithm
(note that different recipes will use this differently,
for example many will run two attacks, one with nb_iter
iterations and one with 25X more)
:param base_eps: float, epsilon parameter for threat model, on a scale of [0, 1].
Inferred from the dataset if not specified.
:param base_eps_iter: float, a step size used in different ways by different recipes.
Typically the step size for a PGD attack.
Inferred from the dataset if not specified.
:param base_eps_iter_small: float, a second step size for a more fine-grained attack.
Inferred from the dataset if not specified.
:param batch_size: int, batch size
### Response:
def make_confidence_report_bundled(filepath, train_start=TRAIN_START,
train_end=TRAIN_END, test_start=TEST_START,
test_end=TEST_END, which_set=WHICH_SET,
recipe=RECIPE, report_path=REPORT_PATH,
nb_iter=NB_ITER, base_eps=None,
base_eps_iter=None, base_eps_iter_small=None,
batch_size=BATCH_SIZE):
"""
Load a saved model, gather its predictions, and save a confidence report.
:param filepath: path to model to evaluate
:param train_start: index of first training set example to use
:param train_end: index of last training set example to use
:param test_start: index of first test set example to use
:param test_end: index of last test set example to use
:param which_set: 'train' or 'test'
:param nb_iter: int, number of iterations of attack algorithm
(note that different recipes will use this differently,
for example many will run two attacks, one with nb_iter
iterations and one with 25X more)
:param base_eps: float, epsilon parameter for threat model, on a scale of [0, 1].
Inferred from the dataset if not specified.
:param base_eps_iter: float, a step size used in different ways by different recipes.
Typically the step size for a PGD attack.
Inferred from the dataset if not specified.
:param base_eps_iter_small: float, a second step size for a more fine-grained attack.
Inferred from the dataset if not specified.
:param batch_size: int, batch size
"""
# Avoid circular import
from cleverhans import attack_bundling
if callable(recipe):
run_recipe = recipe
else:
run_recipe = getattr(attack_bundling, recipe)
# Set logging level to see debug information
set_log_level(logging.INFO)
# Create TF session
sess = tf.Session()
assert filepath.endswith('.joblib')
if report_path is None:
report_path = filepath[:-len('.joblib')] + "_bundled_report.joblib"
with sess.as_default():
model = load(filepath)
assert len(model.get_params()) > 0
factory = model.dataset_factory
factory.kwargs['train_start'] = train_start
factory.kwargs['train_end'] = train_end
factory.kwargs['test_start'] = test_start
factory.kwargs['test_end'] = test_end
dataset = factory()
center = dataset.kwargs['center']
if 'max_val' in factory.kwargs:
max_value = factory.kwargs['max_val']
elif hasattr(dataset, 'max_val'):
max_value = dataset.max_val
else:
raise AttributeError("Can't find max_value specification")
min_value = 0. - center * max_value
value_range = max_value - min_value
if 'CIFAR' in str(factory.cls):
if base_eps is None:
base_eps = 8. / 255.
if base_eps_iter is None:
base_eps_iter = 2. / 255.
if base_eps_iter_small is None:
base_eps_iter_small = 1. / 255.
elif 'MNIST' in str(factory.cls):
if base_eps is None:
base_eps = .3
if base_eps_iter is None:
base_eps_iter = .1
base_eps_iter_small = None
else:
# Note that it is not required to specify base_eps_iter_small
if base_eps is None or base_eps_iter is None:
raise NotImplementedError("Not able to infer threat model from " + str(factory.cls))
eps = base_eps * value_range
eps_iter = base_eps_iter * value_range
if base_eps_iter_small is None:
eps_iter_small = None
else:
eps_iter_small = base_eps_iter_small * value_range
clip_min = min_value
clip_max = max_value
x_data, y_data = dataset.get_set(which_set)
assert x_data.max() <= max_value
assert x_data.min() >= min_value
assert eps_iter <= eps
assert eps_iter_small is None or eps_iter_small <= eps
# Different recipes take different arguments.
# For now I don't have an idea for a beautiful unifying framework, so
# we get an if statement.
if recipe == 'random_search_max_confidence_recipe':
# pylint always checks against the default recipe here
# pylint: disable=no-value-for-parameter
run_recipe(sess=sess, model=model, x=x_data, y=y_data, eps=eps,
clip_min=clip_min, clip_max=clip_max, report_path=report_path)
else:
run_recipe(sess=sess, model=model, x=x_data, y=y_data,
nb_classes=dataset.NB_CLASSES, eps=eps, clip_min=clip_min,
clip_max=clip_max, eps_iter=eps_iter, nb_iter=nb_iter,
report_path=report_path, eps_iter_small=eps_iter_small, batch_size=batch_size) |
def mcons(self, iterable):
"""
Return a new list with all elements of iterable repeatedly cons:ed to the current list.
NB! The elements will be inserted in the reverse order of the iterable.
Runs in O(len(iterable)).
>>> plist([1, 2]).mcons([3, 4])
plist([4, 3, 1, 2])
"""
head = self
for elem in iterable:
head = head.cons(elem)
return head | Return a new list with all elements of iterable repeatedly cons:ed to the current list.
NB! The elements will be inserted in the reverse order of the iterable.
Runs in O(len(iterable)).
>>> plist([1, 2]).mcons([3, 4])
plist([4, 3, 1, 2]) | Below is the instruction that describes the task:
### Input:
Return a new list with all elements of iterable repeatedly cons:ed to the current list.
NB! The elements will be inserted in the reverse order of the iterable.
Runs in O(len(iterable)).
>>> plist([1, 2]).mcons([3, 4])
plist([4, 3, 1, 2])
### Response:
def mcons(self, iterable):
"""
Return a new list with all elements of iterable repeatedly cons:ed to the current list.
NB! The elements will be inserted in the reverse order of the iterable.
Runs in O(len(iterable)).
>>> plist([1, 2]).mcons([3, 4])
plist([4, 3, 1, 2])
"""
head = self
for elem in iterable:
head = head.cons(elem)
return head |
def render(template, **data):
"""shortcut to render data with `template`. Just add exception
catch to `renderer.render`"""
try:
return renderer.render(template, **data)
except JinjaTemplateNotFound as e:
logger.error(e.__doc__ + ', Template: %r' % template)
sys.exit(e.exit_code) | shortcut to render data with `template`. Just add exception
catch to `renderer.render` | Below is the instruction that describes the task:
### Input:
shortcut to render data with `template`. Just add exception
catch to `renderer.render`
### Response:
def render(template, **data):
"""shortcut to render data with `template`. Just add exception
catch to `renderer.render`"""
try:
return renderer.render(template, **data)
except JinjaTemplateNotFound as e:
logger.error(e.__doc__ + ', Template: %r' % template)
sys.exit(e.exit_code) |
def check_all_logs(directory, time_thresh):
"""
Check all the log-files in a directory tree for timing errors.
:type directory: str
:param directory: Directory to search within
:type time_thresh: float
:param time_thresh: Time threshold in seconds
:returns: List of :class:`datetime.datetime` for which error timing is
above threshold, e.g. times when data are questionable.
:rtype: list
"""
log_files = glob.glob(directory + '/*/0/000000000_00000000')
print('I have ' + str(len(log_files)) + ' log files to scan')
total_phase_errs = []
for i, log_file in enumerate(log_files):
startdate = dt.datetime.strptime(log_file.split('/')[-4][0:7],
'%Y%j').date()
total_phase_errs += rt_time_log(log_file, startdate)
sys.stdout.write("\r" + str(float(i) / len(log_files) * 100) +
"% \r")
sys.stdout.flush()
time_errs = flag_time_err(total_phase_errs, time_thresh)
time_errs.sort()
return time_errs, total_phase_errs | Check all the log-files in a directory tree for timing errors.
:type directory: str
:param directory: Directory to search within
:type time_thresh: float
:param time_thresh: Time threshold in seconds
:returns: List of :class:`datetime.datetime` for which error timing is
above threshold, e.g. times when data are questionable.
:rtype: list | Below is the instruction that describes the task:
### Input:
Check all the log-files in a directory tree for timing errors.
:type directory: str
:param directory: Directory to search within
:type time_thresh: float
:param time_thresh: Time threshold in seconds
:returns: List of :class:`datetime.datetime` for which error timing is
above threshold, e.g. times when data are questionable.
:rtype: list
### Response:
def check_all_logs(directory, time_thresh):
"""
Check all the log-files in a directory tree for timing errors.
:type directory: str
:param directory: Directory to search within
:type time_thresh: float
:param time_thresh: Time threshold in seconds
:returns: List of :class:`datetime.datetime` for which error timing is
above threshold, e.g. times when data are questionable.
:rtype: list
"""
log_files = glob.glob(directory + '/*/0/000000000_00000000')
print('I have ' + str(len(log_files)) + ' log files to scan')
total_phase_errs = []
for i, log_file in enumerate(log_files):
startdate = dt.datetime.strptime(log_file.split('/')[-4][0:7],
'%Y%j').date()
total_phase_errs += rt_time_log(log_file, startdate)
sys.stdout.write("\r" + str(float(i) / len(log_files) * 100) +
"% \r")
sys.stdout.flush()
time_errs = flag_time_err(total_phase_errs, time_thresh)
time_errs.sort()
return time_errs, total_phase_errs |
def has_file_extension(filepath, ext_required):
'''Assert that a filepath has the required file extension
:param filepath: string filepath presumably containing a file extension
:param ext_required: the expected file extension
examples: ".pdf", ".html", ".tex"
'''
ext = os.path.splitext(filepath)[-1]
if ext != ext_required:
msg_tmpl = "The extension for {}, which is {}, does not equal {}"
msg_format = msg_tmpl.format(filepath, ext, ext_required)
raise ValueError(msg_format)
return True | Assert that a filepath has the required file extension
:param filepath: string filepath presumably containing a file extension
:param ext_required: the expected file extension
examples: ".pdf", ".html", ".tex" | Below is the instruction that describes the task:
### Input:
Assert that a filepath has the required file extension
:param filepath: string filepath presumably containing a file extension
:param ext_required: the expected file extension
examples: ".pdf", ".html", ".tex"
### Response:
def has_file_extension(filepath, ext_required):
'''Assert that a filepath has the required file extension
:param filepath: string filepath presumably containing a file extension
:param ext_required: the expected file extension
examples: ".pdf", ".html", ".tex"
'''
ext = os.path.splitext(filepath)[-1]
if ext != ext_required:
msg_tmpl = "The extension for {}, which is {}, does not equal {}"
msg_format = msg_tmpl.format(filepath, ext, ext_required)
raise ValueError(msg_format)
return True |
def _get_hydrated_path(field):
"""Return HydratedPath object for file-type field."""
# Get only file path if whole file object is given.
if isinstance(field, str) and hasattr(field, 'file_name'):
# field is already actually a HydratedPath object
return field
if isinstance(field, dict) and 'file' in field:
hydrated_path = field['file']
if not hasattr(hydrated_path, 'file_name'):
raise TypeError("Filter argument must be a valid file-type field.")
return hydrated_path | Return HydratedPath object for file-type field. | Below is the instruction that describes the task:
### Input:
Return HydratedPath object for file-type field.
### Response:
def _get_hydrated_path(field):
"""Return HydratedPath object for file-type field."""
# Get only file path if whole file object is given.
if isinstance(field, str) and hasattr(field, 'file_name'):
# field is already actually a HydratedPath object
return field
if isinstance(field, dict) and 'file' in field:
hydrated_path = field['file']
if not hasattr(hydrated_path, 'file_name'):
raise TypeError("Filter argument must be a valid file-type field.")
return hydrated_path |
def parse_files(self, fls):
"""Public method for parsing abricate output files.
This method is called at at class instantiation for the provided
output files. Additional abricate output files can be added using
this method after the class instantiation.
Parameters
----------
fls : list
List of paths to Abricate files
"""
for f in fls:
# Make sure paths exists
if os.path.exists(f):
self._parser(f)
else:
logger.warning("File {} does not exist".format(f)) | Public method for parsing abricate output files.
This method is called at at class instantiation for the provided
output files. Additional abricate output files can be added using
this method after the class instantiation.
Parameters
----------
fls : list
List of paths to Abricate files | Below is the instruction that describes the task:
### Input:
Public method for parsing abricate output files.
This method is called at at class instantiation for the provided
output files. Additional abricate output files can be added using
this method after the class instantiation.
Parameters
----------
fls : list
List of paths to Abricate files
### Response:
def parse_files(self, fls):
"""Public method for parsing abricate output files.
This method is called at at class instantiation for the provided
output files. Additional abricate output files can be added using
this method after the class instantiation.
Parameters
----------
fls : list
List of paths to Abricate files
"""
for f in fls:
# Make sure paths exists
if os.path.exists(f):
self._parser(f)
else:
logger.warning("File {} does not exist".format(f)) |
def _get_federation_info(address_or_id, federation_service, fed_type='name'):
"""Send a federation query to a Stellar Federation service.
Note: The preferred method of making this call is via
:function:`federation`, as it handles error checking and parsing of
arguments.
:param str address_or_id: The address which you expect te retrieve
federation information about.
:param str federation_service: The url of the federation service you're
requesting information from.
:param str fed_type: The type of federation query that you are making. Must
be 'name', 'id', 'forward', or 'txid'.
:return dict: The federation query response decoded from JSON as a dict.
"""
params = {'q': address_or_id, 'type': fed_type}
r = requests.get(federation_service, params=params)
if r.status_code == 200:
return r.json()
else:
return None | Send a federation query to a Stellar Federation service.
Note: The preferred method of making this call is via
:function:`federation`, as it handles error checking and parsing of
arguments.
:param str address_or_id: The address which you expect te retrieve
federation information about.
:param str federation_service: The url of the federation service you're
requesting information from.
:param str fed_type: The type of federation query that you are making. Must
be 'name', 'id', 'forward', or 'txid'.
:return dict: The federation query response decoded from JSON as a dict. | Below is the instruction that describes the task:
### Input:
Send a federation query to a Stellar Federation service.
Note: The preferred method of making this call is via
:function:`federation`, as it handles error checking and parsing of
arguments.
:param str address_or_id: The address which you expect te retrieve
federation information about.
:param str federation_service: The url of the federation service you're
requesting information from.
:param str fed_type: The type of federation query that you are making. Must
be 'name', 'id', 'forward', or 'txid'.
:return dict: The federation query response decoded from JSON as a dict.
### Response:
def _get_federation_info(address_or_id, federation_service, fed_type='name'):
"""Send a federation query to a Stellar Federation service.
Note: The preferred method of making this call is via
:function:`federation`, as it handles error checking and parsing of
arguments.
:param str address_or_id: The address which you expect te retrieve
federation information about.
:param str federation_service: The url of the federation service you're
requesting information from.
:param str fed_type: The type of federation query that you are making. Must
be 'name', 'id', 'forward', or 'txid'.
:return dict: The federation query response decoded from JSON as a dict.
"""
params = {'q': address_or_id, 'type': fed_type}
r = requests.get(federation_service, params=params)
if r.status_code == 200:
return r.json()
else:
return None |
def compute_k(self, memory_antecedent):
"""Compute key Tensor k.
Args:
memory_antecedent: a Tensor with dimensions
{memory_input_dim} + other_dims
Returns:
a Tensor with dimensions
memory_heads_dims + {key_dim} + other_dims
"""
if self.shared_kv:
raise ValueError("compute_k cannot be called with shared_kv")
ret = mtf.einsum(
[memory_antecedent, self.wk], reduced_dims=[self.memory_input_dim])
if self.combine_dims:
ret = mtf.replace_dimensions(ret, ret.shape.dims[-1], self.k_dims)
return ret | Compute key Tensor k.
Args:
memory_antecedent: a Tensor with dimensions
{memory_input_dim} + other_dims
Returns:
a Tensor with dimensions
memory_heads_dims + {key_dim} + other_dims | Below is the instruction that describes the task:
### Input:
Compute key Tensor k.
Args:
memory_antecedent: a Tensor with dimensions
{memory_input_dim} + other_dims
Returns:
a Tensor with dimensions
memory_heads_dims + {key_dim} + other_dims
### Response:
def compute_k(self, memory_antecedent):
"""Compute key Tensor k.
Args:
memory_antecedent: a Tensor with dimensions
{memory_input_dim} + other_dims
Returns:
a Tensor with dimensions
memory_heads_dims + {key_dim} + other_dims
"""
if self.shared_kv:
raise ValueError("compute_k cannot be called with shared_kv")
ret = mtf.einsum(
[memory_antecedent, self.wk], reduced_dims=[self.memory_input_dim])
if self.combine_dims:
ret = mtf.replace_dimensions(ret, ret.shape.dims[-1], self.k_dims)
return ret |
def params(self, **kwargs):
"""
Specify query params to be used when executing the search. All the
keyword arguments will override the current values. See
https://elasticsearch-py.readthedocs.io/en/master/api.html#elasticsearch.Elasticsearch.search
for all available parameters.
Example::
s = Search()
s = s.params(routing='user-1', preference='local')
"""
s = self._clone()
s._params.update(kwargs)
return s | Specify query params to be used when executing the search. All the
keyword arguments will override the current values. See
https://elasticsearch-py.readthedocs.io/en/master/api.html#elasticsearch.Elasticsearch.search
for all available parameters.
Example::
s = Search()
s = s.params(routing='user-1', preference='local') | Below is the instruction that describes the task:
### Input:
Specify query params to be used when executing the search. All the
keyword arguments will override the current values. See
https://elasticsearch-py.readthedocs.io/en/master/api.html#elasticsearch.Elasticsearch.search
for all available parameters.
Example::
s = Search()
s = s.params(routing='user-1', preference='local')
### Response:
def params(self, **kwargs):
"""
Specify query params to be used when executing the search. All the
keyword arguments will override the current values. See
https://elasticsearch-py.readthedocs.io/en/master/api.html#elasticsearch.Elasticsearch.search
for all available parameters.
Example::
s = Search()
s = s.params(routing='user-1', preference='local')
"""
s = self._clone()
s._params.update(kwargs)
return s |
def create_document(
self,
parent,
collection_id,
document_id,
document,
mask=None,
retry=google.api_core.gapic_v1.method.DEFAULT,
timeout=google.api_core.gapic_v1.method.DEFAULT,
metadata=None,
):
"""
Creates a new document.
Example:
>>> from google.cloud import firestore_v1beta1
>>>
>>> client = firestore_v1beta1.FirestoreClient()
>>>
>>> parent = client.any_path_path('[PROJECT]', '[DATABASE]', '[DOCUMENT]', '[ANY_PATH]')
>>>
>>> # TODO: Initialize `collection_id`:
>>> collection_id = ''
>>>
>>> # TODO: Initialize `document_id`:
>>> document_id = ''
>>>
>>> # TODO: Initialize `document`:
>>> document = {}
>>>
>>> response = client.create_document(parent, collection_id, document_id, document)
Args:
parent (str): The parent resource. For example:
``projects/{project_id}/databases/{database_id}/documents`` or
``projects/{project_id}/databases/{database_id}/documents/chatrooms/{chatroom_id}``
collection_id (str): The collection ID, relative to ``parent``, to list. For example:
``chatrooms``.
document_id (str): The client-assigned document ID to use for this document.
Optional. If not specified, an ID will be assigned by the service.
document (Union[dict, ~google.cloud.firestore_v1beta1.types.Document]): The document to create. ``name`` must not be set.
If a dict is provided, it must be of the same form as the protobuf
message :class:`~google.cloud.firestore_v1beta1.types.Document`
mask (Union[dict, ~google.cloud.firestore_v1beta1.types.DocumentMask]): The fields to return. If not set, returns all fields.
If the document has a field that is not present in this mask, that field
will not be returned in the response.
If a dict is provided, it must be of the same form as the protobuf
message :class:`~google.cloud.firestore_v1beta1.types.DocumentMask`
retry (Optional[google.api_core.retry.Retry]): A retry object used
to retry requests. If ``None`` is specified, requests will not
be retried.
timeout (Optional[float]): The amount of time, in seconds, to wait
for the request to complete. Note that if ``retry`` is
specified, the timeout applies to each individual attempt.
metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
that is provided to the method.
Returns:
A :class:`~google.cloud.firestore_v1beta1.types.Document` instance.
Raises:
google.api_core.exceptions.GoogleAPICallError: If the request
failed for any reason.
google.api_core.exceptions.RetryError: If the request failed due
to a retryable error and retry attempts failed.
ValueError: If the parameters are invalid.
"""
# Wrap the transport method to add retry and timeout logic.
if "create_document" not in self._inner_api_calls:
self._inner_api_calls[
"create_document"
] = google.api_core.gapic_v1.method.wrap_method(
self.transport.create_document,
default_retry=self._method_configs["CreateDocument"].retry,
default_timeout=self._method_configs["CreateDocument"].timeout,
client_info=self._client_info,
)
request = firestore_pb2.CreateDocumentRequest(
parent=parent,
collection_id=collection_id,
document_id=document_id,
document=document,
mask=mask,
)
return self._inner_api_calls["create_document"](
request, retry=retry, timeout=timeout, metadata=metadata
) | Creates a new document.
Example:
>>> from google.cloud import firestore_v1beta1
>>>
>>> client = firestore_v1beta1.FirestoreClient()
>>>
>>> parent = client.any_path_path('[PROJECT]', '[DATABASE]', '[DOCUMENT]', '[ANY_PATH]')
>>>
>>> # TODO: Initialize `collection_id`:
>>> collection_id = ''
>>>
>>> # TODO: Initialize `document_id`:
>>> document_id = ''
>>>
>>> # TODO: Initialize `document`:
>>> document = {}
>>>
>>> response = client.create_document(parent, collection_id, document_id, document)
Args:
parent (str): The parent resource. For example:
``projects/{project_id}/databases/{database_id}/documents`` or
``projects/{project_id}/databases/{database_id}/documents/chatrooms/{chatroom_id}``
collection_id (str): The collection ID, relative to ``parent``, to list. For example:
``chatrooms``.
document_id (str): The client-assigned document ID to use for this document.
Optional. If not specified, an ID will be assigned by the service.
document (Union[dict, ~google.cloud.firestore_v1beta1.types.Document]): The document to create. ``name`` must not be set.
If a dict is provided, it must be of the same form as the protobuf
message :class:`~google.cloud.firestore_v1beta1.types.Document`
mask (Union[dict, ~google.cloud.firestore_v1beta1.types.DocumentMask]): The fields to return. If not set, returns all fields.
If the document has a field that is not present in this mask, that field
will not be returned in the response.
If a dict is provided, it must be of the same form as the protobuf
message :class:`~google.cloud.firestore_v1beta1.types.DocumentMask`
retry (Optional[google.api_core.retry.Retry]): A retry object used
to retry requests. If ``None`` is specified, requests will not
be retried.
timeout (Optional[float]): The amount of time, in seconds, to wait
for the request to complete. Note that if ``retry`` is
specified, the timeout applies to each individual attempt.
metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
that is provided to the method.
Returns:
A :class:`~google.cloud.firestore_v1beta1.types.Document` instance.
Raises:
google.api_core.exceptions.GoogleAPICallError: If the request
failed for any reason.
google.api_core.exceptions.RetryError: If the request failed due
to a retryable error and retry attempts failed.
ValueError: If the parameters are invalid. | Below is the instruction that describes the task:
### Input:
Creates a new document.
Example:
>>> from google.cloud import firestore_v1beta1
>>>
>>> client = firestore_v1beta1.FirestoreClient()
>>>
>>> parent = client.any_path_path('[PROJECT]', '[DATABASE]', '[DOCUMENT]', '[ANY_PATH]')
>>>
>>> # TODO: Initialize `collection_id`:
>>> collection_id = ''
>>>
>>> # TODO: Initialize `document_id`:
>>> document_id = ''
>>>
>>> # TODO: Initialize `document`:
>>> document = {}
>>>
>>> response = client.create_document(parent, collection_id, document_id, document)
Args:
parent (str): The parent resource. For example:
``projects/{project_id}/databases/{database_id}/documents`` or
``projects/{project_id}/databases/{database_id}/documents/chatrooms/{chatroom_id}``
collection_id (str): The collection ID, relative to ``parent``, to list. For example:
``chatrooms``.
document_id (str): The client-assigned document ID to use for this document.
Optional. If not specified, an ID will be assigned by the service.
document (Union[dict, ~google.cloud.firestore_v1beta1.types.Document]): The document to create. ``name`` must not be set.
If a dict is provided, it must be of the same form as the protobuf
message :class:`~google.cloud.firestore_v1beta1.types.Document`
mask (Union[dict, ~google.cloud.firestore_v1beta1.types.DocumentMask]): The fields to return. If not set, returns all fields.
If the document has a field that is not present in this mask, that field
will not be returned in the response.
If a dict is provided, it must be of the same form as the protobuf
message :class:`~google.cloud.firestore_v1beta1.types.DocumentMask`
retry (Optional[google.api_core.retry.Retry]): A retry object used
to retry requests. If ``None`` is specified, requests will not
be retried.
timeout (Optional[float]): The amount of time, in seconds, to wait
for the request to complete. Note that if ``retry`` is
specified, the timeout applies to each individual attempt.
metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
that is provided to the method.
Returns:
A :class:`~google.cloud.firestore_v1beta1.types.Document` instance.
Raises:
google.api_core.exceptions.GoogleAPICallError: If the request
failed for any reason.
google.api_core.exceptions.RetryError: If the request failed due
to a retryable error and retry attempts failed.
ValueError: If the parameters are invalid.
### Response:
def create_document(
        self,
        parent,
        collection_id,
        document_id,
        document,
        mask=None,
        retry=google.api_core.gapic_v1.method.DEFAULT,
        timeout=google.api_core.gapic_v1.method.DEFAULT,
        metadata=None,
    ):
        """
        Creates a new document.
        Example:
            >>> from google.cloud import firestore_v1beta1
            >>>
            >>> client = firestore_v1beta1.FirestoreClient()
            >>>
            >>> parent = client.any_path_path('[PROJECT]', '[DATABASE]', '[DOCUMENT]', '[ANY_PATH]')
            >>>
            >>> # TODO: Initialize `collection_id`:
            >>> collection_id = ''
            >>>
            >>> # TODO: Initialize `document_id`:
            >>> document_id = ''
            >>>
            >>> # TODO: Initialize `document`:
            >>> document = {}
            >>>
            >>> response = client.create_document(parent, collection_id, document_id, document)
        Args:
            parent (str): The parent resource. For example:
                ``projects/{project_id}/databases/{database_id}/documents`` or
                ``projects/{project_id}/databases/{database_id}/documents/chatrooms/{chatroom_id}``
            collection_id (str): The collection ID, relative to ``parent``, to list. For example:
                ``chatrooms``.
            document_id (str): The client-assigned document ID to use for this document.
                Optional. If not specified, an ID will be assigned by the service.
            document (Union[dict, ~google.cloud.firestore_v1beta1.types.Document]): The document to create. ``name`` must not be set.
                If a dict is provided, it must be of the same form as the protobuf
                message :class:`~google.cloud.firestore_v1beta1.types.Document`
            mask (Union[dict, ~google.cloud.firestore_v1beta1.types.DocumentMask]): The fields to return. If not set, returns all fields.
                If the document has a field that is not present in this mask, that field
                will not be returned in the response.
                If a dict is provided, it must be of the same form as the protobuf
                message :class:`~google.cloud.firestore_v1beta1.types.DocumentMask`
            retry (Optional[google.api_core.retry.Retry]): A retry object used
                to retry requests. If ``None`` is specified, requests will not
                be retried.
            timeout (Optional[float]): The amount of time, in seconds, to wait
                for the request to complete. Note that if ``retry`` is
                specified, the timeout applies to each individual attempt.
            metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
                that is provided to the method.
        Returns:
            A :class:`~google.cloud.firestore_v1beta1.types.Document` instance.
        Raises:
            google.api_core.exceptions.GoogleAPICallError: If the request
                failed for any reason.
            google.api_core.exceptions.RetryError: If the request failed due
                to a retryable error and retry attempts failed.
            ValueError: If the parameters are invalid.
        """
        # Wrap the transport method to add retry and timeout logic.
        # The wrapped callable is built once and cached on the instance so
        # subsequent calls reuse it.
        if "create_document" not in self._inner_api_calls:
            self._inner_api_calls[
                "create_document"
            ] = google.api_core.gapic_v1.method.wrap_method(
                self.transport.create_document,
                default_retry=self._method_configs["CreateDocument"].retry,
                default_timeout=self._method_configs["CreateDocument"].timeout,
                client_info=self._client_info,
            )
        # `document` and `mask` may be plain dicts; the protobuf constructor
        # coerces them into messages (see docstring).
        request = firestore_pb2.CreateDocumentRequest(
            parent=parent,
            collection_id=collection_id,
            document_id=document_id,
            document=document,
            mask=mask,
        )
        # Execute the (possibly retried) RPC and return the created Document.
        return self._inner_api_calls["create_document"](
            request, retry=retry, timeout=timeout, metadata=metadata
        )
def _call(self, x, out, **kwargs):
"""Implement ``self(x, out[, **kwargs])``.
Parameters
----------
x : `domain` element
Discretized function to be transformed
out : `range` element
Element to which the output is written
Notes
-----
See the ``pyfftw_call`` function for ``**kwargs`` options.
The parameters ``axes`` and ``halfcomplex`` cannot be
overridden.
See Also
--------
odl.trafos.backends.pyfftw_bindings.pyfftw_call :
Call pyfftw backend directly
"""
# TODO: Implement zero padding
if self.impl == 'numpy':
out[:] = self._call_numpy(x.asarray())
else:
out[:] = self._call_pyfftw(x.asarray(), out.asarray(), **kwargs) | Implement ``self(x, out[, **kwargs])``.
Parameters
----------
x : `domain` element
Discretized function to be transformed
out : `range` element
Element to which the output is written
Notes
-----
See the ``pyfftw_call`` function for ``**kwargs`` options.
The parameters ``axes`` and ``halfcomplex`` cannot be
overridden.
See Also
--------
odl.trafos.backends.pyfftw_bindings.pyfftw_call :
    Call pyfftw backend directly | Below is the instruction that describes the task:
### Input:
Implement ``self(x, out[, **kwargs])``.
Parameters
----------
x : `domain` element
Discretized function to be transformed
out : `range` element
Element to which the output is written
Notes
-----
See the ``pyfftw_call`` function for ``**kwargs`` options.
The parameters ``axes`` and ``halfcomplex`` cannot be
overridden.
See Also
--------
odl.trafos.backends.pyfftw_bindings.pyfftw_call :
Call pyfftw backend directly
### Response:
def _call(self, x, out, **kwargs):
    """Evaluate the transform in-place: ``self(x, out[, **kwargs])``.
    Parameters
    ----------
    x : `domain` element
        Discretized function to be transformed.
    out : `range` element
        Element to which the output is written.
    Notes
    -----
    Keyword arguments are forwarded to the pyfftw backend; see the
    ``pyfftw_call`` function for available options. The ``axes`` and
    ``halfcomplex`` parameters cannot be overridden.
    See Also
    --------
    odl.trafos.backends.pyfftw_bindings.pyfftw_call :
        Call pyfftw backend directly
    """
    # TODO: Implement zero padding
    input_array = x.asarray()
    if self.impl == 'numpy':
        out[:] = self._call_numpy(input_array)
    else:
        out[:] = self._call_pyfftw(input_array, out.asarray(), **kwargs)
def decrypt_file(self, filename, always_trust=False, passphrase=None,
output=None):
"""Decrypt the contents of a file-like object ``filename`` .
:param str filename: A file-like object to decrypt.
:param bool always_trust: Instruct GnuPG to ignore trust checks.
:param str passphrase: The passphrase for the secret key used for decryption.
:param str output: A filename to write the decrypted output to.
"""
args = ["--decrypt"]
if output: # write the output to a file with the specified name
if os.path.exists(output):
os.remove(output) # to avoid overwrite confirmation message
args.append('--output %s' % output)
if always_trust:
args.append("--always-trust")
result = self._result_map['crypt'](self)
self._handle_io(args, filename, result, passphrase, binary=True)
log.debug('decrypt result: %r', result.data)
return result | Decrypt the contents of a file-like object ``filename`` .
:param str filename: A file-like object to decrypt.
:param bool always_trust: Instruct GnuPG to ignore trust checks.
:param str passphrase: The passphrase for the secret key used for decryption.
:param str output: A filename to write the decrypted output to. | Below is the the instruction that describes the task:
### Input:
Decrypt the contents of a file-like object ``filename`` .
:param str filename: A file-like object to decrypt.
:param bool always_trust: Instruct GnuPG to ignore trust checks.
:param str passphrase: The passphrase for the secret key used for decryption.
:param str output: A filename to write the decrypted output to.
### Response:
def decrypt_file(self, filename, always_trust=False, passphrase=None,
                 output=None):
    """Decrypt the contents of a file-like object ``filename`` .
    :param str filename: A file-like object to decrypt.
    :param bool always_trust: Instruct GnuPG to ignore trust checks.
    :param str passphrase: The passphrase for the secret key used for decryption.
    :param str output: A filename to write the decrypted output to.
    :returns: A ``crypt`` result object carrying gpg's output/status data.
    """
    args = ["--decrypt"]
    if output: # write the output to a file with the specified name
        if os.path.exists(output):
            os.remove(output) # to avoid overwrite confirmation message
        # NOTE(review): flag and path are appended as one space-joined
        # token — an output path containing spaces may be mishandled;
        # confirm how _handle_io tokenizes args before relying on this.
        args.append('--output %s' % output)
    if always_trust:
        args.append("--always-trust")
    # Result object that accumulates gpg's stdout/status for this operation.
    result = self._result_map['crypt'](self)
    # binary=True: pass the data through without text-mode newline mangling.
    self._handle_io(args, filename, result, passphrase, binary=True)
    log.debug('decrypt result: %r', result.data)
    return result
async def extract_rows(self, file_or_name, **reader_kwargs):
"""
Extract `self.sample_size` rows from the CSV file and analyze their
data-types.
:param str file_or_name: A string filename or a file handle.
:param reader_kwargs: Arbitrary parameters to pass to the CSV reader.
:returns: A 2-tuple containing a list of headers and list of rows
read from the CSV file.
"""
rows = []
rows_to_read = self.sample_size
async with self.get_reader(file_or_name, **reader_kwargs) as reader:
if self.has_header:
rows_to_read += 1
for i in range(self.sample_size):
try:
row = await reader.__anext__()
except AttributeError as te:
row = next(reader)
except:
raise
rows.append(row)
if self.has_header:
header, rows = rows[0], rows[1:]
else:
header = ['field_%d' % i for i in range(len(rows[0]))]
return header, rows | Extract `self.sample_size` rows from the CSV file and analyze their
data-types.
:param str file_or_name: A string filename or a file handle.
:param reader_kwargs: Arbitrary parameters to pass to the CSV reader.
:returns: A 2-tuple containing a list of headers and list of rows
read from the CSV file. | Below is the the instruction that describes the task:
### Input:
Extract `self.sample_size` rows from the CSV file and analyze their
data-types.
:param str file_or_name: A string filename or a file handle.
:param reader_kwargs: Arbitrary parameters to pass to the CSV reader.
:returns: A 2-tuple containing a list of headers and list of rows
read from the CSV file.
### Response:
async def extract_rows(self, file_or_name, **reader_kwargs):
    """
    Extract `self.sample_size` rows from the CSV file and analyze their
    data-types.
    :param str file_or_name: A string filename or a file handle.
    :param reader_kwargs: Arbitrary parameters to pass to the CSV reader.
    :returns: A 2-tuple containing a list of headers and list of rows
        read from the CSV file.
    """
    rows = []
    rows_to_read = self.sample_size
    async with self.get_reader(file_or_name, **reader_kwargs) as reader:
        if self.has_header:
            # Read one extra row so the header does not eat into the
            # requested sample size.
            rows_to_read += 1
        # BUG FIX: the original iterated range(self.sample_size), leaving
        # rows_to_read unused — with a header the sample came up one short.
        for _ in range(rows_to_read):
            try:
                # Prefer the async iteration protocol when available.
                row = await reader.__anext__()
            except AttributeError:
                # Fall back to a plain synchronous iterator; any other
                # exception propagates naturally.
                row = next(reader)
            rows.append(row)
        if self.has_header:
            header, rows = rows[0], rows[1:]
        else:
            # Synthesize positional field names when no header row exists.
            header = ['field_%d' % i for i in range(len(rows[0]))]
    return header, rows
def _add_example_helper(self, example):
"""Validates examples for structs without enumerated subtypes."""
# Check for fields in the example that don't belong.
for label, example_field in example.fields.items():
if not any(label == f.name for f in self.all_fields):
raise InvalidSpec(
"Example for '%s' has unknown field '%s'." %
(self.name, label),
example_field.lineno, example_field.path,
)
for field in self.all_fields:
if field.name in example.fields:
example_field = example.fields[field.name]
try:
field.data_type.check_example(example_field)
except InvalidSpec as e:
e.msg = "Bad example for field '{}': {}".format(
field.name, e.msg)
raise
elif field.has_default or isinstance(field.data_type, Nullable):
# These don't need examples.
pass
else:
raise InvalidSpec(
"Missing field '%s' in example." % field.name,
example.lineno, example.path)
self._raw_examples[example.label] = example | Validates examples for structs without enumerated subtypes. | Below is the the instruction that describes the task:
### Input:
Validates examples for structs without enumerated subtypes.
### Response:
def _add_example_helper(self, example):
    """Validates examples for structs without enumerated subtypes.
    Raises InvalidSpec if the example names an unknown field, a field's
    value fails its data type's check, or a required field is missing.
    On success the raw example is stored keyed by its label.
    """
    # Check for fields in the example that don't belong.
    for label, example_field in example.fields.items():
        if not any(label == f.name for f in self.all_fields):
            raise InvalidSpec(
                "Example for '%s' has unknown field '%s'." %
                (self.name, label),
                example_field.lineno, example_field.path,
            )
    # Every declared field must be exemplified, have a default, or be
    # nullable (the latter two need no explicit example value).
    for field in self.all_fields:
        if field.name in example.fields:
            example_field = example.fields[field.name]
            try:
                field.data_type.check_example(example_field)
            except InvalidSpec as e:
                # Prepend the offending field name for context, re-raise.
                e.msg = "Bad example for field '{}': {}".format(
                    field.name, e.msg)
                raise
        elif field.has_default or isinstance(field.data_type, Nullable):
            # These don't need examples.
            pass
        else:
            raise InvalidSpec(
                "Missing field '%s' in example." % field.name,
                example.lineno, example.path)
    # Keep the raw (unresolved) example for later resolution passes.
    self._raw_examples[example.label] = example
def rc_stats(stats):
"""
reverse completement stats
"""
rc_nucs = {'A':'T', 'T':'A', 'G':'C', 'C':'G', 'N':'N'}
rcs = []
for pos in reversed(stats):
rc = {}
rc['reference frequencey'] = pos['reference frequency']
rc['consensus frequencey'] = pos['consensus frequency']
rc['In'] = pos['In']
rc['Del'] = pos['Del']
rc['ref'] = rc_nucs[pos['ref']]
rc['consensus'] = (rc_nucs[pos['consensus'][0]], pos['consensus'][1])
for base, stat in list(pos.items()):
if base in rc_nucs:
rc[rc_nucs[base]] = stat
rcs.append(rc)
    return rcs | reverse completement stats | Below is the instruction that describes the task:
### Input:
reverse completement stats
### Response:
def rc_stats(stats):
    """Return per-position stats for the reverse complement.
    Positions are visited back-to-front and each nucleotide key is swapped
    for its complement; per-position metadata is carried over unchanged.
    (The misspelled '... frequencey' output keys mirror the original
    implementation and are preserved for downstream compatibility.)
    """
    complement = {'A': 'T', 'T': 'A', 'G': 'C', 'C': 'G', 'N': 'N'}
    flipped = []
    for position in stats[::-1]:
        entry = {
            'reference frequencey': position['reference frequency'],
            'consensus frequencey': position['consensus frequency'],
            'In': position['In'],
            'Del': position['Del'],
            'ref': complement[position['ref']],
            'consensus': (complement[position['consensus'][0]],
                          position['consensus'][1]),
        }
        entry.update((complement[base], stat)
                     for base, stat in position.items() if base in complement)
        flipped.append(entry)
    return flipped
def read(self):
""" Get the logic input level for the pin
:return: True if the input is high
"""
m = getattr(self.chip, self.method)
return m(**self.arguments) | Get the logic input level for the pin
:return: True if the input is high | Below is the the instruction that describes the task:
### Input:
Get the logic input level for the pin
:return: True if the input is high
### Response:
def read(self):
    """Return the pin's logic input level.
    Resolves the chip method named by ``self.method`` and invokes it
    with ``self.arguments``; a truthy result means the input is high.
    :return: True if the input is high
    """
    bound_method = getattr(self.chip, self.method)
    return bound_method(**self.arguments)
def credentials_required(method_func):
"""
Decorator for methods that checks that the client has credentials.
Throws a CredentialsMissingError when they are absent.
"""
def _checkcredentials(self, *args, **kwargs):
if self.username and self.password:
return method_func(self, *args, **kwargs)
else:
raise CredentialsMissingError("This is a private method. \
You must provide a username and password when you initialize the \
DocumentCloud client to attempt this type of request.")
return wraps(method_func)(_checkcredentials) | Decorator for methods that checks that the client has credentials.
Throws a CredentialsMissingError when they are absent. | Below is the instruction that describes the task:
### Input:
Decorator for methods that checks that the client has credentials.
Throws a CredentialsMissingError when they are absent.
### Response:
def credentials_required(method_func):
    """
    Decorator for methods that checks that the client has credentials.
    Throws a CredentialsMissingError when they are absent.
    """
    def _checkcredentials(self, *args, **kwargs):
        # Guard clause: bail out early when either credential is missing.
        if not (self.username and self.password):
            raise CredentialsMissingError("This is a private method. \
You must provide a username and password when you initialize the \
DocumentCloud client to attempt this type of request.")
        return method_func(self, *args, **kwargs)
    return wraps(method_func)(_checkcredentials)
def remove_permission(FunctionName, StatementId, Qualifier=None,
region=None, key=None, keyid=None, profile=None):
'''
Remove a permission from a lambda function.
Returns {removed: true} if the permission was removed and returns
{removed: False} if the permission was not removed.
CLI Example:
.. code-block:: bash
salt myminion boto_lamba.remove_permission my_function my_id
'''
try:
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
kwargs = {}
if Qualifier is not None:
kwargs['Qualifier'] = Qualifier
conn.remove_permission(FunctionName=FunctionName, StatementId=StatementId,
**kwargs)
return {'updated': True}
except ClientError as e:
return {'updated': False, 'error': __utils__['boto3.get_error'](e)} | Remove a permission from a lambda function.
Returns {removed: true} if the permission was removed and returns
{removed: False} if the permission was not removed.
CLI Example:
.. code-block:: bash
salt myminion boto_lamba.remove_permission my_function my_id | Below is the the instruction that describes the task:
### Input:
Remove a permission from a lambda function.
Returns {removed: true} if the permission was removed and returns
{removed: False} if the permission was not removed.
CLI Example:
.. code-block:: bash
salt myminion boto_lamba.remove_permission my_function my_id
### Response:
def remove_permission(FunctionName, StatementId, Qualifier=None,
                      region=None, key=None, keyid=None, profile=None):
    '''
    Remove a permission from a lambda function.
    Returns {updated: true} if the permission was removed and returns
    {updated: false} with an error description if the removal failed.
    CLI Example:
    .. code-block:: bash
        salt myminion boto_lambda.remove_permission my_function my_id
    '''
    try:
        # Region/credential resolution is delegated to the shared helper.
        conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
        kwargs = {}
        if Qualifier is not None:
            # Restrict removal to a specific function version or alias.
            kwargs['Qualifier'] = Qualifier
        conn.remove_permission(FunctionName=FunctionName, StatementId=StatementId,
                               **kwargs)
        return {'updated': True}
    except ClientError as e:
        # Normalize the boto ClientError into the standard error dict.
        return {'updated': False, 'error': __utils__['boto3.get_error'](e)}
def generate(cls):
"""
Generates a random :class:`~SigningKey` object.
:rtype: :class:`~SigningKey`
"""
return cls(
libnacl.randombytes(libnacl.crypto_sign_SEEDBYTES),
encoder=encoding.RawEncoder,
) | Generates a random :class:`~SigningKey` object.
    :rtype: :class:`~SigningKey` | Below is the instruction that describes the task:
### Input:
Generates a random :class:`~SigningKey` object.
:rtype: :class:`~SigningKey`
### Response:
def generate(cls):
    """
    Generates a random :class:`~SigningKey` object.
    A fresh seed of ``crypto_sign_SEEDBYTES`` random bytes is drawn from
    libsodium (via libnacl) and handed to the constructor.
    Presumably decorated as a ``@classmethod`` in the full file — it takes
    ``cls``; confirm against the enclosing class definition.
    :rtype: :class:`~SigningKey`
    """
    return cls(
        # Raw seed bytes; RawEncoder tells the constructor they are not
        # hex/base64 encoded.
        libnacl.randombytes(libnacl.crypto_sign_SEEDBYTES),
        encoder=encoding.RawEncoder,
    )
def delimiter_encodeseq(delimiter, encodeseq, charset):
'''Coerce delimiter and encodeseq to unicode and verify that they are not
the same'''
delimiter = coerce_unicode(delimiter, charset)
encodeseq = coerce_unicode(encodeseq, charset)
if 1 != len(encodeseq):
raise FSQEncodeError(errno.EINVAL, u'encode sequence must be 1'\
u' character, not {0}'.format(len(encodeseq)))
if 1 != len(delimiter):
raise FSQEncodeError(errno.EINVAL, u'delimiter must be 1 character,'\
u' not {0}'.format(len(delimiter)))
if delimiter == encodeseq:
raise FSQEncodeError(errno.EINVAL, u'delimiter and encoding may not'\
u' be the same: both: {0}'.format(encodeseq))
try:
delimiter.encode('ascii')
except UnicodeEncodeError:
raise FSQEncodeError(errno.EINVAL, u'delimiter must be ascii')
try:
encodeseq.encode('ascii')
except UnicodeEncodeError:
raise FSQEncodeError(errno.EINVAL, u'encodeseq must be ascii')
return delimiter, encodeseq | Coerce delimiter and encodeseq to unicode and verify that they are not
the same | Below is the the instruction that describes the task:
### Input:
Coerce delimiter and encodeseq to unicode and verify that they are not
the same
### Response:
def delimiter_encodeseq(delimiter, encodeseq, charset):
    '''Coerce delimiter and encodeseq to unicode and verify that they are not
    the same'''
    # Normalize both arguments to unicode using the caller-supplied charset.
    delimiter = coerce_unicode(delimiter, charset)
    encodeseq = coerce_unicode(encodeseq, charset)
    # Each must be exactly one character so encoded names parse unambiguously.
    if 1 != len(encodeseq):
        raise FSQEncodeError(errno.EINVAL, u'encode sequence must be 1'\
                             u' character, not {0}'.format(len(encodeseq)))
    if 1 != len(delimiter):
        raise FSQEncodeError(errno.EINVAL, u'delimiter must be 1 character,'\
                             u' not {0}'.format(len(delimiter)))
    # A shared character would make escaping ambiguous.
    if delimiter == encodeseq:
        raise FSQEncodeError(errno.EINVAL, u'delimiter and encoding may not'\
                             u' be the same: both: {0}'.format(encodeseq))
    # Both must be ASCII; encoding is used purely as a validity probe.
    try:
        delimiter.encode('ascii')
    except UnicodeEncodeError:
        raise FSQEncodeError(errno.EINVAL, u'delimiter must be ascii')
    try:
        encodeseq.encode('ascii')
    except UnicodeEncodeError:
        raise FSQEncodeError(errno.EINVAL, u'encodeseq must be ascii')
    # Return the validated, unicode-coerced pair.
    return delimiter, encodeseq
def post(self, url, postParameters=None, urlParameters=None):
"""
Convenience method for requesting to google with proper cookies/params.
"""
if urlParameters:
url = url + "?" + self.getParameters(urlParameters)
headers = {'Authorization':'GoogleLogin auth=%s' % self.auth_token,
'Content-Type': 'application/x-www-form-urlencoded'
}
postString = self.postParameters(postParameters)
req = requests.post(url, data=postString, headers=headers)
return req.text | Convenience method for requesting to google with proper cookies/params. | Below is the the instruction that describes the task:
### Input:
Convenience method for requesting to google with proper cookies/params.
### Response:
def post(self, url, postParameters=None, urlParameters=None):
    """
    Convenience method for requesting to google with proper cookies/params.
    :param url: Endpoint to POST to; ``urlParameters`` (if given) are
        appended to it as a query string.
    :param postParameters: Mapping serialized into the form-encoded body.
    :param urlParameters: Mapping serialized into the URL query string.
    :returns: The response body as text.
    """
    if urlParameters:
        url = url + "?" + self.getParameters(urlParameters)
    # NOTE(review): 'GoogleLogin auth=...' is the legacy ClientLogin
    # header scheme — confirm the target API still accepts it.
    headers = {'Authorization':'GoogleLogin auth=%s' % self.auth_token,
               'Content-Type': 'application/x-www-form-urlencoded'
               }
    postString = self.postParameters(postParameters)
    req = requests.post(url, data=postString, headers=headers)
    return req.text
def dumps(self):
"""Return the Exception data in a format for JSON-RPC."""
error = {'code': self.code,
'message': str(self.message)}
if self.data is not None:
error['data'] = self.data
    return error | Return the Exception data in a format for JSON-RPC. | Below is the instruction that describes the task:
### Input:
Return the Exception data in a format for JSON-RPC.
### Response:
def dumps(self):
    """Serialize this error into a JSON-RPC error object.
    Produces a dict with ``code`` and the stringified ``message``;
    ``data`` is included only when it is set.
    """
    serialized = {
        'code': self.code,
        'message': str(self.message),
    }
    if self.data is not None:
        serialized['data'] = self.data
    return serialized
def __write(self, containers, initialize=True):
'''Write the given state information into a file'''
path = self._state_file
self._assure_dir()
try:
flags = os.O_WRONLY | os.O_CREAT
if initialize:
flags |= os.O_EXCL
with os.fdopen(os.open(path, flags), "w") as f:
yaml.safe_dump(self.__base_state(containers), f)
except OSError as err:
if err.errno == errno.EEXIST:
raise AlreadyInitializedError(
"Path %s exists. "
"You may need to destroy a previous blockade." % path)
raise
except Exception:
# clean up our created file
self._state_delete()
raise | Write the given state information into a file | Below is the the instruction that describes the task:
### Input:
Write the given state information into a file
### Response:
def __write(self, containers, initialize=True):
    '''Write the given state information into a file'''
    path = self._state_file
    # Ensure the parent directory exists before opening the file.
    self._assure_dir()
    try:
        flags = os.O_WRONLY | os.O_CREAT
        if initialize:
            # O_EXCL makes creation fail if the state file already exists,
            # turning a re-initialization into a detectable EEXIST error.
            flags |= os.O_EXCL
        with os.fdopen(os.open(path, flags), "w") as f:
            yaml.safe_dump(self.__base_state(containers), f)
    except OSError as err:
        if err.errno == errno.EEXIST:
            raise AlreadyInitializedError(
                "Path %s exists. "
                "You may need to destroy a previous blockade." % path)
        raise
    except Exception:
        # clean up our created file
        self._state_delete()
        raise
def init(self):
"""Init the connection to the OpenTSDB server."""
if not self.export_enable:
return None
try:
db = potsdb.Client(self.host,
port=int(self.port),
check_host=True)
except Exception as e:
logger.critical("Cannot connect to OpenTSDB server %s:%s (%s)" % (self.host, self.port, e))
sys.exit(2)
return db | Init the connection to the OpenTSDB server. | Below is the the instruction that describes the task:
### Input:
Init the connection to the OpenTSDB server.
### Response:
def init(self):
    """Init the connection to the OpenTSDB server.
    :returns: A connected potsdb client, or None when export is disabled.
    """
    # Export disabled: nothing to set up.
    if not self.export_enable:
        return None
    try:
        db = potsdb.Client(self.host,
                           port=int(self.port),
                           check_host=True)
    except Exception as e:
        logger.critical("Cannot connect to OpenTSDB server %s:%s (%s)" % (self.host, self.port, e))
        # NOTE(review): sys.exit in library code aborts the whole process;
        # consider raising instead — confirm callers expect this behavior.
        sys.exit(2)
    return db
def discard_last(self, indices):
"""Discard the triggers added in the latest update"""
for i in indices:
self.buffer_expire[i] = self.buffer_expire[i][:-1]
self.buffer[i] = self.buffer[i][:-1] | Discard the triggers added in the latest update | Below is the the instruction that describes the task:
### Input:
Discard the triggers added in the latest update
### Response:
def discard_last(self, indices):
    """Drop the most recently added trigger from each listed buffer.
    For every index in ``indices`` the last element of both the expiry
    buffer and the trigger buffer is sliced off. Rebinds via ``[:-1]``
    (rather than deleting in place), so an empty buffer stays empty.
    """
    for idx in indices:
        self.buffer_expire[idx] = self.buffer_expire[idx][:-1]
        self.buffer[idx] = self.buffer[idx][:-1]
def Lookup(self, name):
"""Get the value associated with a name in the current context.
The current context could be an dictionary in a list, or a dictionary
outside a list.
Args:
name: name to lookup, e.g. 'foo' or 'foo.bar.baz'
Returns:
The value, or self.undefined_str
Raises:
UndefinedVariable if self.undefined_str is not set
"""
if name == '@':
return self.stack[-1].context
parts = name.split('.')
value = self._LookUpStack(parts[0])
# Now do simple lookups of the rest of the parts
for part in parts[1:]:
try:
value = value[part]
except (KeyError, TypeError): # TypeError for non-dictionaries
return self._Undefined(part)
return value | Get the value associated with a name in the current context.
The current context could be an dictionary in a list, or a dictionary
outside a list.
Args:
name: name to lookup, e.g. 'foo' or 'foo.bar.baz'
Returns:
The value, or self.undefined_str
Raises:
UndefinedVariable if self.undefined_str is not set | Below is the the instruction that describes the task:
### Input:
Get the value associated with a name in the current context.
The current context could be an dictionary in a list, or a dictionary
outside a list.
Args:
name: name to lookup, e.g. 'foo' or 'foo.bar.baz'
Returns:
The value, or self.undefined_str
Raises:
UndefinedVariable if self.undefined_str is not set
### Response:
def Lookup(self, name):
    """Get the value associated with a name in the current context.
    The current context could be an dictionary in a list, or a dictionary
    outside a list.
    Args:
      name: name to lookup, e.g. 'foo' or 'foo.bar.baz'
    Returns:
      The value, or self.undefined_str
    Raises:
      UndefinedVariable if self.undefined_str is not set
    """
    # '@' is the special name for the whole current context node.
    if name == '@':
        return self.stack[-1].context
    parts = name.split('.')
    # The first dotted component may live anywhere up the context stack.
    value = self._LookUpStack(parts[0])
    # Now do simple lookups of the rest of the parts
    for part in parts[1:]:
        try:
            value = value[part]
        except (KeyError, TypeError): # TypeError for non-dictionaries
            # Missing key (or a non-mapping value mid-path): delegate to
            # the undefined-name policy.
            return self._Undefined(part)
    return value
def inference(self, in_dims, out_dims, value=None):
""" Perform Bayesian inference on the gmm. Let's call V = V1...Vd the d-dimensional space on which the current GMM is defined, such that it represents P(V). Let's call X and Y to disjoint subspaces of V, with corresponding dimension indices in ran. This method returns the GMM for P(Y | X=value).
:param list in_dims: the dimension indices of X (a subset of range(d)). This can be the empty list if one want to compute the marginal P(Y).
:param list out_dims: the dimension indices of Y (a subset of range(d), without intersection with in_dims).
:param numpy.array value: the value of X for which one want to compute the conditional (ignored of in_dims=[]).
:returns: the gmm corresponding to P(Y | X=value) (or to P(Y) if in_dims=[])
.. note:: For example, if X = V1...Vm and Y = Vm+1...Vd, then P(Y | X=v1...vm) is returned by self.inference(in_dims=range(m), out_dims=range(m, d), array([v1, ..., vm])).
"""
if self.covariance_type != 'diag' and self.covariance_type != 'full':
raise ValueError("covariance type other than 'full' and 'diag' not allowed")
in_dims = array(in_dims)
out_dims = array(out_dims)
value = array(value)
means = zeros((self.n_components, len(out_dims)))
covars = zeros((self.n_components, len(out_dims), len(out_dims)))
weights = zeros((self.n_components,))
if in_dims.size:
for k, (weight_k, mean_k, covar_k) in enumerate(self):
sig_in = covar_k[ix_(in_dims, in_dims)]
inin_inv = matrix(sig_in).I
out_in = covar_k[ix_(out_dims, in_dims)]
mu_in = mean_k[in_dims].reshape(-1, 1)
means[k, :] = (mean_k[out_dims] +
(out_in *
inin_inv *
(value.reshape(-1, 1) - mu_in)).T)
if self.covariance_type == 'full':
covars[k, :, :] = (covar_k[ix_(out_dims, out_dims)] -
out_in *
inin_inv *
covar_k[ix_(in_dims, out_dims)])
elif self.covariance_type == 'diag':
covars[k, :] = covar_k[out_dims]
weights[k] = weight_k * Gaussian(mu_in.reshape(-1,),
sig_in).normal(value.reshape(-1,))
weights /= sum(weights)
else:
means = self.means_[:, out_dims]
if self.covariance_type == 'full':
covars = self.covariances_[ix_(range(self.n_components), out_dims, out_dims)]
if self.covariance_type == 'diag':
covars = self.covariances_[ix_(range(self.n_components), out_dims)]
weights = self.weights_
res = GMM(n_components=self.n_components,
covariance_type=self.covariance_type)
res.weights_ = weights
res.means_ = means
res.covariances_ = covars
return res | Perform Bayesian inference on the gmm. Let's call V = V1...Vd the d-dimensional space on which the current GMM is defined, such that it represents P(V). Let's call X and Y to disjoint subspaces of V, with corresponding dimension indices in ran. This method returns the GMM for P(Y | X=value).
:param list in_dims: the dimension indices of X (a subset of range(d)). This can be the empty list if one want to compute the marginal P(Y).
:param list out_dims: the dimension indices of Y (a subset of range(d), without intersection with in_dims).
:param numpy.array value: the value of X for which one want to compute the conditional (ignored of in_dims=[]).
:returns: the gmm corresponding to P(Y | X=value) (or to P(Y) if in_dims=[])
.. note:: For example, if X = V1...Vm and Y = Vm+1...Vd, then P(Y | X=v1...vm) is returned by self.inference(in_dims=range(m), out_dims=range(m, d), array([v1, ..., vm])). | Below is the the instruction that describes the task:
### Input:
Perform Bayesian inference on the gmm. Let's call V = V1...Vd the d-dimensional space on which the current GMM is defined, such that it represents P(V). Let's call X and Y to disjoint subspaces of V, with corresponding dimension indices in ran. This method returns the GMM for P(Y | X=value).
:param list in_dims: the dimension indices of X (a subset of range(d)). This can be the empty list if one want to compute the marginal P(Y).
:param list out_dims: the dimension indices of Y (a subset of range(d), without intersection with in_dims).
:param numpy.array value: the value of X for which one want to compute the conditional (ignored of in_dims=[]).
:returns: the gmm corresponding to P(Y | X=value) (or to P(Y) if in_dims=[])
.. note:: For example, if X = V1...Vm and Y = Vm+1...Vd, then P(Y | X=v1...vm) is returned by self.inference(in_dims=range(m), out_dims=range(m, d), array([v1, ..., vm])).
### Response:
def inference(self, in_dims, out_dims, value=None):
""" Perform Bayesian inference on the gmm. Let's call V = V1...Vd the d-dimensional space on which the current GMM is defined, such that it represents P(V). Let's call X and Y to disjoint subspaces of V, with corresponding dimension indices in ran. This method returns the GMM for P(Y | X=value).
:param list in_dims: the dimension indices of X (a subset of range(d)). This can be the empty list if one want to compute the marginal P(Y).
:param list out_dims: the dimension indices of Y (a subset of range(d), without intersection with in_dims).
:param numpy.array value: the value of X for which one want to compute the conditional (ignored of in_dims=[]).
:returns: the gmm corresponding to P(Y | X=value) (or to P(Y) if in_dims=[])
.. note:: For example, if X = V1...Vm and Y = Vm+1...Vd, then P(Y | X=v1...vm) is returned by self.inference(in_dims=range(m), out_dims=range(m, d), array([v1, ..., vm])).
"""
if self.covariance_type != 'diag' and self.covariance_type != 'full':
raise ValueError("covariance type other than 'full' and 'diag' not allowed")
in_dims = array(in_dims)
out_dims = array(out_dims)
value = array(value)
means = zeros((self.n_components, len(out_dims)))
covars = zeros((self.n_components, len(out_dims), len(out_dims)))
weights = zeros((self.n_components,))
if in_dims.size:
for k, (weight_k, mean_k, covar_k) in enumerate(self):
sig_in = covar_k[ix_(in_dims, in_dims)]
inin_inv = matrix(sig_in).I
out_in = covar_k[ix_(out_dims, in_dims)]
mu_in = mean_k[in_dims].reshape(-1, 1)
means[k, :] = (mean_k[out_dims] +
(out_in *
inin_inv *
(value.reshape(-1, 1) - mu_in)).T)
if self.covariance_type == 'full':
covars[k, :, :] = (covar_k[ix_(out_dims, out_dims)] -
out_in *
inin_inv *
covar_k[ix_(in_dims, out_dims)])
elif self.covariance_type == 'diag':
covars[k, :] = covar_k[out_dims]
weights[k] = weight_k * Gaussian(mu_in.reshape(-1,),
sig_in).normal(value.reshape(-1,))
weights /= sum(weights)
else:
means = self.means_[:, out_dims]
if self.covariance_type == 'full':
covars = self.covariances_[ix_(range(self.n_components), out_dims, out_dims)]
if self.covariance_type == 'diag':
covars = self.covariances_[ix_(range(self.n_components), out_dims)]
weights = self.weights_
res = GMM(n_components=self.n_components,
covariance_type=self.covariance_type)
res.weights_ = weights
res.means_ = means
res.covariances_ = covars
return res |
def _map_order_to_ticks(start, end, order, reverse=False):
"""Map elements from given `order` array to bins ranging from `start`
to `end`.
"""
size = len(order)
bounds = np.linspace(start, end, size + 1)
if reverse:
bounds = bounds[::-1]
mapping = list(zip(bounds[:-1]%(np.pi*2), order))
return mapping | Map elements from given `order` array to bins ranging from `start`
to `end`. | Below is the the instruction that describes the task:
### Input:
Map elements from given `order` array to bins ranging from `start`
to `end`.
### Response:
def _map_order_to_ticks(start, end, order, reverse=False):
"""Map elements from given `order` array to bins ranging from `start`
to `end`.
"""
size = len(order)
bounds = np.linspace(start, end, size + 1)
if reverse:
bounds = bounds[::-1]
mapping = list(zip(bounds[:-1]%(np.pi*2), order))
return mapping |
def _find_url_name(self, index_url, url_name, req):
"""
Finds the true URL name of a package, when the given name isn't quite
correct.
This is usually used to implement case-insensitivity.
"""
if not index_url.url.endswith('/'):
# Vaguely part of the PyPI API... weird but true.
# FIXME: bad to modify this?
index_url.url += '/'
page = self._get_page(index_url, req)
if page is None:
logger.critical('Cannot fetch index base URL %s', index_url)
return
norm_name = normalize_name(req.url_name)
for link in page.links:
base = posixpath.basename(link.path.rstrip('/'))
if norm_name == normalize_name(base):
logger.debug(
'Real name of requirement %s is %s', url_name, base,
)
return base
return None | Finds the true URL name of a package, when the given name isn't quite
correct.
This is usually used to implement case-insensitivity. | Below is the the instruction that describes the task:
### Input:
Finds the true URL name of a package, when the given name isn't quite
correct.
This is usually used to implement case-insensitivity.
### Response:
def _find_url_name(self, index_url, url_name, req):
"""
Finds the true URL name of a package, when the given name isn't quite
correct.
This is usually used to implement case-insensitivity.
"""
if not index_url.url.endswith('/'):
# Vaguely part of the PyPI API... weird but true.
# FIXME: bad to modify this?
index_url.url += '/'
page = self._get_page(index_url, req)
if page is None:
logger.critical('Cannot fetch index base URL %s', index_url)
return
norm_name = normalize_name(req.url_name)
for link in page.links:
base = posixpath.basename(link.path.rstrip('/'))
if norm_name == normalize_name(base):
logger.debug(
'Real name of requirement %s is %s', url_name, base,
)
return base
return None |
def show_service_profile(self, flavor_profile, **_params):
"""Fetches information for a certain Neutron service flavor profile."""
return self.get(self.service_profile_path % (flavor_profile),
params=_params) | Fetches information for a certain Neutron service flavor profile. | Below is the the instruction that describes the task:
### Input:
Fetches information for a certain Neutron service flavor profile.
### Response:
def show_service_profile(self, flavor_profile, **_params):
"""Fetches information for a certain Neutron service flavor profile."""
return self.get(self.service_profile_path % (flavor_profile),
params=_params) |
def get_ordered_children(self):
"""
Return the list of children ordered according to the element structure
:return: a list of :class:`Element <hl7apy.core.Element>`
"""
ordered_keys = self.element.ordered_children if self.element.ordered_children is not None else []
children = [self.indexes.get(k, None) for k in ordered_keys]
return children | Return the list of children ordered according to the element structure
:return: a list of :class:`Element <hl7apy.core.Element>` | Below is the the instruction that describes the task:
### Input:
Return the list of children ordered according to the element structure
:return: a list of :class:`Element <hl7apy.core.Element>`
### Response:
def get_ordered_children(self):
"""
Return the list of children ordered according to the element structure
:return: a list of :class:`Element <hl7apy.core.Element>`
"""
ordered_keys = self.element.ordered_children if self.element.ordered_children is not None else []
children = [self.indexes.get(k, None) for k in ordered_keys]
return children |
def PathExists(v):
"""Verify the path exists, regardless of its type.
>>> os.path.basename(PathExists()(__file__)).startswith('validators.py')
True
>>> with raises(Invalid, 'path does not exist'):
... PathExists()("random_filename_goes_here.py")
>>> with raises(PathInvalid, 'Not a Path'):
... PathExists()(None)
"""
try:
if v:
v = str(v)
return os.path.exists(v)
else:
raise PathInvalid("Not a Path")
except TypeError:
raise PathInvalid("Not a Path") | Verify the path exists, regardless of its type.
>>> os.path.basename(PathExists()(__file__)).startswith('validators.py')
True
>>> with raises(Invalid, 'path does not exist'):
... PathExists()("random_filename_goes_here.py")
>>> with raises(PathInvalid, 'Not a Path'):
... PathExists()(None) | Below is the the instruction that describes the task:
### Input:
Verify the path exists, regardless of its type.
>>> os.path.basename(PathExists()(__file__)).startswith('validators.py')
True
>>> with raises(Invalid, 'path does not exist'):
... PathExists()("random_filename_goes_here.py")
>>> with raises(PathInvalid, 'Not a Path'):
... PathExists()(None)
### Response:
def PathExists(v):
"""Verify the path exists, regardless of its type.
>>> os.path.basename(PathExists()(__file__)).startswith('validators.py')
True
>>> with raises(Invalid, 'path does not exist'):
... PathExists()("random_filename_goes_here.py")
>>> with raises(PathInvalid, 'Not a Path'):
... PathExists()(None)
"""
try:
if v:
v = str(v)
return os.path.exists(v)
else:
raise PathInvalid("Not a Path")
except TypeError:
raise PathInvalid("Not a Path") |
def read(self, addr, size):
'''
Parameters
----------
addr : int
The register address.
size : int
Length of data to be read (number of bytes).
Returns
-------
array : array
Data (byte array) read from memory. Returns 0 for each byte if it hasn't been written to.
'''
logger.debug("Dummy SiTransferLayer.read addr: %s size: %s" % (hex(addr), size))
return array.array('B', [self.mem[curr_addr] if curr_addr in self.mem else 0 for curr_addr in range(addr, addr + size)]) | Parameters
----------
addr : int
The register address.
size : int
Length of data to be read (number of bytes).
Returns
-------
array : array
Data (byte array) read from memory. Returns 0 for each byte if it hasn't been written to. | Below is the the instruction that describes the task:
### Input:
Parameters
----------
addr : int
The register address.
size : int
Length of data to be read (number of bytes).
Returns
-------
array : array
Data (byte array) read from memory. Returns 0 for each byte if it hasn't been written to.
### Response:
def read(self, addr, size):
'''
Parameters
----------
addr : int
The register address.
size : int
Length of data to be read (number of bytes).
Returns
-------
array : array
Data (byte array) read from memory. Returns 0 for each byte if it hasn't been written to.
'''
logger.debug("Dummy SiTransferLayer.read addr: %s size: %s" % (hex(addr), size))
return array.array('B', [self.mem[curr_addr] if curr_addr in self.mem else 0 for curr_addr in range(addr, addr + size)]) |
def download_unicodedata(version, output=HOME, no_zip=False):
"""Download Unicode data scripts and blocks."""
files = [
'UnicodeData.txt',
'Scripts.txt',
'Blocks.txt',
'PropList.txt',
'DerivedCoreProperties.txt',
'DerivedNormalizationProps.txt',
'CompositionExclusions.txt',
'PropertyValueAliases.txt',
'PropertyAliases.txt',
'EastAsianWidth.txt',
'LineBreak.txt',
'HangulSyllableType.txt',
'DerivedAge.txt',
'auxiliary/WordBreakProperty.txt',
'auxiliary/SentenceBreakProperty.txt',
'auxiliary/GraphemeBreakProperty.txt',
'extracted/DerivedDecompositionType.txt',
'extracted/DerivedNumericType.txt',
'extracted/DerivedNumericValues.txt',
'extracted/DerivedJoiningType.txt',
'extracted/DerivedJoiningGroup.txt',
'extracted/DerivedCombiningClass.txt'
]
files.append('ScriptExtensions.txt')
if PY35:
files.append('IndicPositionalCategory.txt')
else:
files.append('IndicMatraCategory.txt')
files.append('IndicSyllabicCategory.txt')
if PY34:
files.append('BidiBrackets.txt')
if PY37:
files.append('VerticalOrientation.txt')
http_url = 'http://www.unicode.org/Public/%s/ucd/' % version
ftp_url = 'ftp://ftp.unicode.org/Public/%s/ucd/' % version
destination = os.path.join(output, 'unicodedata', version)
if not os.path.exists(destination):
os.makedirs(destination)
zip_data = not no_zip
for f in files:
file_location = os.path.join(destination, os.path.basename(f))
retrieved = False
if not os.path.exists(file_location):
for url in (ftp_url, http_url):
furl = url + f
try:
print('Downloading: %s --> %s' % (furl, file_location))
response = urlopen(furl, timeout=30)
data = response.read()
except Exception:
print('Failed: %s' % url)
continue
with open(file_location, 'w') as uf:
uf.write(data.decode('utf-8'))
retrieved = True
break
if not retrieved:
print('Failed to acquire all needed Unicode files!')
break
else:
retrieved = True
print('Skipping: found %s' % file_location)
if not retrieved:
zip_data = False
break
if zip_data and not os.path.exists(os.path.join(output, 'unicodedata', '%s.zip' % version)):
zip_unicode(output, version) | Download Unicode data scripts and blocks. | Below is the the instruction that describes the task:
### Input:
Download Unicode data scripts and blocks.
### Response:
def download_unicodedata(version, output=HOME, no_zip=False):
"""Download Unicode data scripts and blocks."""
files = [
'UnicodeData.txt',
'Scripts.txt',
'Blocks.txt',
'PropList.txt',
'DerivedCoreProperties.txt',
'DerivedNormalizationProps.txt',
'CompositionExclusions.txt',
'PropertyValueAliases.txt',
'PropertyAliases.txt',
'EastAsianWidth.txt',
'LineBreak.txt',
'HangulSyllableType.txt',
'DerivedAge.txt',
'auxiliary/WordBreakProperty.txt',
'auxiliary/SentenceBreakProperty.txt',
'auxiliary/GraphemeBreakProperty.txt',
'extracted/DerivedDecompositionType.txt',
'extracted/DerivedNumericType.txt',
'extracted/DerivedNumericValues.txt',
'extracted/DerivedJoiningType.txt',
'extracted/DerivedJoiningGroup.txt',
'extracted/DerivedCombiningClass.txt'
]
files.append('ScriptExtensions.txt')
if PY35:
files.append('IndicPositionalCategory.txt')
else:
files.append('IndicMatraCategory.txt')
files.append('IndicSyllabicCategory.txt')
if PY34:
files.append('BidiBrackets.txt')
if PY37:
files.append('VerticalOrientation.txt')
http_url = 'http://www.unicode.org/Public/%s/ucd/' % version
ftp_url = 'ftp://ftp.unicode.org/Public/%s/ucd/' % version
destination = os.path.join(output, 'unicodedata', version)
if not os.path.exists(destination):
os.makedirs(destination)
zip_data = not no_zip
for f in files:
file_location = os.path.join(destination, os.path.basename(f))
retrieved = False
if not os.path.exists(file_location):
for url in (ftp_url, http_url):
furl = url + f
try:
print('Downloading: %s --> %s' % (furl, file_location))
response = urlopen(furl, timeout=30)
data = response.read()
except Exception:
print('Failed: %s' % url)
continue
with open(file_location, 'w') as uf:
uf.write(data.decode('utf-8'))
retrieved = True
break
if not retrieved:
print('Failed to acquire all needed Unicode files!')
break
else:
retrieved = True
print('Skipping: found %s' % file_location)
if not retrieved:
zip_data = False
break
if zip_data and not os.path.exists(os.path.join(output, 'unicodedata', '%s.zip' % version)):
zip_unicode(output, version) |
def pprnt(input, return_data=False):
"""
Prettier print for nested data
Args:
input: Input data
return_data (bool): Default False. Print outs if False, returns if True.
Returns:
None | Pretty formatted text representation of input data.
"""
HEADER = '\033[95m'
OKBLUE = '\033[94m'
OKGREEN = '\033[32m'
WARNING = '\033[93m'
FAIL = '\033[91m'
ENDC = '\033[0m'
BOLD = '\033[1m'
UNDERLINE = '\033[4m'
import json, re
result = json.dumps(input, sort_keys=True, indent=4)
result = re.sub(r'(")(\w*?_id)(":)', r'\1%s%s\2%s\3' % (BOLD, HEADER, ENDC), result)
result = re.sub(r'(")(\w*?_set)(":)', r'\1%s%s\2%s\3' % (BOLD, HEADER, ENDC), result)
result = re.sub(r'(\n *?")(\w*?)(":)', r'\1%s%s\2%s\3' % (BOLD, OKGREEN, ENDC), result)
if not return_data:
print(result)
else:
return result | Prettier print for nested data
Args:
input: Input data
return_data (bool): Default False. Print outs if False, returns if True.
Returns:
None | Pretty formatted text representation of input data. | Below is the the instruction that describes the task:
### Input:
Prettier print for nested data
Args:
input: Input data
return_data (bool): Default False. Print outs if False, returns if True.
Returns:
None | Pretty formatted text representation of input data.
### Response:
def pprnt(input, return_data=False):
"""
Prettier print for nested data
Args:
input: Input data
return_data (bool): Default False. Print outs if False, returns if True.
Returns:
None | Pretty formatted text representation of input data.
"""
HEADER = '\033[95m'
OKBLUE = '\033[94m'
OKGREEN = '\033[32m'
WARNING = '\033[93m'
FAIL = '\033[91m'
ENDC = '\033[0m'
BOLD = '\033[1m'
UNDERLINE = '\033[4m'
import json, re
result = json.dumps(input, sort_keys=True, indent=4)
result = re.sub(r'(")(\w*?_id)(":)', r'\1%s%s\2%s\3' % (BOLD, HEADER, ENDC), result)
result = re.sub(r'(")(\w*?_set)(":)', r'\1%s%s\2%s\3' % (BOLD, HEADER, ENDC), result)
result = re.sub(r'(\n *?")(\w*?)(":)', r'\1%s%s\2%s\3' % (BOLD, OKGREEN, ENDC), result)
if not return_data:
print(result)
else:
return result |
def setWorkingCollisionBoundsInfo(self, unQuadsCount):
"""Sets the Collision Bounds in the working copy."""
fn = self.function_table.setWorkingCollisionBoundsInfo
pQuadsBuffer = HmdQuad_t()
fn(byref(pQuadsBuffer), unQuadsCount)
return pQuadsBuffer | Sets the Collision Bounds in the working copy. | Below is the the instruction that describes the task:
### Input:
Sets the Collision Bounds in the working copy.
### Response:
def setWorkingCollisionBoundsInfo(self, unQuadsCount):
"""Sets the Collision Bounds in the working copy."""
fn = self.function_table.setWorkingCollisionBoundsInfo
pQuadsBuffer = HmdQuad_t()
fn(byref(pQuadsBuffer), unQuadsCount)
return pQuadsBuffer |
def unpack_request(environ, content_length=0):
"""
Unpacks a get or post request query string.
:param environ: whiskey application environment.
:return: A dictionary with parameters.
"""
data = None
if environ["REQUEST_METHOD"] == "GET":
data = unpack_get(environ)
elif environ["REQUEST_METHOD"] == "POST":
data = unpack_post(environ, content_length)
logger.debug("read request data: %s", data)
return data | Unpacks a get or post request query string.
:param environ: whiskey application environment.
:return: A dictionary with parameters. | Below is the the instruction that describes the task:
### Input:
Unpacks a get or post request query string.
:param environ: whiskey application environment.
:return: A dictionary with parameters.
### Response:
def unpack_request(environ, content_length=0):
"""
Unpacks a get or post request query string.
:param environ: whiskey application environment.
:return: A dictionary with parameters.
"""
data = None
if environ["REQUEST_METHOD"] == "GET":
data = unpack_get(environ)
elif environ["REQUEST_METHOD"] == "POST":
data = unpack_post(environ, content_length)
logger.debug("read request data: %s", data)
return data |
def _configure_from_module(self, item):
"""Configure from a module by import path.
Effectively, you give this an absolute or relative import path, it will
import it, and then pass the resulting object to
``_configure_from_object``.
Args:
item (str):
A string pointing to a valid import path.
Returns:
fleaker.App:
Returns itself.
"""
package = None
if item[0] == '.':
package = self.import_name
obj = importlib.import_module(item, package=package)
self.config.from_object(obj)
return self | Configure from a module by import path.
Effectively, you give this an absolute or relative import path, it will
import it, and then pass the resulting object to
``_configure_from_object``.
Args:
item (str):
A string pointing to a valid import path.
Returns:
fleaker.App:
Returns itself. | Below is the the instruction that describes the task:
### Input:
Configure from a module by import path.
Effectively, you give this an absolute or relative import path, it will
import it, and then pass the resulting object to
``_configure_from_object``.
Args:
item (str):
A string pointing to a valid import path.
Returns:
fleaker.App:
Returns itself.
### Response:
def _configure_from_module(self, item):
"""Configure from a module by import path.
Effectively, you give this an absolute or relative import path, it will
import it, and then pass the resulting object to
``_configure_from_object``.
Args:
item (str):
A string pointing to a valid import path.
Returns:
fleaker.App:
Returns itself.
"""
package = None
if item[0] == '.':
package = self.import_name
obj = importlib.import_module(item, package=package)
self.config.from_object(obj)
return self |
def repack(self):
"""Removes any blank ranks in the order."""
items = self.grouped_filter().order_by('rank').select_for_update()
for count, item in enumerate(items):
item.rank = count + 1
item.save(rerank=False) | Removes any blank ranks in the order. | Below is the the instruction that describes the task:
### Input:
Removes any blank ranks in the order.
### Response:
def repack(self):
"""Removes any blank ranks in the order."""
items = self.grouped_filter().order_by('rank').select_for_update()
for count, item in enumerate(items):
item.rank = count + 1
item.save(rerank=False) |
def with_timeout(
timeout: Union[float, datetime.timedelta],
future: _Yieldable,
quiet_exceptions: "Union[Type[Exception], Tuple[Type[Exception], ...]]" = (),
) -> Future:
"""Wraps a `.Future` (or other yieldable object) in a timeout.
Raises `tornado.util.TimeoutError` if the input future does not
complete before ``timeout``, which may be specified in any form
allowed by `.IOLoop.add_timeout` (i.e. a `datetime.timedelta` or
an absolute time relative to `.IOLoop.time`)
If the wrapped `.Future` fails after it has timed out, the exception
will be logged unless it is of a type contained in ``quiet_exceptions``
(which may be an exception type or a sequence of types).
The wrapped `.Future` is not canceled when the timeout expires,
permitting it to be reused. `asyncio.wait_for` is similar to this
function but it does cancel the wrapped `.Future` on timeout.
.. versionadded:: 4.0
.. versionchanged:: 4.1
Added the ``quiet_exceptions`` argument and the logging of unhandled
exceptions.
.. versionchanged:: 4.4
Added support for yieldable objects other than `.Future`.
"""
# It's tempting to optimize this by cancelling the input future on timeout
# instead of creating a new one, but A) we can't know if we are the only
# one waiting on the input future, so cancelling it might disrupt other
# callers and B) concurrent futures can only be cancelled while they are
# in the queue, so cancellation cannot reliably bound our waiting time.
future_converted = convert_yielded(future)
result = _create_future()
chain_future(future_converted, result)
io_loop = IOLoop.current()
def error_callback(future: Future) -> None:
try:
future.result()
except Exception as e:
if not isinstance(e, quiet_exceptions):
app_log.error(
"Exception in Future %r after timeout", future, exc_info=True
)
def timeout_callback() -> None:
if not result.done():
result.set_exception(TimeoutError("Timeout"))
# In case the wrapped future goes on to fail, log it.
future_add_done_callback(future_converted, error_callback)
timeout_handle = io_loop.add_timeout(timeout, timeout_callback)
if isinstance(future_converted, Future):
# We know this future will resolve on the IOLoop, so we don't
# need the extra thread-safety of IOLoop.add_future (and we also
# don't care about StackContext here.
future_add_done_callback(
future_converted, lambda future: io_loop.remove_timeout(timeout_handle)
)
else:
# concurrent.futures.Futures may resolve on any thread, so we
# need to route them back to the IOLoop.
io_loop.add_future(
future_converted, lambda future: io_loop.remove_timeout(timeout_handle)
)
return result | Wraps a `.Future` (or other yieldable object) in a timeout.
Raises `tornado.util.TimeoutError` if the input future does not
complete before ``timeout``, which may be specified in any form
allowed by `.IOLoop.add_timeout` (i.e. a `datetime.timedelta` or
an absolute time relative to `.IOLoop.time`)
If the wrapped `.Future` fails after it has timed out, the exception
will be logged unless it is of a type contained in ``quiet_exceptions``
(which may be an exception type or a sequence of types).
The wrapped `.Future` is not canceled when the timeout expires,
permitting it to be reused. `asyncio.wait_for` is similar to this
function but it does cancel the wrapped `.Future` on timeout.
.. versionadded:: 4.0
.. versionchanged:: 4.1
Added the ``quiet_exceptions`` argument and the logging of unhandled
exceptions.
.. versionchanged:: 4.4
Added support for yieldable objects other than `.Future`. | Below is the the instruction that describes the task:
### Input:
Wraps a `.Future` (or other yieldable object) in a timeout.
Raises `tornado.util.TimeoutError` if the input future does not
complete before ``timeout``, which may be specified in any form
allowed by `.IOLoop.add_timeout` (i.e. a `datetime.timedelta` or
an absolute time relative to `.IOLoop.time`)
If the wrapped `.Future` fails after it has timed out, the exception
will be logged unless it is of a type contained in ``quiet_exceptions``
(which may be an exception type or a sequence of types).
The wrapped `.Future` is not canceled when the timeout expires,
permitting it to be reused. `asyncio.wait_for` is similar to this
function but it does cancel the wrapped `.Future` on timeout.
.. versionadded:: 4.0
.. versionchanged:: 4.1
Added the ``quiet_exceptions`` argument and the logging of unhandled
exceptions.
.. versionchanged:: 4.4
Added support for yieldable objects other than `.Future`.
### Response:
def with_timeout(
timeout: Union[float, datetime.timedelta],
future: _Yieldable,
quiet_exceptions: "Union[Type[Exception], Tuple[Type[Exception], ...]]" = (),
) -> Future:
"""Wraps a `.Future` (or other yieldable object) in a timeout.
Raises `tornado.util.TimeoutError` if the input future does not
complete before ``timeout``, which may be specified in any form
allowed by `.IOLoop.add_timeout` (i.e. a `datetime.timedelta` or
an absolute time relative to `.IOLoop.time`)
If the wrapped `.Future` fails after it has timed out, the exception
will be logged unless it is of a type contained in ``quiet_exceptions``
(which may be an exception type or a sequence of types).
The wrapped `.Future` is not canceled when the timeout expires,
permitting it to be reused. `asyncio.wait_for` is similar to this
function but it does cancel the wrapped `.Future` on timeout.
.. versionadded:: 4.0
.. versionchanged:: 4.1
Added the ``quiet_exceptions`` argument and the logging of unhandled
exceptions.
.. versionchanged:: 4.4
Added support for yieldable objects other than `.Future`.
"""
# It's tempting to optimize this by cancelling the input future on timeout
# instead of creating a new one, but A) we can't know if we are the only
# one waiting on the input future, so cancelling it might disrupt other
# callers and B) concurrent futures can only be cancelled while they are
# in the queue, so cancellation cannot reliably bound our waiting time.
future_converted = convert_yielded(future)
result = _create_future()
chain_future(future_converted, result)
io_loop = IOLoop.current()
def error_callback(future: Future) -> None:
try:
future.result()
except Exception as e:
if not isinstance(e, quiet_exceptions):
app_log.error(
"Exception in Future %r after timeout", future, exc_info=True
)
def timeout_callback() -> None:
if not result.done():
result.set_exception(TimeoutError("Timeout"))
# In case the wrapped future goes on to fail, log it.
future_add_done_callback(future_converted, error_callback)
timeout_handle = io_loop.add_timeout(timeout, timeout_callback)
if isinstance(future_converted, Future):
# We know this future will resolve on the IOLoop, so we don't
# need the extra thread-safety of IOLoop.add_future (and we also
# don't care about StackContext here.
future_add_done_callback(
future_converted, lambda future: io_loop.remove_timeout(timeout_handle)
)
else:
# concurrent.futures.Futures may resolve on any thread, so we
# need to route them back to the IOLoop.
io_loop.add_future(
future_converted, lambda future: io_loop.remove_timeout(timeout_handle)
)
return result |
def invoke_script(self, script, id=None, endpoint=None):
"""
Invokes a script that has been assembled
Args:
script: (str) a hexlified string of a contract invocation script, example '00c10b746f74616c537570706c796754a64cac1b1073e662933ef3e30b007cd98d67d7'
id: (int, optional) id to use for response tracking
endpoint: (RPCEndpoint, optional) endpoint to specify to use
Returns:
json object of the result or the error encountered in the RPC call
"""
return self._call_endpoint(INVOKE_SCRIPT, params=[script], id=id, endpoint=endpoint) | Invokes a script that has been assembled
Args:
script: (str) a hexlified string of a contract invocation script, example '00c10b746f74616c537570706c796754a64cac1b1073e662933ef3e30b007cd98d67d7'
id: (int, optional) id to use for response tracking
endpoint: (RPCEndpoint, optional) endpoint to specify to use
Returns:
json object of the result or the error encountered in the RPC call | Below is the the instruction that describes the task:
### Input:
Invokes a script that has been assembled
Args:
script: (str) a hexlified string of a contract invocation script, example '00c10b746f74616c537570706c796754a64cac1b1073e662933ef3e30b007cd98d67d7'
id: (int, optional) id to use for response tracking
endpoint: (RPCEndpoint, optional) endpoint to specify to use
Returns:
json object of the result or the error encountered in the RPC call
### Response:
def invoke_script(self, script, id=None, endpoint=None):
"""
Invokes a script that has been assembled
Args:
script: (str) a hexlified string of a contract invocation script, example '00c10b746f74616c537570706c796754a64cac1b1073e662933ef3e30b007cd98d67d7'
id: (int, optional) id to use for response tracking
endpoint: (RPCEndpoint, optional) endpoint to specify to use
Returns:
json object of the result or the error encountered in the RPC call
"""
return self._call_endpoint(INVOKE_SCRIPT, params=[script], id=id, endpoint=endpoint) |
def get_span(self, request=None):
"""
Returns the span tracing `request`, or the current request if
`request==None`.
If there is no such span, get_span returns None.
@param request the request to get the span from
"""
if request is None and stack.top:
request = stack.top.request
scope = self._current_scopes.get(request, None)
return None if scope is None else scope.span | Returns the span tracing `request`, or the current request if
`request==None`.
If there is no such span, get_span returns None.
@param request the request to get the span from | Below is the the instruction that describes the task:
### Input:
Returns the span tracing `request`, or the current request if
`request==None`.
If there is no such span, get_span returns None.
@param request the request to get the span from
### Response:
def get_span(self, request=None):
"""
Returns the span tracing `request`, or the current request if
`request==None`.
If there is no such span, get_span returns None.
@param request the request to get the span from
"""
if request is None and stack.top:
request = stack.top.request
scope = self._current_scopes.get(request, None)
return None if scope is None else scope.span |
def get_server_ipaddress(self, trust):
"""
Return the ip address of sender
Overview:
Extract a reliable sender IP address heuristically for each message.
Although the message format dictates a chain of relaying IP
addresses in each message, a malicious relay can easily alter that.
Therefore we cannot simply take the first IP in
the chain. Instead, our method is as follows.
First we trust the sender IP reported by our mail server in the
Received headers, and if the previous relay IP address is on our trust
list (e.g. other well-known mail services), we continue to
follow the previous Received line, till we reach the first unrecognized
IP address in the email header.
From article Characterizing Botnets from Email Spam Records:
Li Zhuang, J. D. Tygar
In our case we trust only our mail server with the trust string.
Args:
trust (string): String that identify our mail server
Returns:
string with the ip address
"""
log.debug("Trust string is {!r}".format(trust))
if not trust.strip():
return
received = self.message.get_all("received", [])
for i in received:
i = ported_string(i)
if trust in i:
log.debug("Trust string {!r} is in {!r}".format(trust, i))
check = REGXIP.findall(i[0:i.find("by")])
if check:
try:
ip_str = six.text_type(check[-1])
log.debug("Found sender IP {!r} in {!r}".format(
ip_str, i))
ip = ipaddress.ip_address(ip_str)
except ValueError:
return
else:
if not ip.is_private:
log.debug("IP {!r} not private".format(ip_str))
return ip_str | Return the ip address of sender
Overview:
Extract a reliable sender IP address heuristically for each message.
Although the message format dictates a chain of relaying IP
addresses in each message, a malicious relay can easily alter that.
Therefore we cannot simply take the first IP in
the chain. Instead, our method is as follows.
First we trust the sender IP reported by our mail server in the
Received headers, and if the previous relay IP address is on our trust
list (e.g. other well-known mail services), we continue to
follow the previous Received line, till we reach the first unrecognized
IP address in the email header.
From article Characterizing Botnets from Email Spam Records:
Li Zhuang, J. D. Tygar
In our case we trust only our mail server with the trust string.
Args:
trust (string): String that identify our mail server
Returns:
string with the ip address | Below is the the instruction that describes the task:
### Input:
Return the ip address of sender
Overview:
Extract a reliable sender IP address heuristically for each message.
Although the message format dictates a chain of relaying IP
addresses in each message, a malicious relay can easily alter that.
Therefore we cannot simply take the first IP in
the chain. Instead, our method is as follows.
First we trust the sender IP reported by our mail server in the
Received headers, and if the previous relay IP address is on our trust
list (e.g. other well-known mail services), we continue to
follow the previous Received line, till we reach the first unrecognized
IP address in the email header.
From article Characterizing Botnets from Email Spam Records:
Li Zhuang, J. D. Tygar
In our case we trust only our mail server with the trust string.
Args:
trust (string): String that identify our mail server
Returns:
string with the ip address
### Response:
def get_server_ipaddress(self, trust):
"""
Return the ip address of sender
Overview:
Extract a reliable sender IP address heuristically for each message.
Although the message format dictates a chain of relaying IP
addresses in each message, a malicious relay can easily alter that.
Therefore we cannot simply take the first IP in
the chain. Instead, our method is as follows.
First we trust the sender IP reported by our mail server in the
Received headers, and if the previous relay IP address is on our trust
list (e.g. other well-known mail services), we continue to
follow the previous Received line, till we reach the first unrecognized
IP address in the email header.
From article Characterizing Botnets from Email Spam Records:
Li Zhuang, J. D. Tygar
In our case we trust only our mail server with the trust string.
Args:
trust (string): String that identify our mail server
Returns:
string with the ip address
"""
log.debug("Trust string is {!r}".format(trust))
if not trust.strip():
return
received = self.message.get_all("received", [])
for i in received:
i = ported_string(i)
if trust in i:
log.debug("Trust string {!r} is in {!r}".format(trust, i))
check = REGXIP.findall(i[0:i.find("by")])
if check:
try:
ip_str = six.text_type(check[-1])
log.debug("Found sender IP {!r} in {!r}".format(
ip_str, i))
ip = ipaddress.ip_address(ip_str)
except ValueError:
return
else:
if not ip.is_private:
log.debug("IP {!r} not private".format(ip_str))
return ip_str |
def array(self):
"""
return the underlying numpy array
"""
return np.logspace(self.start, self.stop, self.num, self.endpoint, self.base) | return the underlying numpy array | Below is the the instruction that describes the task:
### Input:
return the underlying numpy array
### Response:
def array(self):
"""
return the underlying numpy array
"""
return np.logspace(self.start, self.stop, self.num, self.endpoint, self.base) |
def scaffold(args):
"""
%prog scaffold scaffold.fasta synteny.blast synteny.sizes synteny.bed
physicalmap.blast physicalmap.sizes physicalmap.bed
As evaluation of scaffolding, visualize external line of evidences:
* Plot synteny to an external genome
* Plot alignments to physical map
* Plot alignments to genetic map (TODO)
Each trio defines one panel to be plotted. blastfile defines the matchings
between the evidences vs scaffolds. Then the evidence sizes, and evidence
bed to plot dot plots.
This script will plot a dot in the dot plot in the corresponding location
the plots are one contig/scaffold per plot.
"""
from jcvi.utils.iter import grouper
p = OptionParser(scaffold.__doc__)
p.add_option("--cutoff", type="int", default=1000000,
help="Plot scaffolds with size larger than [default: %default]")
p.add_option("--highlights",
help="A set of regions in BED format to highlight [default: %default]")
opts, args, iopts = p.set_image_options(args, figsize="14x8", dpi=150)
if len(args) < 4 or len(args) % 3 != 1:
sys.exit(not p.print_help())
highlights = opts.highlights
scafsizes = Sizes(args[0])
trios = list(grouper(args[1:], 3))
trios = [(a, Sizes(b), Bed(c)) for a, b, c in trios]
if highlights:
hlbed = Bed(highlights)
for scaffoldID, scafsize in scafsizes.iter_sizes():
if scafsize < opts.cutoff:
continue
logging.debug("Loading {0} (size={1})".format(scaffoldID,
thousands(scafsize)))
tmpname = scaffoldID + ".sizes"
tmp = open(tmpname, "w")
tmp.write("{0}\t{1}".format(scaffoldID, scafsize))
tmp.close()
tmpsizes = Sizes(tmpname)
tmpsizes.close(clean=True)
if highlights:
subhighlights = list(hlbed.sub_bed(scaffoldID))
imagename = ".".join((scaffoldID, opts.format))
plot_one_scaffold(scaffoldID, tmpsizes, None, trios, imagename, iopts,
highlights=subhighlights) | %prog scaffold scaffold.fasta synteny.blast synteny.sizes synteny.bed
physicalmap.blast physicalmap.sizes physicalmap.bed
As evaluation of scaffolding, visualize external line of evidences:
* Plot synteny to an external genome
* Plot alignments to physical map
* Plot alignments to genetic map (TODO)
Each trio defines one panel to be plotted. blastfile defines the matchings
between the evidences vs scaffolds. Then the evidence sizes, and evidence
bed to plot dot plots.
This script will plot a dot in the dot plot in the corresponding location
the plots are one contig/scaffold per plot. | Below is the the instruction that describes the task:
### Input:
%prog scaffold scaffold.fasta synteny.blast synteny.sizes synteny.bed
physicalmap.blast physicalmap.sizes physicalmap.bed
As evaluation of scaffolding, visualize external line of evidences:
* Plot synteny to an external genome
* Plot alignments to physical map
* Plot alignments to genetic map (TODO)
Each trio defines one panel to be plotted. blastfile defines the matchings
between the evidences vs scaffolds. Then the evidence sizes, and evidence
bed to plot dot plots.
This script will plot a dot in the dot plot in the corresponding location
the plots are one contig/scaffold per plot.
### Response:
def scaffold(args):
"""
%prog scaffold scaffold.fasta synteny.blast synteny.sizes synteny.bed
physicalmap.blast physicalmap.sizes physicalmap.bed
As evaluation of scaffolding, visualize external line of evidences:
* Plot synteny to an external genome
* Plot alignments to physical map
* Plot alignments to genetic map (TODO)
Each trio defines one panel to be plotted. blastfile defines the matchings
between the evidences vs scaffolds. Then the evidence sizes, and evidence
bed to plot dot plots.
This script will plot a dot in the dot plot in the corresponding location
the plots are one contig/scaffold per plot.
"""
from jcvi.utils.iter import grouper
p = OptionParser(scaffold.__doc__)
p.add_option("--cutoff", type="int", default=1000000,
help="Plot scaffolds with size larger than [default: %default]")
p.add_option("--highlights",
help="A set of regions in BED format to highlight [default: %default]")
opts, args, iopts = p.set_image_options(args, figsize="14x8", dpi=150)
if len(args) < 4 or len(args) % 3 != 1:
sys.exit(not p.print_help())
highlights = opts.highlights
scafsizes = Sizes(args[0])
trios = list(grouper(args[1:], 3))
trios = [(a, Sizes(b), Bed(c)) for a, b, c in trios]
if highlights:
hlbed = Bed(highlights)
for scaffoldID, scafsize in scafsizes.iter_sizes():
if scafsize < opts.cutoff:
continue
logging.debug("Loading {0} (size={1})".format(scaffoldID,
thousands(scafsize)))
tmpname = scaffoldID + ".sizes"
tmp = open(tmpname, "w")
tmp.write("{0}\t{1}".format(scaffoldID, scafsize))
tmp.close()
tmpsizes = Sizes(tmpname)
tmpsizes.close(clean=True)
if highlights:
subhighlights = list(hlbed.sub_bed(scaffoldID))
imagename = ".".join((scaffoldID, opts.format))
plot_one_scaffold(scaffoldID, tmpsizes, None, trios, imagename, iopts,
highlights=subhighlights) |
def status(DomainName,
region=None, key=None, keyid=None, profile=None):
'''
Given a domain name describe its status.
Returns a dictionary of interesting properties.
CLI Example:
.. code-block:: bash
salt myminion boto_elasticsearch_domain.status mydomain
'''
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
try:
domain = conn.describe_elasticsearch_domain(DomainName=DomainName)
if domain and 'DomainStatus' in domain:
domain = domain.get('DomainStatus', {})
keys = ('Endpoint', 'Created', 'Deleted',
'DomainName', 'DomainId', 'EBSOptions', 'SnapshotOptions',
'AccessPolicies', 'Processing', 'AdvancedOptions', 'ARN',
'ElasticsearchVersion')
return {'domain': dict([(k, domain.get(k)) for k in keys if k in domain])}
else:
return {'domain': None}
except ClientError as e:
return {'error': __utils__['boto3.get_error'](e)} | Given a domain name describe its status.
Returns a dictionary of interesting properties.
CLI Example:
.. code-block:: bash
salt myminion boto_elasticsearch_domain.status mydomain | Below is the the instruction that describes the task:
### Input:
Given a domain name describe its status.
Returns a dictionary of interesting properties.
CLI Example:
.. code-block:: bash
salt myminion boto_elasticsearch_domain.status mydomain
### Response:
def status(DomainName,
region=None, key=None, keyid=None, profile=None):
'''
Given a domain name describe its status.
Returns a dictionary of interesting properties.
CLI Example:
.. code-block:: bash
salt myminion boto_elasticsearch_domain.status mydomain
'''
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
try:
domain = conn.describe_elasticsearch_domain(DomainName=DomainName)
if domain and 'DomainStatus' in domain:
domain = domain.get('DomainStatus', {})
keys = ('Endpoint', 'Created', 'Deleted',
'DomainName', 'DomainId', 'EBSOptions', 'SnapshotOptions',
'AccessPolicies', 'Processing', 'AdvancedOptions', 'ARN',
'ElasticsearchVersion')
return {'domain': dict([(k, domain.get(k)) for k in keys if k in domain])}
else:
return {'domain': None}
except ClientError as e:
return {'error': __utils__['boto3.get_error'](e)} |
def target_types_for_alias(self, alias):
"""Returns all the target types that might be produced by the given alias.
Normally there is 1 target type per alias, but macros can expand a single alias to several
target types.
:param string alias: The alias to look up associated target types for.
:returns: The set of target types that can be produced by the given alias.
:raises :class:`TargetFilterTaskMixin.InvalidTargetType`: when no target types correspond to
the given `alias`.
"""
registered_aliases = self.context.build_configuration.registered_aliases()
target_types = registered_aliases.target_types_by_alias.get(alias, None)
if not target_types:
raise self.InvalidTargetType('Not a target type: {}'.format(alias))
return target_types | Returns all the target types that might be produced by the given alias.
Normally there is 1 target type per alias, but macros can expand a single alias to several
target types.
:param string alias: The alias to look up associated target types for.
:returns: The set of target types that can be produced by the given alias.
:raises :class:`TargetFilterTaskMixin.InvalidTargetType`: when no target types correspond to
the given `alias`. | Below is the the instruction that describes the task:
### Input:
Returns all the target types that might be produced by the given alias.
Normally there is 1 target type per alias, but macros can expand a single alias to several
target types.
:param string alias: The alias to look up associated target types for.
:returns: The set of target types that can be produced by the given alias.
:raises :class:`TargetFilterTaskMixin.InvalidTargetType`: when no target types correspond to
the given `alias`.
### Response:
def target_types_for_alias(self, alias):
"""Returns all the target types that might be produced by the given alias.
Normally there is 1 target type per alias, but macros can expand a single alias to several
target types.
:param string alias: The alias to look up associated target types for.
:returns: The set of target types that can be produced by the given alias.
:raises :class:`TargetFilterTaskMixin.InvalidTargetType`: when no target types correspond to
the given `alias`.
"""
registered_aliases = self.context.build_configuration.registered_aliases()
target_types = registered_aliases.target_types_by_alias.get(alias, None)
if not target_types:
raise self.InvalidTargetType('Not a target type: {}'.format(alias))
return target_types |
def _interpolate(self, factor, minInfo, maxInfo, round=True, suppressError=True):
"""
Subclasses may override this method.
"""
minInfo = minInfo._toMathInfo()
maxInfo = maxInfo._toMathInfo()
result = interpolate(minInfo, maxInfo, factor)
if result is None and not suppressError:
raise FontPartsError(("Info from font '%s' and font '%s' could not be "
"interpolated.")
% (minInfo.font.name, maxInfo.font.name))
if round:
result = result.round()
self._fromMathInfo(result) | Subclasses may override this method. | Below is the the instruction that describes the task:
### Input:
Subclasses may override this method.
### Response:
def _interpolate(self, factor, minInfo, maxInfo, round=True, suppressError=True):
"""
Subclasses may override this method.
"""
minInfo = minInfo._toMathInfo()
maxInfo = maxInfo._toMathInfo()
result = interpolate(minInfo, maxInfo, factor)
if result is None and not suppressError:
raise FontPartsError(("Info from font '%s' and font '%s' could not be "
"interpolated.")
% (minInfo.font.name, maxInfo.font.name))
if round:
result = result.round()
self._fromMathInfo(result) |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.