code stringlengths 75 104k | docstring stringlengths 1 46.9k | text stringlengths 164 112k |
|---|---|---|
def Rsky(self):
"""
Projected angular distance between "primary" and "secondary" (exact meaning varies)
"""
r = (self.orbpop.Rsky/self.distance)
return r.to('arcsec',equivalencies=u.dimensionless_angles()) | Projected angular distance between "primary" and "secondary" (exact meaning varies) | Below is the the instruction that describes the task:
### Input:
Projected angular distance between "primary" and "secondary" (exact meaning varies)
### Response:
def Rsky(self):
"""
Projected angular distance between "primary" and "secondary" (exact meaning varies)
"""
r = (self.orbpop.Rsky/self.distance)
return r.to('arcsec',equivalencies=u.dimensionless_angles()) |
def operator(self, operatorType, value):
"""
Returns the operator that best matches the type and value.
:param operatorType | <Query.Op>
value | <variant>
:return <str>
"""
if value is None:
if 'is none' in self._operatorMap and operatorType == Query.Op.Is:
return 'is none'
elif 'is not none' in self._operatorMap:
return 'is not none'
for op, data in self._operatorMap.items():
if data.op == operatorType:
return op
return '' | Returns the operator that best matches the type and value.
:param operatorType | <Query.Op>
value | <variant>
:return <str> | Below is the the instruction that describes the task:
### Input:
Returns the operator that best matches the type and value.
:param operatorType | <Query.Op>
value | <variant>
:return <str>
### Response:
def operator(self, operatorType, value):
"""
Returns the operator that best matches the type and value.
:param operatorType | <Query.Op>
value | <variant>
:return <str>
"""
if value is None:
if 'is none' in self._operatorMap and operatorType == Query.Op.Is:
return 'is none'
elif 'is not none' in self._operatorMap:
return 'is not none'
for op, data in self._operatorMap.items():
if data.op == operatorType:
return op
return '' |
def split_arg_string(string):
"""Given an argument string this attempts to split it into small parts."""
rv = []
for match in re.finditer(r"('([^'\\]*(?:\\.[^'\\]*)*)'"
r'|"([^"\\]*(?:\\.[^"\\]*)*)"'
r'|\S+)\s*', string, re.S):
arg = match.group().strip()
if arg[:1] == arg[-1:] and arg[:1] in '"\'':
arg = arg[1:-1].encode('ascii', 'backslashreplace') \
.decode('unicode-escape')
try:
arg = type(string)(arg)
except UnicodeError:
pass
rv.append(arg)
return rv | Given an argument string this attempts to split it into small parts. | Below is the the instruction that describes the task:
### Input:
Given an argument string this attempts to split it into small parts.
### Response:
def split_arg_string(string):
"""Given an argument string this attempts to split it into small parts."""
rv = []
for match in re.finditer(r"('([^'\\]*(?:\\.[^'\\]*)*)'"
r'|"([^"\\]*(?:\\.[^"\\]*)*)"'
r'|\S+)\s*', string, re.S):
arg = match.group().strip()
if arg[:1] == arg[-1:] and arg[:1] in '"\'':
arg = arg[1:-1].encode('ascii', 'backslashreplace') \
.decode('unicode-escape')
try:
arg = type(string)(arg)
except UnicodeError:
pass
rv.append(arg)
return rv |
def _finalize(self, fill_value=None, dtype=np.uint8, keep_palette=False, cmap=None):
"""Wrapper around 'finalize' method for backwards compatibility."""
import warnings
warnings.warn("'_finalize' is deprecated, use 'finalize' instead.",
DeprecationWarning)
return self.finalize(fill_value, dtype, keep_palette, cmap) | Wrapper around 'finalize' method for backwards compatibility. | Below is the the instruction that describes the task:
### Input:
Wrapper around 'finalize' method for backwards compatibility.
### Response:
def _finalize(self, fill_value=None, dtype=np.uint8, keep_palette=False, cmap=None):
"""Wrapper around 'finalize' method for backwards compatibility."""
import warnings
warnings.warn("'_finalize' is deprecated, use 'finalize' instead.",
DeprecationWarning)
return self.finalize(fill_value, dtype, keep_palette, cmap) |
def _send_char(self,value):
"""
Convert a single char to a bytes object.
"""
if type(value) != str and type(value) != bytes:
err = "char requires a string or bytes array of length 1"
raise ValueError(err)
if len(value) != 1:
err = "char must be a single character, not \"{}\"".format(value)
raise ValueError(err)
if type(value) != bytes:
value = value.encode("ascii")
if value in self._escaped_characters:
err = "Cannot send a control character as a single char to arduino. Send as string instead."
raise OverflowError(err)
return struct.pack('c',value) | Convert a single char to a bytes object. | Below is the the instruction that describes the task:
### Input:
Convert a single char to a bytes object.
### Response:
def _send_char(self,value):
"""
Convert a single char to a bytes object.
"""
if type(value) != str and type(value) != bytes:
err = "char requires a string or bytes array of length 1"
raise ValueError(err)
if len(value) != 1:
err = "char must be a single character, not \"{}\"".format(value)
raise ValueError(err)
if type(value) != bytes:
value = value.encode("ascii")
if value in self._escaped_characters:
err = "Cannot send a control character as a single char to arduino. Send as string instead."
raise OverflowError(err)
return struct.pack('c',value) |
def read_templates(folder=None):
"""
Load yaml templates from template folder. Return list of dicts.
Use built-in templates if no folder is set.
Parameters
----------
folder : str
user defined folder where they stores their files, if None uses built-in templates
Returns
-------
output : Instance of `InvoiceTemplate`
template which match based on keywords
Examples
--------
>>> read_template("home/duskybomb/invoice-templates/")
InvoiceTemplate([('issuer', 'OYO'), ('fields', OrderedDict([('amount', 'GrandTotalRs(\\d+)'),
('date', 'Date:(\\d{1,2}\\/\\d{1,2}\\/\\d{1,4})'), ('invoice_number', '([A-Z0-9]+)CashatHotel')])),
('keywords', ['OYO', 'Oravel', 'Stays']), ('options', OrderedDict([('currency', 'INR'), ('decimal_separator', '.'),
('remove_whitespace', True)])), ('template_name', 'com.oyo.invoice.yml')])
After reading the template you can use the result as an instance of `InvoiceTemplate` to extract fields from
`extract_data()`
>>> my_template = InvoiceTemplate([('issuer', 'OYO'), ('fields', OrderedDict([('amount', 'GrandTotalRs(\\d+)'),
('date', 'Date:(\\d{1,2}\\/\\d{1,2}\\/\\d{1,4})'), ('invoice_number', '([A-Z0-9]+)CashatHotel')])),
('keywords', ['OYO', 'Oravel', 'Stays']), ('options', OrderedDict([('currency', 'INR'), ('decimal_separator', '.'),
('remove_whitespace', True)])), ('template_name', 'com.oyo.invoice.yml')])
>>> extract_data("invoice2data/test/pdfs/oyo.pdf", my_template, pdftotext)
{'issuer': 'OYO', 'amount': 1939.0, 'date': datetime.datetime(2017, 12, 31, 0, 0), 'invoice_number': 'IBZY2087',
'currency': 'INR', 'desc': 'Invoice IBZY2087 from OYO'}
"""
output = []
if folder is None:
folder = pkg_resources.resource_filename(__name__, 'templates')
for path, subdirs, files in os.walk(folder):
for name in sorted(files):
if name.endswith('.yml'):
with open(os.path.join(path, name), 'rb') as f:
encoding = chardet.detect(f.read())['encoding']
with codecs.open(os.path.join(path, name), encoding=encoding) as template_file:
tpl = ordered_load(template_file.read())
tpl['template_name'] = name
# Test if all required fields are in template:
assert 'keywords' in tpl.keys(), 'Missing keywords field.'
# Keywords as list, if only one.
if type(tpl['keywords']) is not list:
tpl['keywords'] = [tpl['keywords']]
output.append(InvoiceTemplate(tpl))
return output | Load yaml templates from template folder. Return list of dicts.
Use built-in templates if no folder is set.
Parameters
----------
folder : str
user defined folder where they stores their files, if None uses built-in templates
Returns
-------
output : Instance of `InvoiceTemplate`
template which match based on keywords
Examples
--------
>>> read_template("home/duskybomb/invoice-templates/")
InvoiceTemplate([('issuer', 'OYO'), ('fields', OrderedDict([('amount', 'GrandTotalRs(\\d+)'),
('date', 'Date:(\\d{1,2}\\/\\d{1,2}\\/\\d{1,4})'), ('invoice_number', '([A-Z0-9]+)CashatHotel')])),
('keywords', ['OYO', 'Oravel', 'Stays']), ('options', OrderedDict([('currency', 'INR'), ('decimal_separator', '.'),
('remove_whitespace', True)])), ('template_name', 'com.oyo.invoice.yml')])
After reading the template you can use the result as an instance of `InvoiceTemplate` to extract fields from
`extract_data()`
>>> my_template = InvoiceTemplate([('issuer', 'OYO'), ('fields', OrderedDict([('amount', 'GrandTotalRs(\\d+)'),
('date', 'Date:(\\d{1,2}\\/\\d{1,2}\\/\\d{1,4})'), ('invoice_number', '([A-Z0-9]+)CashatHotel')])),
('keywords', ['OYO', 'Oravel', 'Stays']), ('options', OrderedDict([('currency', 'INR'), ('decimal_separator', '.'),
('remove_whitespace', True)])), ('template_name', 'com.oyo.invoice.yml')])
>>> extract_data("invoice2data/test/pdfs/oyo.pdf", my_template, pdftotext)
{'issuer': 'OYO', 'amount': 1939.0, 'date': datetime.datetime(2017, 12, 31, 0, 0), 'invoice_number': 'IBZY2087',
'currency': 'INR', 'desc': 'Invoice IBZY2087 from OYO'} | Below is the the instruction that describes the task:
### Input:
Load yaml templates from template folder. Return list of dicts.
Use built-in templates if no folder is set.
Parameters
----------
folder : str
user defined folder where they stores their files, if None uses built-in templates
Returns
-------
output : Instance of `InvoiceTemplate`
template which match based on keywords
Examples
--------
>>> read_template("home/duskybomb/invoice-templates/")
InvoiceTemplate([('issuer', 'OYO'), ('fields', OrderedDict([('amount', 'GrandTotalRs(\\d+)'),
('date', 'Date:(\\d{1,2}\\/\\d{1,2}\\/\\d{1,4})'), ('invoice_number', '([A-Z0-9]+)CashatHotel')])),
('keywords', ['OYO', 'Oravel', 'Stays']), ('options', OrderedDict([('currency', 'INR'), ('decimal_separator', '.'),
('remove_whitespace', True)])), ('template_name', 'com.oyo.invoice.yml')])
After reading the template you can use the result as an instance of `InvoiceTemplate` to extract fields from
`extract_data()`
>>> my_template = InvoiceTemplate([('issuer', 'OYO'), ('fields', OrderedDict([('amount', 'GrandTotalRs(\\d+)'),
('date', 'Date:(\\d{1,2}\\/\\d{1,2}\\/\\d{1,4})'), ('invoice_number', '([A-Z0-9]+)CashatHotel')])),
('keywords', ['OYO', 'Oravel', 'Stays']), ('options', OrderedDict([('currency', 'INR'), ('decimal_separator', '.'),
('remove_whitespace', True)])), ('template_name', 'com.oyo.invoice.yml')])
>>> extract_data("invoice2data/test/pdfs/oyo.pdf", my_template, pdftotext)
{'issuer': 'OYO', 'amount': 1939.0, 'date': datetime.datetime(2017, 12, 31, 0, 0), 'invoice_number': 'IBZY2087',
'currency': 'INR', 'desc': 'Invoice IBZY2087 from OYO'}
### Response:
def read_templates(folder=None):
"""
Load yaml templates from template folder. Return list of dicts.
Use built-in templates if no folder is set.
Parameters
----------
folder : str
user defined folder where they stores their files, if None uses built-in templates
Returns
-------
output : Instance of `InvoiceTemplate`
template which match based on keywords
Examples
--------
>>> read_template("home/duskybomb/invoice-templates/")
InvoiceTemplate([('issuer', 'OYO'), ('fields', OrderedDict([('amount', 'GrandTotalRs(\\d+)'),
('date', 'Date:(\\d{1,2}\\/\\d{1,2}\\/\\d{1,4})'), ('invoice_number', '([A-Z0-9]+)CashatHotel')])),
('keywords', ['OYO', 'Oravel', 'Stays']), ('options', OrderedDict([('currency', 'INR'), ('decimal_separator', '.'),
('remove_whitespace', True)])), ('template_name', 'com.oyo.invoice.yml')])
After reading the template you can use the result as an instance of `InvoiceTemplate` to extract fields from
`extract_data()`
>>> my_template = InvoiceTemplate([('issuer', 'OYO'), ('fields', OrderedDict([('amount', 'GrandTotalRs(\\d+)'),
('date', 'Date:(\\d{1,2}\\/\\d{1,2}\\/\\d{1,4})'), ('invoice_number', '([A-Z0-9]+)CashatHotel')])),
('keywords', ['OYO', 'Oravel', 'Stays']), ('options', OrderedDict([('currency', 'INR'), ('decimal_separator', '.'),
('remove_whitespace', True)])), ('template_name', 'com.oyo.invoice.yml')])
>>> extract_data("invoice2data/test/pdfs/oyo.pdf", my_template, pdftotext)
{'issuer': 'OYO', 'amount': 1939.0, 'date': datetime.datetime(2017, 12, 31, 0, 0), 'invoice_number': 'IBZY2087',
'currency': 'INR', 'desc': 'Invoice IBZY2087 from OYO'}
"""
output = []
if folder is None:
folder = pkg_resources.resource_filename(__name__, 'templates')
for path, subdirs, files in os.walk(folder):
for name in sorted(files):
if name.endswith('.yml'):
with open(os.path.join(path, name), 'rb') as f:
encoding = chardet.detect(f.read())['encoding']
with codecs.open(os.path.join(path, name), encoding=encoding) as template_file:
tpl = ordered_load(template_file.read())
tpl['template_name'] = name
# Test if all required fields are in template:
assert 'keywords' in tpl.keys(), 'Missing keywords field.'
# Keywords as list, if only one.
if type(tpl['keywords']) is not list:
tpl['keywords'] = [tpl['keywords']]
output.append(InvoiceTemplate(tpl))
return output |
def font_info(font_str):
"""Extract font information from a font string, such as supplied to the
'font' argument to a widget.
"""
vals = font_str.split(';')
point_size, style, weight = 8, 'normal', 'normal'
family = vals[0]
if len(vals) > 1:
style = vals[1]
if len(vals) > 2:
weight = vals[2]
match = font_regex.match(family)
if match:
family, point_size = match.groups()
point_size = int(point_size)
return Bunch.Bunch(family=family, point_size=point_size,
style=style, weight=weight) | Extract font information from a font string, such as supplied to the
'font' argument to a widget. | Below is the the instruction that describes the task:
### Input:
Extract font information from a font string, such as supplied to the
'font' argument to a widget.
### Response:
def font_info(font_str):
"""Extract font information from a font string, such as supplied to the
'font' argument to a widget.
"""
vals = font_str.split(';')
point_size, style, weight = 8, 'normal', 'normal'
family = vals[0]
if len(vals) > 1:
style = vals[1]
if len(vals) > 2:
weight = vals[2]
match = font_regex.match(family)
if match:
family, point_size = match.groups()
point_size = int(point_size)
return Bunch.Bunch(family=family, point_size=point_size,
style=style, weight=weight) |
def _image_width(image):
"""
Returns the width of the image found at the path supplied by `image`
relative to your project's images directory.
"""
if not Image:
raise Exception("Images manipulation require PIL")
file = StringValue(image).value
path = None
try:
width = sprite_images[file][0]
except KeyError:
width = 0
if callable(STATIC_ROOT):
try:
_file, _storage = list(STATIC_ROOT(file))[0]
path = _storage.open(_file)
except:
pass
else:
_path = os.path.join(STATIC_ROOT, file)
if os.path.exists(_path):
path = open(_path, 'rb')
if path:
image = Image.open(path)
size = image.size
width = size[0]
sprite_images[file] = size
return NumberValue(width, 'px') | Returns the width of the image found at the path supplied by `image`
relative to your project's images directory. | Below is the the instruction that describes the task:
### Input:
Returns the width of the image found at the path supplied by `image`
relative to your project's images directory.
### Response:
def _image_width(image):
"""
Returns the width of the image found at the path supplied by `image`
relative to your project's images directory.
"""
if not Image:
raise Exception("Images manipulation require PIL")
file = StringValue(image).value
path = None
try:
width = sprite_images[file][0]
except KeyError:
width = 0
if callable(STATIC_ROOT):
try:
_file, _storage = list(STATIC_ROOT(file))[0]
path = _storage.open(_file)
except:
pass
else:
_path = os.path.join(STATIC_ROOT, file)
if os.path.exists(_path):
path = open(_path, 'rb')
if path:
image = Image.open(path)
size = image.size
width = size[0]
sprite_images[file] = size
return NumberValue(width, 'px') |
def setnonce(self, text=None):
"""
Set I{nonce} which is arbitraty set of bytes to prevent
reply attacks.
@param text: The nonce text value.
Generated when I{None}.
@type text: str
"""
if text is None:
s = []
s.append(self.username)
s.append(self.password)
s.append(Token.sysdate())
m = md5()
m.update(':'.join(s).encode("utf-8"))
self.nonce = m.hexdigest()
else:
self.nonce = text | Set I{nonce} which is arbitraty set of bytes to prevent
reply attacks.
@param text: The nonce text value.
Generated when I{None}.
@type text: str | Below is the the instruction that describes the task:
### Input:
Set I{nonce} which is arbitraty set of bytes to prevent
reply attacks.
@param text: The nonce text value.
Generated when I{None}.
@type text: str
### Response:
def setnonce(self, text=None):
"""
Set I{nonce} which is arbitraty set of bytes to prevent
reply attacks.
@param text: The nonce text value.
Generated when I{None}.
@type text: str
"""
if text is None:
s = []
s.append(self.username)
s.append(self.password)
s.append(Token.sysdate())
m = md5()
m.update(':'.join(s).encode("utf-8"))
self.nonce = m.hexdigest()
else:
self.nonce = text |
def _get_operation_input_field_values(self, metadata, file_input):
"""Returns a dictionary of envs or file inputs for an operation.
Args:
metadata: operation metadata field
file_input: True to return a dict of file inputs, False to return envs.
Returns:
A dictionary of input field name value pairs
"""
# To determine input parameter type, we iterate through the
# pipeline inputParameters.
# The values come from the pipelineArgs inputs.
input_args = metadata['request']['ephemeralPipeline']['inputParameters']
vals_dict = metadata['request']['pipelineArgs']['inputs']
# Get the names for files or envs
names = [
arg['name'] for arg in input_args if ('localCopy' in arg) == file_input
]
# Build the return dict
return {name: vals_dict[name] for name in names if name in vals_dict} | Returns a dictionary of envs or file inputs for an operation.
Args:
metadata: operation metadata field
file_input: True to return a dict of file inputs, False to return envs.
Returns:
A dictionary of input field name value pairs | Below is the the instruction that describes the task:
### Input:
Returns a dictionary of envs or file inputs for an operation.
Args:
metadata: operation metadata field
file_input: True to return a dict of file inputs, False to return envs.
Returns:
A dictionary of input field name value pairs
### Response:
def _get_operation_input_field_values(self, metadata, file_input):
"""Returns a dictionary of envs or file inputs for an operation.
Args:
metadata: operation metadata field
file_input: True to return a dict of file inputs, False to return envs.
Returns:
A dictionary of input field name value pairs
"""
# To determine input parameter type, we iterate through the
# pipeline inputParameters.
# The values come from the pipelineArgs inputs.
input_args = metadata['request']['ephemeralPipeline']['inputParameters']
vals_dict = metadata['request']['pipelineArgs']['inputs']
# Get the names for files or envs
names = [
arg['name'] for arg in input_args if ('localCopy' in arg) == file_input
]
# Build the return dict
return {name: vals_dict[name] for name in names if name in vals_dict} |
def result_report_parameters(self):
"""Report metric parameters
Returns
-------
str
result report in string format
"""
output = self.ui.data(field='Tags', value=len(self.tag_label_list)) + '\n'
output += self.ui.data(field='Evaluated units', value=int(self.overall['Nref'])) + '\n'
return output | Report metric parameters
Returns
-------
str
result report in string format | Below is the the instruction that describes the task:
### Input:
Report metric parameters
Returns
-------
str
result report in string format
### Response:
def result_report_parameters(self):
"""Report metric parameters
Returns
-------
str
result report in string format
"""
output = self.ui.data(field='Tags', value=len(self.tag_label_list)) + '\n'
output += self.ui.data(field='Evaluated units', value=int(self.overall['Nref'])) + '\n'
return output |
def minimize_sigmas(sigmas, weights, combs):
"""Varies sigmas to minimize gaussian sigma12 - sqrt(sigma1² + sigma2²).
Parameters
----------
sigmas: numpy array of fitted sigmas of gaussians
weights: numpy array of weights for the squared sum
combs: pmt combinations to use for minimization
Returns
-------
opt_sigmas: optimal sigma values for all PMTs
"""
def make_quality_function(sigmas, weights, combs):
def quality_function(s):
sq_sum = 0
for sigma, comb, weight in zip(sigmas, combs, weights):
sigma_sqsum = np.sqrt(s[comb[1]]**2 + s[comb[0]]**2)
sq_sum += ((sigma - sigma_sqsum) * weight)**2
return sq_sum
return quality_function
qfunc = make_quality_function(sigmas, weights, combs)
s = np.ones(31) * 2.5
# s = np.random.rand(31)
bounds = [(0., 5.)] * 31
opt_sigmas = optimize.minimize(qfunc, s, bounds=bounds)
return opt_sigmas | Varies sigmas to minimize gaussian sigma12 - sqrt(sigma1² + sigma2²).
Parameters
----------
sigmas: numpy array of fitted sigmas of gaussians
weights: numpy array of weights for the squared sum
combs: pmt combinations to use for minimization
Returns
-------
opt_sigmas: optimal sigma values for all PMTs | Below is the the instruction that describes the task:
### Input:
Varies sigmas to minimize gaussian sigma12 - sqrt(sigma1² + sigma2²).
Parameters
----------
sigmas: numpy array of fitted sigmas of gaussians
weights: numpy array of weights for the squared sum
combs: pmt combinations to use for minimization
Returns
-------
opt_sigmas: optimal sigma values for all PMTs
### Response:
def minimize_sigmas(sigmas, weights, combs):
"""Varies sigmas to minimize gaussian sigma12 - sqrt(sigma1² + sigma2²).
Parameters
----------
sigmas: numpy array of fitted sigmas of gaussians
weights: numpy array of weights for the squared sum
combs: pmt combinations to use for minimization
Returns
-------
opt_sigmas: optimal sigma values for all PMTs
"""
def make_quality_function(sigmas, weights, combs):
def quality_function(s):
sq_sum = 0
for sigma, comb, weight in zip(sigmas, combs, weights):
sigma_sqsum = np.sqrt(s[comb[1]]**2 + s[comb[0]]**2)
sq_sum += ((sigma - sigma_sqsum) * weight)**2
return sq_sum
return quality_function
qfunc = make_quality_function(sigmas, weights, combs)
s = np.ones(31) * 2.5
# s = np.random.rand(31)
bounds = [(0., 5.)] * 31
opt_sigmas = optimize.minimize(qfunc, s, bounds=bounds)
return opt_sigmas |
def new_project():
"""New Project."""
form = NewProjectForm()
if not form.validate_on_submit():
return jsonify(errors=form.errors), 400
data = form.data
data['slug'] = slugify(data['name'])
data['owner_id'] = get_current_user_id()
id = add_instance('project', **data)
if not id:
return jsonify(errors={'name': ['duplicated slug.']}), 400
project = get_data_or_404('project', id)
return jsonify(**project) | New Project. | Below is the the instruction that describes the task:
### Input:
New Project.
### Response:
def new_project():
"""New Project."""
form = NewProjectForm()
if not form.validate_on_submit():
return jsonify(errors=form.errors), 400
data = form.data
data['slug'] = slugify(data['name'])
data['owner_id'] = get_current_user_id()
id = add_instance('project', **data)
if not id:
return jsonify(errors={'name': ['duplicated slug.']}), 400
project = get_data_or_404('project', id)
return jsonify(**project) |
def send_scheduled_messages(priority=None, ignore_unknown_messengers=False, ignore_unknown_message_types=False):
"""Sends scheduled messages.
:param int, None priority: number to limit sending message by this priority.
:param bool ignore_unknown_messengers: to silence UnknownMessengerError
:param bool ignore_unknown_message_types: to silence UnknownMessageTypeError
:raises UnknownMessengerError:
:raises UnknownMessageTypeError:
"""
dispatches_by_messengers = Dispatch.group_by_messengers(Dispatch.get_unsent(priority=priority))
for messenger_id, messages in dispatches_by_messengers.items():
try:
messenger_obj = get_registered_messenger_object(messenger_id)
messenger_obj._process_messages(messages, ignore_unknown_message_types=ignore_unknown_message_types)
except UnknownMessengerError:
if ignore_unknown_messengers:
continue
raise | Sends scheduled messages.
:param int, None priority: number to limit sending message by this priority.
:param bool ignore_unknown_messengers: to silence UnknownMessengerError
:param bool ignore_unknown_message_types: to silence UnknownMessageTypeError
:raises UnknownMessengerError:
:raises UnknownMessageTypeError: | Below is the the instruction that describes the task:
### Input:
Sends scheduled messages.
:param int, None priority: number to limit sending message by this priority.
:param bool ignore_unknown_messengers: to silence UnknownMessengerError
:param bool ignore_unknown_message_types: to silence UnknownMessageTypeError
:raises UnknownMessengerError:
:raises UnknownMessageTypeError:
### Response:
def send_scheduled_messages(priority=None, ignore_unknown_messengers=False, ignore_unknown_message_types=False):
"""Sends scheduled messages.
:param int, None priority: number to limit sending message by this priority.
:param bool ignore_unknown_messengers: to silence UnknownMessengerError
:param bool ignore_unknown_message_types: to silence UnknownMessageTypeError
:raises UnknownMessengerError:
:raises UnknownMessageTypeError:
"""
dispatches_by_messengers = Dispatch.group_by_messengers(Dispatch.get_unsent(priority=priority))
for messenger_id, messages in dispatches_by_messengers.items():
try:
messenger_obj = get_registered_messenger_object(messenger_id)
messenger_obj._process_messages(messages, ignore_unknown_message_types=ignore_unknown_message_types)
except UnknownMessengerError:
if ignore_unknown_messengers:
continue
raise |
def filter_messages(relative_filepaths, root, messages):
"""
This method post-processes all messages output by all tools, in order to filter
out any based on the overall output.
The main aim currently is to use information about messages suppressed by
pylint due to inline comments, and use that to suppress messages from other
tools representing the same problem.
For example:
import banana # pylint:disable=unused-import
In this situation, pylint will not warn about an unused import as there is
inline configuration to disable the warning. Pyflakes will still raise that
error, however, because it does not understand pylint disabling messages.
This method uses the information about suppressed messages from pylint to
squash the unwanted redundant error from pyflakes and frosted.
"""
paths_to_ignore, lines_to_ignore, messages_to_ignore = get_suppressions(relative_filepaths, root, messages)
filtered = []
for message in messages:
# first get rid of the pylint informational messages
relative_message_path = os.path.relpath(message.location.path)
if message.source == 'pylint' and message.code in ('suppressed-message', 'file-ignored',):
continue
# some files are skipped entirely by messages
if relative_message_path in paths_to_ignore:
continue
# some lines are skipped entirely by messages
if relative_message_path in lines_to_ignore:
if message.location.line in lines_to_ignore[relative_message_path]:
continue
# and some lines have only certain messages explicitly ignored
if relative_message_path in messages_to_ignore:
if message.location.line in messages_to_ignore[relative_message_path]:
if message.code in messages_to_ignore[relative_message_path][message.location.line]:
continue
# otherwise this message was not filtered
filtered.append(message)
return filtered | This method post-processes all messages output by all tools, in order to filter
out any based on the overall output.
The main aim currently is to use information about messages suppressed by
pylint due to inline comments, and use that to suppress messages from other
tools representing the same problem.
For example:
import banana # pylint:disable=unused-import
In this situation, pylint will not warn about an unused import as there is
inline configuration to disable the warning. Pyflakes will still raise that
error, however, because it does not understand pylint disabling messages.
This method uses the information about suppressed messages from pylint to
squash the unwanted redundant error from pyflakes and frosted. | Below is the the instruction that describes the task:
### Input:
This method post-processes all messages output by all tools, in order to filter
out any based on the overall output.
The main aim currently is to use information about messages suppressed by
pylint due to inline comments, and use that to suppress messages from other
tools representing the same problem.
For example:
import banana # pylint:disable=unused-import
In this situation, pylint will not warn about an unused import as there is
inline configuration to disable the warning. Pyflakes will still raise that
error, however, because it does not understand pylint disabling messages.
This method uses the information about suppressed messages from pylint to
squash the unwanted redundant error from pyflakes and frosted.
### Response:
def filter_messages(relative_filepaths, root, messages):
"""
This method post-processes all messages output by all tools, in order to filter
out any based on the overall output.
The main aim currently is to use information about messages suppressed by
pylint due to inline comments, and use that to suppress messages from other
tools representing the same problem.
For example:
import banana # pylint:disable=unused-import
In this situation, pylint will not warn about an unused import as there is
inline configuration to disable the warning. Pyflakes will still raise that
error, however, because it does not understand pylint disabling messages.
This method uses the information about suppressed messages from pylint to
squash the unwanted redundant error from pyflakes and frosted.
"""
paths_to_ignore, lines_to_ignore, messages_to_ignore = get_suppressions(relative_filepaths, root, messages)
filtered = []
for message in messages:
# first get rid of the pylint informational messages
relative_message_path = os.path.relpath(message.location.path)
if message.source == 'pylint' and message.code in ('suppressed-message', 'file-ignored',):
continue
# some files are skipped entirely by messages
if relative_message_path in paths_to_ignore:
continue
# some lines are skipped entirely by messages
if relative_message_path in lines_to_ignore:
if message.location.line in lines_to_ignore[relative_message_path]:
continue
# and some lines have only certain messages explicitly ignored
if relative_message_path in messages_to_ignore:
if message.location.line in messages_to_ignore[relative_message_path]:
if message.code in messages_to_ignore[relative_message_path][message.location.line]:
continue
# otherwise this message was not filtered
filtered.append(message)
return filtered |
def calculateLocalElasticitySegments(self, bp, span=2, frameGap=None, helical=False, unit='kT',
err_type='block', tool='gmx analyze', outFile=None):
"""Calculate local elastic properties of consecutive overlapped DNA segments
Calculate local elastic properties of consecutive overlapped DNA segments of length given by `span`.
Parameters
----------
bp : list
List of two base-steps forming the global DNA segment.
For example: with ``bp=[5, 50]``, 5-50 base-step segment will be considered.
span : int
Length of overlapping (local) DNA segments. It should be less than four.
frameGap : int
How many frames to skip for next time-frame. Lower the number, slower will be the calculation.
helical : bool
If ``helical=True``, elastic matrix for **helical base-step** parameters are calculated. Otherwise,
by default, elastic matrix for **base-step** parameters are calculated.
unit : str
Unit of energy. Allowed units are: ``'kT', 'kJ/mol' and 'kcal/mol'``.
err_type : str
Error estimation by autocorrelation method ``err_type='acf'`` or
block averaging method ``err_type='block'``
tool : str
GROMACS tool to calculate error. In older versions it is `g_analyze` while in
newer versions (above 2016) it is `gmx analyze`.
outFile : str
Output file in csv format.
Returns
-------
segments : list
list of DNA segments for which local elastic properties was calculated.
elasticities : OrderedDict
A ordered dictionary of 1D arrays of shape (segments). The keys in dictionary are name of the elasticity in
the same order as listed above.
error : OrderedDict
A ordered dictionary of 1D arrays of shape (segments). The keys in dictionary are name of the elasticity in
the same order as listed above..
"""
if helical:
props_name = helical_local_props_vector
else:
props_name = local_props_vector
segments, errors, elasticities = [], OrderedDict(), OrderedDict()
for name in props_name:
elasticities[name] = []
errors[name] = []
for s in range(bp[0], bp[1]):
if s+span-1 > bp[1]:
break
time, elasticity_t = self.getLocalElasticityByTime([s, s+span-1], frameGap=frameGap, helical=helical, unit=unit)
error_t = dnaMD.get_error(time, list(elasticity_t.values()), len(props_name), err_type=err_type, tool=tool)
for i in range(len(props_name)):
esy_t = elasticity_t[props_name[i]][-1] # only take last entry
elasticities[props_name[i]].append(esy_t)
errors[props_name[i]].append(error_t[i])
segments.append('{0}-{1}'.format(s, s+span-1))
# Write output file
if outFile is not None:
with open(outFile, 'w') as fout:
fout.write('#bps')
for name in props_name:
fout.write(', {0}, {0}-error'.format(name))
fout.write('\n')
for s in range(len(segments)):
fout.write('{0}'.format(segments[s]))
for name in props_name:
fout.write(', {0:.5f}, {1:.5f}'.format(elasticities[name][s], errors[name][s]))
fout.write('\n')
return segments, elasticities, errors | Calculate local elastic properties of consecutive overlapped DNA segments
Calculate local elastic properties of consecutive overlapped DNA segments of length given by `span`.
Parameters
----------
bp : list
List of two base-steps forming the global DNA segment.
For example: with ``bp=[5, 50]``, 5-50 base-step segment will be considered.
span : int
Length of overlapping (local) DNA segments. It should be less than four.
frameGap : int
How many frames to skip for next time-frame. Lower the number, slower will be the calculation.
helical : bool
If ``helical=True``, elastic matrix for **helical base-step** parameters are calculated. Otherwise,
by default, elastic matrix for **base-step** parameters are calculated.
unit : str
Unit of energy. Allowed units are: ``'kT', 'kJ/mol' and 'kcal/mol'``.
err_type : str
Error estimation by autocorrelation method ``err_type='acf'`` or
block averaging method ``err_type='block'``
tool : str
GROMACS tool to calculate error. In older versions it is `g_analyze` while in
newer versions (above 2016) it is `gmx analyze`.
outFile : str
Output file in csv format.
Returns
-------
segments : list
list of DNA segments for which local elastic properties was calculated.
elasticities : OrderedDict
A ordered dictionary of 1D arrays of shape (segments). The keys in dictionary are name of the elasticity in
the same order as listed above.
error : OrderedDict
A ordered dictionary of 1D arrays of shape (segments). The keys in dictionary are name of the elasticity in
the same order as listed above.. | Below is the the instruction that describes the task:
### Input:
Calculate local elastic properties of consecutive overlapped DNA segments
Calculate local elastic properties of consecutive overlapped DNA segments of length given by `span`.
Parameters
----------
bp : list
List of two base-steps forming the global DNA segment.
For example: with ``bp=[5, 50]``, 5-50 base-step segment will be considered.
span : int
Length of overlapping (local) DNA segments. It should be less than four.
frameGap : int
How many frames to skip for next time-frame. Lower the number, slower will be the calculation.
helical : bool
If ``helical=True``, elastic matrix for **helical base-step** parameters are calculated. Otherwise,
by default, elastic matrix for **base-step** parameters are calculated.
unit : str
Unit of energy. Allowed units are: ``'kT', 'kJ/mol' and 'kcal/mol'``.
err_type : str
Error estimation by autocorrelation method ``err_type='acf'`` or
block averaging method ``err_type='block'``
tool : str
GROMACS tool to calculate error. In older versions it is `g_analyze` while in
newer versions (above 2016) it is `gmx analyze`.
outFile : str
Output file in csv format.
Returns
-------
segments : list
list of DNA segments for which local elastic properties was calculated.
elasticities : OrderedDict
A ordered dictionary of 1D arrays of shape (segments). The keys in dictionary are name of the elasticity in
the same order as listed above.
error : OrderedDict
A ordered dictionary of 1D arrays of shape (segments). The keys in dictionary are name of the elasticity in
the same order as listed above..
### Response:
def calculateLocalElasticitySegments(self, bp, span=2, frameGap=None, helical=False, unit='kT',
err_type='block', tool='gmx analyze', outFile=None):
"""Calculate local elastic properties of consecutive overlapped DNA segments
Calculate local elastic properties of consecutive overlapped DNA segments of length given by `span`.
Parameters
----------
bp : list
List of two base-steps forming the global DNA segment.
For example: with ``bp=[5, 50]``, 5-50 base-step segment will be considered.
span : int
Length of overlapping (local) DNA segments. It should be less than four.
frameGap : int
How many frames to skip for next time-frame. Lower the number, slower will be the calculation.
helical : bool
If ``helical=True``, elastic matrix for **helical base-step** parameters are calculated. Otherwise,
by default, elastic matrix for **base-step** parameters are calculated.
unit : str
Unit of energy. Allowed units are: ``'kT', 'kJ/mol' and 'kcal/mol'``.
err_type : str
Error estimation by autocorrelation method ``err_type='acf'`` or
block averaging method ``err_type='block'``
tool : str
GROMACS tool to calculate error. In older versions it is `g_analyze` while in
newer versions (above 2016) it is `gmx analyze`.
outFile : str
Output file in csv format.
Returns
-------
segments : list
list of DNA segments for which local elastic properties was calculated.
elasticities : OrderedDict
A ordered dictionary of 1D arrays of shape (segments). The keys in dictionary are name of the elasticity in
the same order as listed above.
error : OrderedDict
A ordered dictionary of 1D arrays of shape (segments). The keys in dictionary are name of the elasticity in
the same order as listed above..
"""
if helical:
props_name = helical_local_props_vector
else:
props_name = local_props_vector
segments, errors, elasticities = [], OrderedDict(), OrderedDict()
for name in props_name:
elasticities[name] = []
errors[name] = []
for s in range(bp[0], bp[1]):
if s+span-1 > bp[1]:
break
time, elasticity_t = self.getLocalElasticityByTime([s, s+span-1], frameGap=frameGap, helical=helical, unit=unit)
error_t = dnaMD.get_error(time, list(elasticity_t.values()), len(props_name), err_type=err_type, tool=tool)
for i in range(len(props_name)):
esy_t = elasticity_t[props_name[i]][-1] # only take last entry
elasticities[props_name[i]].append(esy_t)
errors[props_name[i]].append(error_t[i])
segments.append('{0}-{1}'.format(s, s+span-1))
# Write output file
if outFile is not None:
with open(outFile, 'w') as fout:
fout.write('#bps')
for name in props_name:
fout.write(', {0}, {0}-error'.format(name))
fout.write('\n')
for s in range(len(segments)):
fout.write('{0}'.format(segments[s]))
for name in props_name:
fout.write(', {0:.5f}, {1:.5f}'.format(elasticities[name][s], errors[name][s]))
fout.write('\n')
return segments, elasticities, errors |
def lookup_handles(ids):
""" Fetch the twitter screen_names of each id. """
names = set()
for id_list in [ids[100 * i:100 * i + 100] for i in range(len(ids))]:
if len(id_list) > 0:
while True:
r = twapi.request('users/lookup', {'user_id': ','.join([str(i) for i in id_list])})
if r.status_code in [88, 130, 420, 429]: # rate limit
sys.stderr.write('Sleeping off rate limit for %s: %s\n' % (str(id_list), r.text))
time.sleep(301)
elif r.status_code == 200:
for item in r.get_iterator():
names.add((item['screen_name'], item['id_str']))
break
else:
sys.stderr.write('Error: %s\nSkipping %s...\n' % (str(id_list), r.text))
break
return names | Fetch the twitter screen_names of each id. | Below is the the instruction that describes the task:
### Input:
Fetch the twitter screen_names of each id.
### Response:
def lookup_handles(ids):
""" Fetch the twitter screen_names of each id. """
names = set()
for id_list in [ids[100 * i:100 * i + 100] for i in range(len(ids))]:
if len(id_list) > 0:
while True:
r = twapi.request('users/lookup', {'user_id': ','.join([str(i) for i in id_list])})
if r.status_code in [88, 130, 420, 429]: # rate limit
sys.stderr.write('Sleeping off rate limit for %s: %s\n' % (str(id_list), r.text))
time.sleep(301)
elif r.status_code == 200:
for item in r.get_iterator():
names.add((item['screen_name'], item['id_str']))
break
else:
sys.stderr.write('Error: %s\nSkipping %s...\n' % (str(id_list), r.text))
break
return names |
def __update_offset_table(self, fileobj, fmt, atom, delta, offset):
"""Update offset table in the specified atom."""
if atom.offset > offset:
atom.offset += delta
fileobj.seek(atom.offset + 12)
data = fileobj.read(atom.length - 12)
fmt = fmt % cdata.uint_be(data[:4])
offsets = struct.unpack(fmt, data[4:])
offsets = [o + (0, delta)[offset < o] for o in offsets]
fileobj.seek(atom.offset + 16)
fileobj.write(struct.pack(fmt, *offsets)) | Update offset table in the specified atom. | Below is the the instruction that describes the task:
### Input:
Update offset table in the specified atom.
### Response:
def __update_offset_table(self, fileobj, fmt, atom, delta, offset):
"""Update offset table in the specified atom."""
if atom.offset > offset:
atom.offset += delta
fileobj.seek(atom.offset + 12)
data = fileobj.read(atom.length - 12)
fmt = fmt % cdata.uint_be(data[:4])
offsets = struct.unpack(fmt, data[4:])
offsets = [o + (0, delta)[offset < o] for o in offsets]
fileobj.seek(atom.offset + 16)
fileobj.write(struct.pack(fmt, *offsets)) |
def load_or_create_vocabs(source_paths: List[str],
target_path: str,
source_vocab_paths: List[Optional[str]],
target_vocab_path: Optional[str],
shared_vocab: bool,
num_words_source: Optional[int], word_min_count_source: int,
num_words_target: Optional[int], word_min_count_target: int,
pad_to_multiple_of: Optional[int] = None) -> Tuple[List[Vocab], Vocab]:
"""
Returns vocabularies for source files (including factors) and target.
If the respective vocabulary paths are not None, the vocabulary is read from the path and returned.
Otherwise, it is built from the support and saved to the path.
:param source_paths: The path to the source text (and optional token-parallel factor files).
:param target_path: The target text.
:param source_vocab_paths: The source vocabulary path (and optional factor vocabulary paths).
:param target_vocab_path: The target vocabulary path.
:param shared_vocab: Whether the source and target vocabularies are shared.
:param num_words_source: Number of words in the source vocabulary.
:param word_min_count_source: Minimum frequency of words in the source vocabulary.
:param num_words_target: Number of words in the target vocabulary.
:param word_min_count_target: Minimum frequency of words in the target vocabulary.
:param pad_to_multiple_of: If not None, pads the vocabularies to a size that is the next multiple of this int.
:return: List of source vocabularies (for source and factors), and target vocabulary.
"""
source_path, *source_factor_paths = source_paths
source_vocab_path, *source_factor_vocab_paths = source_vocab_paths
logger.info("=============================")
logger.info("Loading/creating vocabularies")
logger.info("=============================")
logger.info("(1) Surface form vocabularies (source & target)")
if shared_vocab:
if source_vocab_path and target_vocab_path:
vocab_source = vocab_from_json(source_vocab_path)
vocab_target = vocab_from_json(target_vocab_path)
utils.check_condition(are_identical(vocab_source, vocab_target),
"Shared vocabulary requires identical source and target vocabularies. "
"The vocabularies in %s and %s are not identical." % (source_vocab_path,
target_vocab_path))
elif source_vocab_path is None and target_vocab_path is None:
utils.check_condition(num_words_source == num_words_target,
"A shared vocabulary requires the number of source and target words to be the same.")
utils.check_condition(word_min_count_source == word_min_count_target,
"A shared vocabulary requires the minimum word count for source and target "
"to be the same.")
vocab_source = vocab_target = build_from_paths(paths=[source_path, target_path],
num_words=num_words_source,
min_count=word_min_count_source,
pad_to_multiple_of=pad_to_multiple_of)
else:
vocab_path = source_vocab_path if source_vocab_path is not None else target_vocab_path
logger.info("Using %s as a shared source/target vocabulary." % vocab_path)
vocab_source = vocab_target = vocab_from_json(vocab_path)
else:
vocab_source = load_or_create_vocab(source_path, source_vocab_path, num_words_source, word_min_count_source,
pad_to_multiple_of=pad_to_multiple_of)
vocab_target = load_or_create_vocab(target_path, target_vocab_path, num_words_target, word_min_count_target,
pad_to_multiple_of=pad_to_multiple_of)
vocab_source_factors = [] # type: List[Vocab]
if source_factor_paths:
logger.info("(2) Additional source factor vocabularies")
# source factor vocabs are always created
for factor_path, factor_vocab_path in zip(source_factor_paths, source_factor_vocab_paths):
vocab_source_factors.append(load_or_create_vocab(factor_path, factor_vocab_path,
num_words_source, word_min_count_source))
return [vocab_source] + vocab_source_factors, vocab_target | Returns vocabularies for source files (including factors) and target.
If the respective vocabulary paths are not None, the vocabulary is read from the path and returned.
Otherwise, it is built from the support and saved to the path.
:param source_paths: The path to the source text (and optional token-parallel factor files).
:param target_path: The target text.
:param source_vocab_paths: The source vocabulary path (and optional factor vocabulary paths).
:param target_vocab_path: The target vocabulary path.
:param shared_vocab: Whether the source and target vocabularies are shared.
:param num_words_source: Number of words in the source vocabulary.
:param word_min_count_source: Minimum frequency of words in the source vocabulary.
:param num_words_target: Number of words in the target vocabulary.
:param word_min_count_target: Minimum frequency of words in the target vocabulary.
:param pad_to_multiple_of: If not None, pads the vocabularies to a size that is the next multiple of this int.
:return: List of source vocabularies (for source and factors), and target vocabulary. | Below is the the instruction that describes the task:
### Input:
Returns vocabularies for source files (including factors) and target.
If the respective vocabulary paths are not None, the vocabulary is read from the path and returned.
Otherwise, it is built from the support and saved to the path.
:param source_paths: The path to the source text (and optional token-parallel factor files).
:param target_path: The target text.
:param source_vocab_paths: The source vocabulary path (and optional factor vocabulary paths).
:param target_vocab_path: The target vocabulary path.
:param shared_vocab: Whether the source and target vocabularies are shared.
:param num_words_source: Number of words in the source vocabulary.
:param word_min_count_source: Minimum frequency of words in the source vocabulary.
:param num_words_target: Number of words in the target vocabulary.
:param word_min_count_target: Minimum frequency of words in the target vocabulary.
:param pad_to_multiple_of: If not None, pads the vocabularies to a size that is the next multiple of this int.
:return: List of source vocabularies (for source and factors), and target vocabulary.
### Response:
def load_or_create_vocabs(source_paths: List[str],
target_path: str,
source_vocab_paths: List[Optional[str]],
target_vocab_path: Optional[str],
shared_vocab: bool,
num_words_source: Optional[int], word_min_count_source: int,
num_words_target: Optional[int], word_min_count_target: int,
pad_to_multiple_of: Optional[int] = None) -> Tuple[List[Vocab], Vocab]:
"""
Returns vocabularies for source files (including factors) and target.
If the respective vocabulary paths are not None, the vocabulary is read from the path and returned.
Otherwise, it is built from the support and saved to the path.
:param source_paths: The path to the source text (and optional token-parallel factor files).
:param target_path: The target text.
:param source_vocab_paths: The source vocabulary path (and optional factor vocabulary paths).
:param target_vocab_path: The target vocabulary path.
:param shared_vocab: Whether the source and target vocabularies are shared.
:param num_words_source: Number of words in the source vocabulary.
:param word_min_count_source: Minimum frequency of words in the source vocabulary.
:param num_words_target: Number of words in the target vocabulary.
:param word_min_count_target: Minimum frequency of words in the target vocabulary.
:param pad_to_multiple_of: If not None, pads the vocabularies to a size that is the next multiple of this int.
:return: List of source vocabularies (for source and factors), and target vocabulary.
"""
source_path, *source_factor_paths = source_paths
source_vocab_path, *source_factor_vocab_paths = source_vocab_paths
logger.info("=============================")
logger.info("Loading/creating vocabularies")
logger.info("=============================")
logger.info("(1) Surface form vocabularies (source & target)")
if shared_vocab:
if source_vocab_path and target_vocab_path:
vocab_source = vocab_from_json(source_vocab_path)
vocab_target = vocab_from_json(target_vocab_path)
utils.check_condition(are_identical(vocab_source, vocab_target),
"Shared vocabulary requires identical source and target vocabularies. "
"The vocabularies in %s and %s are not identical." % (source_vocab_path,
target_vocab_path))
elif source_vocab_path is None and target_vocab_path is None:
utils.check_condition(num_words_source == num_words_target,
"A shared vocabulary requires the number of source and target words to be the same.")
utils.check_condition(word_min_count_source == word_min_count_target,
"A shared vocabulary requires the minimum word count for source and target "
"to be the same.")
vocab_source = vocab_target = build_from_paths(paths=[source_path, target_path],
num_words=num_words_source,
min_count=word_min_count_source,
pad_to_multiple_of=pad_to_multiple_of)
else:
vocab_path = source_vocab_path if source_vocab_path is not None else target_vocab_path
logger.info("Using %s as a shared source/target vocabulary." % vocab_path)
vocab_source = vocab_target = vocab_from_json(vocab_path)
else:
vocab_source = load_or_create_vocab(source_path, source_vocab_path, num_words_source, word_min_count_source,
pad_to_multiple_of=pad_to_multiple_of)
vocab_target = load_or_create_vocab(target_path, target_vocab_path, num_words_target, word_min_count_target,
pad_to_multiple_of=pad_to_multiple_of)
vocab_source_factors = [] # type: List[Vocab]
if source_factor_paths:
logger.info("(2) Additional source factor vocabularies")
# source factor vocabs are always created
for factor_path, factor_vocab_path in zip(source_factor_paths, source_factor_vocab_paths):
vocab_source_factors.append(load_or_create_vocab(factor_path, factor_vocab_path,
num_words_source, word_min_count_source))
return [vocab_source] + vocab_source_factors, vocab_target |
def profile_cancel(self, query_id, timeout=10):
"""
Cancel the query that has the given queryid.
:param query_id: The UUID of the query in standard UUID format that Drill assigns to each query.
:param timeout: int
:return: pydrill.client.Result
"""
result = Result(*self.perform_request(**{
'method': 'GET',
'url': '/profiles/cancel/{0}'.format(query_id),
'params': {
'request_timeout': timeout
}
}))
return result | Cancel the query that has the given queryid.
:param query_id: The UUID of the query in standard UUID format that Drill assigns to each query.
:param timeout: int
:return: pydrill.client.Result | Below is the the instruction that describes the task:
### Input:
Cancel the query that has the given queryid.
:param query_id: The UUID of the query in standard UUID format that Drill assigns to each query.
:param timeout: int
:return: pydrill.client.Result
### Response:
def profile_cancel(self, query_id, timeout=10):
"""
Cancel the query that has the given queryid.
:param query_id: The UUID of the query in standard UUID format that Drill assigns to each query.
:param timeout: int
:return: pydrill.client.Result
"""
result = Result(*self.perform_request(**{
'method': 'GET',
'url': '/profiles/cancel/{0}'.format(query_id),
'params': {
'request_timeout': timeout
}
}))
return result |
def item_str_path_or(default, keys, dct):
"""
Given a string of path segments separated by ., splits them into an array. Int strings are converted
to numbers to serve as an array index
:param default: Value if any part yields None or undefined
:param keys: e.g. 'foo.bar.1.goo'
:param dct: e.g. dict(foo=dict(bar=[dict(goo='a'), dict(goo='b')])
:return: The resolved value or an error. E.g. for above the result would be b
"""
return item_path_or(default, map(lambda segment: int(segment) if isint(segment) else segment, keys.split('.')), dct) | Given a string of path segments separated by ., splits them into an array. Int strings are converted
to numbers to serve as an array index
:param default: Value if any part yields None or undefined
:param keys: e.g. 'foo.bar.1.goo'
:param dct: e.g. dict(foo=dict(bar=[dict(goo='a'), dict(goo='b')])
:return: The resolved value or an error. E.g. for above the result would be b | Below is the the instruction that describes the task:
### Input:
Given a string of path segments separated by ., splits them into an array. Int strings are converted
to numbers to serve as an array index
:param default: Value if any part yields None or undefined
:param keys: e.g. 'foo.bar.1.goo'
:param dct: e.g. dict(foo=dict(bar=[dict(goo='a'), dict(goo='b')])
:return: The resolved value or an error. E.g. for above the result would be b
### Response:
def item_str_path_or(default, keys, dct):
"""
Given a string of path segments separated by ., splits them into an array. Int strings are converted
to numbers to serve as an array index
:param default: Value if any part yields None or undefined
:param keys: e.g. 'foo.bar.1.goo'
:param dct: e.g. dict(foo=dict(bar=[dict(goo='a'), dict(goo='b')])
:return: The resolved value or an error. E.g. for above the result would be b
"""
return item_path_or(default, map(lambda segment: int(segment) if isint(segment) else segment, keys.split('.')), dct) |
def validate_csv(file):
"""Return dialect information about given csv file."""
try:
# Detect encoding and dialect
with file.open() as fp:
encoding = detect_encoding(fp, default='utf-8')
sample = fp.read(
current_app.config.get('PREVIEWER_CSV_VALIDATION_BYTES', 1024))
delimiter = csv.Sniffer().sniff(sample.decode(encoding)).delimiter
is_valid = True
except Exception as e:
current_app.logger.debug(
'File {0} is not valid CSV: {1}'.format(file.uri, e))
encoding = ''
delimiter = ''
is_valid = False
return {
'delimiter': delimiter,
'encoding': encoding,
'is_valid': is_valid
} | Return dialect information about given csv file. | Below is the the instruction that describes the task:
### Input:
Return dialect information about given csv file.
### Response:
def validate_csv(file):
"""Return dialect information about given csv file."""
try:
# Detect encoding and dialect
with file.open() as fp:
encoding = detect_encoding(fp, default='utf-8')
sample = fp.read(
current_app.config.get('PREVIEWER_CSV_VALIDATION_BYTES', 1024))
delimiter = csv.Sniffer().sniff(sample.decode(encoding)).delimiter
is_valid = True
except Exception as e:
current_app.logger.debug(
'File {0} is not valid CSV: {1}'.format(file.uri, e))
encoding = ''
delimiter = ''
is_valid = False
return {
'delimiter': delimiter,
'encoding': encoding,
'is_valid': is_valid
} |
def get_fields(self, strip_labels=False):
"""
Hook to dynamically change the fields that will be displayed
"""
if strip_labels:
return [
f[0] if type(f) in (tuple, list) else f for f in self.fields
]
return self.fields | Hook to dynamically change the fields that will be displayed | Below is the the instruction that describes the task:
### Input:
Hook to dynamically change the fields that will be displayed
### Response:
def get_fields(self, strip_labels=False):
"""
Hook to dynamically change the fields that will be displayed
"""
if strip_labels:
return [
f[0] if type(f) in (tuple, list) else f for f in self.fields
]
return self.fields |
def _evaluate(self,R,z,phi=0.,t=0.):
"""
NAME:
_evaluate
PURPOSE:
evaluate the potential at R,phi,t
INPUT:
R - Galactocentric cylindrical radius
z - vertical height
phi - azimuth
t - time
OUTPUT:
Phi(R,z,phi,t)
HISTORY:
2010-11-24 - Started - Bovy (NYU)
"""
#Calculate relevant time
if t < self._tform:
smooth= 0.
elif t < self._tsteady:
deltat= t-self._tform
xi= 2.*deltat/(self._tsteady-self._tform)-1.
smooth= (3./16.*xi**5.-5./8*xi**3.+15./16.*xi+.5)
else: #bar is fully on
smooth= 1.
r2= R**2.+z**2.
r= numpy.sqrt(r2)
if r <= self._rb:
return self._af*smooth*numpy.cos(2.*(phi-self._omegab*t-self._barphi))\
*((r/self._rb)**3.-2.)*R**2./r2
else:
return -self._af*smooth*numpy.cos(2.*(phi-self._omegab*t-
self._barphi))\
*(self._rb/r)**3.\
*R**2./r2 | NAME:
_evaluate
PURPOSE:
evaluate the potential at R,phi,t
INPUT:
R - Galactocentric cylindrical radius
z - vertical height
phi - azimuth
t - time
OUTPUT:
Phi(R,z,phi,t)
HISTORY:
2010-11-24 - Started - Bovy (NYU) | Below is the the instruction that describes the task:
### Input:
NAME:
_evaluate
PURPOSE:
evaluate the potential at R,phi,t
INPUT:
R - Galactocentric cylindrical radius
z - vertical height
phi - azimuth
t - time
OUTPUT:
Phi(R,z,phi,t)
HISTORY:
2010-11-24 - Started - Bovy (NYU)
### Response:
def _evaluate(self,R,z,phi=0.,t=0.):
"""
NAME:
_evaluate
PURPOSE:
evaluate the potential at R,phi,t
INPUT:
R - Galactocentric cylindrical radius
z - vertical height
phi - azimuth
t - time
OUTPUT:
Phi(R,z,phi,t)
HISTORY:
2010-11-24 - Started - Bovy (NYU)
"""
#Calculate relevant time
if t < self._tform:
smooth= 0.
elif t < self._tsteady:
deltat= t-self._tform
xi= 2.*deltat/(self._tsteady-self._tform)-1.
smooth= (3./16.*xi**5.-5./8*xi**3.+15./16.*xi+.5)
else: #bar is fully on
smooth= 1.
r2= R**2.+z**2.
r= numpy.sqrt(r2)
if r <= self._rb:
return self._af*smooth*numpy.cos(2.*(phi-self._omegab*t-self._barphi))\
*((r/self._rb)**3.-2.)*R**2./r2
else:
return -self._af*smooth*numpy.cos(2.*(phi-self._omegab*t-
self._barphi))\
*(self._rb/r)**3.\
*R**2./r2 |
def _prep(e):
"""
Normalizes lastupdate to a timestamp, and constructs a URL from the embedded attributes.
"""
if 'lastupdate' in e:
e['lastupdate'] = datetime.datetime.fromtimestamp(int(e['lastupdate']))
for k in ['farm', 'server', 'id', 'secret']:
if not k in e:
return e
e["url"] = "https://farm%s.staticflickr.com/%s/%s_%s_b.jpg" % (e["farm"], e["server"], e["id"], e["secret"])
return e | Normalizes lastupdate to a timestamp, and constructs a URL from the embedded attributes. | Below is the the instruction that describes the task:
### Input:
Normalizes lastupdate to a timestamp, and constructs a URL from the embedded attributes.
### Response:
def _prep(e):
"""
Normalizes lastupdate to a timestamp, and constructs a URL from the embedded attributes.
"""
if 'lastupdate' in e:
e['lastupdate'] = datetime.datetime.fromtimestamp(int(e['lastupdate']))
for k in ['farm', 'server', 'id', 'secret']:
if not k in e:
return e
e["url"] = "https://farm%s.staticflickr.com/%s/%s_%s_b.jpg" % (e["farm"], e["server"], e["id"], e["secret"])
return e |
def get_state(cls, clz):
"""
Retrieve the state of a given Class.
:param clz: types.ClassType
:return: Class state.
:rtype: dict
"""
if clz not in cls.__shared_state:
cls.__shared_state[clz] = (
clz.init_state() if hasattr(clz, "init_state") else {}
)
return cls.__shared_state[clz] | Retrieve the state of a given Class.
:param clz: types.ClassType
:return: Class state.
:rtype: dict | Below is the the instruction that describes the task:
### Input:
Retrieve the state of a given Class.
:param clz: types.ClassType
:return: Class state.
:rtype: dict
### Response:
def get_state(cls, clz):
"""
Retrieve the state of a given Class.
:param clz: types.ClassType
:return: Class state.
:rtype: dict
"""
if clz not in cls.__shared_state:
cls.__shared_state[clz] = (
clz.init_state() if hasattr(clz, "init_state") else {}
)
return cls.__shared_state[clz] |
def update_monitor(self):
"""Update the monitor and monitor status from the ZM server."""
result = self._client.get_state(self._monitor_url)
self._raw_result = result['monitor'] | Update the monitor and monitor status from the ZM server. | Below is the the instruction that describes the task:
### Input:
Update the monitor and monitor status from the ZM server.
### Response:
def update_monitor(self):
"""Update the monitor and monitor status from the ZM server."""
result = self._client.get_state(self._monitor_url)
self._raw_result = result['monitor'] |
def unregister(self, provider):
'''Unregister an existing data provider.
*provider* must be an instance of DataProvider.
If provider name is already available, it will be replaced.
'''
if isinstance(provider, type):
provider = provider()
if isinstance(provider, DataProvider):
provider = provider.code
return self.pop(str(provider).upper(), None) | Unregister an existing data provider.
*provider* must be an instance of DataProvider.
If provider name is already available, it will be replaced. | Below is the the instruction that describes the task:
### Input:
Unregister an existing data provider.
*provider* must be an instance of DataProvider.
If provider name is already available, it will be replaced.
### Response:
def unregister(self, provider):
'''Unregister an existing data provider.
*provider* must be an instance of DataProvider.
If provider name is already available, it will be replaced.
'''
if isinstance(provider, type):
provider = provider()
if isinstance(provider, DataProvider):
provider = provider.code
return self.pop(str(provider).upper(), None) |
def remove(self, path, recursive=True, skip_trash=False):
"""
Use snakebite.delete, if available.
:param path: delete-able file(s) or directory(ies)
:type path: either a string or a sequence of strings
:param recursive: delete directories trees like \\*nix: rm -r
:type recursive: boolean, default is True
:param skip_trash: do or don't move deleted items into the trash first
:type skip_trash: boolean, default is False (use trash)
:return: list of deleted items
"""
return list(self.get_bite().delete(self.list_path(path), recurse=recursive)) | Use snakebite.delete, if available.
:param path: delete-able file(s) or directory(ies)
:type path: either a string or a sequence of strings
:param recursive: delete directories trees like \\*nix: rm -r
:type recursive: boolean, default is True
:param skip_trash: do or don't move deleted items into the trash first
:type skip_trash: boolean, default is False (use trash)
:return: list of deleted items | Below is the the instruction that describes the task:
### Input:
Use snakebite.delete, if available.
:param path: delete-able file(s) or directory(ies)
:type path: either a string or a sequence of strings
:param recursive: delete directories trees like \\*nix: rm -r
:type recursive: boolean, default is True
:param skip_trash: do or don't move deleted items into the trash first
:type skip_trash: boolean, default is False (use trash)
:return: list of deleted items
### Response:
def remove(self, path, recursive=True, skip_trash=False):
"""
Use snakebite.delete, if available.
:param path: delete-able file(s) or directory(ies)
:type path: either a string or a sequence of strings
:param recursive: delete directories trees like \\*nix: rm -r
:type recursive: boolean, default is True
:param skip_trash: do or don't move deleted items into the trash first
:type skip_trash: boolean, default is False (use trash)
:return: list of deleted items
"""
return list(self.get_bite().delete(self.list_path(path), recurse=recursive)) |
def stop(self):
'''Stop listener.'''
self.logger.debug("Stopping listener.")
self.listener.stop()
if self.thread is not None:
self.thread.join()
self.thread = None
self.logger.debug("Listener stopped.") | Stop listener. | Below is the the instruction that describes the task:
### Input:
Stop listener.
### Response:
def stop(self):
'''Stop listener.'''
self.logger.debug("Stopping listener.")
self.listener.stop()
if self.thread is not None:
self.thread.join()
self.thread = None
self.logger.debug("Listener stopped.") |
def _expand_filename(self, line):
"""expands the filename if there is a . as leading path"""
# expand .
newline = line
path = os.getcwd()
if newline.startswith("."):
newline = newline.replace(".", path, 1)
# expand ~
newline = os.path.expanduser(newline)
return newline | expands the filename if there is a . as leading path | Below is the the instruction that describes the task:
### Input:
expands the filename if there is a . as leading path
### Response:
def _expand_filename(self, line):
"""expands the filename if there is a . as leading path"""
# expand .
newline = line
path = os.getcwd()
if newline.startswith("."):
newline = newline.replace(".", path, 1)
# expand ~
newline = os.path.expanduser(newline)
return newline |
def save(self, to_save, **kwargs):
"""save method
"""
check = kwargs.pop('check', True)
if check:
self._valid_record(to_save)
if '_id' in to_save:
self.__collect.replace_one(
{'_id': to_save['_id']}, to_save, **kwargs)
return to_save['_id']
else:
result = self.__collect.insert_one(to_save, **kwargs)
return result.inserted_id | save method | Below is the the instruction that describes the task:
### Input:
save method
### Response:
def save(self, to_save, **kwargs):
"""save method
"""
check = kwargs.pop('check', True)
if check:
self._valid_record(to_save)
if '_id' in to_save:
self.__collect.replace_one(
{'_id': to_save['_id']}, to_save, **kwargs)
return to_save['_id']
else:
result = self.__collect.insert_one(to_save, **kwargs)
return result.inserted_id |
def insert(self, index, filename):
"""Insert a new subclass with filename at index, mockup __module__."""
base = self._base
dct = {'__module__': base.__module__, 'filename': filename, '_stack': self}
cls = type(base.__name__, (base,), dct)
self._map[cls.filename] = cls
self._classes.insert(index, cls) | Insert a new subclass with filename at index, mockup __module__. | Below is the the instruction that describes the task:
### Input:
Insert a new subclass with filename at index, mockup __module__.
### Response:
def insert(self, index, filename):
"""Insert a new subclass with filename at index, mockup __module__."""
base = self._base
dct = {'__module__': base.__module__, 'filename': filename, '_stack': self}
cls = type(base.__name__, (base,), dct)
self._map[cls.filename] = cls
self._classes.insert(index, cls) |
def process_file(pyfile_name):
'''Process a Python source file with Google style docstring comments.
Reads file header comment, function definitions, function docstrings.
Returns dictionary encapsulation for subsequent writing.
Args:
pyfile_name (str): file name to read.
Returns:
Dictionary object containing summary comment, with a list of entries for each function.
'''
print('Processing file: ' + pyfile_name)
# load the source file
with open(pyfile_name) as fpyfile:
pyfile_str = fpyfile.readlines()
# meta-doc for a source file
file_dict = {'source_file': pyfile_name.replace('\\', '/')}
# get file summary line at the top of the file
if pyfile_str[0].startswith("'''"):
file_dict['summary_comment'] = pyfile_str[0][:-1].strip("'")
else:
file_dict['summary_comment'] = pyfile_name
file_dict['functions'] = []
# find every function definition
for line in pyfile_str:
# process definition
if line.startswith('def '):
line_num = pyfile_str.index(line)
fn_def = line[4:]
fn_name = fn_def.split('(')[0]
function_info = {'name': fn_name}
extract = extract_code(':', fn_def, pyfile_str, line_num)
function_info['definition'] = extract['current_str']
# process docstring
line_num = extract['line_num'] + 1
doc_line = pyfile_str[line_num]
if doc_line.startswith(" '''"):
comment_str = doc_line[7:]
extract = extract_code(
"'''", comment_str, pyfile_str, line_num)
function_info['comments'] = extract['current_str']
file_dict['functions'].append(function_info)
return file_dict | Process a Python source file with Google style docstring comments.
Reads file header comment, function definitions, function docstrings.
Returns dictionary encapsulation for subsequent writing.
Args:
pyfile_name (str): file name to read.
Returns:
Dictionary object containing summary comment, with a list of entries for each function. | Below is the the instruction that describes the task:
### Input:
Process a Python source file with Google style docstring comments.
Reads file header comment, function definitions, function docstrings.
Returns dictionary encapsulation for subsequent writing.
Args:
pyfile_name (str): file name to read.
Returns:
Dictionary object containing summary comment, with a list of entries for each function.
### Response:
def process_file(pyfile_name):
'''Process a Python source file with Google style docstring comments.
Reads file header comment, function definitions, function docstrings.
Returns dictionary encapsulation for subsequent writing.
Args:
pyfile_name (str): file name to read.
Returns:
Dictionary object containing summary comment, with a list of entries for each function.
'''
print('Processing file: ' + pyfile_name)
# load the source file
with open(pyfile_name) as fpyfile:
pyfile_str = fpyfile.readlines()
# meta-doc for a source file
file_dict = {'source_file': pyfile_name.replace('\\', '/')}
# get file summary line at the top of the file
if pyfile_str[0].startswith("'''"):
file_dict['summary_comment'] = pyfile_str[0][:-1].strip("'")
else:
file_dict['summary_comment'] = pyfile_name
file_dict['functions'] = []
# find every function definition
for line in pyfile_str:
# process definition
if line.startswith('def '):
line_num = pyfile_str.index(line)
fn_def = line[4:]
fn_name = fn_def.split('(')[0]
function_info = {'name': fn_name}
extract = extract_code(':', fn_def, pyfile_str, line_num)
function_info['definition'] = extract['current_str']
# process docstring
line_num = extract['line_num'] + 1
doc_line = pyfile_str[line_num]
if doc_line.startswith(" '''"):
comment_str = doc_line[7:]
extract = extract_code(
"'''", comment_str, pyfile_str, line_num)
function_info['comments'] = extract['current_str']
file_dict['functions'].append(function_info)
return file_dict |
def _init_table(self):
"""
Initialize the observation table.
"""
self.observation_table.sm_vector.append(self.epsilon)
self.observation_table.smi_vector = list(self.alphabet)
self.observation_table.em_vector.append(self.epsilon)
self._fill_table_entry(self.epsilon, self.epsilon)
for s in self.observation_table.smi_vector:
self._fill_table_entry(s, self.epsilon) | Initialize the observation table. | Below is the the instruction that describes the task:
### Input:
Initialize the observation table.
### Response:
def _init_table(self):
"""
Initialize the observation table.
"""
self.observation_table.sm_vector.append(self.epsilon)
self.observation_table.smi_vector = list(self.alphabet)
self.observation_table.em_vector.append(self.epsilon)
self._fill_table_entry(self.epsilon, self.epsilon)
for s in self.observation_table.smi_vector:
self._fill_table_entry(s, self.epsilon) |
def make_if_statement(instr, queue, stack, context):
"""
Make an ast.If block from a POP_JUMP_IF_TRUE or POP_JUMP_IF_FALSE.
"""
test_expr = make_expr(stack)
if isinstance(instr, instrs.POP_JUMP_IF_TRUE):
test_expr = ast.UnaryOp(op=ast.Not(), operand=test_expr)
first_block = popwhile(op.is_not(instr.arg), queue, side='left')
if isinstance(first_block[-1], instrs.RETURN_VALUE):
body = instrs_to_body(first_block, context)
return ast.If(test=test_expr, body=body, orelse=[])
jump_to_end = expect(
first_block.pop(), instrs.JUMP_FORWARD, "at end of if-block"
)
body = instrs_to_body(first_block, context)
# First instruction after the whole if-block.
end = jump_to_end.arg
if instr.arg is jump_to_end.arg:
orelse = []
else:
orelse = instrs_to_body(
popwhile(op.is_not(end), queue, side='left'),
context,
)
return ast.If(test=test_expr, body=body, orelse=orelse) | Make an ast.If block from a POP_JUMP_IF_TRUE or POP_JUMP_IF_FALSE. | Below is the the instruction that describes the task:
### Input:
Make an ast.If block from a POP_JUMP_IF_TRUE or POP_JUMP_IF_FALSE.
### Response:
def make_if_statement(instr, queue, stack, context):
"""
Make an ast.If block from a POP_JUMP_IF_TRUE or POP_JUMP_IF_FALSE.
"""
test_expr = make_expr(stack)
if isinstance(instr, instrs.POP_JUMP_IF_TRUE):
test_expr = ast.UnaryOp(op=ast.Not(), operand=test_expr)
first_block = popwhile(op.is_not(instr.arg), queue, side='left')
if isinstance(first_block[-1], instrs.RETURN_VALUE):
body = instrs_to_body(first_block, context)
return ast.If(test=test_expr, body=body, orelse=[])
jump_to_end = expect(
first_block.pop(), instrs.JUMP_FORWARD, "at end of if-block"
)
body = instrs_to_body(first_block, context)
# First instruction after the whole if-block.
end = jump_to_end.arg
if instr.arg is jump_to_end.arg:
orelse = []
else:
orelse = instrs_to_body(
popwhile(op.is_not(end), queue, side='left'),
context,
)
return ast.If(test=test_expr, body=body, orelse=orelse) |
def create(vm_):
'''
Create a single VM from a data dict
'''
try:
# Check for required profile parameters before sending any API calls.
if vm_['profile'] and config.is_profile_configured(__opts__,
__active_provider_name__ or 'cloudstack',
vm_['profile'],
vm_=vm_) is False:
return False
except AttributeError:
pass
__utils__['cloud.fire_event'](
'event',
'starting create',
'salt/cloud/{0}/creating'.format(vm_['name']),
sock_dir=__opts__['sock_dir'],
args=__utils__['cloud.filter_event']('creating', vm_, ['name', 'profile', 'provider', 'driver']),
transport=__opts__['transport']
)
log.info('Creating Cloud VM %s', vm_['name'])
conn = get_conn()
# pylint: disable=not-callable
kwargs = {
'name': vm_['name'],
'image': get_image(conn, vm_),
'size': get_size(conn, vm_),
'location': get_location(conn, vm_),
}
# pylint: enable=not-callable
sg = get_security_groups(conn, vm_)
if sg is not False:
kwargs['ex_security_groups'] = sg
if get_keypair(vm_) is not False:
kwargs['ex_keyname'] = get_keypair(vm_)
if get_networkid(vm_) is not False:
kwargs['networkids'] = get_networkid(vm_)
kwargs['networks'] = ( # The only attr that is used is 'id'.
CloudStackNetwork(None, None, None,
kwargs['networkids'],
None, None),
)
if get_project(conn, vm_) is not False:
kwargs['project'] = get_project(conn, vm_)
event_data = kwargs.copy()
event_data['image'] = kwargs['image'].name
event_data['size'] = kwargs['size'].name
__utils__['cloud.fire_event'](
'event',
'requesting instance',
'salt/cloud/{0}/requesting'.format(vm_['name']),
sock_dir=__opts__['sock_dir'],
args={
'kwargs': __utils__['cloud.filter_event'](
'requesting',
event_data,
['name', 'profile', 'provider', 'driver', 'image', 'size'],
),
},
transport=__opts__['transport']
)
displayname = cloudstack_displayname(vm_)
if displayname:
kwargs['ex_displayname'] = displayname
else:
kwargs['ex_displayname'] = kwargs['name']
volumes = {}
ex_blockdevicemappings = block_device_mappings(vm_)
if ex_blockdevicemappings:
for ex_blockdevicemapping in ex_blockdevicemappings:
if 'VirtualName' not in ex_blockdevicemapping:
ex_blockdevicemapping['VirtualName'] = '{0}-{1}'.format(vm_['name'], len(volumes))
__utils__['cloud.fire_event'](
'event',
'requesting volume',
'salt/cloud/{0}/requesting'.format(ex_blockdevicemapping['VirtualName']),
sock_dir=__opts__['sock_dir'],
args={'kwargs': {'name': ex_blockdevicemapping['VirtualName'],
'device': ex_blockdevicemapping['DeviceName'],
'size': ex_blockdevicemapping['VolumeSize']}},
)
try:
volumes[ex_blockdevicemapping['DeviceName']] = conn.create_volume(
ex_blockdevicemapping['VolumeSize'],
ex_blockdevicemapping['VirtualName']
)
except Exception as exc:
log.error(
'Error creating volume %s on CLOUDSTACK\n\n'
'The following exception was thrown by libcloud when trying to '
'requesting a volume: \n%s',
ex_blockdevicemapping['VirtualName'], exc,
# Show the traceback if the debug logging level is enabled
exc_info_on_loglevel=logging.DEBUG
)
return False
else:
ex_blockdevicemapping = {}
try:
data = conn.create_node(**kwargs)
except Exception as exc:
log.error(
'Error creating %s on CLOUDSTACK\n\n'
'The following exception was thrown by libcloud when trying to '
'run the initial deployment: \n%s',
vm_['name'], exc,
# Show the traceback if the debug logging level is enabled
exc_info_on_loglevel=logging.DEBUG
)
return False
for device_name in six.iterkeys(volumes):
try:
conn.attach_volume(data, volumes[device_name], device_name)
except Exception as exc:
log.error(
'Error attaching volume %s on CLOUDSTACK\n\n'
'The following exception was thrown by libcloud when trying to '
'attach a volume: \n%s',
ex_blockdevicemapping.get('VirtualName', 'UNKNOWN'), exc,
# Show the traceback if the debug logging level is enabled
exc_info=log.isEnabledFor(logging.DEBUG)
)
return False
ssh_username = config.get_cloud_config_value(
'ssh_username', vm_, __opts__, default='root'
)
vm_['ssh_host'] = get_ip(data)
vm_['password'] = data.extra['password']
vm_['key_filename'] = get_key()
ret = __utils__['cloud.bootstrap'](vm_, __opts__)
ret.update(data.__dict__)
if 'password' in data.extra:
del data.extra['password']
log.info('Created Cloud VM \'%s\'', vm_['name'])
log.debug(
'\'%s\' VM creation details:\n%s',
vm_['name'], pprint.pformat(data.__dict__)
)
__utils__['cloud.fire_event'](
'event',
'created instance',
'salt/cloud/{0}/created'.format(vm_['name']),
sock_dir=__opts__['sock_dir'],
args=__utils__['cloud.filter_event']('created', vm_, ['name', 'profile', 'provider', 'driver']),
transport=__opts__['transport']
)
return ret | Create a single VM from a data dict | Below is the the instruction that describes the task:
### Input:
Create a single VM from a data dict
### Response:
def create(vm_):
'''
Create a single VM from a data dict
'''
try:
# Check for required profile parameters before sending any API calls.
if vm_['profile'] and config.is_profile_configured(__opts__,
__active_provider_name__ or 'cloudstack',
vm_['profile'],
vm_=vm_) is False:
return False
except AttributeError:
pass
__utils__['cloud.fire_event'](
'event',
'starting create',
'salt/cloud/{0}/creating'.format(vm_['name']),
sock_dir=__opts__['sock_dir'],
args=__utils__['cloud.filter_event']('creating', vm_, ['name', 'profile', 'provider', 'driver']),
transport=__opts__['transport']
)
log.info('Creating Cloud VM %s', vm_['name'])
conn = get_conn()
# pylint: disable=not-callable
kwargs = {
'name': vm_['name'],
'image': get_image(conn, vm_),
'size': get_size(conn, vm_),
'location': get_location(conn, vm_),
}
# pylint: enable=not-callable
sg = get_security_groups(conn, vm_)
if sg is not False:
kwargs['ex_security_groups'] = sg
if get_keypair(vm_) is not False:
kwargs['ex_keyname'] = get_keypair(vm_)
if get_networkid(vm_) is not False:
kwargs['networkids'] = get_networkid(vm_)
kwargs['networks'] = ( # The only attr that is used is 'id'.
CloudStackNetwork(None, None, None,
kwargs['networkids'],
None, None),
)
if get_project(conn, vm_) is not False:
kwargs['project'] = get_project(conn, vm_)
event_data = kwargs.copy()
event_data['image'] = kwargs['image'].name
event_data['size'] = kwargs['size'].name
__utils__['cloud.fire_event'](
'event',
'requesting instance',
'salt/cloud/{0}/requesting'.format(vm_['name']),
sock_dir=__opts__['sock_dir'],
args={
'kwargs': __utils__['cloud.filter_event'](
'requesting',
event_data,
['name', 'profile', 'provider', 'driver', 'image', 'size'],
),
},
transport=__opts__['transport']
)
displayname = cloudstack_displayname(vm_)
if displayname:
kwargs['ex_displayname'] = displayname
else:
kwargs['ex_displayname'] = kwargs['name']
volumes = {}
ex_blockdevicemappings = block_device_mappings(vm_)
if ex_blockdevicemappings:
for ex_blockdevicemapping in ex_blockdevicemappings:
if 'VirtualName' not in ex_blockdevicemapping:
ex_blockdevicemapping['VirtualName'] = '{0}-{1}'.format(vm_['name'], len(volumes))
__utils__['cloud.fire_event'](
'event',
'requesting volume',
'salt/cloud/{0}/requesting'.format(ex_blockdevicemapping['VirtualName']),
sock_dir=__opts__['sock_dir'],
args={'kwargs': {'name': ex_blockdevicemapping['VirtualName'],
'device': ex_blockdevicemapping['DeviceName'],
'size': ex_blockdevicemapping['VolumeSize']}},
)
try:
volumes[ex_blockdevicemapping['DeviceName']] = conn.create_volume(
ex_blockdevicemapping['VolumeSize'],
ex_blockdevicemapping['VirtualName']
)
except Exception as exc:
log.error(
'Error creating volume %s on CLOUDSTACK\n\n'
'The following exception was thrown by libcloud when trying to '
'requesting a volume: \n%s',
ex_blockdevicemapping['VirtualName'], exc,
# Show the traceback if the debug logging level is enabled
exc_info_on_loglevel=logging.DEBUG
)
return False
else:
ex_blockdevicemapping = {}
try:
data = conn.create_node(**kwargs)
except Exception as exc:
log.error(
'Error creating %s on CLOUDSTACK\n\n'
'The following exception was thrown by libcloud when trying to '
'run the initial deployment: \n%s',
vm_['name'], exc,
# Show the traceback if the debug logging level is enabled
exc_info_on_loglevel=logging.DEBUG
)
return False
for device_name in six.iterkeys(volumes):
try:
conn.attach_volume(data, volumes[device_name], device_name)
except Exception as exc:
log.error(
'Error attaching volume %s on CLOUDSTACK\n\n'
'The following exception was thrown by libcloud when trying to '
'attach a volume: \n%s',
ex_blockdevicemapping.get('VirtualName', 'UNKNOWN'), exc,
# Show the traceback if the debug logging level is enabled
exc_info=log.isEnabledFor(logging.DEBUG)
)
return False
ssh_username = config.get_cloud_config_value(
'ssh_username', vm_, __opts__, default='root'
)
vm_['ssh_host'] = get_ip(data)
vm_['password'] = data.extra['password']
vm_['key_filename'] = get_key()
ret = __utils__['cloud.bootstrap'](vm_, __opts__)
ret.update(data.__dict__)
if 'password' in data.extra:
del data.extra['password']
log.info('Created Cloud VM \'%s\'', vm_['name'])
log.debug(
'\'%s\' VM creation details:\n%s',
vm_['name'], pprint.pformat(data.__dict__)
)
__utils__['cloud.fire_event'](
'event',
'created instance',
'salt/cloud/{0}/created'.format(vm_['name']),
sock_dir=__opts__['sock_dir'],
args=__utils__['cloud.filter_event']('created', vm_, ['name', 'profile', 'provider', 'driver']),
transport=__opts__['transport']
)
return ret |
def verify_upload(self):
"""
Confirm that the last upload was sucessful.
Raises TusUploadFailed exception if the upload was not sucessful.
"""
if self.request.status_code == 204:
return True
else:
raise TusUploadFailed('', self.request.status_code, self.request.response_content) | Confirm that the last upload was sucessful.
Raises TusUploadFailed exception if the upload was not sucessful. | Below is the the instruction that describes the task:
### Input:
Confirm that the last upload was sucessful.
Raises TusUploadFailed exception if the upload was not sucessful.
### Response:
def verify_upload(self):
"""
Confirm that the last upload was sucessful.
Raises TusUploadFailed exception if the upload was not sucessful.
"""
if self.request.status_code == 204:
return True
else:
raise TusUploadFailed('', self.request.status_code, self.request.response_content) |
def load(self):
"""Fetches the MAL media page and sets the current media's attributes.
:rtype: :class:`.Media`
:return: current media object.
"""
media_page = self.session.session.get(u'http://myanimelist.net/' + self.__class__.__name__.lower() + u'/' + str(self.id)).text
self.set(self.parse(utilities.get_clean_dom(media_page)))
return self | Fetches the MAL media page and sets the current media's attributes.
:rtype: :class:`.Media`
:return: current media object. | Below is the the instruction that describes the task:
### Input:
Fetches the MAL media page and sets the current media's attributes.
:rtype: :class:`.Media`
:return: current media object.
### Response:
def load(self):
"""Fetches the MAL media page and sets the current media's attributes.
:rtype: :class:`.Media`
:return: current media object.
"""
media_page = self.session.session.get(u'http://myanimelist.net/' + self.__class__.__name__.lower() + u'/' + str(self.id)).text
self.set(self.parse(utilities.get_clean_dom(media_page)))
return self |
def init_live_reload(run):
"""
Start the live reload task
:param run: run the task inside of this function or just create it
"""
from asyncio import get_event_loop
from ._live_reload import start_child
loop = get_event_loop()
if run:
loop.run_until_complete(start_child())
else:
get_event_loop().create_task(start_child()) | Start the live reload task
:param run: run the task inside of this function or just create it | Below is the the instruction that describes the task:
### Input:
Start the live reload task
:param run: run the task inside of this function or just create it
### Response:
def init_live_reload(run):
"""
Start the live reload task
:param run: run the task inside of this function or just create it
"""
from asyncio import get_event_loop
from ._live_reload import start_child
loop = get_event_loop()
if run:
loop.run_until_complete(start_child())
else:
get_event_loop().create_task(start_child()) |
def scatter(self, projection=None, c=None, cmap='rainbow', linewidth=0.0,
edgecolor='k', axes=None, colorbar=True, s=10, **kwargs):
"""Display a scatter plot.
Displays a scatter plot using the SAM projection or another input
projection with or without annotations.
Parameters
----------
projection - ndarray of floats, optional, default None
An N x 2 matrix, where N is the number of data points. If None,
use an existing SAM projection (default t-SNE). Can take on values
'umap' or 'tsne' to specify either the SAM UMAP embedding or
SAM t-SNE embedding.
c - ndarray or str, optional, default None
Colors for each cell in the scatter plot. Can be a vector of
floats or strings for cell annotations. Can also be a key
for sam.adata.obs (i.e. 'louvain_clusters').
axes - matplotlib axis, optional, default None
Plot output to the specified, existing axes. If None, create new
figure window.
cmap - string, optional, default 'rainbow'
The colormap to use for the input color values.
colorbar - bool, optional default True
If True, display a colorbar indicating which values / annotations
correspond to which color in the scatter plot.
Keyword arguments -
All other keyword arguments that can be passed into
matplotlib.pyplot.scatter can be used.
"""
if (not PLOTTING):
print("matplotlib not installed!")
else:
if(isinstance(projection, str)):
try:
dt = self.adata.obsm[projection]
except KeyError:
print('Please create a projection first using run_umap or'
'run_tsne')
elif(projection is None):
try:
dt = self.adata.obsm['X_umap']
except KeyError:
try:
dt = self.adata.obsm['X_tsne']
except KeyError:
print("Please create either a t-SNE or UMAP projection"
"first.")
return
else:
dt = projection
if(axes is None):
plt.figure()
axes = plt.gca()
if(c is None):
plt.scatter(dt[:, 0], dt[:, 1], s=s,
linewidth=linewidth, edgecolor=edgecolor, **kwargs)
else:
if isinstance(c, str):
try:
c = self.adata.obs[c].get_values()
except KeyError:
0 # do nothing
if((isinstance(c[0], str) or isinstance(c[0], np.str_)) and
(isinstance(c, np.ndarray) or isinstance(c, list))):
i = ut.convert_annotations(c)
ui, ai = np.unique(i, return_index=True)
cax = axes.scatter(dt[:,0], dt[:,1], c=i, cmap=cmap, s=s,
linewidth=linewidth,
edgecolor=edgecolor,
**kwargs)
if(colorbar):
cbar = plt.colorbar(cax, ax=axes, ticks=ui)
cbar.ax.set_yticklabels(c[ai])
else:
if not (isinstance(c, np.ndarray) or isinstance(c, list)):
colorbar = False
i = c
cax = axes.scatter(dt[:,0], dt[:,1], c=i, cmap=cmap, s=s,
linewidth=linewidth,
edgecolor=edgecolor,
**kwargs)
if(colorbar):
plt.colorbar(cax, ax=axes) | Display a scatter plot.
Displays a scatter plot using the SAM projection or another input
projection with or without annotations.
Parameters
----------
projection - ndarray of floats, optional, default None
An N x 2 matrix, where N is the number of data points. If None,
use an existing SAM projection (default t-SNE). Can take on values
'umap' or 'tsne' to specify either the SAM UMAP embedding or
SAM t-SNE embedding.
c - ndarray or str, optional, default None
Colors for each cell in the scatter plot. Can be a vector of
floats or strings for cell annotations. Can also be a key
for sam.adata.obs (i.e. 'louvain_clusters').
axes - matplotlib axis, optional, default None
Plot output to the specified, existing axes. If None, create new
figure window.
cmap - string, optional, default 'rainbow'
The colormap to use for the input color values.
colorbar - bool, optional default True
If True, display a colorbar indicating which values / annotations
correspond to which color in the scatter plot.
Keyword arguments -
All other keyword arguments that can be passed into
matplotlib.pyplot.scatter can be used. | Below is the the instruction that describes the task:
### Input:
Display a scatter plot.
Displays a scatter plot using the SAM projection or another input
projection with or without annotations.
Parameters
----------
projection - ndarray of floats, optional, default None
An N x 2 matrix, where N is the number of data points. If None,
use an existing SAM projection (default t-SNE). Can take on values
'umap' or 'tsne' to specify either the SAM UMAP embedding or
SAM t-SNE embedding.
c - ndarray or str, optional, default None
Colors for each cell in the scatter plot. Can be a vector of
floats or strings for cell annotations. Can also be a key
for sam.adata.obs (i.e. 'louvain_clusters').
axes - matplotlib axis, optional, default None
Plot output to the specified, existing axes. If None, create new
figure window.
cmap - string, optional, default 'rainbow'
The colormap to use for the input color values.
colorbar - bool, optional default True
If True, display a colorbar indicating which values / annotations
correspond to which color in the scatter plot.
Keyword arguments -
All other keyword arguments that can be passed into
matplotlib.pyplot.scatter can be used.
### Response:
def scatter(self, projection=None, c=None, cmap='rainbow', linewidth=0.0,
edgecolor='k', axes=None, colorbar=True, s=10, **kwargs):
"""Display a scatter plot.
Displays a scatter plot using the SAM projection or another input
projection with or without annotations.
Parameters
----------
projection - ndarray of floats, optional, default None
An N x 2 matrix, where N is the number of data points. If None,
use an existing SAM projection (default t-SNE). Can take on values
'umap' or 'tsne' to specify either the SAM UMAP embedding or
SAM t-SNE embedding.
c - ndarray or str, optional, default None
Colors for each cell in the scatter plot. Can be a vector of
floats or strings for cell annotations. Can also be a key
for sam.adata.obs (i.e. 'louvain_clusters').
axes - matplotlib axis, optional, default None
Plot output to the specified, existing axes. If None, create new
figure window.
cmap - string, optional, default 'rainbow'
The colormap to use for the input color values.
colorbar - bool, optional default True
If True, display a colorbar indicating which values / annotations
correspond to which color in the scatter plot.
Keyword arguments -
All other keyword arguments that can be passed into
matplotlib.pyplot.scatter can be used.
"""
if (not PLOTTING):
print("matplotlib not installed!")
else:
if(isinstance(projection, str)):
try:
dt = self.adata.obsm[projection]
except KeyError:
print('Please create a projection first using run_umap or'
'run_tsne')
elif(projection is None):
try:
dt = self.adata.obsm['X_umap']
except KeyError:
try:
dt = self.adata.obsm['X_tsne']
except KeyError:
print("Please create either a t-SNE or UMAP projection"
"first.")
return
else:
dt = projection
if(axes is None):
plt.figure()
axes = plt.gca()
if(c is None):
plt.scatter(dt[:, 0], dt[:, 1], s=s,
linewidth=linewidth, edgecolor=edgecolor, **kwargs)
else:
if isinstance(c, str):
try:
c = self.adata.obs[c].get_values()
except KeyError:
0 # do nothing
if((isinstance(c[0], str) or isinstance(c[0], np.str_)) and
(isinstance(c, np.ndarray) or isinstance(c, list))):
i = ut.convert_annotations(c)
ui, ai = np.unique(i, return_index=True)
cax = axes.scatter(dt[:,0], dt[:,1], c=i, cmap=cmap, s=s,
linewidth=linewidth,
edgecolor=edgecolor,
**kwargs)
if(colorbar):
cbar = plt.colorbar(cax, ax=axes, ticks=ui)
cbar.ax.set_yticklabels(c[ai])
else:
if not (isinstance(c, np.ndarray) or isinstance(c, list)):
colorbar = False
i = c
cax = axes.scatter(dt[:,0], dt[:,1], c=i, cmap=cmap, s=s,
linewidth=linewidth,
edgecolor=edgecolor,
**kwargs)
if(colorbar):
plt.colorbar(cax, ax=axes) |
def filter(args):
"""
%prog filter *.consensus.fasta
Filter consensus sequence with min cluster size.
"""
from jcvi.formats.fasta import Fasta, SeqIO
p = OptionParser(filter.__doc__)
p.add_option("--minsize", default=2, type="int",
help="Minimum cluster size")
p.set_outfile()
opts, args = p.parse_args(args)
if len(args) < 1:
sys.exit(not p.print_help())
fastafiles = args
minsize = opts.minsize
totalreads = totalassembled = 0
fw = must_open(opts.outfile, "w")
for i, fastafile in enumerate(fastafiles):
f = Fasta(fastafile, lazy=True)
pf = "s{0:03d}".format(i)
nreads = nsingletons = nclusters = 0
for desc, rec in f.iterdescriptions_ordered():
nclusters += 1
if desc.startswith("singleton"):
nsingletons += 1
nreads += 1
continue
# consensus_for_cluster_0 with 63 sequences
name, w, size, seqs = desc.split()
assert w == "with"
size = int(size)
nreads += size
if size < minsize:
continue
rec.description = rec.description.split(None, 1)[-1]
rec.id = pf + "_" + rec.id
SeqIO.write(rec, fw, "fasta")
logging.debug("Scanned {0} clusters with {1} reads ..".\
format(nclusters, nreads))
cclusters, creads = nclusters - nsingletons, nreads - nsingletons
logging.debug("Saved {0} clusters (min={1}) with {2} reads (avg:{3}) [{4}]".\
format(cclusters, minsize, creads, creads / cclusters, pf))
totalreads += nreads
totalassembled += nreads - nsingletons
logging.debug("Total assembled: {0}".\
format(percentage(totalassembled, totalreads))) | %prog filter *.consensus.fasta
Filter consensus sequence with min cluster size. | Below is the the instruction that describes the task:
### Input:
%prog filter *.consensus.fasta
Filter consensus sequence with min cluster size.
### Response:
def filter(args):
"""
%prog filter *.consensus.fasta
Filter consensus sequence with min cluster size.
"""
from jcvi.formats.fasta import Fasta, SeqIO
p = OptionParser(filter.__doc__)
p.add_option("--minsize", default=2, type="int",
help="Minimum cluster size")
p.set_outfile()
opts, args = p.parse_args(args)
if len(args) < 1:
sys.exit(not p.print_help())
fastafiles = args
minsize = opts.minsize
totalreads = totalassembled = 0
fw = must_open(opts.outfile, "w")
for i, fastafile in enumerate(fastafiles):
f = Fasta(fastafile, lazy=True)
pf = "s{0:03d}".format(i)
nreads = nsingletons = nclusters = 0
for desc, rec in f.iterdescriptions_ordered():
nclusters += 1
if desc.startswith("singleton"):
nsingletons += 1
nreads += 1
continue
# consensus_for_cluster_0 with 63 sequences
name, w, size, seqs = desc.split()
assert w == "with"
size = int(size)
nreads += size
if size < minsize:
continue
rec.description = rec.description.split(None, 1)[-1]
rec.id = pf + "_" + rec.id
SeqIO.write(rec, fw, "fasta")
logging.debug("Scanned {0} clusters with {1} reads ..".\
format(nclusters, nreads))
cclusters, creads = nclusters - nsingletons, nreads - nsingletons
logging.debug("Saved {0} clusters (min={1}) with {2} reads (avg:{3}) [{4}]".\
format(cclusters, minsize, creads, creads / cclusters, pf))
totalreads += nreads
totalassembled += nreads - nsingletons
logging.debug("Total assembled: {0}".\
format(percentage(totalassembled, totalreads))) |
def ref(self, ref):
"""
Filters the current DataFrame to only contain those rows whose reference is the given
reference name.
>>> heads_df = refs_df.ref('refs/heads/HEAD')
:param ref: Reference to get
:type ref: str
:rtype: ReferencesDataFrame
"""
return ReferencesDataFrame(self.filter(self.name == ref)._jdf,
self._session, self._implicits) | Filters the current DataFrame to only contain those rows whose reference is the given
reference name.
>>> heads_df = refs_df.ref('refs/heads/HEAD')
:param ref: Reference to get
:type ref: str
:rtype: ReferencesDataFrame | Below is the instruction that describes the task:
### Input:
Filters the current DataFrame to only contain those rows whose reference is the given
reference name.
>>> heads_df = refs_df.ref('refs/heads/HEAD')
:param ref: Reference to get
:type ref: str
:rtype: ReferencesDataFrame
### Response:
def ref(self, ref):
"""
Filters the current DataFrame to only contain those rows whose reference is the given
reference name.
>>> heads_df = refs_df.ref('refs/heads/HEAD')
:param ref: Reference to get
:type ref: str
:rtype: ReferencesDataFrame
"""
return ReferencesDataFrame(self.filter(self.name == ref)._jdf,
self._session, self._implicits) |
def _get_indices(values, selected, tolerance):
"""Get indices based on user-selected values.
Parameters
----------
values : ndarray (any dtype)
values present in the axis.
selected : ndarray (any dtype) or tuple or list
values selected by the user
tolerance : float
avoid rounding errors.
Returns
-------
idx_data : list of int
indices of row/column to select the data
idx_output : list of int
indices of row/column to copy into output
Notes
-----
This function is probably not very fast, but it's pretty robust. It keeps
the order, which is extremely important.
If you use values in the self.axis, you don't need to specify tolerance.
However, if you specify arbitrary points, floating point errors might
affect the actual values. Of course, using tolerance is much slower.
Maybe tolerance should be part of Select instead of here.
"""
idx_data = []
idx_output = []
for idx_of_selected, one_selected in enumerate(selected):
if tolerance is None or values.dtype.kind == 'U':
idx_of_data = where(values == one_selected)[0]
else:
idx_of_data = where(abs(values - one_selected) <= tolerance)[0] # actual use min
if len(idx_of_data) > 0:
idx_data.append(idx_of_data[0])
idx_output.append(idx_of_selected)
return idx_data, idx_output | Get indices based on user-selected values.
Parameters
----------
values : ndarray (any dtype)
values present in the axis.
selected : ndarray (any dtype) or tuple or list
values selected by the user
tolerance : float
avoid rounding errors.
Returns
-------
idx_data : list of int
indices of row/column to select the data
idx_output : list of int
indices of row/column to copy into output
Notes
-----
This function is probably not very fast, but it's pretty robust. It keeps
the order, which is extremely important.
If you use values in the self.axis, you don't need to specify tolerance.
However, if you specify arbitrary points, floating point errors might
affect the actual values. Of course, using tolerance is much slower.
Maybe tolerance should be part of Select instead of here. | Below is the instruction that describes the task:
### Input:
Get indices based on user-selected values.
Parameters
----------
values : ndarray (any dtype)
values present in the axis.
selected : ndarray (any dtype) or tuple or list
values selected by the user
tolerance : float
avoid rounding errors.
Returns
-------
idx_data : list of int
indices of row/column to select the data
idx_output : list of int
indices of row/column to copy into output
Notes
-----
This function is probably not very fast, but it's pretty robust. It keeps
the order, which is extremely important.
If you use values in the self.axis, you don't need to specify tolerance.
However, if you specify arbitrary points, floating point errors might
affect the actual values. Of course, using tolerance is much slower.
Maybe tolerance should be part of Select instead of here.
### Response:
def _get_indices(values, selected, tolerance):
"""Get indices based on user-selected values.
Parameters
----------
values : ndarray (any dtype)
values present in the axis.
selected : ndarray (any dtype) or tuple or list
values selected by the user
tolerance : float
avoid rounding errors.
Returns
-------
idx_data : list of int
indices of row/column to select the data
idx_output : list of int
indices of row/column to copy into output
Notes
-----
This function is probably not very fast, but it's pretty robust. It keeps
the order, which is extremely important.
If you use values in the self.axis, you don't need to specify tolerance.
However, if you specify arbitrary points, floating point errors might
affect the actual values. Of course, using tolerance is much slower.
Maybe tolerance should be part of Select instead of here.
"""
idx_data = []
idx_output = []
for idx_of_selected, one_selected in enumerate(selected):
if tolerance is None or values.dtype.kind == 'U':
idx_of_data = where(values == one_selected)[0]
else:
idx_of_data = where(abs(values - one_selected) <= tolerance)[0] # actual use min
if len(idx_of_data) > 0:
idx_data.append(idx_of_data[0])
idx_output.append(idx_of_selected)
return idx_data, idx_output |
def _path_parts(self, pth):
"""Return a list of all directories in the path ``pth``.
"""
res = re.split(r"[\\/]", pth)
if res and os.path.splitdrive(res[0]) == (res[0], ''):
res[0] += os.path.sep
return res | Return a list of all directories in the path ``pth``. | Below is the the instruction that describes the task:
### Input:
Return a list of all directories in the path ``pth``.
### Response:
def _path_parts(self, pth):
"""Return a list of all directories in the path ``pth``.
"""
res = re.split(r"[\\/]", pth)
if res and os.path.splitdrive(res[0]) == (res[0], ''):
res[0] += os.path.sep
return res |
def resolve(self, graph):
"""
Resolve a scoped component, respecting the graph cache.
"""
cached = graph.get(self.scoped_key)
if cached:
return cached
component = self.create(graph)
graph.assign(self.scoped_key, component)
return component | Resolve a scoped component, respecting the graph cache. | Below is the the instruction that describes the task:
### Input:
Resolve a scoped component, respecting the graph cache.
### Response:
def resolve(self, graph):
"""
Resolve a scoped component, respecting the graph cache.
"""
cached = graph.get(self.scoped_key)
if cached:
return cached
component = self.create(graph)
graph.assign(self.scoped_key, component)
return component |
def update(self, friendly_name=values.unset, customer_name=values.unset,
street=values.unset, city=values.unset, region=values.unset,
postal_code=values.unset, emergency_enabled=values.unset,
auto_correct_address=values.unset):
"""
Update the AddressInstance
:param unicode friendly_name: A string to describe the resource
:param unicode customer_name: The name to associate with the address
:param unicode street: The number and street address of the address
:param unicode city: The city of the address
:param unicode region: The state or region of the address
:param unicode postal_code: The postal code of the address
:param bool emergency_enabled: Whether to enable emergency calling on the address
:param bool auto_correct_address: Whether we should automatically correct the address
:returns: Updated AddressInstance
:rtype: twilio.rest.api.v2010.account.address.AddressInstance
"""
return self._proxy.update(
friendly_name=friendly_name,
customer_name=customer_name,
street=street,
city=city,
region=region,
postal_code=postal_code,
emergency_enabled=emergency_enabled,
auto_correct_address=auto_correct_address,
) | Update the AddressInstance
:param unicode friendly_name: A string to describe the resource
:param unicode customer_name: The name to associate with the address
:param unicode street: The number and street address of the address
:param unicode city: The city of the address
:param unicode region: The state or region of the address
:param unicode postal_code: The postal code of the address
:param bool emergency_enabled: Whether to enable emergency calling on the address
:param bool auto_correct_address: Whether we should automatically correct the address
:returns: Updated AddressInstance
:rtype: twilio.rest.api.v2010.account.address.AddressInstance | Below is the instruction that describes the task:
### Input:
Update the AddressInstance
:param unicode friendly_name: A string to describe the resource
:param unicode customer_name: The name to associate with the address
:param unicode street: The number and street address of the address
:param unicode city: The city of the address
:param unicode region: The state or region of the address
:param unicode postal_code: The postal code of the address
:param bool emergency_enabled: Whether to enable emergency calling on the address
:param bool auto_correct_address: Whether we should automatically correct the address
:returns: Updated AddressInstance
:rtype: twilio.rest.api.v2010.account.address.AddressInstance
### Response:
def update(self, friendly_name=values.unset, customer_name=values.unset,
street=values.unset, city=values.unset, region=values.unset,
postal_code=values.unset, emergency_enabled=values.unset,
auto_correct_address=values.unset):
"""
Update the AddressInstance
:param unicode friendly_name: A string to describe the resource
:param unicode customer_name: The name to associate with the address
:param unicode street: The number and street address of the address
:param unicode city: The city of the address
:param unicode region: The state or region of the address
:param unicode postal_code: The postal code of the address
:param bool emergency_enabled: Whether to enable emergency calling on the address
:param bool auto_correct_address: Whether we should automatically correct the address
:returns: Updated AddressInstance
:rtype: twilio.rest.api.v2010.account.address.AddressInstance
"""
return self._proxy.update(
friendly_name=friendly_name,
customer_name=customer_name,
street=street,
city=city,
region=region,
postal_code=postal_code,
emergency_enabled=emergency_enabled,
auto_correct_address=auto_correct_address,
) |
def getConnectedSynapses(self, columnIndex, connectedSynapses):
"""
:param connectedSynapses: (list) will be overwritten
:returns: (iter) the connected synapses for a given column.
``connectedSynapses`` size must match the number of inputs"""
assert(columnIndex < self._numColumns)
connectedSynapses[:] = self._connectedSynapses[columnIndex] | :param connectedSynapses: (list) will be overwritten
:returns: (iter) the connected synapses for a given column.
``connectedSynapses`` size must match the number of inputs | Below is the instruction that describes the task:
### Input:
:param connectedSynapses: (list) will be overwritten
:returns: (iter) the connected synapses for a given column.
``connectedSynapses`` size must match the number of inputs
### Response:
def getConnectedSynapses(self, columnIndex, connectedSynapses):
"""
:param connectedSynapses: (list) will be overwritten
:returns: (iter) the connected synapses for a given column.
``connectedSynapses`` size must match the number of inputs"""
assert(columnIndex < self._numColumns)
connectedSynapses[:] = self._connectedSynapses[columnIndex] |
def can_run_c_extension(name=None):
"""
Determine whether the given Python C extension loads correctly.
If ``name`` is ``None``, tests all Python C extensions,
and return ``True`` if and only if all load correctly.
:param string name: the name of the Python C extension to test
:rtype: bool
"""
def can_run_cdtw():
""" Python C extension for computing DTW """
try:
import aeneas.cdtw.cdtw
return True
except ImportError:
return False
def can_run_cmfcc():
""" Python C extension for computing MFCC """
try:
import aeneas.cmfcc.cmfcc
return True
except ImportError:
return False
def can_run_cew():
""" Python C extension for synthesizing with eSpeak """
try:
import aeneas.cew.cew
return True
except ImportError:
return False
def can_run_cfw():
""" Python C extension for synthesizing with Festival """
try:
import aeneas.cfw.cfw
return True
except ImportError:
return False
if name == "cdtw":
return can_run_cdtw()
elif name == "cmfcc":
return can_run_cmfcc()
elif name == "cew":
return can_run_cew()
elif name == "cfw":
return can_run_cfw()
else:
# NOTE cfw is still experimental!
return can_run_cdtw() and can_run_cmfcc() and can_run_cew() | Determine whether the given Python C extension loads correctly.
If ``name`` is ``None``, tests all Python C extensions,
and return ``True`` if and only if all load correctly.
:param string name: the name of the Python C extension to test
:rtype: bool | Below is the instruction that describes the task:
### Input:
Determine whether the given Python C extension loads correctly.
If ``name`` is ``None``, tests all Python C extensions,
and return ``True`` if and only if all load correctly.
:param string name: the name of the Python C extension to test
:rtype: bool
### Response:
def can_run_c_extension(name=None):
"""
Determine whether the given Python C extension loads correctly.
If ``name`` is ``None``, tests all Python C extensions,
and return ``True`` if and only if all load correctly.
:param string name: the name of the Python C extension to test
:rtype: bool
"""
def can_run_cdtw():
""" Python C extension for computing DTW """
try:
import aeneas.cdtw.cdtw
return True
except ImportError:
return False
def can_run_cmfcc():
""" Python C extension for computing MFCC """
try:
import aeneas.cmfcc.cmfcc
return True
except ImportError:
return False
def can_run_cew():
""" Python C extension for synthesizing with eSpeak """
try:
import aeneas.cew.cew
return True
except ImportError:
return False
def can_run_cfw():
""" Python C extension for synthesizing with Festival """
try:
import aeneas.cfw.cfw
return True
except ImportError:
return False
if name == "cdtw":
return can_run_cdtw()
elif name == "cmfcc":
return can_run_cmfcc()
elif name == "cew":
return can_run_cew()
elif name == "cfw":
return can_run_cfw()
else:
# NOTE cfw is still experimental!
return can_run_cdtw() and can_run_cmfcc() and can_run_cew() |
def usable_ids(cls, id, accept_multi=True):
""" Retrieve id from input which can be an id or a cn."""
try:
qry_id = [int(id)]
except ValueError:
try:
qry_id = cls.from_cn(id)
except Exception:
qry_id = None
if not qry_id or not accept_multi and len(qry_id) != 1:
msg = 'unknown identifier %s' % id
cls.error(msg)
return qry_id if accept_multi else qry_id[0] | Retrieve id from input which can be an id or a cn. | Below is the the instruction that describes the task:
### Input:
Retrieve id from input which can be an id or a cn.
### Response:
def usable_ids(cls, id, accept_multi=True):
""" Retrieve id from input which can be an id or a cn."""
try:
qry_id = [int(id)]
except ValueError:
try:
qry_id = cls.from_cn(id)
except Exception:
qry_id = None
if not qry_id or not accept_multi and len(qry_id) != 1:
msg = 'unknown identifier %s' % id
cls.error(msg)
return qry_id if accept_multi else qry_id[0] |
def _validate_arguments(self):
"""method to sanitize model parameters
Parameters
---------
None
Returns
-------
None
"""
super(SplineTerm, self)._validate_arguments()
if self.basis not in self._bases:
raise ValueError("basis must be one of {}, "\
"but found: {}".format(self._bases, self.basis))
# n_splines
self.n_splines = check_param(self.n_splines, param_name='n_splines',
dtype='int', constraint='>= 0')
# spline_order
self.spline_order = check_param(self.spline_order,
param_name='spline_order',
dtype='int', constraint='>= 0')
# n_splines + spline_order
if not self.n_splines > self.spline_order:
raise ValueError('n_splines must be > spline_order. '\
'found: n_splines = {} and spline_order = {}'\
.format(self.n_splines, self.spline_order))
# by
if self.by is not None:
self.by = check_param(self.by,
param_name='by',
dtype='int', constraint='>= 0')
return self | method to sanitize model parameters
Parameters
---------
None
Returns
-------
None | Below is the instruction that describes the task:
### Input:
method to sanitize model parameters
Parameters
---------
None
Returns
-------
None
### Response:
def _validate_arguments(self):
"""method to sanitize model parameters
Parameters
---------
None
Returns
-------
None
"""
super(SplineTerm, self)._validate_arguments()
if self.basis not in self._bases:
raise ValueError("basis must be one of {}, "\
"but found: {}".format(self._bases, self.basis))
# n_splines
self.n_splines = check_param(self.n_splines, param_name='n_splines',
dtype='int', constraint='>= 0')
# spline_order
self.spline_order = check_param(self.spline_order,
param_name='spline_order',
dtype='int', constraint='>= 0')
# n_splines + spline_order
if not self.n_splines > self.spline_order:
raise ValueError('n_splines must be > spline_order. '\
'found: n_splines = {} and spline_order = {}'\
.format(self.n_splines, self.spline_order))
# by
if self.by is not None:
self.by = check_param(self.by,
param_name='by',
dtype='int', constraint='>= 0')
return self |
def get_partition_trees(self, p):
"""
Return the trees associated with a partition, p
"""
trees = []
for grp in p.get_membership():
try:
result = self.get_group_result(grp)
trees.append(result['ml_tree'])
except ValueError:
trees.append(None)
logger.error('No tree found for group {}'.format(grp))
return trees | Return the trees associated with a partition, p | Below is the the instruction that describes the task:
### Input:
Return the trees associated with a partition, p
### Response:
def get_partition_trees(self, p):
"""
Return the trees associated with a partition, p
"""
trees = []
for grp in p.get_membership():
try:
result = self.get_group_result(grp)
trees.append(result['ml_tree'])
except ValueError:
trees.append(None)
logger.error('No tree found for group {}'.format(grp))
return trees |
def revoke_token(self, token):
"""
ADMIN ONLY. Returns True or False, depending on whether deletion of the
specified token was successful.
"""
resp, resp_body = self.method_delete("tokens/%s" % token, admin=True)
if resp.status_code in (401, 403):
raise exc.AuthorizationFailure("You must be an admin to make this "
"call.")
return 200 <= resp.status_code < 300 | ADMIN ONLY. Returns True or False, depending on whether deletion of the
specified token was successful. | Below is the instruction that describes the task:
### Input:
ADMIN ONLY. Returns True or False, depending on whether deletion of the
specified token was successful.
### Response:
def revoke_token(self, token):
"""
ADMIN ONLY. Returns True or False, depending on whether deletion of the
specified token was successful.
"""
resp, resp_body = self.method_delete("tokens/%s" % token, admin=True)
if resp.status_code in (401, 403):
raise exc.AuthorizationFailure("You must be an admin to make this "
"call.")
return 200 <= resp.status_code < 300 |
def unordered_storage(config, name=None):
'''Return an unordered storage system based on the specified config.
The canonical example of such a storage container is
``defaultdict(set)``. Thus, the return value of this method contains
keys and values. The values are unordered sets.
Args:
config (dict): Defines the configurations for the storage.
For in-memory storage, the config ``{'type': 'dict'}`` will
suffice. For Redis storage, the type should be ``'redis'`` and
the configurations for the Redis database should be supplied
under the key ``'redis'``. These parameters should be in a form
suitable for `redis.Redis`. The parameters may alternatively
contain references to environment variables, in which case
literal configuration values should be replaced by dicts of
the form::
{'env': 'REDIS_HOSTNAME',
'default': 'localhost'}
For a full example, see :ref:`minhash_lsh_at_scale`
name (bytes, optional): A reference name for this storage container.
For dict-type containers, this is ignored. For Redis containers,
this name is used to prefix keys pertaining to this storage
container within the database.
'''
tp = config['type']
if tp == 'dict':
return DictSetStorage(config)
if tp == 'redis':
return RedisSetStorage(config, name=name) | Return an unordered storage system based on the specified config.
The canonical example of such a storage container is
``defaultdict(set)``. Thus, the return value of this method contains
keys and values. The values are unordered sets.
Args:
config (dict): Defines the configurations for the storage.
For in-memory storage, the config ``{'type': 'dict'}`` will
suffice. For Redis storage, the type should be ``'redis'`` and
the configurations for the Redis database should be supplied
under the key ``'redis'``. These parameters should be in a form
suitable for `redis.Redis`. The parameters may alternatively
contain references to environment variables, in which case
literal configuration values should be replaced by dicts of
the form::
{'env': 'REDIS_HOSTNAME',
'default': 'localhost'}
For a full example, see :ref:`minhash_lsh_at_scale`
name (bytes, optional): A reference name for this storage container.
For dict-type containers, this is ignored. For Redis containers,
this name is used to prefix keys pertaining to this storage
container within the database. | Below is the instruction that describes the task:
### Input:
Return an unordered storage system based on the specified config.
The canonical example of such a storage container is
``defaultdict(set)``. Thus, the return value of this method contains
keys and values. The values are unordered sets.
Args:
config (dict): Defines the configurations for the storage.
For in-memory storage, the config ``{'type': 'dict'}`` will
suffice. For Redis storage, the type should be ``'redis'`` and
the configurations for the Redis database should be supplied
under the key ``'redis'``. These parameters should be in a form
suitable for `redis.Redis`. The parameters may alternatively
contain references to environment variables, in which case
literal configuration values should be replaced by dicts of
the form::
{'env': 'REDIS_HOSTNAME',
'default': 'localhost'}
For a full example, see :ref:`minhash_lsh_at_scale`
name (bytes, optional): A reference name for this storage container.
For dict-type containers, this is ignored. For Redis containers,
this name is used to prefix keys pertaining to this storage
container within the database.
### Response:
def unordered_storage(config, name=None):
'''Return an unordered storage system based on the specified config.
The canonical example of such a storage container is
``defaultdict(set)``. Thus, the return value of this method contains
keys and values. The values are unordered sets.
Args:
config (dict): Defines the configurations for the storage.
For in-memory storage, the config ``{'type': 'dict'}`` will
suffice. For Redis storage, the type should be ``'redis'`` and
the configurations for the Redis database should be supplied
under the key ``'redis'``. These parameters should be in a form
suitable for `redis.Redis`. The parameters may alternatively
contain references to environment variables, in which case
literal configuration values should be replaced by dicts of
the form::
{'env': 'REDIS_HOSTNAME',
'default': 'localhost'}
For a full example, see :ref:`minhash_lsh_at_scale`
name (bytes, optional): A reference name for this storage container.
For dict-type containers, this is ignored. For Redis containers,
this name is used to prefix keys pertaining to this storage
container within the database.
'''
tp = config['type']
if tp == 'dict':
return DictSetStorage(config)
if tp == 'redis':
return RedisSetStorage(config, name=name) |
def get_es_action_item(data_item, action_settings, es_type, id_field=None):
''' This method will return an item formated and ready to append
to the action list '''
action_item = dict.copy(action_settings)
if id_field is not None:
id_val = first(list(get_dict_key(data_item, id_field)))
if id_val is not None:
action_item['_id'] = id_val
elif data_item.get('id'):
if data_item['id'].startswith("%s/" % action_settings['_index']):
action_item['_id'] = "/".join(data_item['id'].split("/")[2:])
else:
action_item['_id'] = data_item['id']
if data_item.get('data'):
action_item['_source'] = data_item['data']
else:
action_item['_source'] = data_item
action_item['_type'] = es_type
return action_item | This method will return an item formated and ready to append
to the action list | Below is the instruction that describes the task:
### Input:
This method will return an item formated and ready to append
to the action list
### Response:
def get_es_action_item(data_item, action_settings, es_type, id_field=None):
''' This method will return an item formated and ready to append
to the action list '''
action_item = dict.copy(action_settings)
if id_field is not None:
id_val = first(list(get_dict_key(data_item, id_field)))
if id_val is not None:
action_item['_id'] = id_val
elif data_item.get('id'):
if data_item['id'].startswith("%s/" % action_settings['_index']):
action_item['_id'] = "/".join(data_item['id'].split("/")[2:])
else:
action_item['_id'] = data_item['id']
if data_item.get('data'):
action_item['_source'] = data_item['data']
else:
action_item['_source'] = data_item
action_item['_type'] = es_type
return action_item |
def decompose_by_component(model, observed_time_series, parameter_samples):
"""Decompose an observed time series into contributions from each component.
This method decomposes a time series according to the posterior represention
of a structural time series model. In particular, it:
- Computes the posterior marginal mean and covariances over the additive
model's latent space.
- Decomposes the latent posterior into the marginal blocks for each
model component.
- Maps the per-component latent posteriors back through each component's
observation model, to generate the time series modeled by that component.
Args:
model: An instance of `tfp.sts.Sum` representing a structural time series
model.
observed_time_series: `float` `Tensor` of shape
`batch_shape + [num_timesteps, 1]` (omitting the trailing unit dimension
is also supported when `num_timesteps > 1`), specifying an observed time
series. May optionally be an instance of `tfp.sts.MaskedTimeSeries`, which
includes a mask `Tensor` to specify timesteps with missing observations.
parameter_samples: Python `list` of `Tensors` representing posterior
samples of model parameters, with shapes `[concat([
[num_posterior_draws], param.prior.batch_shape,
param.prior.event_shape]) for param in model.parameters]`. This may
optionally also be a map (Python `dict`) of parameter names to
`Tensor` values.
Returns:
component_dists: A `collections.OrderedDict` instance mapping
component StructuralTimeSeries instances (elements of `model.components`)
to `tfd.Distribution` instances representing the posterior marginal
distributions on the process modeled by each component. Each distribution
has batch shape matching that of `posterior_means`/`posterior_covs`, and
event shape of `[num_timesteps]`.
#### Examples
Suppose we've built a model and fit it to data:
```python
day_of_week = tfp.sts.Seasonal(
num_seasons=7,
observed_time_series=observed_time_series,
name='day_of_week')
local_linear_trend = tfp.sts.LocalLinearTrend(
observed_time_series=observed_time_series,
name='local_linear_trend')
model = tfp.sts.Sum(components=[day_of_week, local_linear_trend],
observed_time_series=observed_time_series)
num_steps_forecast = 50
samples, kernel_results = tfp.sts.fit_with_hmc(model, observed_time_series)
```
To extract the contributions of individual components, pass the time series
and sampled parameters into `decompose_by_component`:
```python
component_dists = decompose_by_component(
model,
observed_time_series=observed_time_series,
parameter_samples=samples)
# Component mean and stddev have shape `[len(observed_time_series)]`.
day_of_week_effect_mean = component_dists[day_of_week].mean()
day_of_week_effect_stddev = component_dists[day_of_week].stddev()
```
Using the component distributions, we can visualize the uncertainty for
each component:
```
from matplotlib import pylab as plt
num_components = len(component_dists)
xs = np.arange(len(observed_time_series))
fig = plt.figure(figsize=(12, 3 * num_components))
for i, (component, component_dist) in enumerate(component_dists.items()):
# If in graph mode, replace `.numpy()` with `.eval()` or `sess.run()`.
component_mean = component_dist.mean().numpy()
component_stddev = component_dist.stddev().numpy()
ax = fig.add_subplot(num_components, 1, 1 + i)
ax.plot(xs, component_mean, lw=2)
ax.fill_between(xs,
component_mean - 2 * component_stddev,
component_mean + 2 * component_stddev,
alpha=0.5)
ax.set_title(component.name)
```
"""
with tf.compat.v1.name_scope('decompose_by_component',
values=[observed_time_series]):
[
observed_time_series,
is_missing
] = sts_util.canonicalize_observed_time_series_with_mask(
observed_time_series)
# Run smoothing over the training timesteps to extract the
# posterior on latents.
num_timesteps = dist_util.prefer_static_value(
tf.shape(input=observed_time_series))[-2]
ssm = model.make_state_space_model(num_timesteps=num_timesteps,
param_vals=parameter_samples)
posterior_means, posterior_covs = ssm.posterior_marginals(
observed_time_series, mask=is_missing)
return _decompose_from_posterior_marginals(
model, posterior_means, posterior_covs, parameter_samples) | Decompose an observed time series into contributions from each component.
This method decomposes a time series according to the posterior represention
of a structural time series model. In particular, it:
- Computes the posterior marginal mean and covariances over the additive
model's latent space.
- Decomposes the latent posterior into the marginal blocks for each
model component.
- Maps the per-component latent posteriors back through each component's
observation model, to generate the time series modeled by that component.
Args:
model: An instance of `tfp.sts.Sum` representing a structural time series
model.
observed_time_series: `float` `Tensor` of shape
`batch_shape + [num_timesteps, 1]` (omitting the trailing unit dimension
is also supported when `num_timesteps > 1`), specifying an observed time
series. May optionally be an instance of `tfp.sts.MaskedTimeSeries`, which
includes a mask `Tensor` to specify timesteps with missing observations.
parameter_samples: Python `list` of `Tensors` representing posterior
samples of model parameters, with shapes `[concat([
[num_posterior_draws], param.prior.batch_shape,
param.prior.event_shape]) for param in model.parameters]`. This may
optionally also be a map (Python `dict`) of parameter names to
`Tensor` values.
Returns:
component_dists: A `collections.OrderedDict` instance mapping
component StructuralTimeSeries instances (elements of `model.components`)
to `tfd.Distribution` instances representing the posterior marginal
distributions on the process modeled by each component. Each distribution
has batch shape matching that of `posterior_means`/`posterior_covs`, and
event shape of `[num_timesteps]`.
#### Examples
Suppose we've built a model and fit it to data:
```python
day_of_week = tfp.sts.Seasonal(
num_seasons=7,
observed_time_series=observed_time_series,
name='day_of_week')
local_linear_trend = tfp.sts.LocalLinearTrend(
observed_time_series=observed_time_series,
name='local_linear_trend')
model = tfp.sts.Sum(components=[day_of_week, local_linear_trend],
observed_time_series=observed_time_series)
num_steps_forecast = 50
samples, kernel_results = tfp.sts.fit_with_hmc(model, observed_time_series)
```
To extract the contributions of individual components, pass the time series
and sampled parameters into `decompose_by_component`:
```python
component_dists = decompose_by_component(
model,
observed_time_series=observed_time_series,
parameter_samples=samples)
# Component mean and stddev have shape `[len(observed_time_series)]`.
day_of_week_effect_mean = component_dists[day_of_week].mean()
day_of_week_effect_stddev = component_dists[day_of_week].stddev()
```
Using the component distributions, we can visualize the uncertainty for
each component:
```
from matplotlib import pylab as plt
num_components = len(component_dists)
xs = np.arange(len(observed_time_series))
fig = plt.figure(figsize=(12, 3 * num_components))
for i, (component, component_dist) in enumerate(component_dists.items()):
# If in graph mode, replace `.numpy()` with `.eval()` or `sess.run()`.
component_mean = component_dist.mean().numpy()
component_stddev = component_dist.stddev().numpy()
ax = fig.add_subplot(num_components, 1, 1 + i)
ax.plot(xs, component_mean, lw=2)
ax.fill_between(xs,
component_mean - 2 * component_stddev,
component_mean + 2 * component_stddev,
alpha=0.5)
ax.set_title(component.name)
``` | Below is the the instruction that describes the task:
### Input:
Decompose an observed time series into contributions from each component.
This method decomposes a time series according to the posterior representation
of a structural time series model. In particular, it:
- Computes the posterior marginal mean and covariances over the additive
model's latent space.
- Decomposes the latent posterior into the marginal blocks for each
model component.
- Maps the per-component latent posteriors back through each component's
observation model, to generate the time series modeled by that component.
Args:
model: An instance of `tfp.sts.Sum` representing a structural time series
model.
observed_time_series: `float` `Tensor` of shape
`batch_shape + [num_timesteps, 1]` (omitting the trailing unit dimension
is also supported when `num_timesteps > 1`), specifying an observed time
series. May optionally be an instance of `tfp.sts.MaskedTimeSeries`, which
includes a mask `Tensor` to specify timesteps with missing observations.
parameter_samples: Python `list` of `Tensors` representing posterior
samples of model parameters, with shapes `[concat([
[num_posterior_draws], param.prior.batch_shape,
param.prior.event_shape]) for param in model.parameters]`. This may
optionally also be a map (Python `dict`) of parameter names to
`Tensor` values.
Returns:
component_dists: A `collections.OrderedDict` instance mapping
component StructuralTimeSeries instances (elements of `model.components`)
to `tfd.Distribution` instances representing the posterior marginal
distributions on the process modeled by each component. Each distribution
has batch shape matching that of `posterior_means`/`posterior_covs`, and
event shape of `[num_timesteps]`.
#### Examples
Suppose we've built a model and fit it to data:
```python
day_of_week = tfp.sts.Seasonal(
num_seasons=7,
observed_time_series=observed_time_series,
name='day_of_week')
local_linear_trend = tfp.sts.LocalLinearTrend(
observed_time_series=observed_time_series,
name='local_linear_trend')
model = tfp.sts.Sum(components=[day_of_week, local_linear_trend],
observed_time_series=observed_time_series)
num_steps_forecast = 50
samples, kernel_results = tfp.sts.fit_with_hmc(model, observed_time_series)
```
To extract the contributions of individual components, pass the time series
and sampled parameters into `decompose_by_component`:
```python
component_dists = decompose_by_component(
model,
observed_time_series=observed_time_series,
parameter_samples=samples)
# Component mean and stddev have shape `[len(observed_time_series)]`.
day_of_week_effect_mean = component_dists[day_of_week].mean()
day_of_week_effect_stddev = component_dists[day_of_week].stddev()
```
Using the component distributions, we can visualize the uncertainty for
each component:
```
from matplotlib import pylab as plt
num_components = len(component_dists)
xs = np.arange(len(observed_time_series))
fig = plt.figure(figsize=(12, 3 * num_components))
for i, (component, component_dist) in enumerate(component_dists.items()):
# If in graph mode, replace `.numpy()` with `.eval()` or `sess.run()`.
component_mean = component_dist.mean().numpy()
component_stddev = component_dist.stddev().numpy()
ax = fig.add_subplot(num_components, 1, 1 + i)
ax.plot(xs, component_mean, lw=2)
ax.fill_between(xs,
component_mean - 2 * component_stddev,
component_mean + 2 * component_stddev,
alpha=0.5)
ax.set_title(component.name)
```
### Response:
def decompose_by_component(model, observed_time_series, parameter_samples):
  """Decompose an observed time series into contributions from each component.
  This method decomposes a time series according to the posterior representation
  of a structural time series model. In particular, it:
    - Computes the posterior marginal mean and covariances over the additive
      model's latent space.
    - Decomposes the latent posterior into the marginal blocks for each
      model component.
    - Maps the per-component latent posteriors back through each component's
      observation model, to generate the time series modeled by that component.
  Args:
    model: An instance of `tfp.sts.Sum` representing a structural time series
      model.
    observed_time_series: `float` `Tensor` of shape
      `batch_shape + [num_timesteps, 1]` (omitting the trailing unit dimension
      is also supported when `num_timesteps > 1`), specifying an observed time
      series. May optionally be an instance of `tfp.sts.MaskedTimeSeries`, which
      includes a mask `Tensor` to specify timesteps with missing observations.
    parameter_samples: Python `list` of `Tensors` representing posterior
      samples of model parameters, with shapes `[concat([
      [num_posterior_draws], param.prior.batch_shape,
      param.prior.event_shape]) for param in model.parameters]`. This may
      optionally also be a map (Python `dict`) of parameter names to
      `Tensor` values.
  Returns:
    component_dists: A `collections.OrderedDict` instance mapping
      component StructuralTimeSeries instances (elements of `model.components`)
      to `tfd.Distribution` instances representing the posterior marginal
      distributions on the process modeled by each component. Each distribution
      has batch shape matching that of `posterior_means`/`posterior_covs`, and
      event shape of `[num_timesteps]`.
  #### Examples
  Suppose we've built a model and fit it to data:
  ```python
  day_of_week = tfp.sts.Seasonal(
      num_seasons=7,
      observed_time_series=observed_time_series,
      name='day_of_week')
  local_linear_trend = tfp.sts.LocalLinearTrend(
      observed_time_series=observed_time_series,
      name='local_linear_trend')
  model = tfp.sts.Sum(components=[day_of_week, local_linear_trend],
                      observed_time_series=observed_time_series)
  num_steps_forecast = 50
  samples, kernel_results = tfp.sts.fit_with_hmc(model, observed_time_series)
  ```
  To extract the contributions of individual components, pass the time series
  and sampled parameters into `decompose_by_component`:
  ```python
  component_dists = decompose_by_component(
      model,
      observed_time_series=observed_time_series,
      parameter_samples=samples)
  # Component mean and stddev have shape `[len(observed_time_series)]`.
  day_of_week_effect_mean = component_dists[day_of_week].mean()
  day_of_week_effect_stddev = component_dists[day_of_week].stddev()
  ```
  Using the component distributions, we can visualize the uncertainty for
  each component:
  ```
  from matplotlib import pylab as plt
  num_components = len(component_dists)
  xs = np.arange(len(observed_time_series))
  fig = plt.figure(figsize=(12, 3 * num_components))
  for i, (component, component_dist) in enumerate(component_dists.items()):
    # If in graph mode, replace `.numpy()` with `.eval()` or `sess.run()`.
    component_mean = component_dist.mean().numpy()
    component_stddev = component_dist.stddev().numpy()
    ax = fig.add_subplot(num_components, 1, 1 + i)
    ax.plot(xs, component_mean, lw=2)
    ax.fill_between(xs,
                    component_mean - 2 * component_stddev,
                    component_mean + 2 * component_stddev,
                    alpha=0.5)
    ax.set_title(component.name)
  ```
  """
  with tf.compat.v1.name_scope('decompose_by_component',
                               values=[observed_time_series]):
    # Split the (possibly masked) input into the raw series plus its
    # missing-data mask, in canonical shape.
    [
        observed_time_series,
        is_missing
    ] = sts_util.canonicalize_observed_time_series_with_mask(
        observed_time_series)
    # Run smoothing over the training timesteps to extract the
    # posterior on latents.
    num_timesteps = dist_util.prefer_static_value(
        tf.shape(input=observed_time_series))[-2]
    # Joint state-space model over all components, evaluated at the
    # sampled parameter values.
    ssm = model.make_state_space_model(num_timesteps=num_timesteps,
                                       param_vals=parameter_samples)
    posterior_means, posterior_covs = ssm.posterior_marginals(
        observed_time_series, mask=is_missing)
    # Slice the joint latent posterior into per-component marginal blocks
    # and push each one through its component's observation model.
    return _decompose_from_posterior_marginals(
        model, posterior_means, posterior_covs, parameter_samples)
def redirectURL(self, realm, return_to=None, immediate=False):
        """Returns a URL with an encoded OpenID request.
        The resulting URL is the OpenID provider's endpoint URL with
        parameters appended as query arguments. You should redirect
        the user agent to this URL.
        OpenID 2.0 endpoints also accept POST requests, see
        C{L{shouldSendRedirect}} and C{L{formMarkup}}.
        @param realm: The URL (or URL pattern) that identifies your
            web site to the user when she is authorizing it.
        @type realm: str
        @param return_to: The URL that the OpenID provider will send the
            user back to after attempting to verify her identity.
            Not specifying a return_to URL means that the user will not
            be returned to the site issuing the request upon its
            completion.
        @type return_to: str
        @param immediate: If True, the OpenID provider is to send back
            a response immediately, useful for behind-the-scenes
            authentication attempts. Otherwise the OpenID provider
            may engage the user before providing a response. This is
            the default case, as the user may need to provide
            credentials or approve the request before a positive
            response can be sent.
        @type immediate: bool
        @returns: The URL to redirect the user agent to.
        @returntype: str
        """
        # Encode the request as an OpenID message, then serialize it onto
        # the provider's endpoint URL as query arguments.
        message = self.getMessage(realm, return_to, immediate)
        return message.toURL(self.endpoint.server_url) | Returns a URL with an encoded OpenID request.
The resulting URL is the OpenID provider's endpoint URL with
parameters appended as query arguments. You should redirect
the user agent to this URL.
OpenID 2.0 endpoints also accept POST requests, see
C{L{shouldSendRedirect}} and C{L{formMarkup}}.
@param realm: The URL (or URL pattern) that identifies your
web site to the user when she is authorizing it.
@type realm: str
@param return_to: The URL that the OpenID provider will send the
user back to after attempting to verify her identity.
Not specifying a return_to URL means that the user will not
be returned to the site issuing the request upon its
completion.
@type return_to: str
@param immediate: If True, the OpenID provider is to send back
a response immediately, useful for behind-the-scenes
authentication attempts. Otherwise the OpenID provider
may engage the user before providing a response. This is
the default case, as the user may need to provide
credentials or approve the request before a positive
response can be sent.
@type immediate: bool
@returns: The URL to redirect the user agent to.
@returntype: str | Below is the the instruction that describes the task:
### Input:
Returns a URL with an encoded OpenID request.
The resulting URL is the OpenID provider's endpoint URL with
parameters appended as query arguments. You should redirect
the user agent to this URL.
OpenID 2.0 endpoints also accept POST requests, see
C{L{shouldSendRedirect}} and C{L{formMarkup}}.
@param realm: The URL (or URL pattern) that identifies your
web site to the user when she is authorizing it.
@type realm: str
@param return_to: The URL that the OpenID provider will send the
user back to after attempting to verify her identity.
Not specifying a return_to URL means that the user will not
be returned to the site issuing the request upon its
completion.
@type return_to: str
@param immediate: If True, the OpenID provider is to send back
a response immediately, useful for behind-the-scenes
authentication attempts. Otherwise the OpenID provider
may engage the user before providing a response. This is
the default case, as the user may need to provide
credentials or approve the request before a positive
response can be sent.
@type immediate: bool
@returns: The URL to redirect the user agent to.
@returntype: str
### Response:
def redirectURL(self, realm, return_to=None, immediate=False):
        """Return the provider URL that carries this OpenID request.
        The request is first encoded as an OpenID message, then serialized
        onto the provider's endpoint URL as query arguments; redirect the
        user agent to the result. (OpenID 2.0 endpoints can also be POSTed
        to -- see C{L{shouldSendRedirect}} and C{L{formMarkup}}.)
        @param realm: The URL (or URL pattern) that identifies your
            web site to the user while she is authorizing it.
        @type realm: str
        @param return_to: The URL the OpenID provider sends the user back
            to after attempting to verify her identity. Leaving it unset
            means the user will not be returned to the issuing site when
            the request completes.
        @type return_to: str
        @param immediate: If True, ask the provider for an immediate,
            behind-the-scenes answer. Otherwise (the default) the provider
            may engage the user -- e.g. to collect credentials or approval
            -- before responding.
        @type immediate: bool
        @returns: The URL to redirect the user agent to.
        @returntype: str
        """
        return self.getMessage(realm, return_to, immediate).toURL(
            self.endpoint.server_url)
def valid_id(opts, id_):
    '''
    Returns if the passed id is valid
    '''
    try:
        # Reject path separators and NUL so a crafted id cannot escape the
        # pki directory; clean_path then confirms the resulting path stays
        # inside opts['pki_dir'].
        if any(x in id_ for x in ('/', '\\', str('\0'))):
            return False
        return bool(clean_path(opts['pki_dir'], id_))
    except (AttributeError, KeyError, TypeError, UnicodeDecodeError):
        # Any malformed opts/id (wrong type, missing key, bad bytes) is
        # simply an invalid id, never an exception for the caller.
        return False | Returns if the passed id is valid
### Input:
Returns if the passed id is valid
### Response:
def valid_id(opts, id_):
    '''
    Return True if the passed id is valid, False otherwise.
    '''
    # Path separators and NUL are never allowed in an id.
    forbidden = ('/', '\\', '\0')
    try:
        if any(char in id_ for char in forbidden):
            return False
        return bool(clean_path(opts['pki_dir'], id_))
    except (AttributeError, KeyError, TypeError, UnicodeDecodeError):
        # Malformed opts or id simply means "not valid".
        return False
def remove_mixin(target, name, mixedin=None, replace=True):
    """Remove a mixin with name (and reference) from targetand returns the
    replaced one or None.
    :param mixedin: a mixedin value or the last defined mixedin if is None
        (by default).
    :param bool replace: If True (default), the removed mixedin replaces
        the current mixin.
    """
    try:
        # Current value of the mixin; returned to the caller at the end.
        result = getattr(target, name)
    except AttributeError:
        raise Mixin.MixInError(
            "No mixin {0} exists in {1}".format(name, target)
        )
    mixedins_by_name = Mixin.get_mixedins_by_name(target)
    mixedins = mixedins_by_name.get(name)
    if mixedins:
        if mixedin is None:
            # No explicit mixedin: undo the most recent one for this name.
            mixedin = mixedins[-1]
            # NOTE(review): this drops the last TWO history entries, while
            # the index branch below removes exactly one -- ``[:-1]`` looks
            # intended here. Confirm before relying on mixin history depth.
            mixedins = mixedins[:-2]
        else:
            try:
                index = mixedins.index(mixedin)
            except ValueError:
                raise Mixin.MixInError(
                    "Mixedin {0} with name {1} does not exist \
                        in target {2}"
                    .format(mixedin, name, target)
                )
            # Remove exactly the matched history entry.
            mixedins = mixedins[0:index] + mixedins[index + 1:]
        if len(mixedins) == 0:
            # force to replace/delete the mixin even if replace is False
            # in order to stay in a consistent state
            if mixedin != Mixin.__NEW_CONTENT_KEY__:
                setattr(target, name, mixedin)
            else:
                delattr(target, name)
            del mixedins_by_name[name]
        else:
            if replace:
                setattr(target, name, mixedin)
            mixedins_by_name[name] = mixedins
    else:
        # shouldn't be raised except if removing has been done
        # manually
        raise Mixin.MixInError(
            "No mixin {0} exists in {1}".format(name, target))
    # clean mixedins if no one exists
    if len(mixedins_by_name) == 0:
        delattr(target, Mixin.__MIXEDIN_KEY__)
    return result | Remove a mixin with name (and reference) from targetand returns the
replaced one or None.
:param mixedin: a mixedin value or the last defined mixedin if is None
(by default).
:param bool replace: If True (default), the removed mixedin replaces
the current mixin. | Below is the the instruction that describes the task:
### Input:
Remove a mixin with name (and reference) from target and returns the
replaced one or None.
:param mixedin: a mixedin value or the last defined mixedin if is None
(by default).
:param bool replace: If True (default), the removed mixedin replaces
the current mixin.
### Response:
def remove_mixin(target, name, mixedin=None, replace=True):
    """Remove a mixin with name (and reference) from target and return the
    replaced one or None.
    :param mixedin: a mixedin value or the last defined mixedin if is None
        (by default).
    :param bool replace: If True (default), the removed mixedin replaces
        the current mixin.
    :raises Mixin.MixInError: if no mixin ``name`` (or no mixedin equal to
        ``mixedin``) exists in ``target``.
    """
    try:
        # Current value of the mixin; returned to the caller at the end.
        result = getattr(target, name)
    except AttributeError:
        raise Mixin.MixInError(
            "No mixin {0} exists in {1}".format(name, target)
        )
    mixedins_by_name = Mixin.get_mixedins_by_name(target)
    mixedins = mixedins_by_name.get(name)
    if mixedins:
        if mixedin is None:
            # No explicit mixedin: undo the most recent one for this name.
            mixedin = mixedins[-1]
            # BUGFIX: was ``mixedins[:-2]``, which silently discarded one
            # extra history entry; only the removed entry must be dropped,
            # consistent with the index-based branch below.
            mixedins = mixedins[:-1]
        else:
            try:
                index = mixedins.index(mixedin)
            except ValueError:
                raise Mixin.MixInError(
                    "Mixedin {0} with name {1} does not exist \
                        in target {2}"
                    .format(mixedin, name, target)
                )
            # Remove exactly the matched history entry.
            mixedins = mixedins[0:index] + mixedins[index + 1:]
        if len(mixedins) == 0:
            # force to replace/delete the mixin even if replace is False
            # in order to stay in a consistent state
            if mixedin != Mixin.__NEW_CONTENT_KEY__:
                setattr(target, name, mixedin)
            else:
                delattr(target, name)
            del mixedins_by_name[name]
        else:
            if replace:
                setattr(target, name, mixedin)
            mixedins_by_name[name] = mixedins
    else:
        # shouldn't be raised except if removing has been done
        # manually
        raise Mixin.MixInError(
            "No mixin {0} exists in {1}".format(name, target))
    # clean mixedins if no one exists
    if len(mixedins_by_name) == 0:
        delattr(target, Mixin.__MIXEDIN_KEY__)
    return result
def read_namespaced_replication_controller(self, name, namespace, **kwargs): # noqa: E501
        """read_namespaced_replication_controller # noqa: E501
        read the specified ReplicationController # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.read_namespaced_replication_controller(name, namespace, async_req=True)
        >>> result = thread.get()
        :param async_req bool
        :param str name: name of the ReplicationController (required)
        :param str namespace: object name and auth scope, such as for teams and projects (required)
        :param str pretty: If 'true', then the output is pretty printed.
        :param bool exact: Should the export be exact. Exact export maintains cluster-specific fields like 'Namespace'.
        :param bool export: Should this value be exported. Export strips fields that a user can not specify.
        :return: V1ReplicationController
        If the method is called asynchronously,
        returns the request thread.
        """
        # Callers of this wrapper only want the deserialized body, not the
        # full (data, status, headers) tuple from the *_with_http_info call.
        kwargs['_return_http_data_only'] = True
        if kwargs.get('async_req'):
            # Asynchronous: hand back the request thread immediately.
            return self.read_namespaced_replication_controller_with_http_info(name, namespace, **kwargs) # noqa: E501
        else:
            # Synchronous: block and return the deserialized object.
            (data) = self.read_namespaced_replication_controller_with_http_info(name, namespace, **kwargs) # noqa: E501
            return data | read_namespaced_replication_controller # noqa: E501
read the specified ReplicationController # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.read_namespaced_replication_controller(name, namespace, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the ReplicationController (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param str pretty: If 'true', then the output is pretty printed.
:param bool exact: Should the export be exact. Exact export maintains cluster-specific fields like 'Namespace'.
:param bool export: Should this value be exported. Export strips fields that a user can not specify.
:return: V1ReplicationController
If the method is called asynchronously,
returns the request thread. | Below is the the instruction that describes the task:
### Input:
read_namespaced_replication_controller # noqa: E501
read the specified ReplicationController # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.read_namespaced_replication_controller(name, namespace, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the ReplicationController (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param str pretty: If 'true', then the output is pretty printed.
:param bool exact: Should the export be exact. Exact export maintains cluster-specific fields like 'Namespace'.
:param bool export: Should this value be exported. Export strips fields that a user can not specify.
:return: V1ReplicationController
If the method is called asynchronously,
returns the request thread.
### Response:
def read_namespaced_replication_controller(self, name, namespace, **kwargs): # noqa: E501
        """read_namespaced_replication_controller # noqa: E501
        Fetch the ReplicationController called `name` in `namespace`. # noqa: E501
        The request is synchronous by default; pass async_req=True to get
        the request thread back instead:
        >>> thread = api.read_namespaced_replication_controller(name, namespace, async_req=True)
        >>> result = thread.get()
        :param async_req bool
        :param str name: name of the ReplicationController (required)
        :param str namespace: object name and auth scope, such as for teams and projects (required)
        :param str pretty: If 'true', then the output is pretty printed.
        :param bool exact: Should the export be exact. Exact export maintains cluster-specific fields like 'Namespace'.
        :param bool export: Should this value be exported. Export strips fields that a user can not specify.
        :return: V1ReplicationController
        If the method is called asynchronously,
        returns the request thread.
        """
        # Delegate to the *_with_http_info variant, stripping the HTTP
        # metadata so only the deserialized object (or thread) comes back.
        kwargs['_return_http_data_only'] = True
        if kwargs.get('async_req'):
            return self.read_namespaced_replication_controller_with_http_info(name, namespace, **kwargs) # noqa: E501
        return self.read_namespaced_replication_controller_with_http_info(name, namespace, **kwargs) # noqa: E501
async def version(self):
        """
        GET /api/version.{_format}
        Retrieve version number
        :return data related to the ext
        """
        # The access token travels as a query parameter; the response
        # format is selected by the extension configured on this client.
        params = {'access_token': self.token}
        url = '/api/version.{ext}'.format(ext=self.format)
        return await self.query(url, "get", **params) | GET /api/version.{_format}
Retrieve version number
:return data related to the ext | Below is the the instruction that describes the task:
### Input:
GET /api/version.{_format}
Retrieve version number
:return data related to the ext
### Response:
async def version(self):
        """
        GET /api/version.{_format}
        Retrieve the API version number.
        :return data describing the installed version
        """
        # Response format is chosen by the extension configured on the
        # client; the access token rides along as a query parameter.
        endpoint = '/api/version.{ext}'.format(ext=self.format)
        return await self.query(endpoint, "get", access_token=self.token)
def mtxv(m1, vin):
    """
    Multiplies the transpose of a 3x3 matrix
    on the left with a vector on the right.
    http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/mtxv_c.html
    :param m1: 3x3 double precision matrix.
    :type m1: 3x3-Element Array of floats
    :param vin: 3-dimensional double precision vector.
    :type vin: 3-Element Array of floats
    :return: 3-dimensional double precision vector.
    :rtype: 3-Element Array of floats
    """
    # Marshal Python inputs into ctypes buffers for the CSPICE call.
    m1 = stypes.toDoubleMatrix(m1)
    vin = stypes.toDoubleVector(vin)
    # Output buffer; filled in-place by mtxv_c below.
    vout = stypes.emptyDoubleVector(3)
    libspice.mtxv_c(m1, vin, vout)
    return stypes.cVectorToPython(vout) | Multiplies the transpose of a 3x3 matrix
on the left with a vector on the right.
http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/mtxv_c.html
:param m1: 3x3 double precision matrix.
:type m1: 3x3-Element Array of floats
:param vin: 3-dimensional double precision vector.
:type vin: 3-Element Array of floats
:return: 3-dimensional double precision vector.
:rtype: 3-Element Array of floats | Below is the the instruction that describes the task:
### Input:
Multiplies the transpose of a 3x3 matrix
on the left with a vector on the right.
http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/mtxv_c.html
:param m1: 3x3 double precision matrix.
:type m1: 3x3-Element Array of floats
:param vin: 3-dimensional double precision vector.
:type vin: 3-Element Array of floats
:return: 3-dimensional double precision vector.
:rtype: 3-Element Array of floats
### Response:
def mtxv(m1, vin):
    """
    Multiply the transpose of a 3x3 matrix (on the left) with a
    3-dimensional vector (on the right).
    http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/mtxv_c.html
    :param m1: 3x3 double precision matrix.
    :type m1: 3x3-Element Array of floats
    :param vin: 3-dimensional double precision vector.
    :type vin: 3-Element Array of floats
    :return: 3-dimensional double precision vector.
    :rtype: 3-Element Array of floats
    """
    # Marshal the inputs into ctypes buffers, let CSPICE fill the output
    # buffer in place, then convert back to a Python sequence.
    matrix = stypes.toDoubleMatrix(m1)
    vector = stypes.toDoubleVector(vin)
    result = stypes.emptyDoubleVector(3)
    libspice.mtxv_c(matrix, vector, result)
    return stypes.cVectorToPython(result)
def run(self):
        """ Listen to the stream and send events to the client. """
        channel = self._ssh_client.get_transport().open_session()
        # Keep a handle so other threads can close the channel to stop us.
        self._channel = channel
        channel.exec_command("gerrit stream-events")
        stdout = channel.makefile()
        stderr = channel.makefile_stderr()
        while not self._stop.is_set():
            try:
                if channel.exit_status_ready():
                    # Remote command ended: surface its stderr if present,
                    # otherwise report a generic closed-connection error.
                    if channel.recv_stderr_ready():
                        error = stderr.readline().strip()
                    else:
                        error = "Remote server connection closed"
                    self._error_event(error)
                    self._stop.set()
                else:
                    # Blocks until the next event line arrives.
                    data = stdout.readline()
                    self._gerrit.put_event(data)
            except Exception as e: # pylint: disable=W0703
                # Broad catch is deliberate: any failure ends the stream as
                # an error event instead of killing the thread noisily.
                self._error_event(repr(e))
                self._stop.set() | Listen to the stream and send events to the client.
### Input:
Listen to the stream and send events to the client.
### Response:
def run(self):
        """ Listen to the stream and send events to the client. """
        channel = self._ssh_client.get_transport().open_session()
        # Keep a handle so other threads can close the channel to stop us.
        self._channel = channel
        channel.exec_command("gerrit stream-events")
        stdout = channel.makefile()
        stderr = channel.makefile_stderr()
        while not self._stop.is_set():
            try:
                if channel.exit_status_ready():
                    # Remote command ended: surface its stderr if present,
                    # otherwise report a generic closed-connection error.
                    if channel.recv_stderr_ready():
                        error = stderr.readline().strip()
                    else:
                        error = "Remote server connection closed"
                    self._error_event(error)
                    self._stop.set()
                else:
                    # Blocks until the next event line arrives.
                    data = stdout.readline()
                    self._gerrit.put_event(data)
            except Exception as e: # pylint: disable=W0703
                # Broad catch is deliberate: any failure ends the stream as
                # an error event instead of killing the thread noisily.
                self._error_event(repr(e))
                self._stop.set()
def scalar_to_query_parameter(value, name=None):
    """Convert a scalar value into a query parameter.
    :type value: any
    :param value: A scalar value to convert into a query parameter.
    :type name: str
    :param name: (Optional) Name of the query parameter.
    :rtype: :class:`~google.cloud.bigquery.ScalarQueryParameter`
    :returns:
        A query parameter corresponding with the type and value of the plain
        Python object.
    :raises: :class:`~google.cloud.bigquery.dbapi.exceptions.ProgrammingError`
        if the type cannot be determined.
    """
    # NOTE(review): this initializer is dead code -- every branch below
    # either assigns parameter_type or raises.
    parameter_type = None
    # bool must be tested before Integral: bool is a subclass of int.
    if isinstance(value, bool):
        parameter_type = "BOOL"
    elif isinstance(value, numbers.Integral):
        parameter_type = "INT64"
    elif isinstance(value, numbers.Real):
        parameter_type = "FLOAT64"
    elif isinstance(value, decimal.Decimal):
        parameter_type = "NUMERIC"
    elif isinstance(value, six.text_type):
        parameter_type = "STRING"
    elif isinstance(value, six.binary_type):
        parameter_type = "BYTES"
    elif isinstance(value, datetime.datetime):
        # Naive datetimes map to DATETIME; timezone-aware ones to TIMESTAMP.
        parameter_type = "DATETIME" if value.tzinfo is None else "TIMESTAMP"
    elif isinstance(value, datetime.date):
        # Tested after datetime.datetime, which is a subclass of date.
        parameter_type = "DATE"
    elif isinstance(value, datetime.time):
        parameter_type = "TIME"
    else:
        raise exceptions.ProgrammingError(
            "encountered parameter {} with value {} of unexpected type".format(
                name, value
            )
        )
    return bigquery.ScalarQueryParameter(name, parameter_type, value) | Convert a scalar value into a query parameter.
:type value: any
:param value: A scalar value to convert into a query parameter.
:type name: str
:param name: (Optional) Name of the query parameter.
:rtype: :class:`~google.cloud.bigquery.ScalarQueryParameter`
:returns:
A query parameter corresponding with the type and value of the plain
Python object.
:raises: :class:`~google.cloud.bigquery.dbapi.exceptions.ProgrammingError`
if the type cannot be determined. | Below is the the instruction that describes the task:
### Input:
Convert a scalar value into a query parameter.
:type value: any
:param value: A scalar value to convert into a query parameter.
:type name: str
:param name: (Optional) Name of the query parameter.
:rtype: :class:`~google.cloud.bigquery.ScalarQueryParameter`
:returns:
A query parameter corresponding with the type and value of the plain
Python object.
:raises: :class:`~google.cloud.bigquery.dbapi.exceptions.ProgrammingError`
if the type cannot be determined.
### Response:
def scalar_to_query_parameter(value, name=None):
    """Convert a scalar value into a query parameter.
    :type value: any
    :param value: A scalar value to convert into a query parameter.
    :type name: str
    :param name: (Optional) Name of the query parameter.
    :rtype: :class:`~google.cloud.bigquery.ScalarQueryParameter`
    :returns:
        A query parameter corresponding with the type and value of the plain
        Python object.
    :raises: :class:`~google.cloud.bigquery.dbapi.exceptions.ProgrammingError`
        if the type cannot be determined.
    """
    # Branch order matters: bool is a subclass of int and datetime.datetime
    # is a subclass of datetime.date, so the more specific type is always
    # tested first. (A dead ``parameter_type = None`` initializer was
    # removed: every branch below assigns or raises.)
    if isinstance(value, bool):
        parameter_type = "BOOL"
    elif isinstance(value, numbers.Integral):
        parameter_type = "INT64"
    elif isinstance(value, numbers.Real):
        parameter_type = "FLOAT64"
    elif isinstance(value, decimal.Decimal):
        parameter_type = "NUMERIC"
    elif isinstance(value, six.text_type):
        parameter_type = "STRING"
    elif isinstance(value, six.binary_type):
        parameter_type = "BYTES"
    elif isinstance(value, datetime.datetime):
        # Naive datetimes map to DATETIME; timezone-aware ones to TIMESTAMP.
        parameter_type = "DATETIME" if value.tzinfo is None else "TIMESTAMP"
    elif isinstance(value, datetime.date):
        parameter_type = "DATE"
    elif isinstance(value, datetime.time):
        parameter_type = "TIME"
    else:
        raise exceptions.ProgrammingError(
            "encountered parameter {} with value {} of unexpected type".format(
                name, value
            )
        )
    return bigquery.ScalarQueryParameter(name, parameter_type, value)
def delete_objects(Bucket, Delete, MFA=None, RequestPayer=None,
                   region=None, key=None, keyid=None, profile=None):
    '''
    Delete objects in a given S3 bucket.
    Returns {deleted: true} if all objects were deleted
    and {deleted: false, failed: [key, ...]} otherwise
    CLI Example:
    .. code-block:: bash
        salt myminion boto_s3_bucket.delete_objects mybucket '{Objects: [Key: myobject]}'
    '''
    # CLI callers pass the Delete spec as a JSON string; normalize it.
    if isinstance(Delete, six.string_types):
        Delete = salt.utils.json.loads(Delete)
    if not isinstance(Delete, dict):
        raise SaltInvocationError("Malformed Delete request.")
    if 'Objects' not in Delete:
        raise SaltInvocationError("Malformed Delete request.")
    failed = []
    objs = Delete['Objects']
    # S3 DeleteObjects accepts at most 1000 keys per request, so chunk.
    for i in range(0, len(objs), 1000):
        chunk = objs[i:i+1000]
        subset = {'Objects': chunk, 'Quiet': True}
        try:
            args = {'Bucket': Bucket}
            args.update({'MFA': MFA}) if MFA else None
            args.update({'RequestPayer': RequestPayer}) if RequestPayer else None
            args.update({'Delete': subset})
            # NOTE(review): a fresh connection is built for every chunk;
            # hoisting _get_conn out of the loop looks safe -- confirm.
            conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
            ret = conn.delete_objects(**args)
            # Quiet mode: only per-key errors are reported back.
            failed += ret.get('Errors', [])
        except ClientError as e:
            return {'deleted': False, 'error': __utils__['boto3.get_error'](e)}
    if failed:
        return {'deleted': False, 'failed': failed}
    else:
        return {'deleted': True} | Delete objects in a given S3 bucket.
Returns {deleted: true} if all objects were deleted
and {deleted: false, failed: [key, ...]} otherwise
CLI Example:
.. code-block:: bash
salt myminion boto_s3_bucket.delete_objects mybucket '{Objects: [Key: myobject]}' | Below is the the instruction that describes the task:
### Input:
Delete objects in a given S3 bucket.
Returns {deleted: true} if all objects were deleted
and {deleted: false, failed: [key, ...]} otherwise
CLI Example:
.. code-block:: bash
salt myminion boto_s3_bucket.delete_objects mybucket '{Objects: [Key: myobject]}'
### Response:
def delete_objects(Bucket, Delete, MFA=None, RequestPayer=None,
region=None, key=None, keyid=None, profile=None):
'''
Delete objects in a given S3 bucket.
Returns {deleted: true} if all objects were deleted
and {deleted: false, failed: [key, ...]} otherwise
CLI Example:
.. code-block:: bash
salt myminion boto_s3_bucket.delete_objects mybucket '{Objects: [Key: myobject]}'
'''
if isinstance(Delete, six.string_types):
Delete = salt.utils.json.loads(Delete)
if not isinstance(Delete, dict):
raise SaltInvocationError("Malformed Delete request.")
if 'Objects' not in Delete:
raise SaltInvocationError("Malformed Delete request.")
failed = []
objs = Delete['Objects']
for i in range(0, len(objs), 1000):
chunk = objs[i:i+1000]
subset = {'Objects': chunk, 'Quiet': True}
try:
args = {'Bucket': Bucket}
args.update({'MFA': MFA}) if MFA else None
args.update({'RequestPayer': RequestPayer}) if RequestPayer else None
args.update({'Delete': subset})
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
ret = conn.delete_objects(**args)
failed += ret.get('Errors', [])
except ClientError as e:
return {'deleted': False, 'error': __utils__['boto3.get_error'](e)}
if failed:
return {'deleted': False, 'failed': failed}
else:
return {'deleted': True} |
def trees_by_path(self, path):
"""
Search trees by `path`.
Args:
path (str): :attr:`.Tree.path` property of :class:`.Tree`.
Returns:
set: Set of matching :class:`Tree` instances.
"""
return set(
self.path_db.get(path, OOSet()).keys()
) | Search trees by `path`.
Args:
path (str): :attr:`.Tree.path` property of :class:`.Tree`.
Returns:
set: Set of matching :class:`Tree` instances. | Below is the the instruction that describes the task:
### Input:
Search trees by `path`.
Args:
path (str): :attr:`.Tree.path` property of :class:`.Tree`.
Returns:
set: Set of matching :class:`Tree` instances.
### Response:
def trees_by_path(self, path):
"""
Search trees by `path`.
Args:
path (str): :attr:`.Tree.path` property of :class:`.Tree`.
Returns:
set: Set of matching :class:`Tree` instances.
"""
return set(
self.path_db.get(path, OOSet()).keys()
) |
def _ReadCompressedData(self, read_size):
"""Reads compressed data from the file-like object.
Args:
read_size (int): number of bytes of compressed data to read.
"""
self._uncompressed_data = self._zip_ext_file.read(read_size)
self._uncompressed_data_size = len(self._uncompressed_data) | Reads compressed data from the file-like object.
Args:
read_size (int): number of bytes of compressed data to read. | Below is the the instruction that describes the task:
### Input:
Reads compressed data from the file-like object.
Args:
read_size (int): number of bytes of compressed data to read.
### Response:
def _ReadCompressedData(self, read_size):
"""Reads compressed data from the file-like object.
Args:
read_size (int): number of bytes of compressed data to read.
"""
self._uncompressed_data = self._zip_ext_file.read(read_size)
self._uncompressed_data_size = len(self._uncompressed_data) |
def update_cache(self, data=None):
"""call with new data or set data to self.cache_data and call this
"""
if data:
self.cache_data = data
self.cache_updated = timezone.now()
self.save() | call with new data or set data to self.cache_data and call this | Below is the the instruction that describes the task:
### Input:
call with new data or set data to self.cache_data and call this
### Response:
def update_cache(self, data=None):
"""call with new data or set data to self.cache_data and call this
"""
if data:
self.cache_data = data
self.cache_updated = timezone.now()
self.save() |
def set_miter_limit(self, limit):
"""Sets the current miter limit within the cairo context.
If the current line join style is set to :obj:`MITER <LINE_JOIN_MITER>`
(see :meth:`set_line_join`),
the miter limit is used to determine
whether the lines should be joined with a bevel instead of a miter.
Cairo divides the length of the miter by the line width.
If the result is greater than the miter limit,
the style is converted to a bevel.
As with the other stroke parameters,
the current line cap style is examined by
:meth:`stroke`, :meth:`stroke_extents`, and :meth:`stroke_to_path`,
but does not have any effect during path construction.
The default miter limit value is 10.0,
which will convert joins with interior angles less than 11 degrees
to bevels instead of miters.
For reference,
a miter limit of 2.0 makes the miter cutoff at 60 degrees,
and a miter limit of 1.414 makes the cutoff at 90 degrees.
A miter limit for a desired angle can be computed as:
``miter_limit = 1. / sin(angle / 2.)``
:param limit: The miter limit to set.
:type limit: float
"""
cairo.cairo_set_miter_limit(self._pointer, limit)
self._check_status() | Sets the current miter limit within the cairo context.
If the current line join style is set to :obj:`MITER <LINE_JOIN_MITER>`
(see :meth:`set_line_join`),
the miter limit is used to determine
whether the lines should be joined with a bevel instead of a miter.
Cairo divides the length of the miter by the line width.
If the result is greater than the miter limit,
the style is converted to a bevel.
As with the other stroke parameters,
the current line cap style is examined by
:meth:`stroke`, :meth:`stroke_extents`, and :meth:`stroke_to_path`,
but does not have any effect during path construction.
The default miter limit value is 10.0,
which will convert joins with interior angles less than 11 degrees
to bevels instead of miters.
For reference,
a miter limit of 2.0 makes the miter cutoff at 60 degrees,
and a miter limit of 1.414 makes the cutoff at 90 degrees.
A miter limit for a desired angle can be computed as:
``miter_limit = 1. / sin(angle / 2.)``
:param limit: The miter limit to set.
:type limit: float | Below is the the instruction that describes the task:
### Input:
Sets the current miter limit within the cairo context.
If the current line join style is set to :obj:`MITER <LINE_JOIN_MITER>`
(see :meth:`set_line_join`),
the miter limit is used to determine
whether the lines should be joined with a bevel instead of a miter.
Cairo divides the length of the miter by the line width.
If the result is greater than the miter limit,
the style is converted to a bevel.
As with the other stroke parameters,
the current line cap style is examined by
:meth:`stroke`, :meth:`stroke_extents`, and :meth:`stroke_to_path`,
but does not have any effect during path construction.
The default miter limit value is 10.0,
which will convert joins with interior angles less than 11 degrees
to bevels instead of miters.
For reference,
a miter limit of 2.0 makes the miter cutoff at 60 degrees,
and a miter limit of 1.414 makes the cutoff at 90 degrees.
A miter limit for a desired angle can be computed as:
``miter_limit = 1. / sin(angle / 2.)``
:param limit: The miter limit to set.
:type limit: float
### Response:
def set_miter_limit(self, limit):
"""Sets the current miter limit within the cairo context.
If the current line join style is set to :obj:`MITER <LINE_JOIN_MITER>`
(see :meth:`set_line_join`),
the miter limit is used to determine
whether the lines should be joined with a bevel instead of a miter.
Cairo divides the length of the miter by the line width.
If the result is greater than the miter limit,
the style is converted to a bevel.
As with the other stroke parameters,
the current line cap style is examined by
:meth:`stroke`, :meth:`stroke_extents`, and :meth:`stroke_to_path`,
but does not have any effect during path construction.
The default miter limit value is 10.0,
which will convert joins with interior angles less than 11 degrees
to bevels instead of miters.
For reference,
a miter limit of 2.0 makes the miter cutoff at 60 degrees,
and a miter limit of 1.414 makes the cutoff at 90 degrees.
A miter limit for a desired angle can be computed as:
``miter_limit = 1. / sin(angle / 2.)``
:param limit: The miter limit to set.
:type limit: float
"""
cairo.cairo_set_miter_limit(self._pointer, limit)
self._check_status() |
def _validate_time_range(trange, status, msg):
'''
Check time range
'''
# If trange is empty, just return the current status & msg
if not trange:
return status, msg
if not isinstance(trange, dict):
status = False
msg = ('The time_range parameter for '
'wtmp beacon must '
'be a dictionary.')
if not all(k in trange for k in ('start', 'end')):
status = False
msg = ('The time_range parameter for '
'wtmp beacon must contain '
'start & end options.')
return status, msg | Check time range | Below is the the instruction that describes the task:
### Input:
Check time range
### Response:
def _validate_time_range(trange, status, msg):
'''
Check time range
'''
# If trange is empty, just return the current status & msg
if not trange:
return status, msg
if not isinstance(trange, dict):
status = False
msg = ('The time_range parameter for '
'wtmp beacon must '
'be a dictionary.')
if not all(k in trange for k in ('start', 'end')):
status = False
msg = ('The time_range parameter for '
'wtmp beacon must contain '
'start & end options.')
return status, msg |
def p_expr_pre_incdec(p):
'''expr : INC variable
| DEC variable'''
p[0] = ast.PreIncDecOp(p[1], p[2], lineno=p.lineno(1)) | expr : INC variable
| DEC variable | Below is the the instruction that describes the task:
### Input:
expr : INC variable
| DEC variable
### Response:
def p_expr_pre_incdec(p):
'''expr : INC variable
| DEC variable'''
p[0] = ast.PreIncDecOp(p[1], p[2], lineno=p.lineno(1)) |
def request(req=None, method=None, requires_response=True):
"""Call function req and then emit its results to the LSP server."""
if req is None:
return functools.partial(request, method=method,
requires_response=requires_response)
@functools.wraps(req)
def wrapper(self, *args, **kwargs):
if self.lsp_ready:
params = req(self, *args, **kwargs)
if params is not None:
self.emit_request(method, params, requires_response)
return wrapper | Call function req and then emit its results to the LSP server. | Below is the the instruction that describes the task:
### Input:
Call function req and then emit its results to the LSP server.
### Response:
def request(req=None, method=None, requires_response=True):
"""Call function req and then emit its results to the LSP server."""
if req is None:
return functools.partial(request, method=method,
requires_response=requires_response)
@functools.wraps(req)
def wrapper(self, *args, **kwargs):
if self.lsp_ready:
params = req(self, *args, **kwargs)
if params is not None:
self.emit_request(method, params, requires_response)
return wrapper |
def match_as_dict(self, film_sl_vectors, substrate_sl_vectors, film_vectors, substrate_vectors, match_area):
"""
Returns dict which contains ZSL match
Args:
film_miller(array)
substrate_miller(array)
"""
d = {}
d["film_sl_vecs"] = np.asarray(film_sl_vectors)
d["sub_sl_vecs"] = np.asarray(substrate_sl_vectors)
d["match_area"] = match_area
d["film_vecs"] = np.asarray(film_vectors)
d["sub_vecs"] = np.asarray(substrate_vectors)
return d | Returns dict which contains ZSL match
Args:
film_miller(array)
substrate_miller(array) | Below is the the instruction that describes the task:
### Input:
Returns dict which contains ZSL match
Args:
film_miller(array)
substrate_miller(array)
### Response:
def match_as_dict(self, film_sl_vectors, substrate_sl_vectors, film_vectors, substrate_vectors, match_area):
"""
Returns dict which contains ZSL match
Args:
film_miller(array)
substrate_miller(array)
"""
d = {}
d["film_sl_vecs"] = np.asarray(film_sl_vectors)
d["sub_sl_vecs"] = np.asarray(substrate_sl_vectors)
d["match_area"] = match_area
d["film_vecs"] = np.asarray(film_vectors)
d["sub_vecs"] = np.asarray(substrate_vectors)
return d |
def _parseMzml(self):
""" #TODO: docstring """
#TODO: this is already pretty nested, reduce that eg by using a function
# processRunNode
for event, element, elementTag in self:
if elementTag == 'mzML':
metadataNode = ETREE.Element(self.elementTag,
self.element.attrib
)
_, _, targetTag = next(self)
break
while targetTag != 'mzML':
if targetTag == 'run':
runNode = ETREE.Element('run', self.element.attrib)
next(self)
while self.event != 'end' or self.elementTag != 'run':
if self.elementTag == 'spectrumList':
#Add spectrumListNode
specListAttrib = {'defaultDataProcessingRef':
self.element.attrib['defaultDataProcessingRef']
}
specListNode = ETREE.Element('spectrumList', specListAttrib)
runNode.append(specListNode)
#Parse and yield spectrum xml elements
while self.event != 'end' or self.elementTag != 'spectrumList':
if self.event == 'end' and self.elementTag == 'spectrum':
yield self.element
clearParsedElements(self.element)
next(self)
elif self.elementTag == 'chromatogramList':
#Add chromatogramListNode
chromListAttrib = {'defaultDataProcessingRef':
self.element.attrib['defaultDataProcessingRef']
}
chromListNode = ETREE.Element('chromatogramList',
chromListAttrib
)
runNode.append(chromListNode)
#Parse and store chromatogram xml elements
while self.event != 'end' or self.elementTag != 'chromatogramList':
if self.event == 'end' and self.elementTag == 'chromatogram':
self.chromatogramList.append(self.element)
#Alternatively also the chromatogram xml
#elements could be yielded:
# yield self.element
# clearParsedElements(self.element)
next(self)
else:
runNode.append(self.element)
next(self)
metadataNode.append(runNode)
break
else:
while self.event != 'end' or self.elementTag != targetTag:
next(self)
metadataNode.append(self.element)
_, _, targetTag = next(self)
recClearTag(metadataNode)
recRemoveTreeFormating(metadataNode)
self.metadataNode = recCopyElement(metadataNode)
self.openfile.close() | #TODO: docstring | Below is the the instruction that describes the task:
### Input:
#TODO: docstring
### Response:
def _parseMzml(self):
""" #TODO: docstring """
#TODO: this is already pretty nested, reduce that eg by using a function
# processRunNode
for event, element, elementTag in self:
if elementTag == 'mzML':
metadataNode = ETREE.Element(self.elementTag,
self.element.attrib
)
_, _, targetTag = next(self)
break
while targetTag != 'mzML':
if targetTag == 'run':
runNode = ETREE.Element('run', self.element.attrib)
next(self)
while self.event != 'end' or self.elementTag != 'run':
if self.elementTag == 'spectrumList':
#Add spectrumListNode
specListAttrib = {'defaultDataProcessingRef':
self.element.attrib['defaultDataProcessingRef']
}
specListNode = ETREE.Element('spectrumList', specListAttrib)
runNode.append(specListNode)
#Parse and yield spectrum xml elements
while self.event != 'end' or self.elementTag != 'spectrumList':
if self.event == 'end' and self.elementTag == 'spectrum':
yield self.element
clearParsedElements(self.element)
next(self)
elif self.elementTag == 'chromatogramList':
#Add chromatogramListNode
chromListAttrib = {'defaultDataProcessingRef':
self.element.attrib['defaultDataProcessingRef']
}
chromListNode = ETREE.Element('chromatogramList',
chromListAttrib
)
runNode.append(chromListNode)
#Parse and store chromatogram xml elements
while self.event != 'end' or self.elementTag != 'chromatogramList':
if self.event == 'end' and self.elementTag == 'chromatogram':
self.chromatogramList.append(self.element)
#Alternatively also the chromatogram xml
#elements could be yielded:
# yield self.element
# clearParsedElements(self.element)
next(self)
else:
runNode.append(self.element)
next(self)
metadataNode.append(runNode)
break
else:
while self.event != 'end' or self.elementTag != targetTag:
next(self)
metadataNode.append(self.element)
_, _, targetTag = next(self)
recClearTag(metadataNode)
recRemoveTreeFormating(metadataNode)
self.metadataNode = recCopyElement(metadataNode)
self.openfile.close() |
def stop(name, kill=False):
'''
This is a compatibility function which provides the logic for
nspawn.poweroff and nspawn.terminate.
'''
if _sd_version() >= 219:
if kill:
action = 'terminate'
else:
action = 'poweroff'
ret = _machinectl('{0} {1}'.format(action, name))
else:
cmd = 'systemctl stop systemd-nspawn@{0}'.format(name)
ret = __salt__['cmd.run_all'](cmd, python_shell=False)
if ret['retcode'] != 0:
__context__['retcode'] = salt.defaults.exitcodes.EX_UNAVAILABLE
return False
return True | This is a compatibility function which provides the logic for
nspawn.poweroff and nspawn.terminate. | Below is the the instruction that describes the task:
### Input:
This is a compatibility function which provides the logic for
nspawn.poweroff and nspawn.terminate.
### Response:
def stop(name, kill=False):
'''
This is a compatibility function which provides the logic for
nspawn.poweroff and nspawn.terminate.
'''
if _sd_version() >= 219:
if kill:
action = 'terminate'
else:
action = 'poweroff'
ret = _machinectl('{0} {1}'.format(action, name))
else:
cmd = 'systemctl stop systemd-nspawn@{0}'.format(name)
ret = __salt__['cmd.run_all'](cmd, python_shell=False)
if ret['retcode'] != 0:
__context__['retcode'] = salt.defaults.exitcodes.EX_UNAVAILABLE
return False
return True |
def owner(*paths, **kwargs):
'''
.. versionadded:: 2014.7.0
Return the name of the package that owns the file. Multiple file paths can
be passed. Like :mod:`pkg.version <salt.modules.yumpkg.version>`, if a
single path is passed, a string will be returned, and if multiple paths are
passed, a dictionary of file/package name pairs will be returned.
If the file is not owned by a package, or is not present on the minion,
then an empty string will be returned for that path.
CLI Examples:
.. code-block:: bash
salt '*' pkg.owner /usr/bin/apachectl
salt '*' pkg.owner /usr/bin/apachectl /etc/httpd/conf/httpd.conf
'''
if not paths:
return ''
ret = {}
cmd_prefix = ['rpm', '-qf', '--queryformat', '%{name}']
for path in paths:
ret[path] = __salt__['cmd.run_stdout'](
cmd_prefix + [path],
output_loglevel='trace',
python_shell=False
)
if 'not owned' in ret[path].lower():
ret[path] = ''
if len(ret) == 1:
return next(six.itervalues(ret))
return ret | .. versionadded:: 2014.7.0
Return the name of the package that owns the file. Multiple file paths can
be passed. Like :mod:`pkg.version <salt.modules.yumpkg.version>`, if a
single path is passed, a string will be returned, and if multiple paths are
passed, a dictionary of file/package name pairs will be returned.
If the file is not owned by a package, or is not present on the minion,
then an empty string will be returned for that path.
CLI Examples:
.. code-block:: bash
salt '*' pkg.owner /usr/bin/apachectl
salt '*' pkg.owner /usr/bin/apachectl /etc/httpd/conf/httpd.conf | Below is the the instruction that describes the task:
### Input:
.. versionadded:: 2014.7.0
Return the name of the package that owns the file. Multiple file paths can
be passed. Like :mod:`pkg.version <salt.modules.yumpkg.version>`, if a
single path is passed, a string will be returned, and if multiple paths are
passed, a dictionary of file/package name pairs will be returned.
If the file is not owned by a package, or is not present on the minion,
then an empty string will be returned for that path.
CLI Examples:
.. code-block:: bash
salt '*' pkg.owner /usr/bin/apachectl
salt '*' pkg.owner /usr/bin/apachectl /etc/httpd/conf/httpd.conf
### Response:
def owner(*paths, **kwargs):
'''
.. versionadded:: 2014.7.0
Return the name of the package that owns the file. Multiple file paths can
be passed. Like :mod:`pkg.version <salt.modules.yumpkg.version>`, if a
single path is passed, a string will be returned, and if multiple paths are
passed, a dictionary of file/package name pairs will be returned.
If the file is not owned by a package, or is not present on the minion,
then an empty string will be returned for that path.
CLI Examples:
.. code-block:: bash
salt '*' pkg.owner /usr/bin/apachectl
salt '*' pkg.owner /usr/bin/apachectl /etc/httpd/conf/httpd.conf
'''
if not paths:
return ''
ret = {}
cmd_prefix = ['rpm', '-qf', '--queryformat', '%{name}']
for path in paths:
ret[path] = __salt__['cmd.run_stdout'](
cmd_prefix + [path],
output_loglevel='trace',
python_shell=False
)
if 'not owned' in ret[path].lower():
ret[path] = ''
if len(ret) == 1:
return next(six.itervalues(ret))
return ret |
def append(self, item):
"""Adds a new item to the end of the collection."""
if len(self) == 0:
# Special case, we make this the current item
self.index = 0
self.items.append(item) | Adds a new item to the end of the collection. | Below is the the instruction that describes the task:
### Input:
Adds a new item to the end of the collection.
### Response:
def append(self, item):
"""Adds a new item to the end of the collection."""
if len(self) == 0:
# Special case, we make this the current item
self.index = 0
self.items.append(item) |
def load_settings(path, setttings_only = True):
"""
loads the settings that has been save with Script.save_b26.
Args:
path: path to folder saved by Script.save_b26
setttings_only: if true returns only the settings if the .b26 file contains only a single script
Returns:
a dictionary with the settings
"""
# check that path exists
if not os.path.exists(path):
print(path)
raise AttributeError('Path given does not exist!')
tag = '_'.join(os.path.basename(os.path.dirname(os.path.abspath(path) + '/')).split('_')[3:])
search_str = os.path.abspath(path)+'/*'+tag +'.b26'
fname = glob.glob(search_str)
if len(fname)>1:
print(('warning more than one .b26 file found, loading ', fname[0]))
elif len(fname) == 0:
print(('no .b26 file found in folder {:s}, check path !'.format(search_str)))
return
fname = fname[0]
fname = Script.check_filename(fname)
settings = load_b26_file(fname)['scripts']
if len(list(settings.keys())) == 1 and setttings_only:
settings = settings[list(settings.keys())[0]]['settings']
return settings | loads the settings that has been save with Script.save_b26.
Args:
path: path to folder saved by Script.save_b26
setttings_only: if true returns only the settings if the .b26 file contains only a single script
Returns:
a dictionary with the settings | Below is the the instruction that describes the task:
### Input:
loads the settings that has been save with Script.save_b26.
Args:
path: path to folder saved by Script.save_b26
setttings_only: if true returns only the settings if the .b26 file contains only a single script
Returns:
a dictionary with the settings
### Response:
def load_settings(path, setttings_only = True):
"""
loads the settings that has been save with Script.save_b26.
Args:
path: path to folder saved by Script.save_b26
setttings_only: if true returns only the settings if the .b26 file contains only a single script
Returns:
a dictionary with the settings
"""
# check that path exists
if not os.path.exists(path):
print(path)
raise AttributeError('Path given does not exist!')
tag = '_'.join(os.path.basename(os.path.dirname(os.path.abspath(path) + '/')).split('_')[3:])
search_str = os.path.abspath(path)+'/*'+tag +'.b26'
fname = glob.glob(search_str)
if len(fname)>1:
print(('warning more than one .b26 file found, loading ', fname[0]))
elif len(fname) == 0:
print(('no .b26 file found in folder {:s}, check path !'.format(search_str)))
return
fname = fname[0]
fname = Script.check_filename(fname)
settings = load_b26_file(fname)['scripts']
if len(list(settings.keys())) == 1 and setttings_only:
settings = settings[list(settings.keys())[0]]['settings']
return settings |
def debug(self, msg, *args, **kwargs):
""" Log a message with DEBUG level. Automatically includes stack info
unless it is specifically not included. """
kwargs.setdefault('inc_stackinfo', True)
self.log(DEBUG, msg, args, **kwargs) | Log a message with DEBUG level. Automatically includes stack info
unless it is specifically not included. | Below is the the instruction that describes the task:
### Input:
Log a message with DEBUG level. Automatically includes stack info
unless it is specifically not included.
### Response:
def debug(self, msg, *args, **kwargs):
""" Log a message with DEBUG level. Automatically includes stack info
unless it is specifically not included. """
kwargs.setdefault('inc_stackinfo', True)
self.log(DEBUG, msg, args, **kwargs) |
def _forwards(apps, schema_editor):
"""
Make sure that the MarkupItem model actually points
to the correct proxy model, that implements the given language.
"""
# Need to work on the actual models here.
from fluent_contents.plugins.markup.models import LANGUAGE_MODEL_CLASSES
from fluent_contents.plugins.markup.models import MarkupItem
from django.contrib.contenttypes.models import ContentType
ctype = ContentType.objects.get_for_model(MarkupItem)
for language, proxy_model in LANGUAGE_MODEL_CLASSES.items():
proxy_ctype = ContentType.objects.get_for_model(proxy_model, for_concrete_model=False)
MarkupItem.objects.filter(
polymorphic_ctype=ctype, language=language
).update(
polymorphic_ctype=proxy_ctype
) | Make sure that the MarkupItem model actually points
to the correct proxy model, that implements the given language. | Below is the the instruction that describes the task:
### Input:
Make sure that the MarkupItem model actually points
to the correct proxy model, that implements the given language.
### Response:
def _forwards(apps, schema_editor):
"""
Make sure that the MarkupItem model actually points
to the correct proxy model, that implements the given language.
"""
# Need to work on the actual models here.
from fluent_contents.plugins.markup.models import LANGUAGE_MODEL_CLASSES
from fluent_contents.plugins.markup.models import MarkupItem
from django.contrib.contenttypes.models import ContentType
ctype = ContentType.objects.get_for_model(MarkupItem)
for language, proxy_model in LANGUAGE_MODEL_CLASSES.items():
proxy_ctype = ContentType.objects.get_for_model(proxy_model, for_concrete_model=False)
MarkupItem.objects.filter(
polymorphic_ctype=ctype, language=language
).update(
polymorphic_ctype=proxy_ctype
) |
def write_scatterfunction(self, job, scattername):
'''
Writes out a python function for each WDL "scatter" object.
'''
scatter_outputs = self.fetch_scatter_outputs(job)
# write the function header
fn_section = self.write_scatterfunction_header(scattername)
# write the scatter definitions
fn_section += self.write_scatterfunction_lists(scatter_outputs)
# write
fn_section += self.write_scatterfunction_loop(job, scatter_outputs)
# write the outputs for the task to return
fn_section += self.write_scatterfunction_outputreturn(scatter_outputs)
return fn_section | Writes out a python function for each WDL "scatter" object. | Below is the the instruction that describes the task:
### Input:
Writes out a python function for each WDL "scatter" object.
### Response:
def write_scatterfunction(self, job, scattername):
'''
Writes out a python function for each WDL "scatter" object.
'''
scatter_outputs = self.fetch_scatter_outputs(job)
# write the function header
fn_section = self.write_scatterfunction_header(scattername)
# write the scatter definitions
fn_section += self.write_scatterfunction_lists(scatter_outputs)
# write
fn_section += self.write_scatterfunction_loop(job, scatter_outputs)
# write the outputs for the task to return
fn_section += self.write_scatterfunction_outputreturn(scatter_outputs)
return fn_section |
def swag_from(
specs=None, filetype=None, endpoint=None, methods=None,
validation=False, schema_id=None, data=None, definition=None,
validation_function=None, validation_error_handler=None):
"""
Takes a filename.yml, a dictionary or object and loads swagger specs.
:param specs: a filepath, a dictionary or an object
:param filetype: yml or yaml (json and py to be implemented)
:param endpoint: endpoint to build definition name
:param methods: method to build method based specs
:param validation: perform validation?
:param schema_id: Definition id ot name to use for validation
:param data: data to validate (default is request.json)
:param definition: alias to schema_id
:param validation_function:
custom validation function which takes the positional
arguments: data to be validated at first and schema to validate
against at second
:param validation_error_handler: custom function to handle
exceptions thrown when validating which takes the exception
thrown as the first, the data being validated as the second and
the schema being used to validate as the third argument
"""
def resolve_path(function, filepath):
if not filepath.startswith('/'):
if not hasattr(function, 'root_path'):
function.root_path = get_root_path(function)
res = os.path.join(function.root_path, filepath)
return res
return filepath
def set_from_filepath(function):
final_filepath = resolve_path(function, specs)
function.swag_type = filetype or specs.split('.')[-1]
if endpoint or methods:
if not hasattr(function, 'swag_paths'):
function.swag_paths = {}
if not endpoint and not methods:
function.swag_path = final_filepath
elif endpoint and methods:
for verb in methods:
key = "{}_{}".format(endpoint, verb.lower())
function.swag_paths[key] = final_filepath
elif endpoint and not methods:
function.swag_paths[endpoint] = final_filepath
elif methods and not endpoint:
for verb in methods:
function.swag_paths[verb.lower()] = final_filepath
def set_from_specs_dict(function):
function.specs_dict = specs
def decorator(function):
if isinstance(specs, string_types):
set_from_filepath(function)
# function must have or a single swag_path or a list of them
swag_path = getattr(function, 'swag_path', None)
swag_paths = getattr(function, 'swag_paths', None)
validate_args = {
'filepath': swag_path or swag_paths,
'root': getattr(function, 'root_path', None)
}
if isinstance(specs, dict):
set_from_specs_dict(function)
validate_args = {'specs': specs}
@wraps(function)
def wrapper(*args, **kwargs):
if validation is True:
validate(
data,
schema_id or definition,
validation_function=validation_function,
validation_error_handler=validation_error_handler,
**validate_args
)
return function(*args, **kwargs)
return wrapper
return decorator | Takes a filename.yml, a dictionary or object and loads swagger specs.
:param specs: a filepath, a dictionary or an object
:param filetype: yml or yaml (json and py to be implemented)
:param endpoint: endpoint to build definition name
:param methods: method to build method based specs
:param validation: perform validation?
:param schema_id: Definition id ot name to use for validation
:param data: data to validate (default is request.json)
:param definition: alias to schema_id
:param validation_function:
custom validation function which takes the positional
arguments: data to be validated at first and schema to validate
against at second
:param validation_error_handler: custom function to handle
exceptions thrown when validating which takes the exception
thrown as the first, the data being validated as the second and
the schema being used to validate as the third argument | Below is the the instruction that describes the task:
### Input:
Takes a filename.yml, a dictionary or object and loads swagger specs.
:param specs: a filepath, a dictionary or an object
:param filetype: yml or yaml (json and py to be implemented)
:param endpoint: endpoint to build definition name
:param methods: method to build method based specs
:param validation: perform validation?
:param schema_id: Definition id ot name to use for validation
:param data: data to validate (default is request.json)
:param definition: alias to schema_id
:param validation_function:
custom validation function which takes the positional
arguments: data to be validated at first and schema to validate
against at second
:param validation_error_handler: custom function to handle
exceptions thrown when validating which takes the exception
thrown as the first, the data being validated as the second and
the schema being used to validate as the third argument
### Response:
def swag_from(
specs=None, filetype=None, endpoint=None, methods=None,
validation=False, schema_id=None, data=None, definition=None,
validation_function=None, validation_error_handler=None):
"""
Takes a filename.yml, a dictionary or object and loads swagger specs.
:param specs: a filepath, a dictionary or an object
:param filetype: yml or yaml (json and py to be implemented)
:param endpoint: endpoint to build definition name
:param methods: method to build method based specs
:param validation: perform validation?
:param schema_id: Definition id ot name to use for validation
:param data: data to validate (default is request.json)
:param definition: alias to schema_id
:param validation_function:
custom validation function which takes the positional
arguments: data to be validated at first and schema to validate
against at second
:param validation_error_handler: custom function to handle
exceptions thrown when validating which takes the exception
thrown as the first, the data being validated as the second and
the schema being used to validate as the third argument
"""
def resolve_path(function, filepath):
if not filepath.startswith('/'):
if not hasattr(function, 'root_path'):
function.root_path = get_root_path(function)
res = os.path.join(function.root_path, filepath)
return res
return filepath
def set_from_filepath(function):
final_filepath = resolve_path(function, specs)
function.swag_type = filetype or specs.split('.')[-1]
if endpoint or methods:
if not hasattr(function, 'swag_paths'):
function.swag_paths = {}
if not endpoint and not methods:
function.swag_path = final_filepath
elif endpoint and methods:
for verb in methods:
key = "{}_{}".format(endpoint, verb.lower())
function.swag_paths[key] = final_filepath
elif endpoint and not methods:
function.swag_paths[endpoint] = final_filepath
elif methods and not endpoint:
for verb in methods:
function.swag_paths[verb.lower()] = final_filepath
def set_from_specs_dict(function):
function.specs_dict = specs
def decorator(function):
if isinstance(specs, string_types):
set_from_filepath(function)
# function must have or a single swag_path or a list of them
swag_path = getattr(function, 'swag_path', None)
swag_paths = getattr(function, 'swag_paths', None)
validate_args = {
'filepath': swag_path or swag_paths,
'root': getattr(function, 'root_path', None)
}
if isinstance(specs, dict):
set_from_specs_dict(function)
validate_args = {'specs': specs}
@wraps(function)
def wrapper(*args, **kwargs):
if validation is True:
validate(
data,
schema_id or definition,
validation_function=validation_function,
validation_error_handler=validation_error_handler,
**validate_args
)
return function(*args, **kwargs)
return wrapper
return decorator |
def tempfile(self, mode='wb', **args):
"write the contents of the file to a tempfile and return the tempfile filename"
tf = tempfile.NamedTemporaryFile(mode=mode)
self.write(tf.name, mode=mode, **args)
return tfn | write the contents of the file to a tempfile and return the tempfile filename | Below is the the instruction that describes the task:
### Input:
write the contents of the file to a tempfile and return the tempfile filename
### Response:
def tempfile(self, mode='wb', **args):
"write the contents of the file to a tempfile and return the tempfile filename"
tf = tempfile.NamedTemporaryFile(mode=mode)
self.write(tf.name, mode=mode, **args)
return tfn |
def lcm( *a ):
"""Least common multiple.
Usage: lcm( [ 3, 4, 5 ] )
or: lcm( 3, 4, 5 )
"""
if len( a ) > 1: return reduce( lcm2, a )
if hasattr( a[0], "__iter__" ): return reduce( lcm2, a[0] )
return a[0] | Least common multiple.
Usage: lcm( [ 3, 4, 5 ] )
or: lcm( 3, 4, 5 ) | Below is the the instruction that describes the task:
### Input:
Least common multiple.
Usage: lcm( [ 3, 4, 5 ] )
or: lcm( 3, 4, 5 )
### Response:
def lcm( *a ):
"""Least common multiple.
Usage: lcm( [ 3, 4, 5 ] )
or: lcm( 3, 4, 5 )
"""
if len( a ) > 1: return reduce( lcm2, a )
if hasattr( a[0], "__iter__" ): return reduce( lcm2, a[0] )
return a[0] |
def _encode_multipart(**kw):
' build a multipart/form-data body with randomly generated boundary '
boundary = '----------%s' % hex(int(time.time() * 1000))
data = []
for k, v in kw.iteritems():
data.append('--%s' % boundary)
if hasattr(v, 'read'):
# file-like object:
filename = getattr(v, 'name', '')
content = v.read()
data.append('Content-Disposition: form-data; name="%s"; filename="hidden"' % k)
data.append('Content-Length: %d' % len(content))
data.append('Content-Type: %s\r\n' % _guess_content_type(filename))
data.append(content)
else:
data.append('Content-Disposition: form-data; name="%s"\r\n' % k)
data.append(v.encode('utf-8') if isinstance(v, unicode) else v)
data.append('--%s--\r\n' % boundary)
return '\r\n'.join(data), boundary | build a multipart/form-data body with randomly generated boundary | Below is the the instruction that describes the task:
### Input:
build a multipart/form-data body with randomly generated boundary
### Response:
def _encode_multipart(**kw):
' build a multipart/form-data body with randomly generated boundary '
boundary = '----------%s' % hex(int(time.time() * 1000))
data = []
for k, v in kw.iteritems():
data.append('--%s' % boundary)
if hasattr(v, 'read'):
# file-like object:
filename = getattr(v, 'name', '')
content = v.read()
data.append('Content-Disposition: form-data; name="%s"; filename="hidden"' % k)
data.append('Content-Length: %d' % len(content))
data.append('Content-Type: %s\r\n' % _guess_content_type(filename))
data.append(content)
else:
data.append('Content-Disposition: form-data; name="%s"\r\n' % k)
data.append(v.encode('utf-8') if isinstance(v, unicode) else v)
data.append('--%s--\r\n' % boundary)
return '\r\n'.join(data), boundary |
def _set_boolean_property(self, propname, value):
"""
Generalized setter for boolean properties on the ``<a:tblPr>`` child
element, setting *propname* attribute appropriately based on *value*.
If *value* is True, the attribute is set to "1"; a tblPr child
element is added if necessary. If *value* is False, the *propname*
attribute is removed if present, allowing its default value of False
to be its effective value.
"""
if value not in (True, False):
raise ValueError(
"assigned value must be either True or False, got %s" %
value
)
tblPr = self.get_or_add_tblPr()
setattr(tblPr, propname, value) | Generalized setter for boolean properties on the ``<a:tblPr>`` child
element, setting *propname* attribute appropriately based on *value*.
If *value* is True, the attribute is set to "1"; a tblPr child
element is added if necessary. If *value* is False, the *propname*
attribute is removed if present, allowing its default value of False
to be its effective value. | Below is the the instruction that describes the task:
### Input:
Generalized setter for boolean properties on the ``<a:tblPr>`` child
element, setting *propname* attribute appropriately based on *value*.
If *value* is True, the attribute is set to "1"; a tblPr child
element is added if necessary. If *value* is False, the *propname*
attribute is removed if present, allowing its default value of False
to be its effective value.
### Response:
def _set_boolean_property(self, propname, value):
"""
Generalized setter for boolean properties on the ``<a:tblPr>`` child
element, setting *propname* attribute appropriately based on *value*.
If *value* is True, the attribute is set to "1"; a tblPr child
element is added if necessary. If *value* is False, the *propname*
attribute is removed if present, allowing its default value of False
to be its effective value.
"""
if value not in (True, False):
raise ValueError(
"assigned value must be either True or False, got %s" %
value
)
tblPr = self.get_or_add_tblPr()
setattr(tblPr, propname, value) |
def subdivide(self, face_index=None):
"""
Subdivide a mesh, with each subdivided face replaced with four
smaller faces.
Parameters
----------
face_index: (m,) int or None
If None all faces of mesh will be subdivided
If (m,) int array of indices: only specified faces will be
subdivided. Note that in this case the mesh will generally
no longer be manifold, as the additional vertex on the midpoint
will not be used by the adjacent faces to the faces specified,
and an additional postprocessing step will be required to
make resulting mesh watertight
"""
vertices, faces = remesh.subdivide(vertices=self.vertices,
faces=self.faces,
face_index=face_index)
return Trimesh(vertices=vertices, faces=faces) | Subdivide a mesh, with each subdivided face replaced with four
smaller faces.
Parameters
----------
face_index: (m,) int or None
If None all faces of mesh will be subdivided
If (m,) int array of indices: only specified faces will be
subdivided. Note that in this case the mesh will generally
no longer be manifold, as the additional vertex on the midpoint
will not be used by the adjacent faces to the faces specified,
and an additional postprocessing step will be required to
make resulting mesh watertight | Below is the the instruction that describes the task:
### Input:
Subdivide a mesh, with each subdivided face replaced with four
smaller faces.
Parameters
----------
face_index: (m,) int or None
If None all faces of mesh will be subdivided
If (m,) int array of indices: only specified faces will be
subdivided. Note that in this case the mesh will generally
no longer be manifold, as the additional vertex on the midpoint
will not be used by the adjacent faces to the faces specified,
and an additional postprocessing step will be required to
make resulting mesh watertight
### Response:
def subdivide(self, face_index=None):
"""
Subdivide a mesh, with each subdivided face replaced with four
smaller faces.
Parameters
----------
face_index: (m,) int or None
If None all faces of mesh will be subdivided
If (m,) int array of indices: only specified faces will be
subdivided. Note that in this case the mesh will generally
no longer be manifold, as the additional vertex on the midpoint
will not be used by the adjacent faces to the faces specified,
and an additional postprocessing step will be required to
make resulting mesh watertight
"""
vertices, faces = remesh.subdivide(vertices=self.vertices,
faces=self.faces,
face_index=face_index)
return Trimesh(vertices=vertices, faces=faces) |
def stop_to_stop_networks_by_type(gtfs):
"""
Compute stop-to-stop networks for all travel modes (route_types).
Parameters
----------
gtfs: gtfspy.GTFS
Returns
-------
dict: dict[int, networkx.DiGraph]
keys should be one of route_types.ALL_ROUTE_TYPES (i.e. GTFS route_types)
"""
route_type_to_network = dict()
for route_type in route_types.ALL_ROUTE_TYPES:
if route_type == route_types.WALK:
net = walk_transfer_stop_to_stop_network(gtfs)
else:
net = stop_to_stop_network_for_route_type(gtfs, route_type)
route_type_to_network[route_type] = net
assert len(route_type_to_network) == len(route_types.ALL_ROUTE_TYPES)
return route_type_to_network | Compute stop-to-stop networks for all travel modes (route_types).
Parameters
----------
gtfs: gtfspy.GTFS
Returns
-------
dict: dict[int, networkx.DiGraph]
keys should be one of route_types.ALL_ROUTE_TYPES (i.e. GTFS route_types) | Below is the the instruction that describes the task:
### Input:
Compute stop-to-stop networks for all travel modes (route_types).
Parameters
----------
gtfs: gtfspy.GTFS
Returns
-------
dict: dict[int, networkx.DiGraph]
keys should be one of route_types.ALL_ROUTE_TYPES (i.e. GTFS route_types)
### Response:
def stop_to_stop_networks_by_type(gtfs):
"""
Compute stop-to-stop networks for all travel modes (route_types).
Parameters
----------
gtfs: gtfspy.GTFS
Returns
-------
dict: dict[int, networkx.DiGraph]
keys should be one of route_types.ALL_ROUTE_TYPES (i.e. GTFS route_types)
"""
route_type_to_network = dict()
for route_type in route_types.ALL_ROUTE_TYPES:
if route_type == route_types.WALK:
net = walk_transfer_stop_to_stop_network(gtfs)
else:
net = stop_to_stop_network_for_route_type(gtfs, route_type)
route_type_to_network[route_type] = net
assert len(route_type_to_network) == len(route_types.ALL_ROUTE_TYPES)
return route_type_to_network |
def get_confirmed_blockhash(self):
""" Gets the block CONFIRMATION_BLOCKS in the past and returns its block hash """
confirmed_block_number = self.web3.eth.blockNumber - self.default_block_num_confirmations
if confirmed_block_number < 0:
confirmed_block_number = 0
return self.blockhash_from_blocknumber(confirmed_block_number) | Gets the block CONFIRMATION_BLOCKS in the past and returns its block hash | Below is the the instruction that describes the task:
### Input:
Gets the block CONFIRMATION_BLOCKS in the past and returns its block hash
### Response:
def get_confirmed_blockhash(self):
""" Gets the block CONFIRMATION_BLOCKS in the past and returns its block hash """
confirmed_block_number = self.web3.eth.blockNumber - self.default_block_num_confirmations
if confirmed_block_number < 0:
confirmed_block_number = 0
return self.blockhash_from_blocknumber(confirmed_block_number) |
def make_frame(self, frame, birthframe, startframe, stopframe, deathframe, noiseframe=None):
"""
animation happens between startframe and stopframe
the value is None before aliveframe, and after deathframe
* if aliveframe is not specified it defaults to startframe
* if deathframe is not specified it defaults to stopframe
initial value is held from aliveframe to startframe
final value is held from stopfrome to deathframe
"""
if birthframe is None:
birthframe = startframe
if deathframe is None:
deathframe = stopframe
if frame < birthframe:
return None
if frame > deathframe:
return None
if frame < startframe:
return self.frm
if frame > stopframe:
return self.to
t = self.T.tween2(frame, startframe, stopframe)
newval = Mapping.linlin(t, 0, 1, self.frm, self.to)
if self.noise_fn is not None:
if noiseframe is not None:
nf = noiseframe
else:
nf = t
noise_val = self.noise_fn(newval, nf)
else:
noise_val = 0
return newval + noise_val | animation happens between startframe and stopframe
the value is None before aliveframe, and after deathframe
* if aliveframe is not specified it defaults to startframe
* if deathframe is not specified it defaults to stopframe
initial value is held from aliveframe to startframe
final value is held from stopfrome to deathframe | Below is the the instruction that describes the task:
### Input:
animation happens between startframe and stopframe
the value is None before aliveframe, and after deathframe
* if aliveframe is not specified it defaults to startframe
* if deathframe is not specified it defaults to stopframe
initial value is held from aliveframe to startframe
final value is held from stopfrome to deathframe
### Response:
def make_frame(self, frame, birthframe, startframe, stopframe, deathframe, noiseframe=None):
"""
animation happens between startframe and stopframe
the value is None before aliveframe, and after deathframe
* if aliveframe is not specified it defaults to startframe
* if deathframe is not specified it defaults to stopframe
initial value is held from aliveframe to startframe
final value is held from stopfrome to deathframe
"""
if birthframe is None:
birthframe = startframe
if deathframe is None:
deathframe = stopframe
if frame < birthframe:
return None
if frame > deathframe:
return None
if frame < startframe:
return self.frm
if frame > stopframe:
return self.to
t = self.T.tween2(frame, startframe, stopframe)
newval = Mapping.linlin(t, 0, 1, self.frm, self.to)
if self.noise_fn is not None:
if noiseframe is not None:
nf = noiseframe
else:
nf = t
noise_val = self.noise_fn(newval, nf)
else:
noise_val = 0
return newval + noise_val |
def get_section(file_name, section, separator='='):
'''
Retrieve a section from an ini file. Returns the section as dictionary. If
the section is not found, an empty dictionary is returned.
API Example:
.. code-block:: python
import salt
sc = salt.client.get_local_client()
sc.cmd('target', 'ini.get_section',
[path_to_ini_file, section_name])
CLI Example:
.. code-block:: bash
salt '*' ini.get_section /path/to/ini section_name
'''
inifile = _Ini.get_ini_file(file_name, separator=separator)
ret = {}
for key, value in six.iteritems(inifile.get(section, {})):
if key[0] != '#':
ret.update({key: value})
return ret | Retrieve a section from an ini file. Returns the section as dictionary. If
the section is not found, an empty dictionary is returned.
API Example:
.. code-block:: python
import salt
sc = salt.client.get_local_client()
sc.cmd('target', 'ini.get_section',
[path_to_ini_file, section_name])
CLI Example:
.. code-block:: bash
salt '*' ini.get_section /path/to/ini section_name | Below is the the instruction that describes the task:
### Input:
Retrieve a section from an ini file. Returns the section as dictionary. If
the section is not found, an empty dictionary is returned.
API Example:
.. code-block:: python
import salt
sc = salt.client.get_local_client()
sc.cmd('target', 'ini.get_section',
[path_to_ini_file, section_name])
CLI Example:
.. code-block:: bash
salt '*' ini.get_section /path/to/ini section_name
### Response:
def get_section(file_name, section, separator='='):
'''
Retrieve a section from an ini file. Returns the section as dictionary. If
the section is not found, an empty dictionary is returned.
API Example:
.. code-block:: python
import salt
sc = salt.client.get_local_client()
sc.cmd('target', 'ini.get_section',
[path_to_ini_file, section_name])
CLI Example:
.. code-block:: bash
salt '*' ini.get_section /path/to/ini section_name
'''
inifile = _Ini.get_ini_file(file_name, separator=separator)
ret = {}
for key, value in six.iteritems(inifile.get(section, {})):
if key[0] != '#':
ret.update({key: value})
return ret |
def _set_process(self, v, load=False):
"""
Setter method for process, mapped from YANG variable /rbridge_id/resource_monitor/process (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_process is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_process() directly.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=process.process, is_container='container', presence=False, yang_name="process", rest_name="process", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Process based resource monitor', u'callpoint': u'rmconfig', u'cli-incomplete-no': None}}, namespace='urn:brocade.com:mgmt:brocade-resource-monitor', defining_module='brocade-resource-monitor', yang_type='container', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """process must be of a type compatible with container""",
'defined-type': "container",
'generated-type': """YANGDynClass(base=process.process, is_container='container', presence=False, yang_name="process", rest_name="process", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Process based resource monitor', u'callpoint': u'rmconfig', u'cli-incomplete-no': None}}, namespace='urn:brocade.com:mgmt:brocade-resource-monitor', defining_module='brocade-resource-monitor', yang_type='container', is_config=True)""",
})
self.__process = t
if hasattr(self, '_set'):
self._set() | Setter method for process, mapped from YANG variable /rbridge_id/resource_monitor/process (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_process is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_process() directly. | Below is the the instruction that describes the task:
### Input:
Setter method for process, mapped from YANG variable /rbridge_id/resource_monitor/process (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_process is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_process() directly.
### Response:
def _set_process(self, v, load=False):
"""
Setter method for process, mapped from YANG variable /rbridge_id/resource_monitor/process (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_process is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_process() directly.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=process.process, is_container='container', presence=False, yang_name="process", rest_name="process", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Process based resource monitor', u'callpoint': u'rmconfig', u'cli-incomplete-no': None}}, namespace='urn:brocade.com:mgmt:brocade-resource-monitor', defining_module='brocade-resource-monitor', yang_type='container', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """process must be of a type compatible with container""",
'defined-type': "container",
'generated-type': """YANGDynClass(base=process.process, is_container='container', presence=False, yang_name="process", rest_name="process", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Process based resource monitor', u'callpoint': u'rmconfig', u'cli-incomplete-no': None}}, namespace='urn:brocade.com:mgmt:brocade-resource-monitor', defining_module='brocade-resource-monitor', yang_type='container', is_config=True)""",
})
self.__process = t
if hasattr(self, '_set'):
self._set() |
def __is_valid_pos(pos_tuple, valid_pos):
# type: (Tuple[text_type,...],List[Tuple[text_type,...]])->bool
"""This function checks token's pos is with in POS set that user specified.
If token meets all conditions, Return True; else return False
"""
def is_valid_pos(valid_pos_tuple):
# type: (Tuple[text_type,...])->bool
length_valid_pos_tuple = len(valid_pos_tuple)
if valid_pos_tuple == pos_tuple[:length_valid_pos_tuple]:
return True
else:
return False
seq_bool_flags = [is_valid_pos(valid_pos_tuple) for valid_pos_tuple in valid_pos]
if True in set(seq_bool_flags):
return True
else:
return False | This function checks token's pos is with in POS set that user specified.
If token meets all conditions, Return True; else return False | Below is the the instruction that describes the task:
### Input:
This function checks token's pos is with in POS set that user specified.
If token meets all conditions, Return True; else return False
### Response:
def __is_valid_pos(pos_tuple, valid_pos):
# type: (Tuple[text_type,...],List[Tuple[text_type,...]])->bool
"""This function checks token's pos is with in POS set that user specified.
If token meets all conditions, Return True; else return False
"""
def is_valid_pos(valid_pos_tuple):
# type: (Tuple[text_type,...])->bool
length_valid_pos_tuple = len(valid_pos_tuple)
if valid_pos_tuple == pos_tuple[:length_valid_pos_tuple]:
return True
else:
return False
seq_bool_flags = [is_valid_pos(valid_pos_tuple) for valid_pos_tuple in valid_pos]
if True in set(seq_bool_flags):
return True
else:
return False |
def fix_config(self, options):
"""
Fixes the options, if necessary. I.e., it adds all required elements to the dictionary.
:param options: the options to fix
:type options: dict
:return: the (potentially) fixed options
:rtype: dict
"""
options = super(CombineStorage, self).fix_config(options)
opt = "format"
if opt not in options:
options[opt] = ""
if opt not in self.help:
self.help[opt] = "The format to use for generating the combined string; use '@{blah}' for accessing "\
"storage item 'blah' (string)."
return options | Fixes the options, if necessary. I.e., it adds all required elements to the dictionary.
:param options: the options to fix
:type options: dict
:return: the (potentially) fixed options
:rtype: dict | Below is the the instruction that describes the task:
### Input:
Fixes the options, if necessary. I.e., it adds all required elements to the dictionary.
:param options: the options to fix
:type options: dict
:return: the (potentially) fixed options
:rtype: dict
### Response:
def fix_config(self, options):
"""
Fixes the options, if necessary. I.e., it adds all required elements to the dictionary.
:param options: the options to fix
:type options: dict
:return: the (potentially) fixed options
:rtype: dict
"""
options = super(CombineStorage, self).fix_config(options)
opt = "format"
if opt not in options:
options[opt] = ""
if opt not in self.help:
self.help[opt] = "The format to use for generating the combined string; use '@{blah}' for accessing "\
"storage item 'blah' (string)."
return options |
def error(msg, log_file=None):
"""Print, output error message and raise RuntimeError."""
UtilClass.print_msg(msg + os.linesep)
if log_file is not None:
UtilClass.writelog(log_file, msg, 'append')
raise RuntimeError(msg) | Print, output error message and raise RuntimeError. | Below is the the instruction that describes the task:
### Input:
Print, output error message and raise RuntimeError.
### Response:
def error(msg, log_file=None):
"""Print, output error message and raise RuntimeError."""
UtilClass.print_msg(msg + os.linesep)
if log_file is not None:
UtilClass.writelog(log_file, msg, 'append')
raise RuntimeError(msg) |
def restore_cmd(argv):
"""Try to restore a broken virtualenv by reinstalling the same python version on top of it"""
if len(argv) < 1:
sys.exit('You must provide a valid virtualenv to target')
env = argv[0]
path = workon_home / env
py = path / env_bin_dir / ('python.exe' if windows else 'python')
exact_py = py.resolve().name
return check_call([sys.executable, "-m", "virtualenv", str(path.absolute()), "--python=%s" % exact_py]) | Try to restore a broken virtualenv by reinstalling the same python version on top of it | Below is the the instruction that describes the task:
### Input:
Try to restore a broken virtualenv by reinstalling the same python version on top of it
### Response:
def restore_cmd(argv):
"""Try to restore a broken virtualenv by reinstalling the same python version on top of it"""
if len(argv) < 1:
sys.exit('You must provide a valid virtualenv to target')
env = argv[0]
path = workon_home / env
py = path / env_bin_dir / ('python.exe' if windows else 'python')
exact_py = py.resolve().name
return check_call([sys.executable, "-m", "virtualenv", str(path.absolute()), "--python=%s" % exact_py]) |
def compile_path(self):
"""Returns the file path to this module taking the pre-processing into account.
If the module requires pre-processing, the extension is reported as F90; otherwise,
the regular self.filepath is returned.
"""
if not self.precompile:
return self.filepath
else:
if self._compile_path is None:
segs = self.filepath.split('.')
segs.pop()
segs.append("F90")
self._compile_path = '.'.join(segs)
return self._compile_path | Returns the file path to this module taking the pre-processing into account.
If the module requires pre-processing, the extension is reported as F90; otherwise,
the regular self.filepath is returned. | Below is the the instruction that describes the task:
### Input:
Returns the file path to this module taking the pre-processing into account.
If the module requires pre-processing, the extension is reported as F90; otherwise,
the regular self.filepath is returned.
### Response:
def compile_path(self):
"""Returns the file path to this module taking the pre-processing into account.
If the module requires pre-processing, the extension is reported as F90; otherwise,
the regular self.filepath is returned.
"""
if not self.precompile:
return self.filepath
else:
if self._compile_path is None:
segs = self.filepath.split('.')
segs.pop()
segs.append("F90")
self._compile_path = '.'.join(segs)
return self._compile_path |
def set_time(self, value: float):
"""
Set the current time. This can be used to jump in the timeline.
Args:
value (float): The new time
"""
if value < 0:
value = 0
self.offset += self.get_time() - value | Set the current time. This can be used to jump in the timeline.
Args:
value (float): The new time | Below is the the instruction that describes the task:
### Input:
Set the current time. This can be used to jump in the timeline.
Args:
value (float): The new time
### Response:
def set_time(self, value: float):
"""
Set the current time. This can be used to jump in the timeline.
Args:
value (float): The new time
"""
if value < 0:
value = 0
self.offset += self.get_time() - value |
def display_missing(self, data, return_bool="any"):
""" ???
Parameters
----------
data : pd.DataFrame()
Input dataframe.
return_bool : bool
???
Returns
-------
pd.DataFrame()
???
"""
if return_bool == "any":
bool_sel = self._find_missing(data, return_bool="any")
elif return_bool == "all":
bool_sel = self._find_missing(data, return_bool="all")
return data[bool_sel] | ???
Parameters
----------
data : pd.DataFrame()
Input dataframe.
return_bool : bool
???
Returns
-------
pd.DataFrame()
??? | Below is the the instruction that describes the task:
### Input:
???
Parameters
----------
data : pd.DataFrame()
Input dataframe.
return_bool : bool
???
Returns
-------
pd.DataFrame()
???
### Response:
def display_missing(self, data, return_bool="any"):
""" ???
Parameters
----------
data : pd.DataFrame()
Input dataframe.
return_bool : bool
???
Returns
-------
pd.DataFrame()
???
"""
if return_bool == "any":
bool_sel = self._find_missing(data, return_bool="any")
elif return_bool == "all":
bool_sel = self._find_missing(data, return_bool="all")
return data[bool_sel] |
def validate(obj, schema):
"""
Validate an object against a schema
Args:
obj (dict):
schema (dict):
"""
if isinstance(obj, str):
obj = json.loads(obj)
return JsonValidator(schema)._validate(obj) | Validate an object against a schema
Args:
obj (dict):
schema (dict): | Below is the the instruction that describes the task:
### Input:
Validate an object against a schema
Args:
obj (dict):
schema (dict):
### Response:
def validate(obj, schema):
"""
Validate an object against a schema
Args:
obj (dict):
schema (dict):
"""
if isinstance(obj, str):
obj = json.loads(obj)
return JsonValidator(schema)._validate(obj) |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.