code stringlengths 75 104k | docstring stringlengths 1 46.9k | text stringlengths 164 112k |
|---|---|---|
def _detectEncoding(self, xml_data, isHTML=False):
"""Given a document, tries to detect its XML encoding."""
# NOTE(review): Python-2-era code -- relies on the `unicode` builtin.
# Pass 1: sniff the leading bytes (BOM or the byte pattern of an '<?'
# prolog) and transcode the whole document to UTF-8 so the regex
# searches below can run on it.
xml_encoding = sniffed_xml_encoding = None
try:
if xml_data[:4] == '\x4c\x6f\xa7\x94':
# EBCDIC
xml_data = self._ebcdic_to_ascii(xml_data)
elif xml_data[:4] == '\x00\x3c\x00\x3f':
# UTF-16BE
sniffed_xml_encoding = 'utf-16be'
xml_data = unicode(xml_data, 'utf-16be').encode('utf-8')
elif (len(xml_data) >= 4) and (xml_data[:2] == '\xfe\xff') \
and (xml_data[2:4] != '\x00\x00'):
# UTF-16BE with BOM
sniffed_xml_encoding = 'utf-16be'
xml_data = unicode(xml_data[2:], 'utf-16be').encode('utf-8')
elif xml_data[:4] == '\x3c\x00\x3f\x00':
# UTF-16LE
sniffed_xml_encoding = 'utf-16le'
xml_data = unicode(xml_data, 'utf-16le').encode('utf-8')
elif (len(xml_data) >= 4) and (xml_data[:2] == '\xff\xfe') and \
(xml_data[2:4] != '\x00\x00'):
# UTF-16LE with BOM
sniffed_xml_encoding = 'utf-16le'
xml_data = unicode(xml_data[2:], 'utf-16le').encode('utf-8')
elif xml_data[:4] == '\x00\x00\x00\x3c':
# UTF-32BE
sniffed_xml_encoding = 'utf-32be'
xml_data = unicode(xml_data, 'utf-32be').encode('utf-8')
elif xml_data[:4] == '\x3c\x00\x00\x00':
# UTF-32LE
sniffed_xml_encoding = 'utf-32le'
xml_data = unicode(xml_data, 'utf-32le').encode('utf-8')
elif xml_data[:4] == '\x00\x00\xfe\xff':
# UTF-32BE with BOM
sniffed_xml_encoding = 'utf-32be'
xml_data = unicode(xml_data[4:], 'utf-32be').encode('utf-8')
elif xml_data[:4] == '\xff\xfe\x00\x00':
# UTF-32LE with BOM
sniffed_xml_encoding = 'utf-32le'
xml_data = unicode(xml_data[4:], 'utf-32le').encode('utf-8')
elif xml_data[:3] == '\xef\xbb\xbf':
# UTF-8 with BOM
sniffed_xml_encoding = 'utf-8'
xml_data = unicode(xml_data[3:], 'utf-8').encode('utf-8')
else:
sniffed_xml_encoding = 'ascii'
pass
# Deliberate best-effort: any decode failure leaves xml_data as-is and
# falls through to the declared-encoding search below (bare except kept
# for compatibility; it also resets any partial match state).
except:
xml_encoding_match = None
# Pass 2: look for an explicitly declared encoding -- XML prolog first,
# then (HTML only) a <meta ... charset=...> tag.
xml_encoding_match = re.compile(
'^<\?.*encoding=[\'"](.*?)[\'"].*\?>').match(xml_data)
if not xml_encoding_match and isHTML:
regexp = re.compile('<\s*meta[^>]+charset=([^>]*?)[;\'">]', re.I)
xml_encoding_match = regexp.search(xml_data)
if xml_encoding_match is not None:
xml_encoding = xml_encoding_match.groups()[0].lower()
if isHTML:
self.declaredHTMLEncoding = xml_encoding
# Declarations from the byte-order-ambiguous UTF-16/UTF-32 families are
# less trustworthy than the sniffed byte order, so prefer the sniff.
if sniffed_xml_encoding and \
(xml_encoding in ('iso-10646-ucs-2', 'ucs-2', 'csunicode',
'iso-10646-ucs-4', 'ucs-4', 'csucs4',
'utf-16', 'utf-32', 'utf_16', 'utf_32',
'utf16', 'u16')):
xml_encoding = sniffed_xml_encoding
return xml_data, xml_encoding, sniffed_xml_encoding | Given a document, tries to detect its XML encoding. | Below is the the instruction that describes the task:
### Input:
Given a document, tries to detect its XML encoding.
### Response:
def _detectEncoding(self, xml_data, isHTML=False):
"""Given a document, tries to detect its XML encoding."""
xml_encoding = sniffed_xml_encoding = None
try:
if xml_data[:4] == '\x4c\x6f\xa7\x94':
# EBCDIC
xml_data = self._ebcdic_to_ascii(xml_data)
elif xml_data[:4] == '\x00\x3c\x00\x3f':
# UTF-16BE
sniffed_xml_encoding = 'utf-16be'
xml_data = unicode(xml_data, 'utf-16be').encode('utf-8')
elif (len(xml_data) >= 4) and (xml_data[:2] == '\xfe\xff') \
and (xml_data[2:4] != '\x00\x00'):
# UTF-16BE with BOM
sniffed_xml_encoding = 'utf-16be'
xml_data = unicode(xml_data[2:], 'utf-16be').encode('utf-8')
elif xml_data[:4] == '\x3c\x00\x3f\x00':
# UTF-16LE
sniffed_xml_encoding = 'utf-16le'
xml_data = unicode(xml_data, 'utf-16le').encode('utf-8')
elif (len(xml_data) >= 4) and (xml_data[:2] == '\xff\xfe') and \
(xml_data[2:4] != '\x00\x00'):
# UTF-16LE with BOM
sniffed_xml_encoding = 'utf-16le'
xml_data = unicode(xml_data[2:], 'utf-16le').encode('utf-8')
elif xml_data[:4] == '\x00\x00\x00\x3c':
# UTF-32BE
sniffed_xml_encoding = 'utf-32be'
xml_data = unicode(xml_data, 'utf-32be').encode('utf-8')
elif xml_data[:4] == '\x3c\x00\x00\x00':
# UTF-32LE
sniffed_xml_encoding = 'utf-32le'
xml_data = unicode(xml_data, 'utf-32le').encode('utf-8')
elif xml_data[:4] == '\x00\x00\xfe\xff':
# UTF-32BE with BOM
sniffed_xml_encoding = 'utf-32be'
xml_data = unicode(xml_data[4:], 'utf-32be').encode('utf-8')
elif xml_data[:4] == '\xff\xfe\x00\x00':
# UTF-32LE with BOM
sniffed_xml_encoding = 'utf-32le'
xml_data = unicode(xml_data[4:], 'utf-32le').encode('utf-8')
elif xml_data[:3] == '\xef\xbb\xbf':
# UTF-8 with BOM
sniffed_xml_encoding = 'utf-8'
xml_data = unicode(xml_data[3:], 'utf-8').encode('utf-8')
else:
sniffed_xml_encoding = 'ascii'
pass
except:
xml_encoding_match = None
xml_encoding_match = re.compile(
'^<\?.*encoding=[\'"](.*?)[\'"].*\?>').match(xml_data)
if not xml_encoding_match and isHTML:
regexp = re.compile('<\s*meta[^>]+charset=([^>]*?)[;\'">]', re.I)
xml_encoding_match = regexp.search(xml_data)
if xml_encoding_match is not None:
xml_encoding = xml_encoding_match.groups()[0].lower()
if isHTML:
self.declaredHTMLEncoding = xml_encoding
if sniffed_xml_encoding and \
(xml_encoding in ('iso-10646-ucs-2', 'ucs-2', 'csunicode',
'iso-10646-ucs-4', 'ucs-4', 'csucs4',
'utf-16', 'utf-32', 'utf_16', 'utf_32',
'utf16', 'u16')):
xml_encoding = sniffed_xml_encoding
return xml_data, xml_encoding, sniffed_xml_encoding |
def ad_hoc_magic_from_file(filename, **kwargs):
"""Ad-hoc emulation of magic.from_file from python-magic."""
# Only the first 16 bytes are needed to recognise the two magics below.
with open(filename, 'rb') as stream:
head = stream.read(16)
if head[:4] == b'\x7fELF':
# ELF executable / shared object.
return b'application/x-executable'
elif head[:2] == b'MZ':
# DOS/Windows "MZ" executable header.
return b'application/x-dosexec'
else:
# Any other file type is out of scope for this stand-in.
raise NotImplementedError() | Ad-hoc emulation of magic.from_file from python-magic. | Below is the the instruction that describes the task:
### Input:
Ad-hoc emulation of magic.from_file from python-magic.
### Response:
def ad_hoc_magic_from_file(filename, **kwargs):
"""Ad-hoc emulation of magic.from_file from python-magic."""
with open(filename, 'rb') as stream:
head = stream.read(16)
if head[:4] == b'\x7fELF':
return b'application/x-executable'
elif head[:2] == b'MZ':
return b'application/x-dosexec'
else:
raise NotImplementedError() |
def create(cls, train_ds, valid_ds, test_ds=None, path:PathOrStr='.', no_check:bool=False, bs=64, val_bs:int=None,
num_workers:int=0, device:torch.device=None, collate_fn:Callable=data_collate,
dl_tfms:Optional[Collection[Callable]]=None, bptt:int=70, backwards:bool=False, **dl_kwargs) -> DataBunch:
"Create a `TextDataBunch` in `path` from the `datasets` for language modelling. Passes `**dl_kwargs` on to `DataLoader()`"
datasets = cls._init_ds(train_ds, valid_ds, test_ds)
val_bs = ifnone(val_bs, bs)
# Wrap each dataset in a LanguageModelPreLoader; only the training set
# (i == 0) is shuffled, and each preloader gets its own batch size.
datasets = [LanguageModelPreLoader(ds, shuffle=(i==0), bs=(bs if i==0 else val_bs), bptt=bptt, backwards=backwards)
for i,ds in enumerate(datasets)]
# NOTE(review): val_bs is reset to bs here, discarding the value resolved
# above -- presumably because the preloaders already consumed the real
# val_bs; confirm this is intentional.
val_bs = bs
dls = [DataLoader(d, b, shuffle=False, **dl_kwargs) for d,b in zip(datasets, (bs,val_bs,val_bs,val_bs)) if d is not None]
return cls(*dls, path=path, device=device, dl_tfms=dl_tfms, collate_fn=collate_fn, no_check=no_check) | Create a `TextDataBunch` in `path` from the `datasets` for language modelling. Passes `**dl_kwargs` on to `DataLoader()` | Below is the the instruction that describes the task:
### Input:
Create a `TextDataBunch` in `path` from the `datasets` for language modelling. Passes `**dl_kwargs` on to `DataLoader()`
### Response:
def create(cls, train_ds, valid_ds, test_ds=None, path:PathOrStr='.', no_check:bool=False, bs=64, val_bs:int=None,
num_workers:int=0, device:torch.device=None, collate_fn:Callable=data_collate,
dl_tfms:Optional[Collection[Callable]]=None, bptt:int=70, backwards:bool=False, **dl_kwargs) -> DataBunch:
"Create a `TextDataBunch` in `path` from the `datasets` for language modelling. Passes `**dl_kwargs` on to `DataLoader()`"
datasets = cls._init_ds(train_ds, valid_ds, test_ds)
val_bs = ifnone(val_bs, bs)
datasets = [LanguageModelPreLoader(ds, shuffle=(i==0), bs=(bs if i==0 else val_bs), bptt=bptt, backwards=backwards)
for i,ds in enumerate(datasets)]
val_bs = bs
dls = [DataLoader(d, b, shuffle=False, **dl_kwargs) for d,b in zip(datasets, (bs,val_bs,val_bs,val_bs)) if d is not None]
return cls(*dls, path=path, device=device, dl_tfms=dl_tfms, collate_fn=collate_fn, no_check=no_check) |
def getlist(self, section, option, raw=False, vars=None, fallback=[], delimiters=','):
"""
A convenience method which coerces the option in the specified section to a list of strings.
"""
# NOTE(review): `fallback=[]` is a mutable default argument -- a caller
# that mutates a returned fallback would share state across calls.
# `fallback=None` would be safer, but is left unchanged here to
# preserve the documented interface.
v = self.get(section, option, raw=raw, vars=vars, fallback=fallback)
return self._convert_to_list(v, delimiters=delimiters) | A convenience method which coerces the option in the specified section to a list of strings. | Below is the the instruction that describes the task:
### Input:
A convenience method which coerces the option in the specified section to a list of strings.
### Response:
def getlist(self, section, option, raw=False, vars=None, fallback=[], delimiters=','):
"""
A convenience method which coerces the option in the specified section to a list of strings.
"""
v = self.get(section, option, raw=raw, vars=vars, fallback=fallback)
return self._convert_to_list(v, delimiters=delimiters) |
def _next_rotation_id(rotated_files):
"""Given the hanoi_rotator generated files in the output directory,
returns the rotation_id that will be given to the current file. If there
are no existing rotated files, return 0.
"""
if not rotated_files:
return 0
else:
# Entries carry their rotation_id at index 1; the next id is one past
# the highest id seen so far.
highest_rotated_file = max(rotated_files, key=lambda x: x[1])
return highest_rotated_file[1] + 1 | Given the hanoi_rotator generated files in the output directory,
returns the rotation_id that will be given to the current file. If there
are no existing rotated files, return 0. | Below is the instruction that describes the task:
### Input:
Given the hanoi_rotator generated files in the output directory,
returns the rotation_id that will be given to the current file. If there
are no existing rotated files, return 0.
### Response:
def _next_rotation_id(rotated_files):
"""Given the hanoi_rotator generated files in the output directory,
returns the rotation_id that will be given to the current file. If there
are no existing rotated files, return 0.
"""
if not rotated_files:
return 0
else:
highest_rotated_file = max(rotated_files, key=lambda x: x[1])
return highest_rotated_file[1] + 1 |
def thermal_conductivity_Magomedov(T, P, ws, CASRNs, k_w=None):
r'''Calculate the thermal conductivity of an aqueous mixture of
electrolytes using the form proposed by Magomedov [1]_.
Parameters are loaded by the function as needed. Function will fail if an
electrolyte is not in the database.
.. math::
\lambda = \lambda_w\left[ 1 - \sum_{i=1}^n A_i (w_i + 2\times10^{-4}
w_i^3)\right] - 2\times10^{-8} PT\sum_{i=1}^n w_i
Parameters
----------
T : float
Temperature of liquid [K]
P : float
Pressure of the liquid [Pa]
ws : array
Weight fractions of liquid components other than water
CASRNs : array
CAS numbers of the liquid components other than water
k_w : float
Liquid thermal condiuctivity or pure water at T and P, [W/m/K]
Returns
-------
kl : float
Liquid thermal condiuctivity, [W/m/K]
Notes
-----
Range from 273 K to 473 K, P from 0.1 MPa to 100 MPa. C from 0 to 25 mass%.
Internal untis are MPa for pressure and weight percent.
An example is sought for this function. It is not possible to reproduce
the author's values consistently.
Examples
--------
>>> thermal_conductivity_Magomedov(293., 1E6, [.25], ['7758-94-3'], k_w=0.59827)
0.548654049375
References
----------
.. [1] Magomedov, U. B. "The Thermal Conductivity of Binary and
Multicomponent Aqueous Solutions of Inorganic Substances at High
Parameters of State." High Temperature 39, no. 2 (March 1, 2001):
221-26. doi:10.1023/A:1017518731726.
'''
# Convert to the correlation's internal units: Pa -> MPa, mass
# fraction -> mass percent.
P = P/1E6
ws = [i*100 for i in ws]
if not k_w:
raise Exception('k_w correlation must be provided')
sum1 = 0
for i, CASRN in enumerate(CASRNs):
# Per-electrolyte coefficient A_i from the Magomedov data table;
# a CAS number missing from the table raises here.
Ai = float(Magomedovk_thermal_cond.at[CASRN, 'Ai'])
sum1 += Ai*(ws[i] + 2E-4*ws[i]**3)
return k_w*(1 - sum1) - 2E-8*P*T*sum(ws) | r'''Calculate the thermal conductivity of an aqueous mixture of
electrolytes using the form proposed by Magomedov [1]_.
Parameters are loaded by the function as needed. Function will fail if an
electrolyte is not in the database.
.. math::
\lambda = \lambda_w\left[ 1 - \sum_{i=1}^n A_i (w_i + 2\times10^{-4}
w_i^3)\right] - 2\times10^{-8} PT\sum_{i=1}^n w_i
Parameters
----------
T : float
Temperature of liquid [K]
P : float
Pressure of the liquid [Pa]
ws : array
Weight fractions of liquid components other than water
CASRNs : array
CAS numbers of the liquid components other than water
k_w : float
Liquid thermal condiuctivity or pure water at T and P, [W/m/K]
Returns
-------
kl : float
Liquid thermal condiuctivity, [W/m/K]
Notes
-----
Range from 273 K to 473 K, P from 0.1 MPa to 100 MPa. C from 0 to 25 mass%.
Internal untis are MPa for pressure and weight percent.
An example is sought for this function. It is not possible to reproduce
the author's values consistently.
Examples
--------
>>> thermal_conductivity_Magomedov(293., 1E6, [.25], ['7758-94-3'], k_w=0.59827)
0.548654049375
References
----------
.. [1] Magomedov, U. B. "The Thermal Conductivity of Binary and
Multicomponent Aqueous Solutions of Inorganic Substances at High
Parameters of State." High Temperature 39, no. 2 (March 1, 2001):
221-26. doi:10.1023/A:1017518731726. | Below is the instruction that describes the task:
### Input:
r'''Calculate the thermal conductivity of an aqueous mixture of
electrolytes using the form proposed by Magomedov [1]_.
Parameters are loaded by the function as needed. Function will fail if an
electrolyte is not in the database.
.. math::
\lambda = \lambda_w\left[ 1 - \sum_{i=1}^n A_i (w_i + 2\times10^{-4}
w_i^3)\right] - 2\times10^{-8} PT\sum_{i=1}^n w_i
Parameters
----------
T : float
Temperature of liquid [K]
P : float
Pressure of the liquid [Pa]
ws : array
Weight fractions of liquid components other than water
CASRNs : array
CAS numbers of the liquid components other than water
k_w : float
Liquid thermal condiuctivity or pure water at T and P, [W/m/K]
Returns
-------
kl : float
Liquid thermal condiuctivity, [W/m/K]
Notes
-----
Range from 273 K to 473 K, P from 0.1 MPa to 100 MPa. C from 0 to 25 mass%.
Internal untis are MPa for pressure and weight percent.
An example is sought for this function. It is not possible to reproduce
the author's values consistently.
Examples
--------
>>> thermal_conductivity_Magomedov(293., 1E6, [.25], ['7758-94-3'], k_w=0.59827)
0.548654049375
References
----------
.. [1] Magomedov, U. B. "The Thermal Conductivity of Binary and
Multicomponent Aqueous Solutions of Inorganic Substances at High
Parameters of State." High Temperature 39, no. 2 (March 1, 2001):
221-26. doi:10.1023/A:1017518731726.
### Response:
def thermal_conductivity_Magomedov(T, P, ws, CASRNs, k_w=None):
r'''Calculate the thermal conductivity of an aqueous mixture of
electrolytes using the form proposed by Magomedov [1]_.
Parameters are loaded by the function as needed. Function will fail if an
electrolyte is not in the database.
.. math::
\lambda = \lambda_w\left[ 1 - \sum_{i=1}^n A_i (w_i + 2\times10^{-4}
w_i^3)\right] - 2\times10^{-8} PT\sum_{i=1}^n w_i
Parameters
----------
T : float
Temperature of liquid [K]
P : float
Pressure of the liquid [Pa]
ws : array
Weight fractions of liquid components other than water
CASRNs : array
CAS numbers of the liquid components other than water
k_w : float
Liquid thermal condiuctivity or pure water at T and P, [W/m/K]
Returns
-------
kl : float
Liquid thermal condiuctivity, [W/m/K]
Notes
-----
Range from 273 K to 473 K, P from 0.1 MPa to 100 MPa. C from 0 to 25 mass%.
Internal untis are MPa for pressure and weight percent.
An example is sought for this function. It is not possible to reproduce
the author's values consistently.
Examples
--------
>>> thermal_conductivity_Magomedov(293., 1E6, [.25], ['7758-94-3'], k_w=0.59827)
0.548654049375
References
----------
.. [1] Magomedov, U. B. "The Thermal Conductivity of Binary and
Multicomponent Aqueous Solutions of Inorganic Substances at High
Parameters of State." High Temperature 39, no. 2 (March 1, 2001):
221-26. doi:10.1023/A:1017518731726.
'''
P = P/1E6
ws = [i*100 for i in ws]
if not k_w:
raise Exception('k_w correlation must be provided')
sum1 = 0
for i, CASRN in enumerate(CASRNs):
Ai = float(Magomedovk_thermal_cond.at[CASRN, 'Ai'])
sum1 += Ai*(ws[i] + 2E-4*ws[i]**3)
return k_w*(1 - sum1) - 2E-8*P*T*sum(ws) |
def predict_local(self, X, batch_size = -1):
"""
:param X: X can be a ndarray or list of ndarray if the model has multiple inputs.
The first dimension of X should be batch.
:param batch_size: total batch size of prediction.
:return: a ndarray as the prediction result.
"""
# Delegate the prediction to the JVM side; inputs are converted to
# JTensors first.
jresults = callBigDlFunc(self.bigdl_type,
"predictLocal",
self.value,
self._to_jtensors(X),
batch_size)
# Stack the per-sample JTensor results back into one ndarray.
return np.stack([j.to_ndarray()for j in jresults]) | :param X: X can be a ndarray or list of ndarray if the model has multiple inputs.
The first dimension of X should be batch.
:param batch_size: total batch size of prediction.
:return: a ndarray as the prediction result. | Below is the instruction that describes the task:
### Input:
:param X: X can be a ndarray or list of ndarray if the model has multiple inputs.
The first dimension of X should be batch.
:param batch_size: total batch size of prediction.
:return: a ndarray as the prediction result.
### Response:
def predict_local(self, X, batch_size = -1):
"""
:param X: X can be a ndarray or list of ndarray if the model has multiple inputs.
The first dimension of X should be batch.
:param batch_size: total batch size of prediction.
:return: a ndarray as the prediction result.
"""
jresults = callBigDlFunc(self.bigdl_type,
"predictLocal",
self.value,
self._to_jtensors(X),
batch_size)
return np.stack([j.to_ndarray()for j in jresults]) |
def constq_grun(v, v0, gamma0, q):
"""
calculate Gruneisen parameter for constant q
:param v: unit-cell volume in A^3
:param v0: unit-cell volume in A^3 at 1 bar
:param gamma0: Gruneisen parameter at 1 bar
:param q: logarithmic derivative of Grunseinen parameter
:return: Gruneisen parameter at a given volume
"""
# gamma(V) = gamma0 * (V / V0)**q  -- the constant-q assumption.
x = v / v0
return gamma0 * np.power(x, q) | calculate Gruneisen parameter for constant q
:param v: unit-cell volume in A^3
:param v0: unit-cell volume in A^3 at 1 bar
:param gamma0: Gruneisen parameter at 1 bar
:param q: logarithmic derivative of Grunseinen parameter
:return: Gruneisen parameter at a given volume | Below is the instruction that describes the task:
### Input:
calculate Gruneisen parameter for constant q
:param v: unit-cell volume in A^3
:param v0: unit-cell volume in A^3 at 1 bar
:param gamma0: Gruneisen parameter at 1 bar
:param q: logarithmic derivative of Grunseinen parameter
:return: Gruneisen parameter at a given volume
### Response:
def constq_grun(v, v0, gamma0, q):
"""
calculate Gruneisen parameter for constant q
:param v: unit-cell volume in A^3
:param v0: unit-cell volume in A^3 at 1 bar
:param gamma0: Gruneisen parameter at 1 bar
:param q: logarithmic derivative of Grunseinen parameter
:return: Gruneisen parameter at a given volume
"""
x = v / v0
return gamma0 * np.power(x, q) |
def SendToExecSocket(self, code, tid=None):
"""Inject python code into exec socket."""
# `code` is JSON-encoded on the way in and the reply JSON-decoded.
response = self._SendToExecSocketRaw(json.dumps(code), tid)
return json.loads(response) | Inject python code into exec socket. | Below is the the instruction that describes the task:
### Input:
Inject python code into exec socket.
### Response:
def SendToExecSocket(self, code, tid=None):
"""Inject python code into exec socket."""
response = self._SendToExecSocketRaw(json.dumps(code), tid)
return json.loads(response) |
def validate(instance, schema, cls=None, *args, **kwargs):
"""
Validate an instance under the given schema.
>>> validate([2, 3, 4], {"maxItems" : 2})
Traceback (most recent call last):
...
ValidationError: [2, 3, 4] is too long
:func:`validate` will first verify that the provided schema is itself
valid, since not doing so can lead to less obvious error messages and fail
in less obvious or consistent ways. If you know you have a valid schema
already or don't care, you might prefer using the
:meth:`~IValidator.validate` method directly on a specific validator
(e.g. :meth:`Draft4Validator.validate`).
:argument instance: the instance to validate
:argument schema: the schema to validate with
:argument cls: an :class:`IValidator` class that will be used to validate
the instance.
If the ``cls`` argument is not provided, two things will happen in
accordance with the specification. First, if the schema has a
:validator:`$schema` property containing a known meta-schema [#]_ then the
proper validator will be used. The specification recommends that all
schemas contain :validator:`$schema` properties for this reason. If no
:validator:`$schema` property is found, the default validator class is
:class:`Draft4Validator`.
Any other provided positional and keyword arguments will be passed on when
instantiating the ``cls``.
:raises:
:exc:`ValidationError` if the instance is invalid
:exc:`SchemaError` if the schema itself is invalid
.. rubric:: Footnotes
.. [#] known by a validator registered with :func:`validates`
"""
# Resolve the validator class from the schema's $schema (or the default),
# check the schema itself, then validate the instance with it.
if cls is None:
cls = validator_for(schema)
cls.check_schema(schema)
cls(schema, *args, **kwargs).validate(instance) | Validate an instance under the given schema.
>>> validate([2, 3, 4], {"maxItems" : 2})
Traceback (most recent call last):
...
ValidationError: [2, 3, 4] is too long
:func:`validate` will first verify that the provided schema is itself
valid, since not doing so can lead to less obvious error messages and fail
in less obvious or consistent ways. If you know you have a valid schema
already or don't care, you might prefer using the
:meth:`~IValidator.validate` method directly on a specific validator
(e.g. :meth:`Draft4Validator.validate`).
:argument instance: the instance to validate
:argument schema: the schema to validate with
:argument cls: an :class:`IValidator` class that will be used to validate
the instance.
If the ``cls`` argument is not provided, two things will happen in
accordance with the specification. First, if the schema has a
:validator:`$schema` property containing a known meta-schema [#]_ then the
proper validator will be used. The specification recommends that all
schemas contain :validator:`$schema` properties for this reason. If no
:validator:`$schema` property is found, the default validator class is
:class:`Draft4Validator`.
Any other provided positional and keyword arguments will be passed on when
instantiating the ``cls``.
:raises:
:exc:`ValidationError` if the instance is invalid
:exc:`SchemaError` if the schema itself is invalid
.. rubric:: Footnotes
.. [#] known by a validator registered with :func:`validates` | Below is the instruction that describes the task:
### Input:
Validate an instance under the given schema.
>>> validate([2, 3, 4], {"maxItems" : 2})
Traceback (most recent call last):
...
ValidationError: [2, 3, 4] is too long
:func:`validate` will first verify that the provided schema is itself
valid, since not doing so can lead to less obvious error messages and fail
in less obvious or consistent ways. If you know you have a valid schema
already or don't care, you might prefer using the
:meth:`~IValidator.validate` method directly on a specific validator
(e.g. :meth:`Draft4Validator.validate`).
:argument instance: the instance to validate
:argument schema: the schema to validate with
:argument cls: an :class:`IValidator` class that will be used to validate
the instance.
If the ``cls`` argument is not provided, two things will happen in
accordance with the specification. First, if the schema has a
:validator:`$schema` property containing a known meta-schema [#]_ then the
proper validator will be used. The specification recommends that all
schemas contain :validator:`$schema` properties for this reason. If no
:validator:`$schema` property is found, the default validator class is
:class:`Draft4Validator`.
Any other provided positional and keyword arguments will be passed on when
instantiating the ``cls``.
:raises:
:exc:`ValidationError` if the instance is invalid
:exc:`SchemaError` if the schema itself is invalid
.. rubric:: Footnotes
.. [#] known by a validator registered with :func:`validates`
### Response:
def validate(instance, schema, cls=None, *args, **kwargs):
"""
Validate an instance under the given schema.
>>> validate([2, 3, 4], {"maxItems" : 2})
Traceback (most recent call last):
...
ValidationError: [2, 3, 4] is too long
:func:`validate` will first verify that the provided schema is itself
valid, since not doing so can lead to less obvious error messages and fail
in less obvious or consistent ways. If you know you have a valid schema
already or don't care, you might prefer using the
:meth:`~IValidator.validate` method directly on a specific validator
(e.g. :meth:`Draft4Validator.validate`).
:argument instance: the instance to validate
:argument schema: the schema to validate with
:argument cls: an :class:`IValidator` class that will be used to validate
the instance.
If the ``cls`` argument is not provided, two things will happen in
accordance with the specification. First, if the schema has a
:validator:`$schema` property containing a known meta-schema [#]_ then the
proper validator will be used. The specification recommends that all
schemas contain :validator:`$schema` properties for this reason. If no
:validator:`$schema` property is found, the default validator class is
:class:`Draft4Validator`.
Any other provided positional and keyword arguments will be passed on when
instantiating the ``cls``.
:raises:
:exc:`ValidationError` if the instance is invalid
:exc:`SchemaError` if the schema itself is invalid
.. rubric:: Footnotes
.. [#] known by a validator registered with :func:`validates`
"""
if cls is None:
cls = validator_for(schema)
cls.check_schema(schema)
cls(schema, *args, **kwargs).validate(instance) |
def call(corofunc, *args, **kwargs):
"""
:return:
a delegator function that returns a coroutine object by calling
``corofunc(seed_tuple, *args, **kwargs)``.
"""
# Accept plain functions too; they get wrapped as coroutine functions.
corofunc = _ensure_coroutine_function(corofunc)
def f(seed_tuple):
# args/kwargs are captured by closure and applied on every call.
return corofunc(seed_tuple, *args, **kwargs)
return f | :return:
a delegator function that returns a coroutine object by calling
``corofunc(seed_tuple, *args, **kwargs)``. | Below is the instruction that describes the task:
### Input:
:return:
a delegator function that returns a coroutine object by calling
``corofunc(seed_tuple, *args, **kwargs)``.
### Response:
def call(corofunc, *args, **kwargs):
"""
:return:
a delegator function that returns a coroutine object by calling
``corofunc(seed_tuple, *args, **kwargs)``.
"""
corofunc = _ensure_coroutine_function(corofunc)
def f(seed_tuple):
return corofunc(seed_tuple, *args, **kwargs)
return f |
def get_last_file(input_dir, glob_pattern='*', key=op.getctime, reverse=True):
""" Return the path to the latest file in `input_dir`.
The `key` argument defines which information to use for sorting
the list of files, could be:
- creation date: os.path.getctime,
- modification date: os.path.getmtime,
etc.
Parameters
----------
input_dir: str
Path to the folder where to perform the `glob`.
glob_pattern: str
`glob` Pattern to filter the files in `input_dir`.
key: str
Sorting key function
reverse: bool
Set to True if you want the sorting to be in decreasing order,
False otherwise.
Returns
-------
latest_filepath: str
Path to the latest modified file in `input_dir`.
"""
files = glob(op.join(input_dir, glob_pattern))
# With reverse=True the most recent file (per `key`) sorts first.
files.sort(key=key, reverse=reverse)
# NOTE(review): raises IndexError when no file matches `glob_pattern`.
return files[0] | Return the path to the latest file in `input_dir`.
The `key` argument defines which information to use for sorting
the list of files, could be:
- creation date: os.path.getctime,
- modification date: os.path.getmtime,
etc.
Parameters
----------
input_dir: str
Path to the folder where to perform the `glob`.
glob_pattern: str
`glob` Pattern to filter the files in `input_dir`.
key: str
Sorting key function
reverse: bool
Set to True if you want the sorting to be in decreasing order,
False otherwise.
Returns
-------
latest_filepath: str
Path to the latest modified file in `input_dir`. | Below is the instruction that describes the task:
### Input:
Return the path to the latest file in `input_dir`.
The `key` argument defines which information to use for sorting
the list of files, could be:
- creation date: os.path.getctime,
- modification date: os.path.getmtime,
etc.
Parameters
----------
input_dir: str
Path to the folder where to perform the `glob`.
glob_pattern: str
`glob` Pattern to filter the files in `input_dir`.
key: str
Sorting key function
reverse: bool
Set to True if you want the sorting to be in decreasing order,
False otherwise.
Returns
-------
latest_filepath: str
Path to the latest modified file in `input_dir`.
### Response:
def get_last_file(input_dir, glob_pattern='*', key=op.getctime, reverse=True):
""" Return the path to the latest file in `input_dir`.
The `key` argument defines which information to use for sorting
the list of files, could be:
- creation date: os.path.getctime,
- modification date: os.path.getmtime,
etc.
Parameters
----------
input_dir: str
Path to the folder where to perform the `glob`.
glob_pattern: str
`glob` Pattern to filter the files in `input_dir`.
key: str
Sorting key function
reverse: bool
Set to True if you want the sorting to be in decreasing order,
False otherwise.
Returns
-------
latest_filepath: str
Path to the latest modified file in `input_dir`.
"""
files = glob(op.join(input_dir, glob_pattern))
files.sort(key=key, reverse=reverse)
return files[0] |
def _ParseMRUListKey(self, parser_mediator, registry_key, codepage='cp1252'):
"""Extract event objects from a MRUList Registry key.
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
registry_key (dfwinreg.WinRegistryKey): Windows Registry key.
codepage (Optional[str]): extended ASCII string codepage.
"""
try:
mrulist = self._ParseMRUListValue(registry_key)
except (ValueError, errors.ParseError) as exception:
# Malformed MRUList values are reported as warnings, not fatal errors.
parser_mediator.ProduceExtractionWarning(
'unable to parse MRUList value with error: {0!s}'.format(exception))
return
if not mrulist:
return
values_dict = {}
found_terminator = False
for entry_index, entry_letter in enumerate(mrulist):
# The MRU list is terminated with '\0' (0x0000).
if entry_letter == 0:
break
# NOTE(review): found_terminator is never set True in this block (the
# loop breaks on the terminator above), so this branch looks
# unreachable -- confirm against the upstream parser.
if found_terminator:
parser_mediator.ProduceExtractionWarning((
'found additional MRUList entries after terminator in key: '
'{0:s}.').format(registry_key.path))
# Only create one parser error per terminator.
found_terminator = False
entry_letter = chr(entry_letter)
value_string = self._ParseMRUListEntryValue(
parser_mediator, registry_key, entry_index, entry_letter,
codepage=codepage)
value_text = 'Index: {0:d} [MRU Value {1:s}]'.format(
entry_index + 1, entry_letter)
values_dict[value_text] = value_string
event_data = windows_events.WindowsRegistryEventData()
event_data.key_path = registry_key.path
event_data.offset = registry_key.offset
event_data.regvalue = values_dict
event_data.source_append = self._SOURCE_APPEND
# The event is timestamped with the key's last-written time.
event = time_events.DateTimeValuesEvent(
registry_key.last_written_time, definitions.TIME_DESCRIPTION_WRITTEN)
parser_mediator.ProduceEventWithEventData(event, event_data) | Extract event objects from a MRUList Registry key.
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
registry_key (dfwinreg.WinRegistryKey): Windows Registry key.
codepage (Optional[str]): extended ASCII string codepage. | Below is the instruction that describes the task:
### Input:
Extract event objects from a MRUList Registry key.
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
registry_key (dfwinreg.WinRegistryKey): Windows Registry key.
codepage (Optional[str]): extended ASCII string codepage.
### Response:
def _ParseMRUListKey(self, parser_mediator, registry_key, codepage='cp1252'):
  """Extract event objects from a MRUList Registry key.

  Args:
    parser_mediator (ParserMediator): mediates interactions between parsers
        and other components, such as storage and dfvfs.
    registry_key (dfwinreg.WinRegistryKey): Windows Registry key.
    codepage (Optional[str]): extended ASCII string codepage.
  """
  try:
    mrulist = self._ParseMRUListValue(registry_key)
  except (ValueError, errors.ParseError) as exception:
    parser_mediator.ProduceExtractionWarning(
        'unable to parse MRUList value with error: {0!s}'.format(exception))
    return

  if not mrulist:
    return

  values_dict = {}
  found_terminator = False
  for entry_index, entry_letter in enumerate(mrulist):
    # The MRU list is terminated with '\0' (0x0000).
    # BUG FIX: the previous code used `break` here, which made the
    # "entries after terminator" warning below unreachable dead code.
    # Keep scanning so unexpected trailing entries can be reported.
    if entry_letter == 0:
      found_terminator = True
      continue

    if found_terminator:
      parser_mediator.ProduceExtractionWarning((
          'found additional MRUList entries after terminator in key: '
          '{0:s}.').format(registry_key.path))

      # Only create one parser error per terminator.
      found_terminator = False

    entry_letter = chr(entry_letter)

    value_string = self._ParseMRUListEntryValue(
        parser_mediator, registry_key, entry_index, entry_letter,
        codepage=codepage)

    value_text = 'Index: {0:d} [MRU Value {1:s}]'.format(
        entry_index + 1, entry_letter)

    values_dict[value_text] = value_string

  event_data = windows_events.WindowsRegistryEventData()
  event_data.key_path = registry_key.path
  event_data.offset = registry_key.offset
  event_data.regvalue = values_dict
  event_data.source_append = self._SOURCE_APPEND

  event = time_events.DateTimeValuesEvent(
      registry_key.last_written_time, definitions.TIME_DESCRIPTION_WRITTEN)
  parser_mediator.ProduceEventWithEventData(event, event_data)
def event_loop(self):
""" Run the event loop once. """
if hasattr(self.loop, '._run_once'):
self.loop._thread_id = threading.get_ident()
try:
self.loop._run_once()
finally:
self.loop._thread_id = None
else:
self.loop.call_soon(self.loop.stop)
self.loop.run_forever() | Run the event loop once. | Below is the the instruction that describes the task:
### Input:
Run the event loop once.
### Response:
def event_loop(self):
    """Run the event loop once.

    If the wrapped loop exposes the private ``_run_once`` helper, call it
    directly while temporarily claiming ``_thread_id`` for this thread so
    the loop believes it is running here. Otherwise fall back to scheduling
    an immediate ``stop()`` and letting ``run_forever()`` process a single
    iteration.
    """
    # BUG FIX: the attribute name previously had a spurious leading dot
    # ('._run_once'), so hasattr() was always False and the fast path was
    # never taken.
    if hasattr(self.loop, '_run_once'):
        self.loop._thread_id = threading.get_ident()
        try:
            self.loop._run_once()
        finally:
            # Always release the thread-id claim, even if _run_once raises.
            self.loop._thread_id = None
    else:
        self.loop.call_soon(self.loop.stop)
        self.loop.run_forever()
def get(cls, resolvable_string, options_builder=None, interpreter=None):
"""Get a :class:`Resolvable` from a string.
:returns: A :class:`Resolvable` or ``None`` if no implementation was appropriate.
"""
options_builder = options_builder or ResolverOptionsBuilder()
for resolvable_impl in cls._REGISTRY:
try:
return resolvable_impl.from_string(resolvable_string,
options_builder,
interpreter=interpreter)
except cls.InvalidRequirement:
continue
raise cls.InvalidRequirement('Unknown requirement type: %s' % resolvable_string) | Get a :class:`Resolvable` from a string.
:returns: A :class:`Resolvable` or ``None`` if no implementation was appropriate. | Below is the instruction that describes the task:
### Input:
Get a :class:`Resolvable` from a string.
:returns: A :class:`Resolvable` or ``None`` if no implementation was appropriate.
### Response:
def get(cls, resolvable_string, options_builder=None, interpreter=None):
    """Get a :class:`Resolvable` from a string.

    :returns: A :class:`Resolvable` or ``None`` if no implementation was appropriate.
    """
    options_builder = options_builder or ResolverOptionsBuilder()
    # Ask each registered implementation in turn; the first one that
    # accepts the string wins.
    for implementation in cls._REGISTRY:
        try:
            return implementation.from_string(
                resolvable_string, options_builder, interpreter=interpreter)
        except cls.InvalidRequirement:
            pass
    raise cls.InvalidRequirement('Unknown requirement type: %s' % resolvable_string)
def reset_headers(self):
"""
Update the column and row numbering in the headers.
"""
rows = self.rowCount()
cols = self.columnCount()
for r in range(rows):
self.setVerticalHeaderItem(r, QTableWidgetItem(str(r)))
for c in range(cols):
self.setHorizontalHeaderItem(c, QTableWidgetItem(str(c)))
self.setColumnWidth(c, 40) | Update the column and row numbering in the headers. | Below is the the instruction that describes the task:
### Input:
Update the column and row numbering in the headers.
### Response:
def reset_headers(self):
    """Renumber the row and column headers to match their positions.

    Each vertical header shows its row index, each horizontal header its
    column index; column widths are reset to a fixed 40 pixels.
    """
    for row in range(self.rowCount()):
        self.setVerticalHeaderItem(row, QTableWidgetItem(str(row)))
    for col in range(self.columnCount()):
        self.setHorizontalHeaderItem(col, QTableWidgetItem(str(col)))
        self.setColumnWidth(col, 40)
def box_draw_character(first: Optional[BoxDrawCharacterSet],
second: BoxDrawCharacterSet,
*,
top: int = 0,
bottom: int = 0,
left: int = 0,
right: int = 0) -> Optional[str]:
"""Finds a box drawing character based on its connectivity.
For example:
box_draw_character(
NORMAL_BOX_CHARS,
BOLD_BOX_CHARS,
top=-1,
right=+1)
evaluates to '┕', which has a normal upward leg and bold rightward leg.
Args:
first: The character set to use for legs set to -1. If set to None,
defaults to the same thing as the second character set.
second: The character set to use for legs set to +1.
top: Whether the upward leg should be present.
bottom: Whether the bottom leg should be present.
left: Whether the left leg should be present.
right: Whether the right leg should be present.
Returns:
A box drawing character approximating the desired properties, or None
if all legs are set to 0.
"""
if first is None:
first = second
sign = +1
combo = None
# Known combinations.
if first is NORMAL_BOX_CHARS and second is BOLD_BOX_CHARS:
combo = NORMAL_THEN_BOLD_MIXED_BOX_CHARS
if first is BOLD_BOX_CHARS and second is NORMAL_BOX_CHARS:
combo = NORMAL_THEN_BOLD_MIXED_BOX_CHARS
sign = -1
if combo is None:
choice = second if +1 in [top, bottom, left, right] else first
return choice.char(top=bool(top),
bottom=bool(bottom),
left=bool(left),
right=bool(right))
return combo.char(top=top * sign,
bottom=bottom * sign,
left=left * sign,
right=right * sign) | Finds a box drawing character based on its connectivity.
For example:
box_draw_character(
NORMAL_BOX_CHARS,
BOLD_BOX_CHARS,
top=-1,
right=+1)
evaluates to '┕', which has a normal upward leg and bold rightward leg.
Args:
first: The character set to use for legs set to -1. If set to None,
defaults to the same thing as the second character set.
second: The character set to use for legs set to +1.
top: Whether the upward leg should be present.
bottom: Whether the bottom leg should be present.
left: Whether the left leg should be present.
right: Whether the right leg should be present.
Returns:
A box drawing character approximating the desired properties, or None
if all legs are set to 0. | Below is the the instruction that describes the task:
### Input:
Finds a box drawing character based on its connectivity.
For example:
box_draw_character(
NORMAL_BOX_CHARS,
BOLD_BOX_CHARS,
top=-1,
right=+1)
evaluates to '┕', which has a normal upward leg and bold rightward leg.
Args:
first: The character set to use for legs set to -1. If set to None,
defaults to the same thing as the second character set.
second: The character set to use for legs set to +1.
top: Whether the upward leg should be present.
bottom: Whether the bottom leg should be present.
left: Whether the left leg should be present.
right: Whether the right leg should be present.
Returns:
A box drawing character approximating the desired properties, or None
if all legs are set to 0.
### Response:
def box_draw_character(first: Optional[BoxDrawCharacterSet],
                       second: BoxDrawCharacterSet,
                       *,
                       top: int = 0,
                       bottom: int = 0,
                       left: int = 0,
                       right: int = 0) -> Optional[str]:
    """Finds a box drawing character based on its connectivity.

    For example:

        box_draw_character(
            NORMAL_BOX_CHARS,
            BOLD_BOX_CHARS,
            top=-1,
            right=+1)

    evaluates to '┕', which has a normal upward leg and bold rightward leg.

    Args:
        first: The character set to use for legs set to -1. If set to None,
            defaults to the same thing as the second character set.
        second: The character set to use for legs set to +1.
        top: Whether the upward leg should be present.
        bottom: Whether the bottom leg should be present.
        left: Whether the left leg should be present.
        right: Whether the right leg should be present.

    Returns:
        A box drawing character approximating the desired properties, or None
        if all legs are set to 0.
    """
    if first is None:
        first = second

    # Check for a known mixed-weight combination of the two character sets.
    # The combined set encodes normal legs as one sign and bold legs as the
    # other, so flip the sign when the sets are given in the opposite order.
    combo = None
    sign = +1
    if first is NORMAL_BOX_CHARS and second is BOLD_BOX_CHARS:
        combo = NORMAL_THEN_BOLD_MIXED_BOX_CHARS
    elif first is BOLD_BOX_CHARS and second is NORMAL_BOX_CHARS:
        combo = NORMAL_THEN_BOLD_MIXED_BOX_CHARS
        sign = -1

    if combo is not None:
        return combo.char(top=top * sign,
                          bottom=bottom * sign,
                          left=left * sign,
                          right=right * sign)

    # No mixed set available: pick a single set (second wins if any leg is
    # +1) and collapse each leg to present/absent.
    chosen = second if +1 in [top, bottom, left, right] else first
    return chosen.char(top=bool(top),
                       bottom=bool(bottom),
                       left=bool(left),
                       right=bool(right))
def parse_template(self):
'''Parse the template string into a dict.
Find the (large) inner sections first, save them, and remove them from
a modified string. Then find the template attributes in the modified
string.
:returns: dictionary of parsed template
'''
self.templ_dict = {'actions': {'definition': {}}}
self.templ_dict['name'] = self._get_template_name()
self._add_cli_scripts()
self._add_sections()
self._add_attrs()
return self.templ_dict | Parse the template string into a dict.
Find the (large) inner sections first, save them, and remove them from
a modified string. Then find the template attributes in the modified
string.
:returns: dictionary of parsed template | Below is the the instruction that describes the task:
### Input:
Parse the template string into a dict.
Find the (large) inner sections first, save them, and remove them from
a modified string. Then find the template attributes in the modified
string.
:returns: dictionary of parsed template
### Response:
def parse_template(self):
    '''Parse the template string into a dict.

    Find the (large) inner sections first, save them, and remove them from
    a modified string. Then find the template attributes in the modified
    string.

    :returns: dictionary of parsed template
    '''
    self.templ_dict = {'actions': {'definition': {}}}
    self.templ_dict['name'] = self._get_template_name()
    # Each step mutates self.templ_dict in place; order matters (sections
    # are removed from the working string before attributes are scanned).
    for step in (self._add_cli_scripts, self._add_sections, self._add_attrs):
        step()
    return self.templ_dict
def get_ssh_to_node(self, ssh_to=None):
"""
Return target node for SSH/SFTP connections.
The target node is the first node of the class specified in
the configuration file as ``ssh_to`` (but argument ``ssh_to``
can override this choice).
If not ``ssh_to`` has been specified in this cluster's config,
then try node class names ``ssh``, ``login``, ``frontend``,
and ``master``: if any of these is non-empty, return the first
node.
If all else fails, return the first node of the first class
(in alphabetic order).
:return: :py:class:`Node`
:raise: :py:class:`elasticluster.exceptions.NodeNotFound`
if no valid frontend node is found
"""
if ssh_to is None:
ssh_to = self.ssh_to
# first try to interpret `ssh_to` as a node name
if ssh_to:
try:
return self.get_node_by_name(ssh_to)
except NodeNotFound:
pass
# next, ensure `ssh_to` is a class name
if ssh_to:
try:
parts = self._naming_policy.parse(ssh_to)
log.warning(
"Node `%s` not found."
" Trying to find other node in class `%s` ...",
ssh_to, parts['kind'])
ssh_to = parts['kind']
except ValueError:
# it's already a class name
pass
# try getting first node of kind `ssh_to`
if ssh_to:
try:
nodes = self.nodes[ssh_to]
except KeyError:
raise ConfigurationError(
"Invalid configuration item `ssh_to={ssh_to}` in cluster `{name}`:"
" node class `{ssh_to}` does not exist in this cluster."
.format(ssh_to=ssh_to, name=self.name))
try:
return nodes[0]
except IndexError:
log.warning(
"Chosen `ssh_to` class `%s` is empty: unable to "
"get the choosen frontend node from that class.",
ssh_to)
# If we reach this point, `ssh_to` was not set or the
# preferred class was empty. Try "natural" `ssh_to` values.
for kind in ['ssh', 'login', 'frontend', 'master']:
try:
nodes = self.nodes[kind]
return nodes[0]
except (KeyError, IndexError):
pass
# ... if all else fails, return first node
for kind in sorted(self.nodes.keys()):
if self.nodes[kind]:
return self.nodes[kind][0]
# Uh-oh, no nodes in this cluster!
raise NodeNotFound("Unable to find a valid frontend:"
" cluster has no nodes!") | Return target node for SSH/SFTP connections.
The target node is the first node of the class specified in
the configuration file as ``ssh_to`` (but argument ``ssh_to``
can override this choice).
If not ``ssh_to`` has been specified in this cluster's config,
then try node class names ``ssh``, ``login``, ``frontend``,
and ``master``: if any of these is non-empty, return the first
node.
If all else fails, return the first node of the first class
(in alphabetic order).
:return: :py:class:`Node`
:raise: :py:class:`elasticluster.exceptions.NodeNotFound`
if no valid frontend node is found | Below is the the instruction that describes the task:
### Input:
Return target node for SSH/SFTP connections.
The target node is the first node of the class specified in
the configuration file as ``ssh_to`` (but argument ``ssh_to``
can override this choice).
If not ``ssh_to`` has been specified in this cluster's config,
then try node class names ``ssh``, ``login``, ``frontend``,
and ``master``: if any of these is non-empty, return the first
node.
If all else fails, return the first node of the first class
(in alphabetic order).
:return: :py:class:`Node`
:raise: :py:class:`elasticluster.exceptions.NodeNotFound`
if no valid frontend node is found
### Response:
def get_ssh_to_node(self, ssh_to=None):
    """
    Return target node for SSH/SFTP connections.

    The target node is the first node of the class specified in
    the configuration file as ``ssh_to`` (but argument ``ssh_to``
    can override this choice).

    If not ``ssh_to`` has been specified in this cluster's config,
    then try node class names ``ssh``, ``login``, ``frontend``,
    and ``master``: if any of these is non-empty, return the first
    node.

    If all else fails, return the first node of the first class
    (in alphabetic order).

    :return: :py:class:`Node`
    :raise: :py:class:`elasticluster.exceptions.NodeNotFound`
      if no valid frontend node is found
    """
    # Resolution order: explicit argument -> cluster's configured `ssh_to`,
    # interpreted first as a node name, then as a node class name, then
    # conventional class names, then any non-empty class.
    if ssh_to is None:
        ssh_to = self.ssh_to
    # first try to interpret `ssh_to` as a node name
    if ssh_to:
        try:
            return self.get_node_by_name(ssh_to)
        except NodeNotFound:
            pass
    # next, ensure `ssh_to` is a class name
    if ssh_to:
        try:
            # A parseable node name carries the class ("kind") it belongs
            # to; fall back to searching that class.
            parts = self._naming_policy.parse(ssh_to)
            log.warning(
                "Node `%s` not found."
                " Trying to find other node in class `%s` ...",
                ssh_to, parts['kind'])
            ssh_to = parts['kind']
        except ValueError:
            # it's already a class name
            pass
    # try getting first node of kind `ssh_to`
    if ssh_to:
        try:
            nodes = self.nodes[ssh_to]
        except KeyError:
            # `ssh_to` is neither a node name nor an existing class:
            # the configuration itself is wrong.
            raise ConfigurationError(
                "Invalid configuration item `ssh_to={ssh_to}` in cluster `{name}`:"
                " node class `{ssh_to}` does not exist in this cluster."
                .format(ssh_to=ssh_to, name=self.name))
        try:
            return nodes[0]
        except IndexError:
            log.warning(
                "Chosen `ssh_to` class `%s` is empty: unable to "
                "get the choosen frontend node from that class.",
                ssh_to)
    # If we reach this point, `ssh_to` was not set or the
    # preferred class was empty. Try "natural" `ssh_to` values.
    for kind in ['ssh', 'login', 'frontend', 'master']:
        try:
            nodes = self.nodes[kind]
            return nodes[0]
        except (KeyError, IndexError):
            pass
    # ... if all else fails, return first node
    for kind in sorted(self.nodes.keys()):
        if self.nodes[kind]:
            return self.nodes[kind][0]
    # Uh-oh, no nodes in this cluster!
    raise NodeNotFound("Unable to find a valid frontend:"
                       " cluster has no nodes!")
def limits(self, x1, x2, y1, y2):
"""Set the coordinate boundaries of plot"""
self.x1 = x1
self.x2 = x2
self.y1 = y1
self.y2 = y2
self.xscale = (self.cx2 - self.cx1) / (self.x2 - self.x1)
self.yscale = (self.cy2 - self.cy1) / (self.y2 - self.y1)
# Determine the limits of the canvas
(cx1, cy1) = self.p2c((0, -math.pi / 2.0))
(cx2, cy2) = self.p2c((2 * math.pi, math.pi / 2.0))
# # set the scroll region to the size of the camvas plus a boundary to allow the canvas edge to be at centre
self.config(scrollregion=(
cx2 - self.width / 2.0, cy2 - self.height / 2.0, cx1 + self.width / 2.0, cy1 + self.height / 2.0)) | Set the coordinate boundaries of plot | Below is the the instruction that describes the task:
### Input:
Set the coordinate boundaries of plot
### Response:
def limits(self, x1, x2, y1, y2):
    """Set the coordinate boundaries of plot"""
    self.x1, self.x2 = x1, x2
    self.y1, self.y2 = y1, y2
    # Scale factors must be set before calling p2c() below.
    self.xscale = (self.cx2 - self.cx1) / (x2 - x1)
    self.yscale = (self.cy2 - self.cy1) / (y2 - y1)
    # Canvas coordinates of the plot's extreme corners.
    corner_a = self.p2c((0, -math.pi / 2.0))
    corner_b = self.p2c((2 * math.pi, math.pi / 2.0))
    # Scroll region is the canvas extent padded by half the widget size so
    # the canvas edge can sit at the centre of the view.
    half_w = self.width / 2.0
    half_h = self.height / 2.0
    self.config(scrollregion=(
        corner_b[0] - half_w, corner_b[1] - half_h,
        corner_a[0] + half_w, corner_a[1] + half_h))
def create_parser(prog):
"""Create an argument parser, adding in the list of providers."""
parser = argparse.ArgumentParser(prog=prog, formatter_class=DsubHelpFormatter)
parser.add_argument(
'--provider',
default='google-v2',
choices=['local', 'google', 'google-v2', 'test-fails'],
help="""Job service provider. Valid values are "google-v2" (Google's
Pipeline API v2) and "local" (local Docker execution). "test-*"
providers are for testing purposes only.""",
metavar='PROVIDER')
return parser | Create an argument parser, adding in the list of providers. | Below is the the instruction that describes the task:
### Input:
Create an argument parser, adding in the list of providers.
### Response:
def create_parser(prog):
  """Create an argument parser, adding in the list of providers.

  Args:
    prog: name of the program, shown in usage/help output.

  Returns:
    argparse.ArgumentParser with the common `--provider` option installed.
  """
  parser = argparse.ArgumentParser(prog=prog, formatter_class=DsubHelpFormatter)
  # metavar hides the long choices list from the usage line; argparse still
  # validates the value against `choices`.
  parser.add_argument(
      '--provider',
      default='google-v2',
      choices=['local', 'google', 'google-v2', 'test-fails'],
      help="""Job service provider. Valid values are "google-v2" (Google's
          Pipeline API v2) and "local" (local Docker execution). "test-*"
          providers are for testing purposes only.""",
      metavar='PROVIDER')
  return parser
def get_instance(self, payload):
"""
Build an instance of SyncMapInstance
:param dict payload: Payload response from the API
:returns: twilio.rest.sync.v1.service.sync_map.SyncMapInstance
:rtype: twilio.rest.sync.v1.service.sync_map.SyncMapInstance
"""
return SyncMapInstance(self._version, payload, service_sid=self._solution['service_sid'], ) | Build an instance of SyncMapInstance
:param dict payload: Payload response from the API
:returns: twilio.rest.sync.v1.service.sync_map.SyncMapInstance
:rtype: twilio.rest.sync.v1.service.sync_map.SyncMapInstance | Below is the the instruction that describes the task:
### Input:
Build an instance of SyncMapInstance
:param dict payload: Payload response from the API
:returns: twilio.rest.sync.v1.service.sync_map.SyncMapInstance
:rtype: twilio.rest.sync.v1.service.sync_map.SyncMapInstance
### Response:
def get_instance(self, payload):
    """
    Build an instance of SyncMapInstance

    :param dict payload: Payload response from the API

    :returns: twilio.rest.sync.v1.service.sync_map.SyncMapInstance
    :rtype: twilio.rest.sync.v1.service.sync_map.SyncMapInstance
    """
    # The service SID comes from the list context's solution dict.
    service_sid = self._solution['service_sid']
    return SyncMapInstance(self._version, payload, service_sid=service_sid)
def add_constant(self, stream, value):
"""Store a constant value for use in this sensor graph.
Constant assignments occur after all sensor graph nodes have been
allocated since they must be propogated to all appropriate virtual
stream walkers.
Args:
stream (DataStream): The constant stream to assign the value to
value (int): The value to assign.
"""
if stream in self.constant_database:
raise ArgumentError("Attempted to set the same constant twice", stream=stream, old_value=self.constant_database[stream], new_value=value)
self.constant_database[stream] = value | Store a constant value for use in this sensor graph.
Constant assignments occur after all sensor graph nodes have been
allocated since they must be propogated to all appropriate virtual
stream walkers.
Args:
stream (DataStream): The constant stream to assign the value to
value (int): The value to assign. | Below is the the instruction that describes the task:
### Input:
Store a constant value for use in this sensor graph.
Constant assignments occur after all sensor graph nodes have been
allocated since they must be propogated to all appropriate virtual
stream walkers.
Args:
stream (DataStream): The constant stream to assign the value to
value (int): The value to assign.
### Response:
def add_constant(self, stream, value):
    """Store a constant value for use in this sensor graph.

    Constant assignments occur after all sensor graph nodes have been
    allocated since they must be propagated to all appropriate virtual
    stream walkers.

    Args:
        stream (DataStream): The constant stream to assign the value to
        value (int): The value to assign.

    Raises:
        ArgumentError: if a value was already assigned to `stream`.
    """
    if stream not in self.constant_database:
        self.constant_database[stream] = value
        return
    raise ArgumentError(
        "Attempted to set the same constant twice", stream=stream,
        old_value=self.constant_database[stream], new_value=value)
def largestNativeBiClique(self, chain_imbalance=0, max_chain_length=None):
"""Returns a native embedding for the complete bipartite graph :math:`K_{n,m}`
for `n <= m`; where `n` is as large as possible and `m` is as large as
possible subject to `n`. The native embedding of a complete bipartite
graph is a set of horizontally-aligned qubits connected in lines
together with an equal-sized set of vertically-aligned qubits
connected in lines.
INPUTS:
chain_imbalance: how big of a difference to allow between the
chain lengths on the two sides of the bipartition. If ``None``,
then we allow an arbitrary imbalance. (default: ``0``)
max_chain_length: longest chain length to consider or None if chain
lengths are allowed to be unbounded. (default: ``None``)
OUTPUT:
embedding (tuple): a tuple of two lists containing lists of qubits.
If ``embedding = (A_side, B_side)``, the lists found in ``A_side`` and
``B_side`` are chains of qubits. These lists of qubits are arranged so that
>>> [zip(chain,chain[1:]) for chain in A_side]
and
>>> [zip(chain,chain[1:]) for chain in B_side]
are lists of valid couplers.
"""
def f(x):
return x.largestNativeBiClique(chain_imbalance=chain_imbalance,
max_chain_length=max_chain_length)
objective = self._objective_bestscore
emb = self._map_to_processors(f, objective)
return self._translate_partitioned(emb) | Returns a native embedding for the complete bipartite graph :math:`K_{n,m}`
for `n <= m`; where `n` is as large as possible and `m` is as large as
possible subject to `n`. The native embedding of a complete bipartite
graph is a set of horizontally-aligned qubits connected in lines
together with an equal-sized set of vertically-aligned qubits
connected in lines.
INPUTS:
chain_imbalance: how big of a difference to allow between the
chain lengths on the two sides of the bipartition. If ``None``,
then we allow an arbitrary imbalance. (default: ``0``)
max_chain_length: longest chain length to consider or None if chain
lengths are allowed to be unbounded. (default: ``None``)
OUTPUT:
embedding (tuple): a tuple of two lists containing lists of qubits.
If ``embedding = (A_side, B_side)``, the lists found in ``A_side`` and
``B_side`` are chains of qubits. These lists of qubits are arranged so that
>>> [zip(chain,chain[1:]) for chain in A_side]
and
>>> [zip(chain,chain[1:]) for chain in B_side]
are lists of valid couplers. | Below is the the instruction that describes the task:
### Input:
Returns a native embedding for the complete bipartite graph :math:`K_{n,m}`
for `n <= m`; where `n` is as large as possible and `m` is as large as
possible subject to `n`. The native embedding of a complete bipartite
graph is a set of horizontally-aligned qubits connected in lines
together with an equal-sized set of vertically-aligned qubits
connected in lines.
INPUTS:
chain_imbalance: how big of a difference to allow between the
chain lengths on the two sides of the bipartition. If ``None``,
then we allow an arbitrary imbalance. (default: ``0``)
max_chain_length: longest chain length to consider or None if chain
lengths are allowed to be unbounded. (default: ``None``)
OUTPUT:
embedding (tuple): a tuple of two lists containing lists of qubits.
If ``embedding = (A_side, B_side)``, the lists found in ``A_side`` and
``B_side`` are chains of qubits. These lists of qubits are arranged so that
>>> [zip(chain,chain[1:]) for chain in A_side]
and
>>> [zip(chain,chain[1:]) for chain in B_side]
are lists of valid couplers.
### Response:
def largestNativeBiClique(self, chain_imbalance=0, max_chain_length=None):
    """Returns a native embedding for the complete bipartite graph :math:`K_{n,m}`
    for `n <= m`; where `n` is as large as possible and `m` is as large as
    possible subject to `n`. The native embedding of a complete bipartite
    graph is a set of horizontally-aligned qubits connected in lines
    together with an equal-sized set of vertically-aligned qubits
    connected in lines.

    INPUTS:
        chain_imbalance: how big of a difference to allow between the
            chain lengths on the two sides of the bipartition. If ``None``,
            then we allow an arbitrary imbalance. (default: ``0``)

        max_chain_length: longest chain length to consider or None if chain
            lengths are allowed to be unbounded. (default: ``None``)

    OUTPUT:
        embedding (tuple): a tuple of two lists containing lists of qubits.
        If ``embedding = (A_side, B_side)``, the lists found in ``A_side`` and
        ``B_side`` are chains of qubits. These lists of qubits are arranged so that

            >>> [zip(chain,chain[1:]) for chain in A_side]

        and

            >>> [zip(chain,chain[1:]) for chain in B_side]

        are lists of valid couplers.
    """
    # Per-processor solver: closes over the chain constraints.
    def _per_processor(proc):
        return proc.largestNativeBiClique(
            chain_imbalance=chain_imbalance,
            max_chain_length=max_chain_length)

    embedding = self._map_to_processors(_per_processor,
                                        self._objective_bestscore)
    return self._translate_partitioned(embedding)
def eld_another(U,p_min,p_max,d,brk):
"""eld -- economic load dispatching in electricity generation
Parameters:
- U: set of generators (units)
- p_min[u]: minimum operating power for unit u
- p_max[u]: maximum operating power for unit u
- d: demand
- brk[u][k]: (x,y) coordinates of breakpoint k, k=0,...,K for unit u
Returns a model, ready to be solved.
"""
model = Model("Economic load dispatching")
# set objective based on piecewise linear approximation
p,F,z = {},{},{}
for u in U:
abrk = [X for (X,Y) in brk[u]]
bbrk = [Y for (X,Y) in brk[u]]
p[u],F[u],z[u] = convex_comb_sos(model,abrk,bbrk)
p[u].lb = p_min[u]
p[u].ub = p_max[u]
# demand satisfaction
model.addCons(quicksum(p[u] for u in U) == d, "demand")
# objective
model.setObjective(quicksum(F[u] for u in U), "minimize")
model.data = p
return model | eld -- economic load dispatching in electricity generation
Parameters:
- U: set of generators (units)
- p_min[u]: minimum operating power for unit u
- p_max[u]: maximum operating power for unit u
- d: demand
- brk[u][k]: (x,y) coordinates of breakpoint k, k=0,...,K for unit u
Returns a model, ready to be solved. | Below is the the instruction that describes the task:
### Input:
eld -- economic load dispatching in electricity generation
Parameters:
- U: set of generators (units)
- p_min[u]: minimum operating power for unit u
- p_max[u]: maximum operating power for unit u
- d: demand
- brk[u][k]: (x,y) coordinates of breakpoint k, k=0,...,K for unit u
Returns a model, ready to be solved.
### Response:
def eld_another(U,p_min,p_max,d,brk):
    """eld -- economic load dispatching in electricity generation

    Parameters:
        - U: set of generators (units)
        - p_min[u]: minimum operating power for unit u
        - p_max[u]: maximum operating power for unit u
        - d: demand
        - brk[u][k]: (x,y) coordinates of breakpoint k, k=0,...,K for unit u

    Returns a model, ready to be solved.
    """
    model = Model("Economic load dispatching")
    # set objective based on piecewise linear approximation
    p,F,z = {},{},{}
    for u in U:
        # Split breakpoints of unit u's cost curve into abscissae/ordinates.
        abrk = [X for (X,Y) in brk[u]]
        bbrk = [Y for (X,Y) in brk[u]]
        # p[u]: power produced, F[u]: piecewise-linear cost,
        # z[u]: SOS2 convex-combination variables.
        p[u],F[u],z[u] = convex_comb_sos(model,abrk,bbrk)
        p[u].lb = p_min[u]
        p[u].ub = p_max[u]
    # demand satisfaction
    model.addCons(quicksum(p[u] for u in U) == d, "demand")
    # objective
    model.setObjective(quicksum(F[u] for u in U), "minimize")
    # Expose the power variables so callers can read the solution back.
    model.data = p
    return model
def purge (self,
strategy = "klogn",
keep = None,
deleteNonSnapshots = False,
**kwargs):
"""Purge snapshot directory of snapshots according to some strategy,
preserving however a given "keep" list or set of snapshot numbers.
Available strategies are:
"lastk": Keep last k snapshots (Default: k=10)
"klogn": Keep every snapshot in the last k, 2k snapshots in
the last k**2, 3k snapshots in the last k**3, ...
(Default: k=4. k must be > 1).
Returns `self`."""
assert(isinstance(keep, (list, set)) or keep is None)
keep = set(keep or [])
if self.haveSnapshots:
if strategy == "lastk":
keep.update(self.strategyLastK(self.latestSnapshotNum, **kwargs))
elif strategy == "klogn":
keep.update(self.strategyKLogN(self.latestSnapshotNum, **kwargs))
else:
raise ValueError("Unknown purge strategy "+str(None)+"!")
keep.update(["latest", str(self.latestSnapshotNum)])
keep = set(map(str, keep))
snaps, nonSnaps = self.listSnapshotDir(self.snapDir)
dirEntriesToDelete = set()
dirEntriesToDelete.update(snaps)
dirEntriesToDelete.update(nonSnaps if deleteNonSnapshots else set())
dirEntriesToDelete.difference_update(keep)
for dirEntry in dirEntriesToDelete:
self.rmR(os.path.join(self.snapDir, dirEntry))
return self | Purge snapshot directory of snapshots according to some strategy,
preserving however a given "keep" list or set of snapshot numbers.
Available strategies are:
"lastk": Keep last k snapshots (Default: k=10)
"klogn": Keep every snapshot in the last k, 2k snapshots in
the last k**2, 3k snapshots in the last k**3, ...
(Default: k=4. k must be > 1).
Returns `self`. | Below is the the instruction that describes the task:
### Input:
Purge snapshot directory of snapshots according to some strategy,
preserving however a given "keep" list or set of snapshot numbers.
Available strategies are:
"lastk": Keep last k snapshots (Default: k=10)
"klogn": Keep every snapshot in the last k, 2k snapshots in
the last k**2, 3k snapshots in the last k**3, ...
(Default: k=4. k must be > 1).
Returns `self`.
### Response:
def purge (self,
           strategy = "klogn",
           keep = None,
           deleteNonSnapshots = False,
           **kwargs):
    """Purge snapshot directory of snapshots according to some strategy,
    preserving however a given "keep" list or set of snapshot numbers.

    Available strategies are:
       "lastk": Keep last k snapshots (Default: k=10)
       "klogn": Keep every snapshot in the last k, 2k snapshots in
                the last k**2, 3k snapshots in the last k**3, ...
                (Default: k=4. k must be > 1).

    Returns `self`."""
    assert(isinstance(keep, (list, set)) or keep is None)
    keep = set(keep or [])
    if self.haveSnapshots:
        if strategy == "lastk":
            keep.update(self.strategyLastK(self.latestSnapshotNum, **kwargs))
        elif strategy == "klogn":
            keep.update(self.strategyKLogN(self.latestSnapshotNum, **kwargs))
        else:
            # BUG FIX: previously formatted str(None) into the message, so
            # the offending strategy name was never reported.
            raise ValueError("Unknown purge strategy "+str(strategy)+"!")
        # Never delete the latest snapshot or its "latest" alias.
        keep.update(["latest", str(self.latestSnapshotNum)])
    # Directory entries are strings; normalize kept numbers accordingly.
    keep = set(map(str, keep))
    snaps, nonSnaps = self.listSnapshotDir(self.snapDir)
    dirEntriesToDelete = set()
    dirEntriesToDelete.update(snaps)
    dirEntriesToDelete.update(nonSnaps if deleteNonSnapshots else set())
    dirEntriesToDelete.difference_update(keep)
    for dirEntry in dirEntriesToDelete:
        self.rmR(os.path.join(self.snapDir, dirEntry))
    return self
def _get_crop_target(target_px:Union[int,TensorImageSize], mult:int=None)->Tuple[int,int]:
"Calc crop shape of `target_px` to nearest multiple of `mult`."
target_r,target_c = tis2hw(target_px)
return _round_multiple(target_r,mult),_round_multiple(target_c,mult) | Calc crop shape of `target_px` to nearest multiple of `mult`. | Below is the the instruction that describes the task:
### Input:
Calc crop shape of `target_px` to nearest multiple of `mult`.
### Response:
def _get_crop_target(target_px:Union[int,TensorImageSize], mult:int=None)->Tuple[int,int]:
"Calc crop shape of `target_px` to nearest multiple of `mult`."
target_r,target_c = tis2hw(target_px)
return _round_multiple(target_r,mult),_round_multiple(target_c,mult) |
def annotate_svs(adapter, vcf_obj):
"""Annotate all SV variants in a VCF
Args:
adapter(loqusdb.plugin.adapter)
vcf_obj(cyvcf2.VCF)
Yields:
variant(cyvcf2.Variant)
"""
for nr_variants, variant in enumerate(vcf_obj, 1):
variant_info = get_coords(variant)
match = adapter.get_structural_variant(variant_info)
if match:
annotate_variant(variant, match)
yield variant | Annotate all SV variants in a VCF
Args:
adapter(loqusdb.plugin.adapter)
vcf_obj(cyvcf2.VCF)
Yields:
variant(cyvcf2.Variant) | Below is the the instruction that describes the task:
### Input:
Annotate all SV variants in a VCF
Args:
adapter(loqusdb.plugin.adapter)
vcf_obj(cyvcf2.VCF)
Yields:
variant(cyvcf2.Variant)
### Response:
def annotate_svs(adapter, vcf_obj):
"""Annotate all SV variants in a VCF
Args:
adapter(loqusdb.plugin.adapter)
vcf_obj(cyvcf2.VCF)
Yields:
variant(cyvcf2.Variant)
"""
for nr_variants, variant in enumerate(vcf_obj, 1):
variant_info = get_coords(variant)
match = adapter.get_structural_variant(variant_info)
if match:
annotate_variant(variant, match)
yield variant |
def addex(extype, exmsg, condition=None, edata=None):
r"""
Add an exception in the global exception handler.
:param extype: Exception type; *must* be derived from the `Exception
<https://docs.python.org/2/library/exceptions.html#
exceptions.Exception>`_ class
:type extype: Exception type object, i.e. RuntimeError, TypeError,
etc.
:param exmsg: Exception message; it can contain fields to be replaced
when the exception is raised via
:py:meth:`pexdoc.ExHandle.raise_exception_if`.
A field starts with the characters :code:`'\*['` and
ends with the characters :code:`']\*'`, the field name
follows the same rules as variable names and is between
these two sets of characters. For example,
:code:`'\*[fname]\*'` defines the fname field
:type exmsg: string
:param condition: Flag that indicates whether the exception is
raised *(True)* or not *(False)*. If None the
flag is not used an no exception is raised
:type condition: boolean or None
:param edata: Replacement values for fields in the exception message
(see :py:meth:`pexdoc.ExHandle.add_exception` for how
to define fields). Each dictionary entry can only have
these two keys:
* **field** *(string)* -- Field name
* **value** *(any)* -- Field value, to be converted into
a string with the `format
<https://docs.python.org/2/library/stdtypes.html#
str.format>`_ string method
If None no field replacement is done
:rtype: (if condition is not given or None) function
:raises:
* RuntimeError (Argument \`condition\` is not valid)
* RuntimeError (Argument \`edata\` is not valid)
* RuntimeError (Argument \`exmsg\` is not valid)
* RuntimeError (Argument \`extype\` is not valid)
"""
return _ExObj(extype, exmsg, condition, edata).craise | r"""
Add an exception in the global exception handler.
:param extype: Exception type; *must* be derived from the `Exception
<https://docs.python.org/2/library/exceptions.html#
exceptions.Exception>`_ class
:type extype: Exception type object, i.e. RuntimeError, TypeError,
etc.
:param exmsg: Exception message; it can contain fields to be replaced
when the exception is raised via
:py:meth:`pexdoc.ExHandle.raise_exception_if`.
A field starts with the characters :code:`'\*['` and
ends with the characters :code:`']\*'`, the field name
follows the same rules as variable names and is between
these two sets of characters. For example,
:code:`'\*[fname]\*'` defines the fname field
:type exmsg: string
:param condition: Flag that indicates whether the exception is
raised *(True)* or not *(False)*. If None the
flag is not used an no exception is raised
:type condition: boolean or None
:param edata: Replacement values for fields in the exception message
(see :py:meth:`pexdoc.ExHandle.add_exception` for how
to define fields). Each dictionary entry can only have
these two keys:
* **field** *(string)* -- Field name
* **value** *(any)* -- Field value, to be converted into
a string with the `format
<https://docs.python.org/2/library/stdtypes.html#
str.format>`_ string method
If None no field replacement is done
:rtype: (if condition is not given or None) function
:raises:
* RuntimeError (Argument \`condition\` is not valid)
* RuntimeError (Argument \`edata\` is not valid)
* RuntimeError (Argument \`exmsg\` is not valid)
* RuntimeError (Argument \`extype\` is not valid) | Below is the the instruction that describes the task:
### Input:
r"""
Add an exception in the global exception handler.
:param extype: Exception type; *must* be derived from the `Exception
<https://docs.python.org/2/library/exceptions.html#
exceptions.Exception>`_ class
:type extype: Exception type object, i.e. RuntimeError, TypeError,
etc.
:param exmsg: Exception message; it can contain fields to be replaced
when the exception is raised via
:py:meth:`pexdoc.ExHandle.raise_exception_if`.
A field starts with the characters :code:`'\*['` and
ends with the characters :code:`']\*'`, the field name
follows the same rules as variable names and is between
these two sets of characters. For example,
:code:`'\*[fname]\*'` defines the fname field
:type exmsg: string
:param condition: Flag that indicates whether the exception is
raised *(True)* or not *(False)*. If None the
flag is not used an no exception is raised
:type condition: boolean or None
:param edata: Replacement values for fields in the exception message
(see :py:meth:`pexdoc.ExHandle.add_exception` for how
to define fields). Each dictionary entry can only have
these two keys:
* **field** *(string)* -- Field name
* **value** *(any)* -- Field value, to be converted into
a string with the `format
<https://docs.python.org/2/library/stdtypes.html#
str.format>`_ string method
If None no field replacement is done
:rtype: (if condition is not given or None) function
:raises:
* RuntimeError (Argument \`condition\` is not valid)
* RuntimeError (Argument \`edata\` is not valid)
* RuntimeError (Argument \`exmsg\` is not valid)
* RuntimeError (Argument \`extype\` is not valid)
### Response:
def addex(extype, exmsg, condition=None, edata=None):
r"""
Add an exception in the global exception handler.
:param extype: Exception type; *must* be derived from the `Exception
<https://docs.python.org/2/library/exceptions.html#
exceptions.Exception>`_ class
:type extype: Exception type object, i.e. RuntimeError, TypeError,
etc.
:param exmsg: Exception message; it can contain fields to be replaced
when the exception is raised via
:py:meth:`pexdoc.ExHandle.raise_exception_if`.
A field starts with the characters :code:`'\*['` and
ends with the characters :code:`']\*'`, the field name
follows the same rules as variable names and is between
these two sets of characters. For example,
:code:`'\*[fname]\*'` defines the fname field
:type exmsg: string
:param condition: Flag that indicates whether the exception is
raised *(True)* or not *(False)*. If None the
flag is not used an no exception is raised
:type condition: boolean or None
:param edata: Replacement values for fields in the exception message
(see :py:meth:`pexdoc.ExHandle.add_exception` for how
to define fields). Each dictionary entry can only have
these two keys:
* **field** *(string)* -- Field name
* **value** *(any)* -- Field value, to be converted into
a string with the `format
<https://docs.python.org/2/library/stdtypes.html#
str.format>`_ string method
If None no field replacement is done
:rtype: (if condition is not given or None) function
:raises:
* RuntimeError (Argument \`condition\` is not valid)
* RuntimeError (Argument \`edata\` is not valid)
* RuntimeError (Argument \`exmsg\` is not valid)
* RuntimeError (Argument \`extype\` is not valid)
"""
return _ExObj(extype, exmsg, condition, edata).craise |
def check_response(self, response):
"""
Checks that a JSON response from the WebDriver does not have an error.
:Args:
- response - The JSON response from the WebDriver server as a dictionary
object.
:Raises: If the response contains an error message.
"""
status = response.get('status', None)
if status is None or status == ErrorCode.SUCCESS:
return
value = None
message = response.get("message", "")
screen = response.get("screen", "")
stacktrace = None
if isinstance(status, int):
value_json = response.get('value', None)
if value_json and isinstance(value_json, basestring):
import json
try:
value = json.loads(value_json)
if len(value.keys()) == 1:
value = value['value']
status = value.get('error', None)
if status is None:
status = value["status"]
message = value["value"]
if not isinstance(message, basestring):
value = message
message = message.get('message')
else:
message = value.get('message', None)
except ValueError:
pass
if status in ErrorCode.NO_SUCH_ELEMENT:
exception_class = NoSuchElementException
elif status in ErrorCode.NO_SUCH_FRAME:
exception_class = NoSuchFrameException
elif status in ErrorCode.NO_SUCH_WINDOW:
exception_class = NoSuchWindowException
elif status in ErrorCode.STALE_ELEMENT_REFERENCE:
exception_class = StaleElementReferenceException
elif status in ErrorCode.ELEMENT_NOT_VISIBLE:
exception_class = ElementNotVisibleException
elif status in ErrorCode.INVALID_ELEMENT_STATE:
exception_class = InvalidElementStateException
elif status in ErrorCode.INVALID_SELECTOR \
or status in ErrorCode.INVALID_XPATH_SELECTOR \
or status in ErrorCode.INVALID_XPATH_SELECTOR_RETURN_TYPER:
exception_class = InvalidSelectorException
elif status in ErrorCode.ELEMENT_IS_NOT_SELECTABLE:
exception_class = ElementNotSelectableException
elif status in ErrorCode.ELEMENT_NOT_INTERACTABLE:
exception_class = ElementNotInteractableException
elif status in ErrorCode.INVALID_COOKIE_DOMAIN:
exception_class = InvalidCookieDomainException
elif status in ErrorCode.UNABLE_TO_SET_COOKIE:
exception_class = UnableToSetCookieException
elif status in ErrorCode.TIMEOUT:
exception_class = TimeoutException
elif status in ErrorCode.SCRIPT_TIMEOUT:
exception_class = TimeoutException
elif status in ErrorCode.UNKNOWN_ERROR:
exception_class = WebDriverException
elif status in ErrorCode.UNEXPECTED_ALERT_OPEN:
exception_class = UnexpectedAlertPresentException
elif status in ErrorCode.NO_ALERT_OPEN:
exception_class = NoAlertPresentException
elif status in ErrorCode.IME_NOT_AVAILABLE:
exception_class = ImeNotAvailableException
elif status in ErrorCode.IME_ENGINE_ACTIVATION_FAILED:
exception_class = ImeActivationFailedException
elif status in ErrorCode.MOVE_TARGET_OUT_OF_BOUNDS:
exception_class = MoveTargetOutOfBoundsException
elif status in ErrorCode.JAVASCRIPT_ERROR:
exception_class = JavascriptException
elif status in ErrorCode.SESSION_NOT_CREATED:
exception_class = SessionNotCreatedException
elif status in ErrorCode.INVALID_ARGUMENT:
exception_class = InvalidArgumentException
elif status in ErrorCode.NO_SUCH_COOKIE:
exception_class = NoSuchCookieException
elif status in ErrorCode.UNABLE_TO_CAPTURE_SCREEN:
exception_class = ScreenshotException
elif status in ErrorCode.ELEMENT_CLICK_INTERCEPTED:
exception_class = ElementClickInterceptedException
elif status in ErrorCode.INSECURE_CERTIFICATE:
exception_class = InsecureCertificateException
elif status in ErrorCode.INVALID_COORDINATES:
exception_class = InvalidCoordinatesException
elif status in ErrorCode.INVALID_SESSION_ID:
exception_class = InvalidSessionIdException
elif status in ErrorCode.UNKNOWN_METHOD:
exception_class = UnknownMethodException
else:
exception_class = WebDriverException
if value == '' or value is None:
value = response['value']
if isinstance(value, basestring):
raise exception_class(value)
if message == "" and 'message' in value:
message = value['message']
screen = None
if 'screen' in value:
screen = value['screen']
stacktrace = None
if 'stackTrace' in value and value['stackTrace']:
stacktrace = []
try:
for frame in value['stackTrace']:
line = self._value_or_default(frame, 'lineNumber', '')
file = self._value_or_default(frame, 'fileName', '<anonymous>')
if line:
file = "%s:%s" % (file, line)
meth = self._value_or_default(frame, 'methodName', '<anonymous>')
if 'className' in frame:
meth = "%s.%s" % (frame['className'], meth)
msg = " at %s (%s)"
msg = msg % (meth, file)
stacktrace.append(msg)
except TypeError:
pass
if exception_class == UnexpectedAlertPresentException:
alert_text = None
if 'data' in value:
alert_text = value['data'].get('text')
elif 'alert' in value:
alert_text = value['alert'].get('text')
raise exception_class(message, screen, stacktrace, alert_text)
raise exception_class(message, screen, stacktrace) | Checks that a JSON response from the WebDriver does not have an error.
:Args:
- response - The JSON response from the WebDriver server as a dictionary
object.
:Raises: If the response contains an error message. | Below is the the instruction that describes the task:
### Input:
Checks that a JSON response from the WebDriver does not have an error.
:Args:
- response - The JSON response from the WebDriver server as a dictionary
object.
:Raises: If the response contains an error message.
### Response:
def check_response(self, response):
"""
Checks that a JSON response from the WebDriver does not have an error.
:Args:
- response - The JSON response from the WebDriver server as a dictionary
object.
:Raises: If the response contains an error message.
"""
status = response.get('status', None)
if status is None or status == ErrorCode.SUCCESS:
return
value = None
message = response.get("message", "")
screen = response.get("screen", "")
stacktrace = None
if isinstance(status, int):
value_json = response.get('value', None)
if value_json and isinstance(value_json, basestring):
import json
try:
value = json.loads(value_json)
if len(value.keys()) == 1:
value = value['value']
status = value.get('error', None)
if status is None:
status = value["status"]
message = value["value"]
if not isinstance(message, basestring):
value = message
message = message.get('message')
else:
message = value.get('message', None)
except ValueError:
pass
if status in ErrorCode.NO_SUCH_ELEMENT:
exception_class = NoSuchElementException
elif status in ErrorCode.NO_SUCH_FRAME:
exception_class = NoSuchFrameException
elif status in ErrorCode.NO_SUCH_WINDOW:
exception_class = NoSuchWindowException
elif status in ErrorCode.STALE_ELEMENT_REFERENCE:
exception_class = StaleElementReferenceException
elif status in ErrorCode.ELEMENT_NOT_VISIBLE:
exception_class = ElementNotVisibleException
elif status in ErrorCode.INVALID_ELEMENT_STATE:
exception_class = InvalidElementStateException
elif status in ErrorCode.INVALID_SELECTOR \
or status in ErrorCode.INVALID_XPATH_SELECTOR \
or status in ErrorCode.INVALID_XPATH_SELECTOR_RETURN_TYPER:
exception_class = InvalidSelectorException
elif status in ErrorCode.ELEMENT_IS_NOT_SELECTABLE:
exception_class = ElementNotSelectableException
elif status in ErrorCode.ELEMENT_NOT_INTERACTABLE:
exception_class = ElementNotInteractableException
elif status in ErrorCode.INVALID_COOKIE_DOMAIN:
exception_class = InvalidCookieDomainException
elif status in ErrorCode.UNABLE_TO_SET_COOKIE:
exception_class = UnableToSetCookieException
elif status in ErrorCode.TIMEOUT:
exception_class = TimeoutException
elif status in ErrorCode.SCRIPT_TIMEOUT:
exception_class = TimeoutException
elif status in ErrorCode.UNKNOWN_ERROR:
exception_class = WebDriverException
elif status in ErrorCode.UNEXPECTED_ALERT_OPEN:
exception_class = UnexpectedAlertPresentException
elif status in ErrorCode.NO_ALERT_OPEN:
exception_class = NoAlertPresentException
elif status in ErrorCode.IME_NOT_AVAILABLE:
exception_class = ImeNotAvailableException
elif status in ErrorCode.IME_ENGINE_ACTIVATION_FAILED:
exception_class = ImeActivationFailedException
elif status in ErrorCode.MOVE_TARGET_OUT_OF_BOUNDS:
exception_class = MoveTargetOutOfBoundsException
elif status in ErrorCode.JAVASCRIPT_ERROR:
exception_class = JavascriptException
elif status in ErrorCode.SESSION_NOT_CREATED:
exception_class = SessionNotCreatedException
elif status in ErrorCode.INVALID_ARGUMENT:
exception_class = InvalidArgumentException
elif status in ErrorCode.NO_SUCH_COOKIE:
exception_class = NoSuchCookieException
elif status in ErrorCode.UNABLE_TO_CAPTURE_SCREEN:
exception_class = ScreenshotException
elif status in ErrorCode.ELEMENT_CLICK_INTERCEPTED:
exception_class = ElementClickInterceptedException
elif status in ErrorCode.INSECURE_CERTIFICATE:
exception_class = InsecureCertificateException
elif status in ErrorCode.INVALID_COORDINATES:
exception_class = InvalidCoordinatesException
elif status in ErrorCode.INVALID_SESSION_ID:
exception_class = InvalidSessionIdException
elif status in ErrorCode.UNKNOWN_METHOD:
exception_class = UnknownMethodException
else:
exception_class = WebDriverException
if value == '' or value is None:
value = response['value']
if isinstance(value, basestring):
raise exception_class(value)
if message == "" and 'message' in value:
message = value['message']
screen = None
if 'screen' in value:
screen = value['screen']
stacktrace = None
if 'stackTrace' in value and value['stackTrace']:
stacktrace = []
try:
for frame in value['stackTrace']:
line = self._value_or_default(frame, 'lineNumber', '')
file = self._value_or_default(frame, 'fileName', '<anonymous>')
if line:
file = "%s:%s" % (file, line)
meth = self._value_or_default(frame, 'methodName', '<anonymous>')
if 'className' in frame:
meth = "%s.%s" % (frame['className'], meth)
msg = " at %s (%s)"
msg = msg % (meth, file)
stacktrace.append(msg)
except TypeError:
pass
if exception_class == UnexpectedAlertPresentException:
alert_text = None
if 'data' in value:
alert_text = value['data'].get('text')
elif 'alert' in value:
alert_text = value['alert'].get('text')
raise exception_class(message, screen, stacktrace, alert_text)
raise exception_class(message, screen, stacktrace) |
def update(self, data):
"""
C_DigestUpdate
:param data: data to add to the digest
:type data: bytes or string
"""
data1 = ckbytelist(data)
rv = self._lib.C_DigestUpdate(self._session, data1)
if rv != CKR_OK:
raise PyKCS11Error(rv)
return self | C_DigestUpdate
:param data: data to add to the digest
:type data: bytes or string | Below is the the instruction that describes the task:
### Input:
C_DigestUpdate
:param data: data to add to the digest
:type data: bytes or string
### Response:
def update(self, data):
"""
C_DigestUpdate
:param data: data to add to the digest
:type data: bytes or string
"""
data1 = ckbytelist(data)
rv = self._lib.C_DigestUpdate(self._session, data1)
if rv != CKR_OK:
raise PyKCS11Error(rv)
return self |
def set_fig_size(self, width, height=None):
"""Set the figure size in inches.
Sets the figure size with a call to fig.set_size_inches.
Default in code is 8 inches for each.
Args:
width (float): Dimensions for figure width in inches.
height (float, optional): Dimensions for figure height in inches. Default is None.
"""
self.figure.figure_width = width
self.figure.figure_height = height
return | Set the figure size in inches.
Sets the figure size with a call to fig.set_size_inches.
Default in code is 8 inches for each.
Args:
width (float): Dimensions for figure width in inches.
height (float, optional): Dimensions for figure height in inches. Default is None. | Below is the the instruction that describes the task:
### Input:
Set the figure size in inches.
Sets the figure size with a call to fig.set_size_inches.
Default in code is 8 inches for each.
Args:
width (float): Dimensions for figure width in inches.
height (float, optional): Dimensions for figure height in inches. Default is None.
### Response:
def set_fig_size(self, width, height=None):
"""Set the figure size in inches.
Sets the figure size with a call to fig.set_size_inches.
Default in code is 8 inches for each.
Args:
width (float): Dimensions for figure width in inches.
height (float, optional): Dimensions for figure height in inches. Default is None.
"""
self.figure.figure_width = width
self.figure.figure_height = height
return |
def strip_punctuation(text, exclude='', include=''):
"""Strip leading and trailing punctuation from an input string."""
chars_to_strip = ''.join(
set(list(punctuation)).union(set(list(include))) - set(list(exclude))
)
return text.strip(chars_to_strip) | Strip leading and trailing punctuation from an input string. | Below is the the instruction that describes the task:
### Input:
Strip leading and trailing punctuation from an input string.
### Response:
def strip_punctuation(text, exclude='', include=''):
"""Strip leading and trailing punctuation from an input string."""
chars_to_strip = ''.join(
set(list(punctuation)).union(set(list(include))) - set(list(exclude))
)
return text.strip(chars_to_strip) |
def _handle_msg(self, msg):
"""When a BGP message is received, send it to peer.
Open messages are validated here. Peer handler is called to handle each
message except for *Open* and *Notification* message. On receiving
*Notification* message we close connection with peer.
"""
LOG.debug('Received msg from %s << %s', self._remotename, msg)
# If we receive open message we try to bind to protocol
if msg.type == BGP_MSG_OPEN:
if self.state == BGP_FSM_OPEN_SENT:
# Validate open message.
self._validate_open_msg(msg)
self.recv_open_msg = msg
self.state = BGP_FSM_OPEN_CONFIRM
self._peer.state.bgp_state = self.state
# Try to bind this protocol to peer.
self._is_bound = self._peer.bind_protocol(self)
# If this protocol failed to bind to peer.
if not self._is_bound:
# Failure to bind to peer indicates connection collision
# resolution choose different instance of protocol and this
# instance has to close. Before closing it sends
# appropriate notification msg. to peer.
raise bgp.CollisionResolution()
# If peer sends Hold Time as zero, then according to RFC we do
# not set Hold Time and Keep Alive timer.
if msg.hold_time == 0:
LOG.info('The Hold Time sent by the peer is zero, hence '
'not setting any Hold Time and Keep Alive'
' timers.')
else:
# Start Keep Alive timer considering Hold Time preference
# of the peer.
self._start_timers(msg.hold_time)
self._send_keepalive()
# Peer does not see open message.
return
else:
# If we receive a Open message out of order
LOG.error('Open message received when current state is not '
'OpenSent')
# Received out-of-order open message
# We raise Finite state machine error
raise bgp.FiniteStateMachineError()
elif msg.type == BGP_MSG_NOTIFICATION:
if self._peer:
self._signal_bus.bgp_notification_received(self._peer, msg)
# If we receive notification message
LOG.error('Received notification message, hence closing '
'connection %s', msg)
self._socket.close()
return
# If we receive keepalive or update message, we reset expire timer.
if (msg.type == BGP_MSG_KEEPALIVE or
msg.type == BGP_MSG_UPDATE):
if self._expiry:
self._expiry.reset()
# Call peer message handler for appropriate messages.
if (msg.type in
(BGP_MSG_UPDATE, BGP_MSG_KEEPALIVE, BGP_MSG_ROUTE_REFRESH)):
self._peer.handle_msg(msg)
# We give chance to other threads to run.
self.pause(0) | When a BGP message is received, send it to peer.
Open messages are validated here. Peer handler is called to handle each
message except for *Open* and *Notification* message. On receiving
*Notification* message we close connection with peer. | Below is the the instruction that describes the task:
### Input:
When a BGP message is received, send it to peer.
Open messages are validated here. Peer handler is called to handle each
message except for *Open* and *Notification* message. On receiving
*Notification* message we close connection with peer.
### Response:
def _handle_msg(self, msg):
"""When a BGP message is received, send it to peer.
Open messages are validated here. Peer handler is called to handle each
message except for *Open* and *Notification* message. On receiving
*Notification* message we close connection with peer.
"""
LOG.debug('Received msg from %s << %s', self._remotename, msg)
# If we receive open message we try to bind to protocol
if msg.type == BGP_MSG_OPEN:
if self.state == BGP_FSM_OPEN_SENT:
# Validate open message.
self._validate_open_msg(msg)
self.recv_open_msg = msg
self.state = BGP_FSM_OPEN_CONFIRM
self._peer.state.bgp_state = self.state
# Try to bind this protocol to peer.
self._is_bound = self._peer.bind_protocol(self)
# If this protocol failed to bind to peer.
if not self._is_bound:
# Failure to bind to peer indicates connection collision
# resolution choose different instance of protocol and this
# instance has to close. Before closing it sends
# appropriate notification msg. to peer.
raise bgp.CollisionResolution()
# If peer sends Hold Time as zero, then according to RFC we do
# not set Hold Time and Keep Alive timer.
if msg.hold_time == 0:
LOG.info('The Hold Time sent by the peer is zero, hence '
'not setting any Hold Time and Keep Alive'
' timers.')
else:
# Start Keep Alive timer considering Hold Time preference
# of the peer.
self._start_timers(msg.hold_time)
self._send_keepalive()
# Peer does not see open message.
return
else:
# If we receive a Open message out of order
LOG.error('Open message received when current state is not '
'OpenSent')
# Received out-of-order open message
# We raise Finite state machine error
raise bgp.FiniteStateMachineError()
elif msg.type == BGP_MSG_NOTIFICATION:
if self._peer:
self._signal_bus.bgp_notification_received(self._peer, msg)
# If we receive notification message
LOG.error('Received notification message, hence closing '
'connection %s', msg)
self._socket.close()
return
# If we receive keepalive or update message, we reset expire timer.
if (msg.type == BGP_MSG_KEEPALIVE or
msg.type == BGP_MSG_UPDATE):
if self._expiry:
self._expiry.reset()
# Call peer message handler for appropriate messages.
if (msg.type in
(BGP_MSG_UPDATE, BGP_MSG_KEEPALIVE, BGP_MSG_ROUTE_REFRESH)):
self._peer.handle_msg(msg)
# We give chance to other threads to run.
self.pause(0) |
def as_tryte_string(self):
# type: () -> TransactionTrytes
"""
Returns a TryteString representation of the transaction.
"""
return TransactionTrytes(
self.signature_message_fragment
+ self.address.address
+ self.value_as_trytes
+ self.legacy_tag
+ self.timestamp_as_trytes
+ self.current_index_as_trytes
+ self.last_index_as_trytes
+ self.bundle_hash
+ self.trunk_transaction_hash
+ self.branch_transaction_hash
+ self.tag
+ self.attachment_timestamp_as_trytes
+ self.attachment_timestamp_lower_bound_as_trytes
+ self.attachment_timestamp_upper_bound_as_trytes
+ self.nonce
) | Returns a TryteString representation of the transaction. | Below is the the instruction that describes the task:
### Input:
Returns a TryteString representation of the transaction.
### Response:
def as_tryte_string(self):
# type: () -> TransactionTrytes
"""
Returns a TryteString representation of the transaction.
"""
return TransactionTrytes(
self.signature_message_fragment
+ self.address.address
+ self.value_as_trytes
+ self.legacy_tag
+ self.timestamp_as_trytes
+ self.current_index_as_trytes
+ self.last_index_as_trytes
+ self.bundle_hash
+ self.trunk_transaction_hash
+ self.branch_transaction_hash
+ self.tag
+ self.attachment_timestamp_as_trytes
+ self.attachment_timestamp_lower_bound_as_trytes
+ self.attachment_timestamp_upper_bound_as_trytes
+ self.nonce
) |
def getdoc(obj):
"""Stable wrapper around inspect.getdoc.
This can't crash because of attribute problems.
It also attempts to call a getdoc() method on the given object. This
allows objects which provide their docstrings via non-standard mechanisms
(like Pyro proxies) to still be inspected by ipython's ? system."""
# Allow objects to offer customized documentation via a getdoc method:
try:
ds = obj.getdoc()
except Exception:
pass
else:
# if we get extra info, we add it to the normal docstring.
if isinstance(ds, basestring):
return inspect.cleandoc(ds)
try:
return inspect.getdoc(obj)
except Exception:
# Harden against an inspect failure, which can occur with
# SWIG-wrapped extensions.
return None | Stable wrapper around inspect.getdoc.
This can't crash because of attribute problems.
It also attempts to call a getdoc() method on the given object. This
allows objects which provide their docstrings via non-standard mechanisms
(like Pyro proxies) to still be inspected by ipython's ? system. | Below is the the instruction that describes the task:
### Input:
Stable wrapper around inspect.getdoc.
This can't crash because of attribute problems.
It also attempts to call a getdoc() method on the given object. This
allows objects which provide their docstrings via non-standard mechanisms
(like Pyro proxies) to still be inspected by ipython's ? system.
### Response:
def getdoc(obj):
"""Stable wrapper around inspect.getdoc.
This can't crash because of attribute problems.
It also attempts to call a getdoc() method on the given object. This
allows objects which provide their docstrings via non-standard mechanisms
(like Pyro proxies) to still be inspected by ipython's ? system."""
# Allow objects to offer customized documentation via a getdoc method:
try:
ds = obj.getdoc()
except Exception:
pass
else:
# if we get extra info, we add it to the normal docstring.
if isinstance(ds, basestring):
return inspect.cleandoc(ds)
try:
return inspect.getdoc(obj)
except Exception:
# Harden against an inspect failure, which can occur with
# SWIG-wrapped extensions.
return None |
def num_workers(self):
"""Returns the number of worker nodes.
Returns
-------
size :int
The number of worker nodes.
"""
size = ctypes.c_int()
check_call(_LIB.MXKVStoreGetGroupSize(self.handle, ctypes.byref(size)))
return size.value | Returns the number of worker nodes.
Returns
-------
size :int
The number of worker nodes. | Below is the the instruction that describes the task:
### Input:
Returns the number of worker nodes.
Returns
-------
size :int
The number of worker nodes.
### Response:
def num_workers(self):
"""Returns the number of worker nodes.
Returns
-------
size :int
The number of worker nodes.
"""
size = ctypes.c_int()
check_call(_LIB.MXKVStoreGetGroupSize(self.handle, ctypes.byref(size)))
return size.value |
def iter_decode(input, fallback_encoding, errors='replace'):
"""
"Pull"-based decoder.
:param input:
An iterable of byte strings.
The input is first consumed just enough to determine the encoding
based on the precense of a BOM,
then consumed on demand when the return value is.
:param fallback_encoding:
An :class:`Encoding` object or a label string.
The encoding to use if :obj:`input` does note have a BOM.
:param errors: Type of error handling. See :func:`codecs.register`.
:raises: :exc:`~exceptions.LookupError` for an unknown encoding label.
:returns:
An ``(output, encoding)`` tuple.
:obj:`output` is an iterable of Unicode strings,
:obj:`encoding` is the :obj:`Encoding` that is being used.
"""
decoder = IncrementalDecoder(fallback_encoding, errors)
generator = _iter_decode_generator(input, decoder)
encoding = next(generator)
return generator, encoding | "Pull"-based decoder.
:param input:
An iterable of byte strings.
The input is first consumed just enough to determine the encoding
based on the precense of a BOM,
then consumed on demand when the return value is.
:param fallback_encoding:
An :class:`Encoding` object or a label string.
The encoding to use if :obj:`input` does note have a BOM.
:param errors: Type of error handling. See :func:`codecs.register`.
:raises: :exc:`~exceptions.LookupError` for an unknown encoding label.
:returns:
An ``(output, encoding)`` tuple.
:obj:`output` is an iterable of Unicode strings,
:obj:`encoding` is the :obj:`Encoding` that is being used. | Below is the the instruction that describes the task:
### Input:
"Pull"-based decoder.
:param input:
An iterable of byte strings.
The input is first consumed just enough to determine the encoding
based on the precense of a BOM,
then consumed on demand when the return value is.
:param fallback_encoding:
An :class:`Encoding` object or a label string.
The encoding to use if :obj:`input` does note have a BOM.
:param errors: Type of error handling. See :func:`codecs.register`.
:raises: :exc:`~exceptions.LookupError` for an unknown encoding label.
:returns:
An ``(output, encoding)`` tuple.
:obj:`output` is an iterable of Unicode strings,
:obj:`encoding` is the :obj:`Encoding` that is being used.
### Response:
def iter_decode(input, fallback_encoding, errors='replace'):
"""
"Pull"-based decoder.
:param input:
An iterable of byte strings.
The input is first consumed just enough to determine the encoding
based on the precense of a BOM,
then consumed on demand when the return value is.
:param fallback_encoding:
An :class:`Encoding` object or a label string.
The encoding to use if :obj:`input` does note have a BOM.
:param errors: Type of error handling. See :func:`codecs.register`.
:raises: :exc:`~exceptions.LookupError` for an unknown encoding label.
:returns:
An ``(output, encoding)`` tuple.
:obj:`output` is an iterable of Unicode strings,
:obj:`encoding` is the :obj:`Encoding` that is being used.
"""
decoder = IncrementalDecoder(fallback_encoding, errors)
generator = _iter_decode_generator(input, decoder)
encoding = next(generator)
return generator, encoding |
def write(self, filename):
"""
Write the multitrack pianoroll to a MIDI file.
Parameters
----------
filename : str
The name of the MIDI file to which the multitrack pianoroll is
written.
"""
if not filename.endswith(('.mid', '.midi', '.MID', '.MIDI')):
filename = filename + '.mid'
pm = self.to_pretty_midi()
pm.write(filename) | Write the multitrack pianoroll to a MIDI file.
Parameters
----------
filename : str
The name of the MIDI file to which the multitrack pianoroll is
written. | Below is the the instruction that describes the task:
### Input:
Write the multitrack pianoroll to a MIDI file.
Parameters
----------
filename : str
The name of the MIDI file to which the multitrack pianoroll is
written.
### Response:
def write(self, filename):
"""
Write the multitrack pianoroll to a MIDI file.
Parameters
----------
filename : str
The name of the MIDI file to which the multitrack pianoroll is
written.
"""
if not filename.endswith(('.mid', '.midi', '.MID', '.MIDI')):
filename = filename + '.mid'
pm = self.to_pretty_midi()
pm.write(filename) |
def proximal(self):
"""Return the proximal operator.
Raises
------
NotImplementedError
if ``outer_exp`` is not 1 or ``singular_vector_exp`` is not 1, 2 or
infinity
"""
if self.outernorm.exponent != 1:
raise NotImplementedError('`proximal` only implemented for '
'`outer_exp==1`')
if self.pwisenorm.exponent not in [1, 2, np.inf]:
raise NotImplementedError('`proximal` only implemented for '
'`singular_vector_exp` in [1, 2, inf]')
def nddot(a, b):
"""Compute pointwise matrix product in the last indices."""
return np.einsum('...ij,...jk->...ik', a, b)
func = self
# Add epsilon to fix rounding errors, i.e. make sure that when we
# project on the unit ball, we actually end up slightly inside the unit
# ball. Without, we may end up slightly outside.
dtype = getattr(self.domain, 'dtype', float)
eps = np.finfo(dtype).resolution * 10
class NuclearNormProximal(Operator):
"""Proximal operator of `NuclearNorm`."""
def __init__(self, sigma):
self.sigma = float(sigma)
super(NuclearNormProximal, self).__init__(
func.domain, func.domain, linear=False)
def _call(self, x):
"""Return ``self(x)``."""
arr = func._asarray(x)
# Compute SVD
U, s, Vt = np.linalg.svd(arr, full_matrices=False)
# transpose pointwise
V = Vt.swapaxes(-1, -2)
# Take pseudoinverse of s
sinv = s.copy()
sinv[sinv != 0] = 1 / sinv[sinv != 0]
# Take pointwise proximal operator of s w.r.t. the norm
# on the singular vectors
if func.pwisenorm.exponent == 1:
abss = np.abs(s) - (self.sigma - eps)
sprox = np.sign(s) * np.maximum(abss, 0)
elif func.pwisenorm.exponent == 2:
s_reordered = moveaxis(s, -1, 0)
snorm = func.pwisenorm(s_reordered).asarray()
snorm = np.maximum(self.sigma, snorm, out=snorm)
sprox = ((1 - eps) - self.sigma / snorm)[..., None] * s
elif func.pwisenorm.exponent == np.inf:
snorm = np.sum(np.abs(s), axis=-1)
snorm = np.maximum(self.sigma, snorm, out=snorm)
sprox = ((1 - eps) - self.sigma / snorm)[..., None] * s
else:
raise RuntimeError
# Compute s matrix
sproxsinv = (sprox * sinv)[..., :, None]
# Compute the final result
result = nddot(nddot(arr, V), sproxsinv * Vt)
# Cast to vector and return. Note array and vector have
# different shapes.
return func._asvector(result)
def __repr__(self):
"""Return ``repr(self)``."""
return '{!r}.proximal({})'.format(func, self.sigma)
return NuclearNormProximal | Return the proximal operator.
Raises
------
NotImplementedError
if ``outer_exp`` is not 1 or ``singular_vector_exp`` is not 1, 2 or
infinity | Below is the the instruction that describes the task:
### Input:
Return the proximal operator.
Raises
------
NotImplementedError
if ``outer_exp`` is not 1 or ``singular_vector_exp`` is not 1, 2 or
infinity
### Response:
def proximal(self):
"""Return the proximal operator.
Raises
------
NotImplementedError
if ``outer_exp`` is not 1 or ``singular_vector_exp`` is not 1, 2 or
infinity
"""
if self.outernorm.exponent != 1:
raise NotImplementedError('`proximal` only implemented for '
'`outer_exp==1`')
if self.pwisenorm.exponent not in [1, 2, np.inf]:
raise NotImplementedError('`proximal` only implemented for '
'`singular_vector_exp` in [1, 2, inf]')
def nddot(a, b):
"""Compute pointwise matrix product in the last indices."""
return np.einsum('...ij,...jk->...ik', a, b)
func = self
# Add epsilon to fix rounding errors, i.e. make sure that when we
# project on the unit ball, we actually end up slightly inside the unit
# ball. Without, we may end up slightly outside.
dtype = getattr(self.domain, 'dtype', float)
eps = np.finfo(dtype).resolution * 10
class NuclearNormProximal(Operator):
"""Proximal operator of `NuclearNorm`."""
def __init__(self, sigma):
self.sigma = float(sigma)
super(NuclearNormProximal, self).__init__(
func.domain, func.domain, linear=False)
def _call(self, x):
"""Return ``self(x)``."""
arr = func._asarray(x)
# Compute SVD
U, s, Vt = np.linalg.svd(arr, full_matrices=False)
# transpose pointwise
V = Vt.swapaxes(-1, -2)
# Take pseudoinverse of s
sinv = s.copy()
sinv[sinv != 0] = 1 / sinv[sinv != 0]
# Take pointwise proximal operator of s w.r.t. the norm
# on the singular vectors
if func.pwisenorm.exponent == 1:
abss = np.abs(s) - (self.sigma - eps)
sprox = np.sign(s) * np.maximum(abss, 0)
elif func.pwisenorm.exponent == 2:
s_reordered = moveaxis(s, -1, 0)
snorm = func.pwisenorm(s_reordered).asarray()
snorm = np.maximum(self.sigma, snorm, out=snorm)
sprox = ((1 - eps) - self.sigma / snorm)[..., None] * s
elif func.pwisenorm.exponent == np.inf:
snorm = np.sum(np.abs(s), axis=-1)
snorm = np.maximum(self.sigma, snorm, out=snorm)
sprox = ((1 - eps) - self.sigma / snorm)[..., None] * s
else:
raise RuntimeError
# Compute s matrix
sproxsinv = (sprox * sinv)[..., :, None]
# Compute the final result
result = nddot(nddot(arr, V), sproxsinv * Vt)
# Cast to vector and return. Note array and vector have
# different shapes.
return func._asvector(result)
def __repr__(self):
"""Return ``repr(self)``."""
return '{!r}.proximal({})'.format(func, self.sigma)
return NuclearNormProximal |
def _configure_project(config):
"""Setup a Google Cloud Platform Project.
Google Compute Platform organizes all the resources, such as storage
buckets, users, and instances under projects. This is different from
aws ec2 where everything is global.
"""
project_id = config["provider"].get("project_id")
assert config["provider"]["project_id"] is not None, (
"'project_id' must be set in the 'provider' section of the autoscaler"
" config. Notice that the project id must be globally unique.")
project = _get_project(project_id)
if project is None:
# Project not found, try creating it
_create_project(project_id)
project = _get_project(project_id)
assert project is not None, "Failed to create project"
assert project["lifecycleState"] == "ACTIVE", (
"Project status needs to be ACTIVE, got {}".format(
project["lifecycleState"]))
config["provider"]["project_id"] = project["projectId"]
return config | Setup a Google Cloud Platform Project.
Google Compute Platform organizes all the resources, such as storage
buckets, users, and instances under projects. This is different from
aws ec2 where everything is global. | Below is the the instruction that describes the task:
### Input:
Setup a Google Cloud Platform Project.
Google Compute Platform organizes all the resources, such as storage
buckets, users, and instances under projects. This is different from
aws ec2 where everything is global.
### Response:
def _configure_project(config):
"""Setup a Google Cloud Platform Project.
Google Compute Platform organizes all the resources, such as storage
buckets, users, and instances under projects. This is different from
aws ec2 where everything is global.
"""
project_id = config["provider"].get("project_id")
assert config["provider"]["project_id"] is not None, (
"'project_id' must be set in the 'provider' section of the autoscaler"
" config. Notice that the project id must be globally unique.")
project = _get_project(project_id)
if project is None:
# Project not found, try creating it
_create_project(project_id)
project = _get_project(project_id)
assert project is not None, "Failed to create project"
assert project["lifecycleState"] == "ACTIVE", (
"Project status needs to be ACTIVE, got {}".format(
project["lifecycleState"]))
config["provider"]["project_id"] = project["projectId"]
return config |
def find(self, query=None, **kwargs):
"""
You can pass in the appropriate model object from the queries module,
or a dictionary with the keys and values for the query,
or a set of key=value parameters.
"""
url = self.getUrl()
if query is not None:
if isinstance(query, queries.SlickQuery):
url = url + "?" + urlencode(query.to_dict())
elif isinstance(query, dict):
url = url + "?" + urlencode(query)
elif len(kwargs) > 0:
url = url + "?" + urlencode(kwargs)
# hopefully when we discover what problems exist in slick to require this, we can take the loop out
for retry in range(3):
try:
self.logger.debug("Making request to slick at url %s", url)
r = requests.get(url)
self.logger.debug("Request returned status code %d", r.status_code)
if r.status_code is 200:
retval = []
objects = r.json()
for dct in objects:
retval.append(self.model.from_dict(dct))
return retval
else:
self.logger.error("Slick returned an error when trying to access %s: status code %s" % (url, str(r.status_code)))
self.logger.error("Slick response: ", pprint.pformat(r))
except BaseException as error:
self.logger.warn("Received exception while connecting to slick at %s", url, exc_info=sys.exc_info())
raise SlickCommunicationError(
"Tried 3 times to request data from slick at url %s without a successful status code.", url) | You can pass in the appropriate model object from the queries module,
or a dictionary with the keys and values for the query,
or a set of key=value parameters. | Below is the the instruction that describes the task:
### Input:
You can pass in the appropriate model object from the queries module,
or a dictionary with the keys and values for the query,
or a set of key=value parameters.
### Response:
def find(self, query=None, **kwargs):
"""
You can pass in the appropriate model object from the queries module,
or a dictionary with the keys and values for the query,
or a set of key=value parameters.
"""
url = self.getUrl()
if query is not None:
if isinstance(query, queries.SlickQuery):
url = url + "?" + urlencode(query.to_dict())
elif isinstance(query, dict):
url = url + "?" + urlencode(query)
elif len(kwargs) > 0:
url = url + "?" + urlencode(kwargs)
# hopefully when we discover what problems exist in slick to require this, we can take the loop out
for retry in range(3):
try:
self.logger.debug("Making request to slick at url %s", url)
r = requests.get(url)
self.logger.debug("Request returned status code %d", r.status_code)
if r.status_code is 200:
retval = []
objects = r.json()
for dct in objects:
retval.append(self.model.from_dict(dct))
return retval
else:
self.logger.error("Slick returned an error when trying to access %s: status code %s" % (url, str(r.status_code)))
self.logger.error("Slick response: ", pprint.pformat(r))
except BaseException as error:
self.logger.warn("Received exception while connecting to slick at %s", url, exc_info=sys.exc_info())
raise SlickCommunicationError(
"Tried 3 times to request data from slick at url %s without a successful status code.", url) |
def validate_key(key_path):
""" Validate a key
:param key_path: path to a key to use for authentication
:type key_path: str
:return: key object used for authentication
:rtype: paramiko.RSAKey
"""
key_path = os.path.expanduser(key_path)
if not os.path.isfile(key_path):
return False
return paramiko.RSAKey.from_private_key_file(key_path) | Validate a key
:param key_path: path to a key to use for authentication
:type key_path: str
:return: key object used for authentication
:rtype: paramiko.RSAKey | Below is the the instruction that describes the task:
### Input:
Validate a key
:param key_path: path to a key to use for authentication
:type key_path: str
:return: key object used for authentication
:rtype: paramiko.RSAKey
### Response:
def validate_key(key_path):
""" Validate a key
:param key_path: path to a key to use for authentication
:type key_path: str
:return: key object used for authentication
:rtype: paramiko.RSAKey
"""
key_path = os.path.expanduser(key_path)
if not os.path.isfile(key_path):
return False
return paramiko.RSAKey.from_private_key_file(key_path) |
def service_factory(app, host, port,
report_message='service factory port {port}',
provider_cls=HTTPServiceProvider):
"""Create service, start server.
:param app: application to instantiate a service
:param host: interface to bound provider
:param port: port to bound provider
:param report_message: message format to report port
:param provider_cls: server class provide a service
"""
service = Service(app)
server = provider_cls(service, host, port, report_message)
server.serve_forever() | Create service, start server.
:param app: application to instantiate a service
:param host: interface to bound provider
:param port: port to bound provider
:param report_message: message format to report port
:param provider_cls: server class provide a service | Below is the the instruction that describes the task:
### Input:
Create service, start server.
:param app: application to instantiate a service
:param host: interface to bound provider
:param port: port to bound provider
:param report_message: message format to report port
:param provider_cls: server class provide a service
### Response:
def service_factory(app, host, port,
report_message='service factory port {port}',
provider_cls=HTTPServiceProvider):
"""Create service, start server.
:param app: application to instantiate a service
:param host: interface to bound provider
:param port: port to bound provider
:param report_message: message format to report port
:param provider_cls: server class provide a service
"""
service = Service(app)
server = provider_cls(service, host, port, report_message)
server.serve_forever() |
def template_filter(self,
arg: Optional[Callable] = None,
*,
name: Optional[str] = None,
pass_context: bool = False,
inject: Optional[Union[bool, Iterable[str]]] = None,
safe: bool = False,
) -> Callable:
"""
Decorator to mark a function as a Jinja template filter.
:param name: The name of the filter, if different from the function name.
:param pass_context: Whether or not to pass the template context into the filter.
If ``True``, the first argument must be the context.
:param inject: Whether or not this filter needs any dependencies injected.
:param safe: Whether or not to mark the output of this filter as html-safe.
"""
def wrapper(fn):
fn = _inject(fn, inject)
if safe:
fn = _make_safe(fn)
if pass_context:
fn = jinja2.contextfilter(fn)
self._defer(lambda app: app.add_template_filter(fn, name=name))
return fn
if callable(arg):
return wrapper(arg)
return wrapper | Decorator to mark a function as a Jinja template filter.
:param name: The name of the filter, if different from the function name.
:param pass_context: Whether or not to pass the template context into the filter.
If ``True``, the first argument must be the context.
:param inject: Whether or not this filter needs any dependencies injected.
:param safe: Whether or not to mark the output of this filter as html-safe. | Below is the the instruction that describes the task:
### Input:
Decorator to mark a function as a Jinja template filter.
:param name: The name of the filter, if different from the function name.
:param pass_context: Whether or not to pass the template context into the filter.
If ``True``, the first argument must be the context.
:param inject: Whether or not this filter needs any dependencies injected.
:param safe: Whether or not to mark the output of this filter as html-safe.
### Response:
def template_filter(self,
arg: Optional[Callable] = None,
*,
name: Optional[str] = None,
pass_context: bool = False,
inject: Optional[Union[bool, Iterable[str]]] = None,
safe: bool = False,
) -> Callable:
"""
Decorator to mark a function as a Jinja template filter.
:param name: The name of the filter, if different from the function name.
:param pass_context: Whether or not to pass the template context into the filter.
If ``True``, the first argument must be the context.
:param inject: Whether or not this filter needs any dependencies injected.
:param safe: Whether or not to mark the output of this filter as html-safe.
"""
def wrapper(fn):
fn = _inject(fn, inject)
if safe:
fn = _make_safe(fn)
if pass_context:
fn = jinja2.contextfilter(fn)
self._defer(lambda app: app.add_template_filter(fn, name=name))
return fn
if callable(arg):
return wrapper(arg)
return wrapper |
def _to_pb(self):
"""Construct a KeySet protobuf.
:rtype: :class:`~google.cloud.spanner_v1.proto.keys_pb2.KeySet`
:returns: protobuf corresponding to this instance.
"""
if self.all_:
return KeySetPB(all=True)
kwargs = {}
if self.keys:
kwargs["keys"] = _make_list_value_pbs(self.keys)
if self.ranges:
kwargs["ranges"] = [krange._to_pb() for krange in self.ranges]
return KeySetPB(**kwargs) | Construct a KeySet protobuf.
:rtype: :class:`~google.cloud.spanner_v1.proto.keys_pb2.KeySet`
:returns: protobuf corresponding to this instance. | Below is the the instruction that describes the task:
### Input:
Construct a KeySet protobuf.
:rtype: :class:`~google.cloud.spanner_v1.proto.keys_pb2.KeySet`
:returns: protobuf corresponding to this instance.
### Response:
def _to_pb(self):
"""Construct a KeySet protobuf.
:rtype: :class:`~google.cloud.spanner_v1.proto.keys_pb2.KeySet`
:returns: protobuf corresponding to this instance.
"""
if self.all_:
return KeySetPB(all=True)
kwargs = {}
if self.keys:
kwargs["keys"] = _make_list_value_pbs(self.keys)
if self.ranges:
kwargs["ranges"] = [krange._to_pb() for krange in self.ranges]
return KeySetPB(**kwargs) |
def clonerepo(barerepo, userrepo):
""" Clone a bare base repo to a user """
git.clone(barerepo, userrepo)
ag = activegit.ActiveGit(userrepo) | Clone a bare base repo to a user | Below is the the instruction that describes the task:
### Input:
Clone a bare base repo to a user
### Response:
def clonerepo(barerepo, userrepo):
""" Clone a bare base repo to a user """
git.clone(barerepo, userrepo)
ag = activegit.ActiveGit(userrepo) |
def decrypt(private, ciphertext, output):
"""Decrypt ciphertext with private key.
Requires PRIVATE key file and the CIPHERTEXT encrypted with
the corresponding public key.
"""
privatekeydata = json.load(private)
assert 'pub' in privatekeydata
pub = load_public_key(privatekeydata['pub'])
log("Loading private key")
private_key_error = "Invalid private key"
assert 'key_ops' in privatekeydata, private_key_error
assert "decrypt" in privatekeydata['key_ops'], private_key_error
assert 'p' in privatekeydata, private_key_error
assert 'q' in privatekeydata, private_key_error
assert privatekeydata['kty'] == 'DAJ', private_key_error
_p = phe.util.base64_to_int(privatekeydata['p'])
_q = phe.util.base64_to_int(privatekeydata['q'])
private_key = phe.PaillierPrivateKey(pub, _p, _q)
log("Decrypting ciphertext")
enc = load_encrypted_number(ciphertext, pub)
out = private_key.decrypt(enc)
print(out, file=output) | Decrypt ciphertext with private key.
Requires PRIVATE key file and the CIPHERTEXT encrypted with
the corresponding public key. | Below is the the instruction that describes the task:
### Input:
Decrypt ciphertext with private key.
Requires PRIVATE key file and the CIPHERTEXT encrypted with
the corresponding public key.
### Response:
def decrypt(private, ciphertext, output):
"""Decrypt ciphertext with private key.
Requires PRIVATE key file and the CIPHERTEXT encrypted with
the corresponding public key.
"""
privatekeydata = json.load(private)
assert 'pub' in privatekeydata
pub = load_public_key(privatekeydata['pub'])
log("Loading private key")
private_key_error = "Invalid private key"
assert 'key_ops' in privatekeydata, private_key_error
assert "decrypt" in privatekeydata['key_ops'], private_key_error
assert 'p' in privatekeydata, private_key_error
assert 'q' in privatekeydata, private_key_error
assert privatekeydata['kty'] == 'DAJ', private_key_error
_p = phe.util.base64_to_int(privatekeydata['p'])
_q = phe.util.base64_to_int(privatekeydata['q'])
private_key = phe.PaillierPrivateKey(pub, _p, _q)
log("Decrypting ciphertext")
enc = load_encrypted_number(ciphertext, pub)
out = private_key.decrypt(enc)
print(out, file=output) |
def get_role_config_group(resource_root, service_name, name,
cluster_name="default"):
"""
Find a role config group by name.
@param resource_root: The root Resource object.
@param service_name: Service name.
@param name: Role config group name.
@param cluster_name: Cluster name.
@return: An ApiRoleConfigGroup object.
"""
return _get_role_config_group(resource_root, _get_role_config_group_path(
cluster_name, service_name, name)) | Find a role config group by name.
@param resource_root: The root Resource object.
@param service_name: Service name.
@param name: Role config group name.
@param cluster_name: Cluster name.
@return: An ApiRoleConfigGroup object. | Below is the the instruction that describes the task:
### Input:
Find a role config group by name.
@param resource_root: The root Resource object.
@param service_name: Service name.
@param name: Role config group name.
@param cluster_name: Cluster name.
@return: An ApiRoleConfigGroup object.
### Response:
def get_role_config_group(resource_root, service_name, name,
cluster_name="default"):
"""
Find a role config group by name.
@param resource_root: The root Resource object.
@param service_name: Service name.
@param name: Role config group name.
@param cluster_name: Cluster name.
@return: An ApiRoleConfigGroup object.
"""
return _get_role_config_group(resource_root, _get_role_config_group_path(
cluster_name, service_name, name)) |
def partition_chem_env(self, n_sphere=4,
use_lookup=None):
"""This function partitions the molecule into subsets of the
same chemical environment.
A chemical environment is specified by the number of
surrounding atoms of a certain kind around an atom with a
certain atomic number represented by a tuple of a string
and a frozenset of tuples.
The ``n_sphere`` option determines how many branches the
algorithm follows to determine the chemical environment.
Example:
A carbon atom in ethane has bonds with three hydrogen (atomic
number 1) and one carbon atom (atomic number 6).
If ``n_sphere=1`` these are the only atoms we are
interested in and the chemical environment is::
('C', frozenset([('H', 3), ('C', 1)]))
If ``n_sphere=2`` we follow every atom in the chemical
enviromment of ``n_sphere=1`` to their direct neighbours.
In the case of ethane this gives::
('C', frozenset([('H', 6), ('C', 1)]))
In the special case of ethane this is the whole molecule;
in other cases you can apply this operation recursively and
stop after ``n_sphere`` or after reaching the end of
branches.
Args:
n_sphere (int):
use_lookup (bool): Use a lookup variable for
:meth:`~chemcoord.Cartesian.get_bonds`. The default is
specified in ``settings['defaults']['use_lookup']``
Returns:
dict: The output will look like this::
{ (element_symbol, frozenset([tuples])) : set([indices]) }
A dictionary mapping from a chemical environment to
the set of indices of atoms in this environment.
"""
if use_lookup is None:
use_lookup = settings['defaults']['use_lookup']
def get_chem_env(self, i, n_sphere):
env_index = self.get_coordination_sphere(
i, n_sphere=n_sphere, only_surface=False,
give_only_index=True, use_lookup=use_lookup)
env_index.remove(i)
atoms = self.loc[env_index, 'atom']
environment = frozenset(collections.Counter(atoms).most_common())
return (self.loc[i, 'atom'], environment)
chemical_environments = collections.defaultdict(set)
for i in self.index:
chemical_environments[get_chem_env(self, i, n_sphere)].add(i)
return dict(chemical_environments) | This function partitions the molecule into subsets of the
same chemical environment.
A chemical environment is specified by the number of
surrounding atoms of a certain kind around an atom with a
certain atomic number represented by a tuple of a string
and a frozenset of tuples.
The ``n_sphere`` option determines how many branches the
algorithm follows to determine the chemical environment.
Example:
A carbon atom in ethane has bonds with three hydrogen (atomic
number 1) and one carbon atom (atomic number 6).
If ``n_sphere=1`` these are the only atoms we are
interested in and the chemical environment is::
('C', frozenset([('H', 3), ('C', 1)]))
If ``n_sphere=2`` we follow every atom in the chemical
enviromment of ``n_sphere=1`` to their direct neighbours.
In the case of ethane this gives::
('C', frozenset([('H', 6), ('C', 1)]))
In the special case of ethane this is the whole molecule;
in other cases you can apply this operation recursively and
stop after ``n_sphere`` or after reaching the end of
branches.
Args:
n_sphere (int):
use_lookup (bool): Use a lookup variable for
:meth:`~chemcoord.Cartesian.get_bonds`. The default is
specified in ``settings['defaults']['use_lookup']``
Returns:
dict: The output will look like this::
{ (element_symbol, frozenset([tuples])) : set([indices]) }
A dictionary mapping from a chemical environment to
the set of indices of atoms in this environment. | Below is the the instruction that describes the task:
### Input:
This function partitions the molecule into subsets of the
same chemical environment.
A chemical environment is specified by the number of
surrounding atoms of a certain kind around an atom with a
certain atomic number represented by a tuple of a string
and a frozenset of tuples.
The ``n_sphere`` option determines how many branches the
algorithm follows to determine the chemical environment.
Example:
A carbon atom in ethane has bonds with three hydrogen (atomic
number 1) and one carbon atom (atomic number 6).
If ``n_sphere=1`` these are the only atoms we are
interested in and the chemical environment is::
('C', frozenset([('H', 3), ('C', 1)]))
If ``n_sphere=2`` we follow every atom in the chemical
enviromment of ``n_sphere=1`` to their direct neighbours.
In the case of ethane this gives::
('C', frozenset([('H', 6), ('C', 1)]))
In the special case of ethane this is the whole molecule;
in other cases you can apply this operation recursively and
stop after ``n_sphere`` or after reaching the end of
branches.
Args:
n_sphere (int):
use_lookup (bool): Use a lookup variable for
:meth:`~chemcoord.Cartesian.get_bonds`. The default is
specified in ``settings['defaults']['use_lookup']``
Returns:
dict: The output will look like this::
{ (element_symbol, frozenset([tuples])) : set([indices]) }
A dictionary mapping from a chemical environment to
the set of indices of atoms in this environment.
### Response:
def partition_chem_env(self, n_sphere=4,
use_lookup=None):
"""This function partitions the molecule into subsets of the
same chemical environment.
A chemical environment is specified by the number of
surrounding atoms of a certain kind around an atom with a
certain atomic number represented by a tuple of a string
and a frozenset of tuples.
The ``n_sphere`` option determines how many branches the
algorithm follows to determine the chemical environment.
Example:
A carbon atom in ethane has bonds with three hydrogen (atomic
number 1) and one carbon atom (atomic number 6).
If ``n_sphere=1`` these are the only atoms we are
interested in and the chemical environment is::
('C', frozenset([('H', 3), ('C', 1)]))
If ``n_sphere=2`` we follow every atom in the chemical
enviromment of ``n_sphere=1`` to their direct neighbours.
In the case of ethane this gives::
('C', frozenset([('H', 6), ('C', 1)]))
In the special case of ethane this is the whole molecule;
in other cases you can apply this operation recursively and
stop after ``n_sphere`` or after reaching the end of
branches.
Args:
n_sphere (int):
use_lookup (bool): Use a lookup variable for
:meth:`~chemcoord.Cartesian.get_bonds`. The default is
specified in ``settings['defaults']['use_lookup']``
Returns:
dict: The output will look like this::
{ (element_symbol, frozenset([tuples])) : set([indices]) }
A dictionary mapping from a chemical environment to
the set of indices of atoms in this environment.
"""
if use_lookup is None:
use_lookup = settings['defaults']['use_lookup']
def get_chem_env(self, i, n_sphere):
env_index = self.get_coordination_sphere(
i, n_sphere=n_sphere, only_surface=False,
give_only_index=True, use_lookup=use_lookup)
env_index.remove(i)
atoms = self.loc[env_index, 'atom']
environment = frozenset(collections.Counter(atoms).most_common())
return (self.loc[i, 'atom'], environment)
chemical_environments = collections.defaultdict(set)
for i in self.index:
chemical_environments[get_chem_env(self, i, n_sphere)].add(i)
return dict(chemical_environments) |
def click(self, x, y):
'''
Simulate click operation
Args:
- x (int): position of x
- y (int): position of y
Returns:
self
'''
self._run_nowait('target.tap({x: %d, y: %d})' % (x/self._scale, y/self._scale))
return self | Simulate click operation
Args:
- x (int): position of x
- y (int): position of y
Returns:
self | Below is the the instruction that describes the task:
### Input:
Simulate click operation
Args:
- x (int): position of x
- y (int): position of y
Returns:
self
### Response:
def click(self, x, y):
'''
Simulate click operation
Args:
- x (int): position of x
- y (int): position of y
Returns:
self
'''
self._run_nowait('target.tap({x: %d, y: %d})' % (x/self._scale, y/self._scale))
return self |
def dotransition(accountable, transition_id):
"""
Transition the given issue to the provided ID. The API does not return a
JSON response for this call.
"""
t = accountable.issue_do_transition(transition_id)
if t.status_code == 204:
click.secho(
'Successfully transitioned {}'.format(accountable.issue_key),
fg='green'
) | Transition the given issue to the provided ID. The API does not return a
JSON response for this call. | Below is the the instruction that describes the task:
### Input:
Transition the given issue to the provided ID. The API does not return a
JSON response for this call.
### Response:
def dotransition(accountable, transition_id):
"""
Transition the given issue to the provided ID. The API does not return a
JSON response for this call.
"""
t = accountable.issue_do_transition(transition_id)
if t.status_code == 204:
click.secho(
'Successfully transitioned {}'.format(accountable.issue_key),
fg='green'
) |
def save_account(changes: Changeset, table: LdapObjectClass, database: Database) -> Changeset:
""" Modify a changes to add an automatically generated uidNumber. """
d = {}
settings = database.settings
uid_number = changes.get_value_as_single('uidNumber')
if uid_number is None:
scheme = settings['NUMBER_SCHEME']
first = settings.get('UID_FIRST', 10000)
d['uidNumber'] = Counters.get_and_increment(
scheme, "uidNumber", first,
lambda n: not _check_exists(database, table, 'uidNumber', n)
)
changes = changes.merge(d)
return changes | Modify a changes to add an automatically generated uidNumber. | Below is the the instruction that describes the task:
### Input:
Modify a changes to add an automatically generated uidNumber.
### Response:
def save_account(changes: Changeset, table: LdapObjectClass, database: Database) -> Changeset:
""" Modify a changes to add an automatically generated uidNumber. """
d = {}
settings = database.settings
uid_number = changes.get_value_as_single('uidNumber')
if uid_number is None:
scheme = settings['NUMBER_SCHEME']
first = settings.get('UID_FIRST', 10000)
d['uidNumber'] = Counters.get_and_increment(
scheme, "uidNumber", first,
lambda n: not _check_exists(database, table, 'uidNumber', n)
)
changes = changes.merge(d)
return changes |
def ChoiceHumanReadable(choices, choice):
"""
Return the human readable representation for a list of choices.
@see https://docs.djangoproject.com/en/dev/ref/models/fields/#choices
"""
if choice == None: raise NoChoiceError()
for _choice in choices:
if _choice[0] == choice:
return _choice[1]
raise NoChoiceMatchError("The choice '%s' does not exist in '%s'" % (choice, ", ".join([choice[0] for choice in choices]))) | Return the human readable representation for a list of choices.
@see https://docs.djangoproject.com/en/dev/ref/models/fields/#choices | Below is the instruction that describes the task:
### Input:
Return the human readable representation for a list of choices.
@see https://docs.djangoproject.com/en/dev/ref/models/fields/#choices
### Response:
def ChoiceHumanReadable(choices, choice):
"""
Return the human readable representation for a list of choices.
@see https://docs.djangoproject.com/en/dev/ref/models/fields/#choices
"""
if choice == None: raise NoChoiceError()
for _choice in choices:
if _choice[0] == choice:
return _choice[1]
raise NoChoiceMatchError("The choice '%s' does not exist in '%s'" % (choice, ", ".join([choice[0] for choice in choices]))) |
def _evaluate(self,R,z,phi=0.,t=0.):
"""
NAME:
_evaluate
PURPOSE:
evaluate the potential at R,z
INPUT:
R - Galactocentric cylindrical radius
z - vertical height
phi - azimuth
t - time
OUTPUT:
Phi(R,z)
HISTORY:
2010-04-02 - Started - Bovy (NYU)
2010-04-30 - Adapted for R,z - Bovy (NYU)
"""
if self.isNonAxi:
return 1./2.*nu.log(R**2.*(1.-self._1m1overb2*nu.sin(phi)**2.)
+(z/self._q)**2.+self._core2)
else:
return 1./2.*nu.log(R**2.+(z/self._q)**2.+self._core2) | NAME:
_evaluate
PURPOSE:
evaluate the potential at R,z
INPUT:
R - Galactocentric cylindrical radius
z - vertical height
phi - azimuth
t - time
OUTPUT:
Phi(R,z)
HISTORY:
2010-04-02 - Started - Bovy (NYU)
2010-04-30 - Adapted for R,z - Bovy (NYU) | Below is the instruction that describes the task:
### Input:
NAME:
_evaluate
PURPOSE:
evaluate the potential at R,z
INPUT:
R - Galactocentric cylindrical radius
z - vertical height
phi - azimuth
t - time
OUTPUT:
Phi(R,z)
HISTORY:
2010-04-02 - Started - Bovy (NYU)
2010-04-30 - Adapted for R,z - Bovy (NYU)
### Response:
def _evaluate(self,R,z,phi=0.,t=0.):
"""
NAME:
_evaluate
PURPOSE:
evaluate the potential at R,z
INPUT:
R - Galactocentric cylindrical radius
z - vertical height
phi - azimuth
t - time
OUTPUT:
Phi(R,z)
HISTORY:
2010-04-02 - Started - Bovy (NYU)
2010-04-30 - Adapted for R,z - Bovy (NYU)
"""
if self.isNonAxi:
return 1./2.*nu.log(R**2.*(1.-self._1m1overb2*nu.sin(phi)**2.)
+(z/self._q)**2.+self._core2)
else:
return 1./2.*nu.log(R**2.+(z/self._q)**2.+self._core2) |
def from_array(array):
"""
Deserialize a new EncryptedCredentials from a given dictionary.
:return: new EncryptedCredentials instance.
:rtype: EncryptedCredentials
"""
if array is None or not array:
return None
# end if
assert_type_or_raise(array, dict, parameter_name="array")
data = {}
data['data'] = u(array.get('data'))
data['hash'] = u(array.get('hash'))
data['secret'] = u(array.get('secret'))
data['_raw'] = array
return EncryptedCredentials(**data) | Deserialize a new EncryptedCredentials from a given dictionary.
:return: new EncryptedCredentials instance.
:rtype: EncryptedCredentials | Below is the instruction that describes the task:
### Input:
Deserialize a new EncryptedCredentials from a given dictionary.
:return: new EncryptedCredentials instance.
:rtype: EncryptedCredentials
### Response:
def from_array(array):
"""
Deserialize a new EncryptedCredentials from a given dictionary.
:return: new EncryptedCredentials instance.
:rtype: EncryptedCredentials
"""
if array is None or not array:
return None
# end if
assert_type_or_raise(array, dict, parameter_name="array")
data = {}
data['data'] = u(array.get('data'))
data['hash'] = u(array.get('hash'))
data['secret'] = u(array.get('secret'))
data['_raw'] = array
return EncryptedCredentials(**data) |
def get_neighbors(self, connectedness=8):
"""
Return tile neighbors.
Tile neighbors are unique, i.e. in some edge cases, where both the left
and right neighbor wrapped around the antimeridian is the same. Also,
neighbors ouside the northern and southern TilePyramid boundaries are
excluded, because they are invalid.
-------------
| 8 | 1 | 5 |
-------------
| 4 | x | 2 |
-------------
| 7 | 3 | 6 |
-------------
- connectedness: [4 or 8] return four direct neighbors or all eight.
"""
if connectedness not in [4, 8]:
raise ValueError("only connectedness values 8 or 4 are allowed")
unique_neighbors = {}
# 4-connected neighborsfor pyramid
matrix_offsets = [
(-1, 0), # 1: above
(0, 1), # 2: right
(1, 0), # 3: below
(0, -1) # 4: left
]
if connectedness == 8:
matrix_offsets.extend([
(-1, 1), # 5: above right
(1, 1), # 6: below right
(1, -1), # 7: below left
(-1, -1) # 8: above left
])
for row_offset, col_offset in matrix_offsets:
new_row = self.row + row_offset
new_col = self.col + col_offset
# omit if row is outside of tile matrix
if new_row < 0 or new_row >= self.tp.matrix_height(self.zoom):
continue
# wrap around antimeridian if new column is outside of tile matrix
if new_col < 0:
if not self.tp.is_global:
continue
new_col = self.tp.matrix_width(self.zoom) + new_col
elif new_col >= self.tp.matrix_width(self.zoom):
if not self.tp.is_global:
continue
new_col -= self.tp.matrix_width(self.zoom)
# omit if new tile is current tile
if new_row == self.row and new_col == self.col:
continue
# create new tile
unique_neighbors[(new_row, new_col)] = self.tp.tile(
self.zoom, new_row, new_col
)
return unique_neighbors.values() | Return tile neighbors.
Tile neighbors are unique, i.e. in some edge cases, where both the left
and right neighbor wrapped around the antimeridian is the same. Also,
neighbors ouside the northern and southern TilePyramid boundaries are
excluded, because they are invalid.
-------------
| 8 | 1 | 5 |
-------------
| 4 | x | 2 |
-------------
| 7 | 3 | 6 |
-------------
- connectedness: [4 or 8] return four direct neighbors or all eight. | Below is the instruction that describes the task:
### Input:
Return tile neighbors.
Tile neighbors are unique, i.e. in some edge cases, where both the left
and right neighbor wrapped around the antimeridian is the same. Also,
neighbors ouside the northern and southern TilePyramid boundaries are
excluded, because they are invalid.
-------------
| 8 | 1 | 5 |
-------------
| 4 | x | 2 |
-------------
| 7 | 3 | 6 |
-------------
- connectedness: [4 or 8] return four direct neighbors or all eight.
### Response:
def get_neighbors(self, connectedness=8):
"""
Return tile neighbors.
Tile neighbors are unique, i.e. in some edge cases, where both the left
and right neighbor wrapped around the antimeridian is the same. Also,
neighbors ouside the northern and southern TilePyramid boundaries are
excluded, because they are invalid.
-------------
| 8 | 1 | 5 |
-------------
| 4 | x | 2 |
-------------
| 7 | 3 | 6 |
-------------
- connectedness: [4 or 8] return four direct neighbors or all eight.
"""
if connectedness not in [4, 8]:
raise ValueError("only connectedness values 8 or 4 are allowed")
unique_neighbors = {}
# 4-connected neighborsfor pyramid
matrix_offsets = [
(-1, 0), # 1: above
(0, 1), # 2: right
(1, 0), # 3: below
(0, -1) # 4: left
]
if connectedness == 8:
matrix_offsets.extend([
(-1, 1), # 5: above right
(1, 1), # 6: below right
(1, -1), # 7: below left
(-1, -1) # 8: above left
])
for row_offset, col_offset in matrix_offsets:
new_row = self.row + row_offset
new_col = self.col + col_offset
# omit if row is outside of tile matrix
if new_row < 0 or new_row >= self.tp.matrix_height(self.zoom):
continue
# wrap around antimeridian if new column is outside of tile matrix
if new_col < 0:
if not self.tp.is_global:
continue
new_col = self.tp.matrix_width(self.zoom) + new_col
elif new_col >= self.tp.matrix_width(self.zoom):
if not self.tp.is_global:
continue
new_col -= self.tp.matrix_width(self.zoom)
# omit if new tile is current tile
if new_row == self.row and new_col == self.col:
continue
# create new tile
unique_neighbors[(new_row, new_col)] = self.tp.tile(
self.zoom, new_row, new_col
)
return unique_neighbors.values() |
def write(self, filename = ""):
"""
Writes data from L{PE} object to a file.
@rtype: str
@return: The L{PE} stream data.
@raise IOError: If the file could not be opened for write operations.
"""
file_data = str(self)
if filename:
try:
self.__write(filename, file_data)
except IOError:
raise IOError("File could not be opened for write operations.")
else:
return file_data | Writes data from L{PE} object to a file.
@rtype: str
@return: The L{PE} stream data.
@raise IOError: If the file could not be opened for write operations. | Below is the instruction that describes the task:
### Input:
Writes data from L{PE} object to a file.
@rtype: str
@return: The L{PE} stream data.
@raise IOError: If the file could not be opened for write operations.
### Response:
def write(self, filename = ""):
"""
Writes data from L{PE} object to a file.
@rtype: str
@return: The L{PE} stream data.
@raise IOError: If the file could not be opened for write operations.
"""
file_data = str(self)
if filename:
try:
self.__write(filename, file_data)
except IOError:
raise IOError("File could not be opened for write operations.")
else:
return file_data |
def start(self, timeout=None):
"""
Startup of the node.
:param join: optionally wait for the process to end (default : True)
:return: None
"""
assert super(PyrosBase, self).start(timeout=timeout)
# Because we currently use this to setup connection
return self.name | Startup of the node.
:param join: optionally wait for the process to end (default : True)
:return: None | Below is the instruction that describes the task:
### Input:
Startup of the node.
:param join: optionally wait for the process to end (default : True)
:return: None
### Response:
def start(self, timeout=None):
"""
Startup of the node.
:param join: optionally wait for the process to end (default : True)
:return: None
"""
assert super(PyrosBase, self).start(timeout=timeout)
# Because we currently use this to setup connection
return self.name |
def from_amber(cls, path, positions=None, strict=True, **kwargs):
"""
Loads Amber Parm7 parameters and topology file
Parameters
----------
path : str
Path to *.prmtop or *.top file
positions : simtk.unit.Quantity
Atomic positions
Returns
-------
prmtop : SystemHandler
SystemHandler with topology
"""
if strict and positions is None:
raise ValueError('Amber TOP/PRMTOP files require initial positions.')
prmtop = AmberPrmtopFile(path)
box = kwargs.pop('box', prmtop.topology.getPeriodicBoxVectors())
return cls(master=prmtop, topology=prmtop.topology, positions=positions, box=box,
path=path, **kwargs) | Loads Amber Parm7 parameters and topology file
Parameters
----------
path : str
Path to *.prmtop or *.top file
positions : simtk.unit.Quantity
Atomic positions
Returns
-------
prmtop : SystemHandler
SystemHandler with topology | Below is the instruction that describes the task:
### Input:
Loads Amber Parm7 parameters and topology file
Parameters
----------
path : str
Path to *.prmtop or *.top file
positions : simtk.unit.Quantity
Atomic positions
Returns
-------
prmtop : SystemHandler
SystemHandler with topology
### Response:
def from_amber(cls, path, positions=None, strict=True, **kwargs):
"""
Loads Amber Parm7 parameters and topology file
Parameters
----------
path : str
Path to *.prmtop or *.top file
positions : simtk.unit.Quantity
Atomic positions
Returns
-------
prmtop : SystemHandler
SystemHandler with topology
"""
if strict and positions is None:
raise ValueError('Amber TOP/PRMTOP files require initial positions.')
prmtop = AmberPrmtopFile(path)
box = kwargs.pop('box', prmtop.topology.getPeriodicBoxVectors())
return cls(master=prmtop, topology=prmtop.topology, positions=positions, box=box,
path=path, **kwargs) |
def handler(self, handler_class):
"""Link to an API handler class (e.g. piston or DRF)."""
self.handler_class = handler_class
# we take the docstring from the handler class, not the methods
if self.docs is None and handler_class.__doc__:
self.docs = clean_docstring(handler_class.__doc__)
    return handler_class | Link to an API handler class (e.g. piston or DRF). | Below is the instruction that describes the task:
### Input:
Link to an API handler class (e.g. piston or DRF).
### Response:
def handler(self, handler_class):
"""Link to an API handler class (e.g. piston or DRF)."""
self.handler_class = handler_class
# we take the docstring from the handler class, not the methods
if self.docs is None and handler_class.__doc__:
self.docs = clean_docstring(handler_class.__doc__)
return handler_class |
def _scan_pth_files(dir_paths):
"""Given an iterable of directory paths, yield paths to all .pth files within."""
for dir_path in dir_paths:
if not os.path.exists(dir_path):
continue
pth_filenames = (f for f in os.listdir(dir_path) if f.endswith('.pth'))
for pth_filename in pth_filenames:
            yield os.path.join(dir_path, pth_filename) | Given an iterable of directory paths, yield paths to all .pth files within. | Below is the instruction that describes the task:
### Input:
Given an iterable of directory paths, yield paths to all .pth files within.
### Response:
def _scan_pth_files(dir_paths):
"""Given an iterable of directory paths, yield paths to all .pth files within."""
for dir_path in dir_paths:
if not os.path.exists(dir_path):
continue
pth_filenames = (f for f in os.listdir(dir_path) if f.endswith('.pth'))
for pth_filename in pth_filenames:
yield os.path.join(dir_path, pth_filename) |
def _realize(self, master, element):
"""Builds a widget from xml element using master as parent."""
data = data_xmlnode_to_dict(element, self.translator)
cname = data['class']
uniqueid = data['id']
if cname not in CLASS_MAP:
self._import_class(cname)
if cname in CLASS_MAP:
self._pre_process_data(data)
parent = CLASS_MAP[cname].builder.factory(self, data)
widget = parent.realize(master)
self.objects[uniqueid] = parent
xpath = "./child"
children = element.findall(xpath)
for child in children:
child_xml = child.find('./object')
child = self._realize(parent, child_xml)
parent.add_child(child)
parent.configure()
parent.layout()
return parent
else:
        raise Exception('Class "{0}" not mapped'.format(cname)) | Builds a widget from xml element using master as parent. | Below is the instruction that describes the task:
### Input:
Builds a widget from xml element using master as parent.
### Response:
def _realize(self, master, element):
"""Builds a widget from xml element using master as parent."""
data = data_xmlnode_to_dict(element, self.translator)
cname = data['class']
uniqueid = data['id']
if cname not in CLASS_MAP:
self._import_class(cname)
if cname in CLASS_MAP:
self._pre_process_data(data)
parent = CLASS_MAP[cname].builder.factory(self, data)
widget = parent.realize(master)
self.objects[uniqueid] = parent
xpath = "./child"
children = element.findall(xpath)
for child in children:
child_xml = child.find('./object')
child = self._realize(parent, child_xml)
parent.add_child(child)
parent.configure()
parent.layout()
return parent
else:
raise Exception('Class "{0}" not mapped'.format(cname)) |
def powerset(iterable):
"powerset([1,2,3]) --> () (1,) (2,) (3,) (1,2) (1,3) (2,3) (1,2,3)"
s = list(set(iterable))
combs = chain.from_iterable(combinations(s, r) for r in range(len(s)+1))
res = set(frozenset(x) for x in combs)
# res = map(frozenset, combs)
    return res | powerset([1,2,3]) --> () (1,) (2,) (3,) (1,2) (1,3) (2,3) (1,2,3) | Below is the instruction that describes the task:
### Input:
powerset([1,2,3]) --> () (1,) (2,) (3,) (1,2) (1,3) (2,3) (1,2,3)
### Response:
def powerset(iterable):
"powerset([1,2,3]) --> () (1,) (2,) (3,) (1,2) (1,3) (2,3) (1,2,3)"
s = list(set(iterable))
combs = chain.from_iterable(combinations(s, r) for r in range(len(s)+1))
res = set(frozenset(x) for x in combs)
# res = map(frozenset, combs)
return res |
def send_request(self, request):
"""
Create the transaction and fill it with the outgoing request.
:type request: Request
:param request: the request to send
:rtype : Transaction
:return: the created transaction
"""
logger.debug("send_request - " + str(request))
assert isinstance(request, Request)
try:
host, port = request.destination
except AttributeError:
return
request.timestamp = time.time()
transaction = Transaction(request=request, timestamp=request.timestamp)
if transaction.request.type is None:
transaction.request.type = defines.Types["CON"]
if transaction.request.mid is None:
transaction.request.mid = self.fetch_mid()
key_mid = str_append_hash(host, port, request.mid)
self._transactions[key_mid] = transaction
key_token = str_append_hash(host, port, request.token)
self._transactions_token[key_token] = transaction
return self._transactions[key_mid] | Create the transaction and fill it with the outgoing request.
:type request: Request
:param request: the request to send
:rtype : Transaction
:return: the created transaction | Below is the instruction that describes the task:
### Input:
Create the transaction and fill it with the outgoing request.
:type request: Request
:param request: the request to send
:rtype : Transaction
:return: the created transaction
### Response:
def send_request(self, request):
"""
Create the transaction and fill it with the outgoing request.
:type request: Request
:param request: the request to send
:rtype : Transaction
:return: the created transaction
"""
logger.debug("send_request - " + str(request))
assert isinstance(request, Request)
try:
host, port = request.destination
except AttributeError:
return
request.timestamp = time.time()
transaction = Transaction(request=request, timestamp=request.timestamp)
if transaction.request.type is None:
transaction.request.type = defines.Types["CON"]
if transaction.request.mid is None:
transaction.request.mid = self.fetch_mid()
key_mid = str_append_hash(host, port, request.mid)
self._transactions[key_mid] = transaction
key_token = str_append_hash(host, port, request.token)
self._transactions_token[key_token] = transaction
return self._transactions[key_mid] |
def CopyTree(self, selection, *args, **kwargs):
"""
Copy the tree while supporting a rootpy.tree.cut.Cut selection in
addition to a simple string.
"""
return super(BaseTree, self).CopyTree(str(selection), *args, **kwargs) | Copy the tree while supporting a rootpy.tree.cut.Cut selection in
addition to a simple string. | Below is the instruction that describes the task:
### Input:
Copy the tree while supporting a rootpy.tree.cut.Cut selection in
addition to a simple string.
### Response:
def CopyTree(self, selection, *args, **kwargs):
"""
Copy the tree while supporting a rootpy.tree.cut.Cut selection in
addition to a simple string.
"""
return super(BaseTree, self).CopyTree(str(selection), *args, **kwargs) |
def notify_observers(self, which=None, min_priority=None):
"""
Notifies all observers. Which is the element, which kicked off this
notification loop. The first argument will be self, the second `which`.
.. note::
notifies only observers with priority p > min_priority!
:param min_priority: only notify observers with priority > min_priority
if min_priority is None, notify all observers in order
"""
if self._update_on:
if which is None:
which = self
if min_priority is None:
[callble(self, which=which) for _, _, callble in self.observers]
else:
for p, _, callble in self.observers:
if p <= min_priority:
break
callble(self, which=which) | Notifies all observers. Which is the element, which kicked off this
notification loop. The first argument will be self, the second `which`.
.. note::
notifies only observers with priority p > min_priority!
:param min_priority: only notify observers with priority > min_priority
if min_priority is None, notify all observers in order | Below is the instruction that describes the task:
### Input:
Notifies all observers. Which is the element, which kicked off this
notification loop. The first argument will be self, the second `which`.
.. note::
notifies only observers with priority p > min_priority!
:param min_priority: only notify observers with priority > min_priority
if min_priority is None, notify all observers in order
### Response:
def notify_observers(self, which=None, min_priority=None):
"""
Notifies all observers. Which is the element, which kicked off this
notification loop. The first argument will be self, the second `which`.
.. note::
notifies only observers with priority p > min_priority!
:param min_priority: only notify observers with priority > min_priority
if min_priority is None, notify all observers in order
"""
if self._update_on:
if which is None:
which = self
if min_priority is None:
[callble(self, which=which) for _, _, callble in self.observers]
else:
for p, _, callble in self.observers:
if p <= min_priority:
break
callble(self, which=which) |
def _mkprox(funcname):
"""
Make lazy-init proxy function.
"""
def prox(*args, **kwargs):
_init()
return getattr(_module, funcname)(*args, **kwargs)
    return prox | Make lazy-init proxy function. | Below is the instruction that describes the task:
### Input:
Make lazy-init proxy function.
### Response:
def _mkprox(funcname):
"""
Make lazy-init proxy function.
"""
def prox(*args, **kwargs):
_init()
return getattr(_module, funcname)(*args, **kwargs)
return prox |
def flatten_tree_to_ident_hashes(item_or_tree,
lucent_id=TRANSLUCENT_BINDER_ID):
"""Flatten a tree to id and version values (ident_hash)."""
if 'contents' in item_or_tree:
tree = item_or_tree
if tree['id'] != lucent_id:
yield tree['id']
for i in tree['contents']:
# yield from flatten_tree_to_ident_hashs(i, lucent_id)
for x in flatten_tree_to_ident_hashes(i, lucent_id):
yield x
else:
item = item_or_tree
        yield item['id'] | Flatten a tree to id and version values (ident_hash). | Below is the instruction that describes the task:
### Input:
Flatten a tree to id and version values (ident_hash).
### Response:
def flatten_tree_to_ident_hashes(item_or_tree,
lucent_id=TRANSLUCENT_BINDER_ID):
"""Flatten a tree to id and version values (ident_hash)."""
if 'contents' in item_or_tree:
tree = item_or_tree
if tree['id'] != lucent_id:
yield tree['id']
for i in tree['contents']:
# yield from flatten_tree_to_ident_hashs(i, lucent_id)
for x in flatten_tree_to_ident_hashes(i, lucent_id):
yield x
else:
item = item_or_tree
yield item['id'] |
def sample(self, nsims=1000):
""" Samples from the posterior predictive distribution
Parameters
----------
nsims : int (default : 1000)
How many draws from the posterior predictive distribution
Returns
----------
- np.ndarray of draws from the data
"""
if self.latent_variables.estimation_method not in ['BBVI', 'M-H']:
raise Exception("No latent variables estimated!")
else:
lv_draws = self.draw_latent_variables(nsims=nsims)
mus = [self._model(lv_draws[:,i])[0] for i in range(nsims)]
model_scale, model_shape, model_skewness = self._get_scale_and_shape_sim(lv_draws)
data_draws = np.array([self.family.draw_variable(self.link(mus[i]),
np.repeat(model_scale[i], mus[i].shape[0]), np.repeat(model_shape[i], mus[i].shape[0]),
np.repeat(model_skewness[i], mus[i].shape[0]), mus[i].shape[0]) for i in range(nsims)])
return data_draws | Samples from the posterior predictive distribution
Parameters
----------
nsims : int (default : 1000)
How many draws from the posterior predictive distribution
Returns
----------
- np.ndarray of draws from the data | Below is the instruction that describes the task:
### Input:
Samples from the posterior predictive distribution
Parameters
----------
nsims : int (default : 1000)
How many draws from the posterior predictive distribution
Returns
----------
- np.ndarray of draws from the data
### Response:
def sample(self, nsims=1000):
""" Samples from the posterior predictive distribution
Parameters
----------
nsims : int (default : 1000)
How many draws from the posterior predictive distribution
Returns
----------
- np.ndarray of draws from the data
"""
if self.latent_variables.estimation_method not in ['BBVI', 'M-H']:
raise Exception("No latent variables estimated!")
else:
lv_draws = self.draw_latent_variables(nsims=nsims)
mus = [self._model(lv_draws[:,i])[0] for i in range(nsims)]
model_scale, model_shape, model_skewness = self._get_scale_and_shape_sim(lv_draws)
data_draws = np.array([self.family.draw_variable(self.link(mus[i]),
np.repeat(model_scale[i], mus[i].shape[0]), np.repeat(model_shape[i], mus[i].shape[0]),
np.repeat(model_skewness[i], mus[i].shape[0]), mus[i].shape[0]) for i in range(nsims)])
return data_draws |
def get_stdin_data(self):
"""
Get words from stdin.
"""
if self.tty.in_is_tty:
# No pipez found
return False
if sys.version_info < (3, 0):
stdin_lines = (l.decode('utf-8') for l in sys.stdin.xreadlines())
else:
stdin_lines = (l for l in sys.stdin.readlines())
rx_word = re.compile("\w+", re.UNICODE)
# If we have stdin data, we should remove everything else!
self.words.clear()
word_list = [match.group(0)
for line in stdin_lines
for match in rx_word.finditer(line.lower())]
if self.ns.filter_stopwords:
word_list = self.filter_words(
word_list, stopwords=wow.STOPWORDS,
min_length=self.ns.min_length)
self.words.extend(word_list)
        return True | Get words from stdin. | Below is the instruction that describes the task:
### Input:
Get words from stdin.
### Response:
def get_stdin_data(self):
"""
Get words from stdin.
"""
if self.tty.in_is_tty:
# No pipez found
return False
if sys.version_info < (3, 0):
stdin_lines = (l.decode('utf-8') for l in sys.stdin.xreadlines())
else:
stdin_lines = (l for l in sys.stdin.readlines())
rx_word = re.compile("\w+", re.UNICODE)
# If we have stdin data, we should remove everything else!
self.words.clear()
word_list = [match.group(0)
for line in stdin_lines
for match in rx_word.finditer(line.lower())]
if self.ns.filter_stopwords:
word_list = self.filter_words(
word_list, stopwords=wow.STOPWORDS,
min_length=self.ns.min_length)
self.words.extend(word_list)
return True |
def create_experiment(
run_config,
hparams,
model_name,
problem_name,
data_dir,
train_steps,
eval_steps,
min_eval_frequency=2000,
eval_throttle_seconds=600,
schedule="train_and_evaluate",
export=False,
decode_hparams=None,
use_tfdbg=False,
use_dbgprofile=False,
eval_early_stopping_steps=None,
eval_early_stopping_metric=None,
eval_early_stopping_metric_delta=None,
eval_early_stopping_metric_minimize=True,
eval_timeout_mins=240,
eval_use_test_set=False,
use_tpu=False,
use_tpu_estimator=False,
use_xla=False,
additional_train_hooks=None,
additional_eval_hooks=None,
warm_start_from=None,
decode_from_file="",
decode_to_file="",
decode_reference="",
std_server_protocol=None):
"""Create Experiment."""
# HParams
hparams.add_hparam("model_dir", run_config.model_dir)
hparams.add_hparam("data_dir", data_dir)
hparams.add_hparam("train_steps", train_steps)
hparams.add_hparam("eval_steps", eval_steps)
hparams.add_hparam("schedule", schedule)
hparams.add_hparam("warm_start_from", warm_start_from)
hparams.add_hparam("std_server_protocol", std_server_protocol)
hparams.add_hparam("eval_freq_in_steps", min_eval_frequency)
hparams.add_hparam("eval_timeout_mins", eval_timeout_mins)
if decode_hparams is not None:
decode_hparams.add_hparam("decode_from_file", decode_from_file)
if decode_to_file and not decode_hparams.decode_to_file:
decode_hparams.decode_to_file = decode_to_file
if decode_reference and not decode_hparams.decode_reference:
decode_hparams.decode_reference = decode_reference
add_problem_hparams(hparams, problem_name)
# Estimator
estimator = create_estimator(
model_name,
hparams,
run_config,
schedule=schedule,
decode_hparams=decode_hparams,
use_tpu=use_tpu,
use_tpu_estimator=use_tpu_estimator,
use_xla=use_xla)
# Input fns from Problem
problem = hparams.problem
train_input_fn = problem.make_estimator_input_fn(tf.estimator.ModeKeys.TRAIN,
hparams)
dataset_split = "test" if eval_use_test_set else None
dataset_kwargs = {"dataset_split": dataset_split}
eval_input_fn = problem.make_estimator_input_fn(tf.estimator.ModeKeys.EVAL,
hparams,
dataset_kwargs=dataset_kwargs)
# Export
exporter = None
if export:
def compare_fn(best_eval_result, current_eval_result):
metric = eval_early_stopping_metric or "loss"
return current_eval_result[metric] < best_eval_result[metric]
def serving_input_receiver_fn(hparams, decode_hparams, use_tpu):
return problem.serving_input_fn(hparams, decode_hparams, use_tpu)
exporter = tf.estimator.BestExporter(
name="best",
serving_input_receiver_fn=serving_input_receiver_fn,
compare_fn=compare_fn,
assets_extra=problem.export_assets)
# Hooks
validation_monitor_kwargs = dict(
input_fn=eval_input_fn,
eval_steps=eval_steps,
every_n_steps=min_eval_frequency,
early_stopping_rounds=eval_early_stopping_steps,
early_stopping_metric=eval_early_stopping_metric,
early_stopping_metric_minimize=eval_early_stopping_metric_minimize)
dbgprofile_kwargs = {"output_dir": run_config.model_dir}
early_stopping_kwargs = dict(
events_dir=os.path.join(run_config.model_dir, "eval_continuous"),
tag=eval_early_stopping_metric,
num_plateau_steps=eval_early_stopping_steps,
plateau_decrease=eval_early_stopping_metric_minimize,
plateau_delta=eval_early_stopping_metric_delta,
every_n_steps=min_eval_frequency)
# Eval on TPU Pods is not supported yet
if use_tpu and run_config.tpu_config.num_shards > 8 and "eval" in schedule:
raise ValueError("Eval is not currently supported on a TPU Pod")
# In-process eval (and possible early stopping)
if schedule == "continuous_train_and_eval" and min_eval_frequency:
tf.logging.warn("ValidationMonitor only works with "
"--schedule=train_and_evaluate")
use_validation_monitor = (
schedule == "train_and_evaluate" and min_eval_frequency)
# Distributed early stopping
local_schedules = ["train_and_evaluate", "continuous_train_and_eval"]
use_early_stopping = (
schedule not in local_schedules and eval_early_stopping_steps)
train_hooks, eval_hooks = create_hooks(
use_tfdbg=use_tfdbg,
use_dbgprofile=use_dbgprofile,
dbgprofile_kwargs=dbgprofile_kwargs,
use_validation_monitor=use_validation_monitor,
validation_monitor_kwargs=validation_monitor_kwargs,
use_early_stopping=use_early_stopping,
early_stopping_kwargs=early_stopping_kwargs)
hook_context = HookContext(
estimator=estimator, problem=problem, hparams=hparams)
train_hooks += t2t_model.T2TModel.get_train_hooks(model_name, hook_context)
eval_hooks += t2t_model.T2TModel.get_eval_hooks(model_name, hook_context)
if additional_train_hooks:
train_hooks += additional_train_hooks
if additional_eval_hooks:
eval_hooks += additional_eval_hooks
train_hooks = tf.contrib.learn.monitors.replace_monitors_with_hooks(
train_hooks, estimator)
eval_hooks = tf.contrib.learn.monitors.replace_monitors_with_hooks(
eval_hooks, estimator)
train_spec = tf.estimator.TrainSpec(
train_input_fn, max_steps=train_steps, hooks=train_hooks)
eval_spec = tf.estimator.EvalSpec(
eval_input_fn,
steps=eval_steps,
hooks=eval_hooks,
start_delay_secs=0 if hparams.schedule == "evaluate" else 120,
throttle_secs=eval_throttle_seconds,
exporters=exporter)
return T2TExperiment(estimator, hparams, train_spec, eval_spec,
                       use_validation_monitor, decode_hparams) | Create Experiment. | Below is the instruction that describes the task:
### Input:
Create Experiment.
### Response:
def create_experiment(
run_config,
hparams,
model_name,
problem_name,
data_dir,
train_steps,
eval_steps,
min_eval_frequency=2000,
eval_throttle_seconds=600,
schedule="train_and_evaluate",
export=False,
decode_hparams=None,
use_tfdbg=False,
use_dbgprofile=False,
eval_early_stopping_steps=None,
eval_early_stopping_metric=None,
eval_early_stopping_metric_delta=None,
eval_early_stopping_metric_minimize=True,
eval_timeout_mins=240,
eval_use_test_set=False,
use_tpu=False,
use_tpu_estimator=False,
use_xla=False,
additional_train_hooks=None,
additional_eval_hooks=None,
warm_start_from=None,
decode_from_file="",
decode_to_file="",
decode_reference="",
std_server_protocol=None):
"""Create Experiment."""
# HParams
hparams.add_hparam("model_dir", run_config.model_dir)
hparams.add_hparam("data_dir", data_dir)
hparams.add_hparam("train_steps", train_steps)
hparams.add_hparam("eval_steps", eval_steps)
hparams.add_hparam("schedule", schedule)
hparams.add_hparam("warm_start_from", warm_start_from)
hparams.add_hparam("std_server_protocol", std_server_protocol)
hparams.add_hparam("eval_freq_in_steps", min_eval_frequency)
hparams.add_hparam("eval_timeout_mins", eval_timeout_mins)
if decode_hparams is not None:
decode_hparams.add_hparam("decode_from_file", decode_from_file)
if decode_to_file and not decode_hparams.decode_to_file:
decode_hparams.decode_to_file = decode_to_file
if decode_reference and not decode_hparams.decode_reference:
decode_hparams.decode_reference = decode_reference
add_problem_hparams(hparams, problem_name)
# Estimator
estimator = create_estimator(
model_name,
hparams,
run_config,
schedule=schedule,
decode_hparams=decode_hparams,
use_tpu=use_tpu,
use_tpu_estimator=use_tpu_estimator,
use_xla=use_xla)
# Input fns from Problem
problem = hparams.problem
train_input_fn = problem.make_estimator_input_fn(tf.estimator.ModeKeys.TRAIN,
hparams)
dataset_split = "test" if eval_use_test_set else None
dataset_kwargs = {"dataset_split": dataset_split}
eval_input_fn = problem.make_estimator_input_fn(tf.estimator.ModeKeys.EVAL,
hparams,
dataset_kwargs=dataset_kwargs)
# Export
exporter = None
if export:
def compare_fn(best_eval_result, current_eval_result):
metric = eval_early_stopping_metric or "loss"
return current_eval_result[metric] < best_eval_result[metric]
def serving_input_receiver_fn(hparams, decode_hparams, use_tpu):
return problem.serving_input_fn(hparams, decode_hparams, use_tpu)
exporter = tf.estimator.BestExporter(
name="best",
serving_input_receiver_fn=serving_input_receiver_fn,
compare_fn=compare_fn,
assets_extra=problem.export_assets)
# Hooks
validation_monitor_kwargs = dict(
input_fn=eval_input_fn,
eval_steps=eval_steps,
every_n_steps=min_eval_frequency,
early_stopping_rounds=eval_early_stopping_steps,
early_stopping_metric=eval_early_stopping_metric,
early_stopping_metric_minimize=eval_early_stopping_metric_minimize)
dbgprofile_kwargs = {"output_dir": run_config.model_dir}
early_stopping_kwargs = dict(
events_dir=os.path.join(run_config.model_dir, "eval_continuous"),
tag=eval_early_stopping_metric,
num_plateau_steps=eval_early_stopping_steps,
plateau_decrease=eval_early_stopping_metric_minimize,
plateau_delta=eval_early_stopping_metric_delta,
every_n_steps=min_eval_frequency)
# Eval on TPU Pods is not supported yet
if use_tpu and run_config.tpu_config.num_shards > 8 and "eval" in schedule:
raise ValueError("Eval is not currently supported on a TPU Pod")
# In-process eval (and possible early stopping)
if schedule == "continuous_train_and_eval" and min_eval_frequency:
tf.logging.warn("ValidationMonitor only works with "
"--schedule=train_and_evaluate")
use_validation_monitor = (
schedule == "train_and_evaluate" and min_eval_frequency)
# Distributed early stopping
local_schedules = ["train_and_evaluate", "continuous_train_and_eval"]
use_early_stopping = (
schedule not in local_schedules and eval_early_stopping_steps)
train_hooks, eval_hooks = create_hooks(
use_tfdbg=use_tfdbg,
use_dbgprofile=use_dbgprofile,
dbgprofile_kwargs=dbgprofile_kwargs,
use_validation_monitor=use_validation_monitor,
validation_monitor_kwargs=validation_monitor_kwargs,
use_early_stopping=use_early_stopping,
early_stopping_kwargs=early_stopping_kwargs)
hook_context = HookContext(
estimator=estimator, problem=problem, hparams=hparams)
train_hooks += t2t_model.T2TModel.get_train_hooks(model_name, hook_context)
eval_hooks += t2t_model.T2TModel.get_eval_hooks(model_name, hook_context)
if additional_train_hooks:
train_hooks += additional_train_hooks
if additional_eval_hooks:
eval_hooks += additional_eval_hooks
train_hooks = tf.contrib.learn.monitors.replace_monitors_with_hooks(
train_hooks, estimator)
eval_hooks = tf.contrib.learn.monitors.replace_monitors_with_hooks(
eval_hooks, estimator)
train_spec = tf.estimator.TrainSpec(
train_input_fn, max_steps=train_steps, hooks=train_hooks)
eval_spec = tf.estimator.EvalSpec(
eval_input_fn,
steps=eval_steps,
hooks=eval_hooks,
start_delay_secs=0 if hparams.schedule == "evaluate" else 120,
throttle_secs=eval_throttle_seconds,
exporters=exporter)
return T2TExperiment(estimator, hparams, train_spec, eval_spec,
use_validation_monitor, decode_hparams) |
def make_unicode(string):
"""
Python 2 and 3 compatibility function that converts a string to Unicode. In case of Unicode, the string is returned
unchanged
:param string: input string
:return: Unicode string
"""
if sys.version < '3' and isinstance(string, str):
return unicode(string.decode('utf-8'))
return string | Python 2 and 3 compatibility function that converts a string to Unicode. In case of Unicode, the string is returned
unchanged
:param string: input string
:return: Unicode string | Below is the the instruction that describes the task:
### Input:
Python 2 and 3 compatibility function that converts a string to Unicode. In case of Unicode, the string is returned
unchanged
:param string: input string
:return: Unicode string
### Response:
def make_unicode(string):
"""
Python 2 and 3 compatibility function that converts a string to Unicode. In case of Unicode, the string is returned
unchanged
:param string: input string
:return: Unicode string
"""
if sys.version < '3' and isinstance(string, str):
return unicode(string.decode('utf-8'))
return string |
def load(self, key=None):
"""Read a pickled object representation from the open file."""
if key is None:
key = '_pickle'
obj = None
if _compat_hooks:
save = _compat_hooks[0]()
try:
self.__n += 1
s = self.__file.Get(key + ';{0:d}'.format(self.__n))
self.__io.setvalue(s.GetName())
if sys.version_info[0] < 3:
obj = pickle.Unpickler.load(self)
else:
obj = super(Unpickler, self).load()
self.__io.reopen()
finally:
if _compat_hooks:
save = _compat_hooks[1](save)
return obj | Read a pickled object representation from the open file. | Below is the the instruction that describes the task:
### Input:
Read a pickled object representation from the open file.
### Response:
def load(self, key=None):
"""Read a pickled object representation from the open file."""
if key is None:
key = '_pickle'
obj = None
if _compat_hooks:
save = _compat_hooks[0]()
try:
self.__n += 1
s = self.__file.Get(key + ';{0:d}'.format(self.__n))
self.__io.setvalue(s.GetName())
if sys.version_info[0] < 3:
obj = pickle.Unpickler.load(self)
else:
obj = super(Unpickler, self).load()
self.__io.reopen()
finally:
if _compat_hooks:
save = _compat_hooks[1](save)
return obj |
def prep_jid(nocache=False, passed_jid=None, recurse_count=0):
'''
Return a job id and prepare the job id directory.
This is the function responsible for making sure jids don't collide (unless
it is passed a jid).
So do what you have to do to make sure that stays the case
'''
if recurse_count >= 5:
err = 'prep_jid could not store a jid after {0} tries.'.format(recurse_count)
log.error(err)
raise salt.exceptions.SaltCacheError(err)
if passed_jid is None: # this can be a None or an empty string.
jid = salt.utils.jid.gen_jid(__opts__)
else:
jid = passed_jid
jid_dir = salt.utils.jid.jid_dir(jid, _job_dir(), __opts__['hash_type'])
# Make sure we create the jid dir, otherwise someone else is using it,
# meaning we need a new jid.
if not os.path.isdir(jid_dir):
try:
os.makedirs(jid_dir)
except OSError:
time.sleep(0.1)
if passed_jid is None:
return prep_jid(nocache=nocache, recurse_count=recurse_count+1)
try:
with salt.utils.files.fopen(os.path.join(jid_dir, 'jid'), 'wb+') as fn_:
fn_.write(salt.utils.stringutils.to_bytes(jid))
if nocache:
with salt.utils.files.fopen(os.path.join(jid_dir, 'nocache'), 'wb+'):
pass
except IOError:
log.warning(
'Could not write out jid file for job %s. Retrying.', jid)
time.sleep(0.1)
return prep_jid(passed_jid=jid, nocache=nocache,
recurse_count=recurse_count+1)
return jid | Return a job id and prepare the job id directory.
This is the function responsible for making sure jids don't collide (unless
it is passed a jid).
So do what you have to do to make sure that stays the case | Below is the the instruction that describes the task:
### Input:
Return a job id and prepare the job id directory.
This is the function responsible for making sure jids don't collide (unless
it is passed a jid).
So do what you have to do to make sure that stays the case
### Response:
def prep_jid(nocache=False, passed_jid=None, recurse_count=0):
'''
Return a job id and prepare the job id directory.
This is the function responsible for making sure jids don't collide (unless
it is passed a jid).
So do what you have to do to make sure that stays the case
'''
if recurse_count >= 5:
err = 'prep_jid could not store a jid after {0} tries.'.format(recurse_count)
log.error(err)
raise salt.exceptions.SaltCacheError(err)
if passed_jid is None: # this can be a None or an empty string.
jid = salt.utils.jid.gen_jid(__opts__)
else:
jid = passed_jid
jid_dir = salt.utils.jid.jid_dir(jid, _job_dir(), __opts__['hash_type'])
# Make sure we create the jid dir, otherwise someone else is using it,
# meaning we need a new jid.
if not os.path.isdir(jid_dir):
try:
os.makedirs(jid_dir)
except OSError:
time.sleep(0.1)
if passed_jid is None:
return prep_jid(nocache=nocache, recurse_count=recurse_count+1)
try:
with salt.utils.files.fopen(os.path.join(jid_dir, 'jid'), 'wb+') as fn_:
fn_.write(salt.utils.stringutils.to_bytes(jid))
if nocache:
with salt.utils.files.fopen(os.path.join(jid_dir, 'nocache'), 'wb+'):
pass
except IOError:
log.warning(
'Could not write out jid file for job %s. Retrying.', jid)
time.sleep(0.1)
return prep_jid(passed_jid=jid, nocache=nocache,
recurse_count=recurse_count+1)
return jid |
def sanitize (self):
"Make sure the configuration is consistent."
if self['logger'] is None:
self.sanitize_logger()
if self['loginurl']:
self.sanitize_loginurl()
self.sanitize_proxies()
self.sanitize_plugins()
self.sanitize_ssl()
# set default socket timeout
socket.setdefaulttimeout(self['timeout']) | Make sure the configuration is consistent. | Below is the the instruction that describes the task:
### Input:
Make sure the configuration is consistent.
### Response:
def sanitize (self):
"Make sure the configuration is consistent."
if self['logger'] is None:
self.sanitize_logger()
if self['loginurl']:
self.sanitize_loginurl()
self.sanitize_proxies()
self.sanitize_plugins()
self.sanitize_ssl()
# set default socket timeout
socket.setdefaulttimeout(self['timeout']) |
def database_forwards(self, app_label, schema_editor, from_state, to_state):
"""Perform forward migration."""
Process = from_state.apps.get_model('flow', 'Process') # pylint: disable=invalid-name
Data = from_state.apps.get_model('flow', 'Data') # pylint: disable=invalid-name
try:
# pylint: disable=invalid-name
ProcessMigrationHistory = from_state.apps.get_model('flow', 'ProcessMigrationHistory')
DataMigrationHistory = from_state.apps.get_model('flow', 'DataMigrationHistory')
except LookupError:
raise LookupError(
"Unable to retrieve migration history models. Perhaps you need "
"to add a migration dependency to a recent enough Resolwe flow "
"app in your migration?"
)
# Migrate processes.
processes = Process.objects.filter(slug=self.process)
if not processes.exists():
return
migrated_processes = set()
schema_field = '{}_schema'.format(self.schema_type)
for process in processes:
current_schema = getattr(process, schema_field)
if not self.migrate_process_schema(process, current_schema, from_state):
continue
setattr(process, schema_field, current_schema)
process.save()
migrated_processes.add(process)
# Update process migration log.
ProcessMigrationHistory.objects.create(
migration=self.migration_id,
process=process,
metadata=self.describe_process_migration(process),
)
if not migrated_processes:
return
# Migrate all data objects.
data = Data.objects.filter(process__in=migrated_processes)
self.migrate_data(data, from_state)
# Update data migration log.
for datum in data:
DataMigrationHistory.objects.create(
migration=self.migration_id,
data=datum,
metadata=self.describe_data_migration(datum)
) | Perform forward migration. | Below is the the instruction that describes the task:
### Input:
Perform forward migration.
### Response:
def database_forwards(self, app_label, schema_editor, from_state, to_state):
"""Perform forward migration."""
Process = from_state.apps.get_model('flow', 'Process') # pylint: disable=invalid-name
Data = from_state.apps.get_model('flow', 'Data') # pylint: disable=invalid-name
try:
# pylint: disable=invalid-name
ProcessMigrationHistory = from_state.apps.get_model('flow', 'ProcessMigrationHistory')
DataMigrationHistory = from_state.apps.get_model('flow', 'DataMigrationHistory')
except LookupError:
raise LookupError(
"Unable to retrieve migration history models. Perhaps you need "
"to add a migration dependency to a recent enough Resolwe flow "
"app in your migration?"
)
# Migrate processes.
processes = Process.objects.filter(slug=self.process)
if not processes.exists():
return
migrated_processes = set()
schema_field = '{}_schema'.format(self.schema_type)
for process in processes:
current_schema = getattr(process, schema_field)
if not self.migrate_process_schema(process, current_schema, from_state):
continue
setattr(process, schema_field, current_schema)
process.save()
migrated_processes.add(process)
# Update process migration log.
ProcessMigrationHistory.objects.create(
migration=self.migration_id,
process=process,
metadata=self.describe_process_migration(process),
)
if not migrated_processes:
return
# Migrate all data objects.
data = Data.objects.filter(process__in=migrated_processes)
self.migrate_data(data, from_state)
# Update data migration log.
for datum in data:
DataMigrationHistory.objects.create(
migration=self.migration_id,
data=datum,
metadata=self.describe_data_migration(datum)
) |
def collect_pre_split_asof_date_adjustments(
self,
split_adjusted_asof_date_idx,
sid_idx,
pre_adjustments,
requested_split_adjusted_columns
):
"""
Collect split adjustments that occur before the
split-adjusted-asof-date. All those adjustments must first be
UN-applied at the first date index and then re-applied on the
appropriate dates in order to match point in time share pricing data.
Parameters
----------
split_adjusted_asof_date_idx : int
The index in the calendar dates as-of which all data was
split-adjusted.
sid_idx : int
The index of the sid for which adjustments should be collected in
the adjusted array.
pre_adjustments : tuple(list(float), list(int))
The adjustment values, indexes in `dates`, and timestamps for
adjustments that happened after the split-asof-date.
requested_split_adjusted_columns : list of str
The requested split adjusted columns.
Returns
-------
col_to_split_adjustments : dict[str -> dict[int -> list of Adjustment]]
The adjustments for this sid that occurred on or before the
split-asof-date.
"""
col_to_split_adjustments = {}
if len(pre_adjustments[0]):
adjustment_values, date_indexes = pre_adjustments
for column_name in requested_split_adjusted_columns:
col_to_split_adjustments[column_name] = {}
# We need to undo all adjustments that happen before the
# split_asof_date here by reversing the split ratio.
col_to_split_adjustments[column_name][0] = [Float64Multiply(
0,
split_adjusted_asof_date_idx,
sid_idx,
sid_idx,
1 / future_adjustment
) for future_adjustment in adjustment_values]
for adjustment, date_index in zip(adjustment_values,
date_indexes):
adj = Float64Multiply(
0,
split_adjusted_asof_date_idx,
sid_idx,
sid_idx,
adjustment
)
add_new_adjustments(col_to_split_adjustments,
[adj],
column_name,
date_index)
return col_to_split_adjustments | Collect split adjustments that occur before the
split-adjusted-asof-date. All those adjustments must first be
UN-applied at the first date index and then re-applied on the
appropriate dates in order to match point in time share pricing data.
Parameters
----------
split_adjusted_asof_date_idx : int
The index in the calendar dates as-of which all data was
split-adjusted.
sid_idx : int
The index of the sid for which adjustments should be collected in
the adjusted array.
pre_adjustments : tuple(list(float), list(int))
The adjustment values, indexes in `dates`, and timestamps for
adjustments that happened after the split-asof-date.
requested_split_adjusted_columns : list of str
The requested split adjusted columns.
Returns
-------
col_to_split_adjustments : dict[str -> dict[int -> list of Adjustment]]
The adjustments for this sid that occurred on or before the
split-asof-date. | Below is the the instruction that describes the task:
### Input:
Collect split adjustments that occur before the
split-adjusted-asof-date. All those adjustments must first be
UN-applied at the first date index and then re-applied on the
appropriate dates in order to match point in time share pricing data.
Parameters
----------
split_adjusted_asof_date_idx : int
The index in the calendar dates as-of which all data was
split-adjusted.
sid_idx : int
The index of the sid for which adjustments should be collected in
the adjusted array.
pre_adjustments : tuple(list(float), list(int))
The adjustment values, indexes in `dates`, and timestamps for
adjustments that happened after the split-asof-date.
requested_split_adjusted_columns : list of str
The requested split adjusted columns.
Returns
-------
col_to_split_adjustments : dict[str -> dict[int -> list of Adjustment]]
The adjustments for this sid that occurred on or before the
split-asof-date.
### Response:
def collect_pre_split_asof_date_adjustments(
self,
split_adjusted_asof_date_idx,
sid_idx,
pre_adjustments,
requested_split_adjusted_columns
):
"""
Collect split adjustments that occur before the
split-adjusted-asof-date. All those adjustments must first be
UN-applied at the first date index and then re-applied on the
appropriate dates in order to match point in time share pricing data.
Parameters
----------
split_adjusted_asof_date_idx : int
The index in the calendar dates as-of which all data was
split-adjusted.
sid_idx : int
The index of the sid for which adjustments should be collected in
the adjusted array.
pre_adjustments : tuple(list(float), list(int))
The adjustment values, indexes in `dates`, and timestamps for
adjustments that happened after the split-asof-date.
requested_split_adjusted_columns : list of str
The requested split adjusted columns.
Returns
-------
col_to_split_adjustments : dict[str -> dict[int -> list of Adjustment]]
The adjustments for this sid that occurred on or before the
split-asof-date.
"""
col_to_split_adjustments = {}
if len(pre_adjustments[0]):
adjustment_values, date_indexes = pre_adjustments
for column_name in requested_split_adjusted_columns:
col_to_split_adjustments[column_name] = {}
# We need to undo all adjustments that happen before the
# split_asof_date here by reversing the split ratio.
col_to_split_adjustments[column_name][0] = [Float64Multiply(
0,
split_adjusted_asof_date_idx,
sid_idx,
sid_idx,
1 / future_adjustment
) for future_adjustment in adjustment_values]
for adjustment, date_index in zip(adjustment_values,
date_indexes):
adj = Float64Multiply(
0,
split_adjusted_asof_date_idx,
sid_idx,
sid_idx,
adjustment
)
add_new_adjustments(col_to_split_adjustments,
[adj],
column_name,
date_index)
return col_to_split_adjustments |
def step(self, action):
"""Pass action to underlying environment(s) or perform special action."""
# Special codes
if action in self._player_actions():
envs_step_tuples = self._player_actions()[action]()
elif self._wait and action == self.name_to_action_num["NOOP"]:
# Ignore no-op, do not pass to environment.
envs_step_tuples = self._last_step_tuples
else:
# Run action on environment(s).
if action == self.WAIT_MODE_NOOP_ACTION:
action = self.name_to_action_num["NOOP"]
# Perform action on underlying environment(s).
envs_step_tuples = self._step_envs(action)
self._update_statistics(envs_step_tuples)
self._last_step_tuples = envs_step_tuples
ob, reward, done, info = self._player_step_tuple(envs_step_tuples)
return ob, reward, done, info | Pass action to underlying environment(s) or perform special action. | Below is the the instruction that describes the task:
### Input:
Pass action to underlying environment(s) or perform special action.
### Response:
def step(self, action):
"""Pass action to underlying environment(s) or perform special action."""
# Special codes
if action in self._player_actions():
envs_step_tuples = self._player_actions()[action]()
elif self._wait and action == self.name_to_action_num["NOOP"]:
# Ignore no-op, do not pass to environment.
envs_step_tuples = self._last_step_tuples
else:
# Run action on environment(s).
if action == self.WAIT_MODE_NOOP_ACTION:
action = self.name_to_action_num["NOOP"]
# Perform action on underlying environment(s).
envs_step_tuples = self._step_envs(action)
self._update_statistics(envs_step_tuples)
self._last_step_tuples = envs_step_tuples
ob, reward, done, info = self._player_step_tuple(envs_step_tuples)
return ob, reward, done, info |
def AssertType(value, expected_type):
"""Ensures that given value has certain type.
Args:
value: A value to assert the type for.
expected_type: An expected type for the given value.
Raises:
TypeError: If given value does not have the expected type.
"""
if not isinstance(value, expected_type):
message = "Expected type `%r`, but got value `%r` of type `%s`"
message %= (expected_type, value, type(value))
raise TypeError(message) | Ensures that given value has certain type.
Args:
value: A value to assert the type for.
expected_type: An expected type for the given value.
Raises:
TypeError: If given value does not have the expected type. | Below is the the instruction that describes the task:
### Input:
Ensures that given value has certain type.
Args:
value: A value to assert the type for.
expected_type: An expected type for the given value.
Raises:
TypeError: If given value does not have the expected type.
### Response:
def AssertType(value, expected_type):
"""Ensures that given value has certain type.
Args:
value: A value to assert the type for.
expected_type: An expected type for the given value.
Raises:
TypeError: If given value does not have the expected type.
"""
if not isinstance(value, expected_type):
message = "Expected type `%r`, but got value `%r` of type `%s`"
message %= (expected_type, value, type(value))
raise TypeError(message) |
async def start(self):
"""Loop forever reading messages and invoking
the operation that caused them"""
while True:
try:
data = await self.reader.read(8192)
if self._trace_enabled:
self._logger.trace(
"Received %d bytes from remote server:\n%s",
len(data),
msg.dump(data),
)
await self.process(data)
except asyncio.CancelledError:
return
except:
logging.exception("Unhandled error in Message Reader")
raise | Loop forever reading messages and invoking
the operation that caused them | Below is the the instruction that describes the task:
### Input:
Loop forever reading messages and invoking
the operation that caused them
### Response:
async def start(self):
"""Loop forever reading messages and invoking
the operation that caused them"""
while True:
try:
data = await self.reader.read(8192)
if self._trace_enabled:
self._logger.trace(
"Received %d bytes from remote server:\n%s",
len(data),
msg.dump(data),
)
await self.process(data)
except asyncio.CancelledError:
return
except:
logging.exception("Unhandled error in Message Reader")
raise |
def load_metadata(self, data_dir, feature_name=None):
"""See base class for details."""
# Restore names if defined
filepath = _get_metadata_filepath(data_dir, feature_name)
if tf.io.gfile.exists(filepath):
with tf.io.gfile.GFile(filepath, 'r') as f:
info_data = json.load(f)
self.set_encoding_format(info_data['encoding_format'])
self.set_shape([None if d == -1 else d for d in info_data['shape']]) | See base class for details. | Below is the the instruction that describes the task:
### Input:
See base class for details.
### Response:
def load_metadata(self, data_dir, feature_name=None):
"""See base class for details."""
# Restore names if defined
filepath = _get_metadata_filepath(data_dir, feature_name)
if tf.io.gfile.exists(filepath):
with tf.io.gfile.GFile(filepath, 'r') as f:
info_data = json.load(f)
self.set_encoding_format(info_data['encoding_format'])
self.set_shape([None if d == -1 else d for d in info_data['shape']]) |
def similarity_by_infocontent(sense1: "wn.Synset", sense2: "wn.Synset", option: str) -> float:
"""
Returns similarity scores by information content.
:param sense1: A synset.
:param sense2: A synset.
:param option: String, one of ('res', 'jcn', 'lin').
:return: A float, similarity measurement.
"""
if sense1.pos != sense2.pos: # infocontent sim can't do diff POS.
return 0
if option in ['res', 'resnik']:
if sense1.pos not in wnic_bnc_resnik_add1.ic:
return 0
return wn.res_similarity(sense1, sense2, wnic_bnc_resnik_add1)
#return min(wn.res_similarity(sense1, sense2, wnic.ic(ic)) \
# for ic in info_contents)
elif option in ['jcn', "jiang-conrath"]:
if sense1.pos not in wnic_bnc_add1.ic:
return 0
return wn.jcn_similarity(sense1, sense2, wnic_bnc_add1)
elif option in ['lin']:
if sense1.pos not in wnic_bnc_add1.ic:
return 0
return wn.lin_similarity(sense1, sense2, wnic_bnc_add1) | Returns similarity scores by information content.
:param sense1: A synset.
:param sense2: A synset.
:param option: String, one of ('res', 'jcn', 'lin').
:return: A float, similarity measurement. | Below is the the instruction that describes the task:
### Input:
Returns similarity scores by information content.
:param sense1: A synset.
:param sense2: A synset.
:param option: String, one of ('res', 'jcn', 'lin').
:return: A float, similarity measurement.
### Response:
def similarity_by_infocontent(sense1: "wn.Synset", sense2: "wn.Synset", option: str) -> float:
"""
Returns similarity scores by information content.
:param sense1: A synset.
:param sense2: A synset.
:param option: String, one of ('res', 'jcn', 'lin').
:return: A float, similarity measurement.
"""
if sense1.pos != sense2.pos: # infocontent sim can't do diff POS.
return 0
if option in ['res', 'resnik']:
if sense1.pos not in wnic_bnc_resnik_add1.ic:
return 0
return wn.res_similarity(sense1, sense2, wnic_bnc_resnik_add1)
#return min(wn.res_similarity(sense1, sense2, wnic.ic(ic)) \
# for ic in info_contents)
elif option in ['jcn', "jiang-conrath"]:
if sense1.pos not in wnic_bnc_add1.ic:
return 0
return wn.jcn_similarity(sense1, sense2, wnic_bnc_add1)
elif option in ['lin']:
if sense1.pos not in wnic_bnc_add1.ic:
return 0
return wn.lin_similarity(sense1, sense2, wnic_bnc_add1) |
def characters(self, numberOfCharacters):
"""Returns characters at index + number of characters"""
return self.code[self.index:self.index + numberOfCharacters] | Returns characters at index + number of characters | Below is the the instruction that describes the task:
### Input:
Returns characters at index + number of characters
### Response:
def characters(self, numberOfCharacters):
"""Returns characters at index + number of characters"""
return self.code[self.index:self.index + numberOfCharacters] |
def parse_cookies(self, req, name, field):
"""Pull the value from the cookiejar."""
return core.get_value(req.COOKIES, name, field) | Pull the value from the cookiejar. | Below is the the instruction that describes the task:
### Input:
Pull the value from the cookiejar.
### Response:
def parse_cookies(self, req, name, field):
"""Pull the value from the cookiejar."""
return core.get_value(req.COOKIES, name, field) |
def process_host_check_result(self, host, status_code, plugin_output):
"""Process host check result
Format of the line that triggers function call::
PROCESS_HOST_CHECK_RESULT;<host_name>;<status_code>;<plugin_output>
:param host: host to process check to
:type host: alignak.objects.host.Host
:param status_code: exit code of plugin
:type status_code: int
:param plugin_output: plugin output
:type plugin_output: str
:return: None
TODO: say that check is PASSIVE
"""
now = time.time()
cls = host.__class__
# If globally disabled OR host disabled, do not launch..
if not cls.accept_passive_checks or not host.passive_checks_enabled:
return
try:
plugin_output = plugin_output.decode('utf8', 'ignore')
logger.debug('%s > Passive host check plugin output: %s',
host.get_full_name(), plugin_output)
except AttributeError:
# Python 3 will raise an exception
pass
except UnicodeError:
pass
# Maybe the check is just too old, if so, bail out!
if self.current_timestamp < host.last_chk:
logger.debug('%s > Passive host check is too old (%.2f seconds). '
'Ignoring, check output: %s',
host.get_full_name(), self.current_timestamp < host.last_chk,
plugin_output)
return
chk = host.launch_check(now, self.hosts, self.services, self.timeperiods,
self.daemon.macromodulations, self.daemon.checkmodulations,
self.daemon.checks, force=True)
# We will not have a check if an host/service is checked but it has no defined check_command
if not chk:
return
# Now we 'transform the check into a result'
# So exit_status, output and status is eaten by the host
chk.exit_status = status_code
chk.get_outputs(plugin_output, host.max_plugins_output_length)
chk.status = ACT_STATUS_WAIT_CONSUME
chk.check_time = self.current_timestamp # we are using the external command timestamps
# Set the corresponding host's check type to passive
chk.set_type_passive()
# self.daemon.nb_check_received += 1
self.send_an_element(chk)
# Ok now this result will be read by the scheduler the next loop
# raise a passive check log only if needed
if self.my_conf.log_passive_checks:
log_level = 'info'
if status_code == 1: # DOWN
log_level = 'error'
if status_code == 2: # UNREACHABLE
log_level = 'warning'
self.send_an_element(make_monitoring_log(
log_level, 'PASSIVE HOST CHECK: %s;%d;%s;%s;%s' % (
host.get_name(), status_code, chk.output, chk.long_output, chk.perf_data))) | Process host check result
Format of the line that triggers function call::
PROCESS_HOST_CHECK_RESULT;<host_name>;<status_code>;<plugin_output>
:param host: host to process check to
:type host: alignak.objects.host.Host
:param status_code: exit code of plugin
:type status_code: int
:param plugin_output: plugin output
:type plugin_output: str
:return: None
TODO: say that check is PASSIVE | Below is the the instruction that describes the task:
### Input:
Process host check result
Format of the line that triggers function call::
PROCESS_HOST_CHECK_RESULT;<host_name>;<status_code>;<plugin_output>
:param host: host to process check to
:type host: alignak.objects.host.Host
:param status_code: exit code of plugin
:type status_code: int
:param plugin_output: plugin output
:type plugin_output: str
:return: None
TODO: say that check is PASSIVE
### Response:
def process_host_check_result(self, host, status_code, plugin_output):
    """Process host check result

    Format of the line that triggers function call::

    PROCESS_HOST_CHECK_RESULT;<host_name>;<status_code>;<plugin_output>

    :param host: host to process check to
    :type host: alignak.objects.host.Host
    :param status_code: exit code of plugin
    :type status_code: int
    :param plugin_output: plugin output
    :type plugin_output: str
    :return: None
    TODO: say that check is PASSIVE
    """
    now = time.time()
    cls = host.__class__
    # If globally disabled OR host disabled, do not launch..
    if not cls.accept_passive_checks or not host.passive_checks_enabled:
        return

    try:
        plugin_output = plugin_output.decode('utf8', 'ignore')
        logger.debug('%s > Passive host check plugin output: %s',
                     host.get_full_name(), plugin_output)
    except AttributeError:
        # Python 3 strings have no decode(); the output is already text
        pass
    except UnicodeError:
        pass

    # Maybe the check is just too old, if so, bail out!
    if self.current_timestamp < host.last_chk:
        # Bug fix: log the actual age difference in seconds; the previous
        # code passed the boolean comparison result to %.2f, which always
        # rendered as 1.00 and carried no information.
        logger.debug('%s > Passive host check is too old (%.2f seconds). '
                     'Ignoring, check output: %s',
                     host.get_full_name(), host.last_chk - self.current_timestamp,
                     plugin_output)
        return

    chk = host.launch_check(now, self.hosts, self.services, self.timeperiods,
                            self.daemon.macromodulations, self.daemon.checkmodulations,
                            self.daemon.checks, force=True)
    # We will not have a check if an host/service is checked but it has no defined check_command
    if not chk:
        return

    # Now we 'transform the check into a result'
    # So exit_status, output and status is eaten by the host
    chk.exit_status = status_code
    chk.get_outputs(plugin_output, host.max_plugins_output_length)
    chk.status = ACT_STATUS_WAIT_CONSUME
    chk.check_time = self.current_timestamp  # we are using the external command timestamps
    # Set the corresponding host's check type to passive
    chk.set_type_passive()
    # self.daemon.nb_check_received += 1
    self.send_an_element(chk)
    # Ok now this result will be read by the scheduler the next loop

    # raise a passive check log only if needed
    if self.my_conf.log_passive_checks:
        log_level = 'info'
        if status_code == 1:  # DOWN
            log_level = 'error'
        if status_code == 2:  # UNREACHABLE
            log_level = 'warning'
        self.send_an_element(make_monitoring_log(
            log_level, 'PASSIVE HOST CHECK: %s;%d;%s;%s;%s' % (
                host.get_name(), status_code, chk.output, chk.long_output, chk.perf_data)))
def add_months(self, month_int):
"""
addition of a number of months
:param BusinessDate d:
:param int month_int:
:return bankdate:
"""
month_int += self.month
while month_int > 12:
self = BusinessDate.add_years(self, 1)
month_int -= 12
while month_int < 1:
self = BusinessDate.add_years(self, -1)
month_int += 12
l = monthrange(self.year, month_int)[1]
return BusinessDate.from_ymd(self.year, month_int, min(l, self.day)) | addition of a number of months
:param BusinessDate d:
:param int month_int:
:return bankdate: | Below is the instruction that describes the task:
### Input:
addition of a number of months
:param BusinessDate d:
:param int month_int:
:return bankdate:
### Response:
def add_months(self, month_int):
    """
    addition of a number of months

    :param int month_int: number of months to add (may be negative)
    :return BusinessDate: date shifted by month_int months, with the day
        clamped to the length of the target month
    """
    month_int += self.month
    # Normalise the month into 1..12, carrying whole years over.
    while month_int > 12:
        self = BusinessDate.add_years(self, 1)
        month_int -= 12
    while month_int < 1:
        self = BusinessDate.add_years(self, -1)
        month_int += 12
    # Clamp the day to the target month's length (e.g. Jan 31 + 1m -> Feb 28/29).
    # Renamed 'l' -> 'days_in_month': 'l' is an ambiguous identifier (PEP 8 / E741).
    days_in_month = monthrange(self.year, month_int)[1]
    return BusinessDate.from_ymd(self.year, month_int, min(days_in_month, self.day))
def _preserve_bonds(self, sliced_cartesian,
use_lookup=None):
"""Is called after cutting geometric shapes.
If you want to change the rules how bonds are preserved, when
applying e.g. :meth:`Cartesian.cut_sphere` this is the
function you have to modify.
It is recommended to inherit from the Cartesian class to
tailor it for your project, instead of modifying the
source code of ChemCoord.
Args:
sliced_frame (Cartesian):
use_lookup (bool): Use a lookup variable for
:meth:`~chemcoord.Cartesian.get_bonds`. The default is
specified in ``settings['defaults']['use_lookup']``
Returns:
Cartesian:
"""
if use_lookup is None:
use_lookup = settings['defaults']['use_lookup']
included_atoms_set = set(sliced_cartesian.index)
assert included_atoms_set.issubset(set(self.index)), \
'The sliced Cartesian has to be a subset of the bigger frame'
bond_dic = self.get_bonds(use_lookup=use_lookup)
new_atoms = set([])
for atom in included_atoms_set:
new_atoms = new_atoms | bond_dic[atom]
new_atoms = new_atoms - included_atoms_set
while not new_atoms == set([]):
index_of_interest = new_atoms.pop()
included_atoms_set = (
included_atoms_set |
self.get_coordination_sphere(
index_of_interest,
n_sphere=float('inf'),
only_surface=False,
exclude=included_atoms_set,
give_only_index=True,
use_lookup=use_lookup))
new_atoms = new_atoms - included_atoms_set
molecule = self.loc[included_atoms_set, :]
return molecule | Is called after cutting geometric shapes.
If you want to change the rules how bonds are preserved, when
applying e.g. :meth:`Cartesian.cut_sphere` this is the
function you have to modify.
It is recommended to inherit from the Cartesian class to
tailor it for your project, instead of modifying the
source code of ChemCoord.
Args:
sliced_frame (Cartesian):
use_lookup (bool): Use a lookup variable for
:meth:`~chemcoord.Cartesian.get_bonds`. The default is
specified in ``settings['defaults']['use_lookup']``
Returns:
Cartesian: | Below is the instruction that describes the task:
### Input:
Is called after cutting geometric shapes.
If you want to change the rules how bonds are preserved, when
applying e.g. :meth:`Cartesian.cut_sphere` this is the
function you have to modify.
It is recommended to inherit from the Cartesian class to
tailor it for your project, instead of modifying the
source code of ChemCoord.
Args:
sliced_frame (Cartesian):
use_lookup (bool): Use a lookup variable for
:meth:`~chemcoord.Cartesian.get_bonds`. The default is
specified in ``settings['defaults']['use_lookup']``
Returns:
Cartesian:
### Response:
def _preserve_bonds(self, sliced_cartesian,
                    use_lookup=None):
    """Is called after cutting geometric shapes.

    If you want to change the rules how bonds are preserved, when
    applying e.g. :meth:`Cartesian.cut_sphere` this is the
    function you have to modify.
    It is recommended to inherit from the Cartesian class to
    tailor it for your project, instead of modifying the
    source code of ChemCoord.

    Args:
        sliced_cartesian (Cartesian): subset of ``self`` produced by a cut.
        use_lookup (bool): Use a lookup variable for
            :meth:`~chemcoord.Cartesian.get_bonds`. The default is
            specified in ``settings['defaults']['use_lookup']``

    Returns:
        Cartesian: ``sliced_cartesian`` grown so that no bond crosses the
        cut boundary.
    """
    if use_lookup is None:
        use_lookup = settings['defaults']['use_lookup']
    included_atoms_set = set(sliced_cartesian.index)
    assert included_atoms_set.issubset(set(self.index)), \
        'The sliced Cartesian has to be a subset of the bigger frame'
    bond_dic = self.get_bonds(use_lookup=use_lookup)
    new_atoms = set([])
    # Collect every bond partner of the kept atoms; partners that lie
    # outside the slice are exactly the bonds the cut would break.
    for atom in included_atoms_set:
        new_atoms = new_atoms | bond_dic[atom]
    new_atoms = new_atoms - included_atoms_set
    # Pull in whole connected fragments until no bond crosses the boundary.
    while not new_atoms == set([]):
        index_of_interest = new_atoms.pop()
        # n_sphere=inf walks the full connected component reachable from
        # this atom, excluding what is already kept.
        included_atoms_set = (
            included_atoms_set |
            self.get_coordination_sphere(
                index_of_interest,
                n_sphere=float('inf'),
                only_surface=False,
                exclude=included_atoms_set,
                give_only_index=True,
                use_lookup=use_lookup))
        new_atoms = new_atoms - included_atoms_set
    molecule = self.loc[included_atoms_set, :]
    return molecule
def compute_gt_results(est_file, ref_file, boundaries_id, labels_id, config,
bins=251, annotator_id=0):
"""Computes the results by using the ground truth dataset identified by
the annotator parameter.
Return
------
results : dict
Dictionary of the results (see function compute_results).
"""
if config["hier"]:
ref_times, ref_labels, ref_levels = \
msaf.io.read_hier_references(
ref_file, annotation_id=annotator_id,
exclude_levels=["segment_salami_function"])
else:
jam = jams.load(ref_file, validate=False)
ann = jam.search(namespace='segment_.*')[annotator_id]
ref_inter, ref_labels = ann.to_interval_values()
# Read estimations with correct configuration
est_inter, est_labels = io.read_estimations(est_file, boundaries_id,
labels_id, **config)
# Compute the results and return
logging.info("Evaluating %s" % os.path.basename(est_file))
if config["hier"]:
# Hierarchical
assert len(est_inter) == len(est_labels), "Same number of levels " \
"are required in the boundaries and labels for the hierarchical " \
"evaluation."
est_times = []
est_labels = []
# Sort based on how many segments per level
est_inter = sorted(est_inter, key=lambda level: len(level))
for inter in est_inter:
est_times.append(msaf.utils.intervals_to_times(inter))
# Add fake labels (hierarchical eval does not use labels --yet--)
est_labels.append(np.ones(len(est_times[-1]) - 1) * -1)
# Align the times
utils.align_end_hierarchies(est_times, ref_times, thres=1)
# To intervals
est_hier = [utils.times_to_intervals(times) for times in est_times]
ref_hier = [utils.times_to_intervals(times) for times in ref_times]
# Compute evaluations
res = {}
res["t_recall10"], res["t_precision10"], res["t_measure10"] = \
mir_eval.hierarchy.tmeasure(ref_hier, est_hier, window=10)
res["t_recall15"], res["t_precision15"], res["t_measure15"] = \
mir_eval.hierarchy.tmeasure(ref_hier, est_hier, window=15)
res["track_id"] = os.path.basename(est_file)[:-5]
return res
else:
# Flat
return compute_results(ref_inter, est_inter, ref_labels, est_labels,
bins, est_file) | Computes the results by using the ground truth dataset identified by
the annotator parameter.
Return
------
results : dict
Dictionary of the results (see function compute_results). | Below is the instruction that describes the task:
### Input:
Computes the results by using the ground truth dataset identified by
the annotator parameter.
Return
------
results : dict
Dictionary of the results (see function compute_results).
### Response:
def compute_gt_results(est_file, ref_file, boundaries_id, labels_id, config,
                       bins=251, annotator_id=0):
    """Computes the results by using the ground truth dataset identified by
    the annotator parameter.

    Parameters
    ----------
    est_file : str
        Path to the estimation (JAMS) file.
    ref_file : str
        Path to the reference annotation file.
    boundaries_id : str
        Identifier of the boundary detection algorithm.
    labels_id : str
        Identifier of the labeling algorithm.
    config : dict
        Algorithm configuration; ``config["hier"]`` selects hierarchical
        vs. flat evaluation.
    bins : int
        Number of bins (passed through to ``compute_results``).
    annotator_id : int
        Index of the annotator whose ground truth is used.

    Return
    ------
    results : dict
        Dictionary of the results (see function compute_results).
    """
    if config["hier"]:
        # Hierarchical references; the salami function level is excluded
        # on purpose.
        ref_times, ref_labels, ref_levels = \
            msaf.io.read_hier_references(
                ref_file, annotation_id=annotator_id,
                exclude_levels=["segment_salami_function"])
    else:
        jam = jams.load(ref_file, validate=False)
        ann = jam.search(namespace='segment_.*')[annotator_id]
        ref_inter, ref_labels = ann.to_interval_values()

    # Read estimations with correct configuration
    est_inter, est_labels = io.read_estimations(est_file, boundaries_id,
                                                labels_id, **config)

    # Compute the results and return
    # (idiom fix: lazy %-style logging args instead of eager interpolation)
    logging.info("Evaluating %s", os.path.basename(est_file))
    if config["hier"]:
        # Hierarchical
        assert len(est_inter) == len(est_labels), "Same number of levels " \
            "are required in the boundaries and labels for the hierarchical " \
            "evaluation."
        est_times = []
        est_labels = []

        # Sort based on how many segments per level
        est_inter = sorted(est_inter, key=lambda level: len(level))
        for inter in est_inter:
            est_times.append(msaf.utils.intervals_to_times(inter))
            # Add fake labels (hierarchical eval does not use labels --yet--)
            est_labels.append(np.ones(len(est_times[-1]) - 1) * -1)

        # Align the times
        utils.align_end_hierarchies(est_times, ref_times, thres=1)

        # To intervals
        est_hier = [utils.times_to_intervals(times) for times in est_times]
        ref_hier = [utils.times_to_intervals(times) for times in ref_times]

        # Compute evaluations
        res = {}
        res["t_recall10"], res["t_precision10"], res["t_measure10"] = \
            mir_eval.hierarchy.tmeasure(ref_hier, est_hier, window=10)
        res["t_recall15"], res["t_precision15"], res["t_measure15"] = \
            mir_eval.hierarchy.tmeasure(ref_hier, est_hier, window=15)

        # NOTE(review): [:-5] strips what is presumably a ".jams" suffix --
        # confirm against the estimation file naming convention.
        res["track_id"] = os.path.basename(est_file)[:-5]
        return res
    else:
        # Flat
        return compute_results(ref_inter, est_inter, ref_labels, est_labels,
                               bins, est_file)
def position(self):
"""Returns (line, col) of the current position in the stream."""
line, col = self._position(self.chunkOffset)
return (line + 1, col) | Returns (line, col) of the current position in the stream. | Below is the the instruction that describes the task:
### Input:
Returns (line, col) of the current position in the stream.
### Response:
def position(self):
    """Returns (line, col) of the current position in the stream."""
    row, column = self._position(self.chunkOffset)
    # Internal line counting is zero-based; callers expect one-based lines.
    return (row + 1, column)
def mdstrip(value, length=None, end='…'):
'''
Truncate and strip tags from a markdown source
The markdown source is truncated at the excerpt if present and
smaller than the required length. Then, all html tags are stripped.
'''
if not value:
return ''
if EXCERPT_TOKEN in value:
value = value.split(EXCERPT_TOKEN, 1)[0]
rendered = md(value, wrap=False)
text = do_striptags(rendered)
text = bleach_clean(text)
if length and length > 0:
text = do_truncate(None, text, length, end=end, leeway=2)
return text | Truncate and strip tags from a markdown source
The markdown source is truncated at the excerpt if present and
smaller than the required length. Then, all html tags are stripped. | Below is the instruction that describes the task:
### Input:
Truncate and strip tags from a markdown source
The markdown source is truncated at the excerpt if present and
smaller than the required length. Then, all html tags are stripped.
### Response:
def mdstrip(value, length=None, end='…'):
    '''
    Truncate and strip tags from a markdown source

    The markdown source is truncated at the excerpt if present and
    smaller than the required length. Then, all html tags are stripped.
    '''
    if not value:
        return ''
    # partition() keeps everything before the excerpt marker and is a
    # no-op (returns the whole string) when the marker is absent.
    source = value.partition(EXCERPT_TOKEN)[0]
    text = bleach_clean(do_striptags(md(source, wrap=False)))
    if length and length > 0:
        text = do_truncate(None, text, length, end=end, leeway=2)
    return text
def lchown(path, user, group=None, pgroup=None):
'''
Chown a file, pass the file the desired user and group without following any
symlinks.
Under Windows, the group parameter will be ignored.
This is because while files in Windows do have a 'primary group'
property, this is rarely used. It generally has no bearing on
permissions unless intentionally configured and is most commonly used to
provide Unix compatibility (e.g. Services For Unix, NFS services).
If you do want to change the 'primary group' property and understand the
implications, pass the Windows only parameter, pgroup, instead.
To set the primary group to 'None', it must be specified in quotes.
Otherwise Salt will interpret it as the Python value of None and no primary
group changes will occur. See the example below.
Args:
path (str): The path to the file or directory
user (str): The name of the user to own the file
group (str): The group (not used)
pgroup (str): The primary group to assign
Returns:
bool: True if successful, otherwise error
CLI Example:
.. code-block:: bash
salt '*' file.lchown c:\\temp\\test.txt myusername
salt '*' file.lchown c:\\temp\\test.txt myusername pgroup=Administrators
salt '*' file.lchown c:\\temp\\test.txt myusername "pgroup='None'"
'''
if group:
func_name = '{0}.lchown'.format(__virtualname__)
if __opts__.get('fun', '') == func_name:
log.info('The group parameter has no effect when using %s on '
'Windows systems; see function docs for details.',
func_name)
log.debug('win_file.py %s Ignoring the group parameter for %s',
func_name, path)
group = None
return chown(path, user, group, pgroup, follow_symlinks=False) | Chown a file, pass the file the desired user and group without following any
symlinks.
Under Windows, the group parameter will be ignored.
This is because while files in Windows do have a 'primary group'
property, this is rarely used. It generally has no bearing on
permissions unless intentionally configured and is most commonly used to
provide Unix compatibility (e.g. Services For Unix, NFS services).
If you do want to change the 'primary group' property and understand the
implications, pass the Windows only parameter, pgroup, instead.
To set the primary group to 'None', it must be specified in quotes.
Otherwise Salt will interpret it as the Python value of None and no primary
group changes will occur. See the example below.
Args:
path (str): The path to the file or directory
user (str): The name of the user to own the file
group (str): The group (not used)
pgroup (str): The primary group to assign
Returns:
bool: True if successful, otherwise error
CLI Example:
.. code-block:: bash
salt '*' file.lchown c:\\temp\\test.txt myusername
salt '*' file.lchown c:\\temp\\test.txt myusername pgroup=Administrators
salt '*' file.lchown c:\\temp\\test.txt myusername "pgroup='None'" | Below is the instruction that describes the task:
### Input:
Chown a file, pass the file the desired user and group without following any
symlinks.
Under Windows, the group parameter will be ignored.
This is because while files in Windows do have a 'primary group'
property, this is rarely used. It generally has no bearing on
permissions unless intentionally configured and is most commonly used to
provide Unix compatibility (e.g. Services For Unix, NFS services).
If you do want to change the 'primary group' property and understand the
implications, pass the Windows only parameter, pgroup, instead.
To set the primary group to 'None', it must be specified in quotes.
Otherwise Salt will interpret it as the Python value of None and no primary
group changes will occur. See the example below.
Args:
path (str): The path to the file or directory
user (str): The name of the user to own the file
group (str): The group (not used)
pgroup (str): The primary group to assign
Returns:
bool: True if successful, otherwise error
CLI Example:
.. code-block:: bash
salt '*' file.lchown c:\\temp\\test.txt myusername
salt '*' file.lchown c:\\temp\\test.txt myusername pgroup=Administrators
salt '*' file.lchown c:\\temp\\test.txt myusername "pgroup='None'"
### Response:
def lchown(path, user, group=None, pgroup=None):
    '''
    Chown a file, pass the file the desired user and group without following any
    symlinks.

    Under Windows, the group parameter will be ignored.

    This is because while files in Windows do have a 'primary group'
    property, this is rarely used. It generally has no bearing on
    permissions unless intentionally configured and is most commonly used to
    provide Unix compatibility (e.g. Services For Unix, NFS services).

    If you do want to change the 'primary group' property and understand the
    implications, pass the Windows only parameter, pgroup, instead.

    To set the primary group to 'None', it must be specified in quotes.
    Otherwise Salt will interpret it as the Python value of None and no primary
    group changes will occur. See the example below.

    Args:
        path (str): The path to the file or directory
        user (str): The name of the user to own the file
        group (str): The group (not used)
        pgroup (str): The primary group to assign

    Returns:
        bool: True if successful, otherwise error

    CLI Example:

    .. code-block:: bash

        salt '*' file.lchown c:\\temp\\test.txt myusername
        salt '*' file.lchown c:\\temp\\test.txt myusername pgroup=Administrators
        salt '*' file.lchown c:\\temp\\test.txt myusername "pgroup='None'"
    '''
    if not group:
        # No group given: nothing to warn about, delegate directly.
        return chown(path, user, group, pgroup, follow_symlinks=False)

    func_name = '{0}.lchown'.format(__virtualname__)
    # Warn only when this function was invoked directly (not via chown).
    if __opts__.get('fun', '') == func_name:
        log.info('The group parameter has no effect when using %s on '
                 'Windows systems; see function docs for details.',
                 func_name)
    log.debug('win_file.py %s Ignoring the group parameter for %s',
              func_name, path)
    return chown(path, user, None, pgroup, follow_symlinks=False)
def parse_brome_config_from_browser_config(browser_config):
"""Parse the browser config and look for brome specific config
Args:
browser_config (dict)
"""
config = {}
brome_keys = [key for key in browser_config if key.find(':') != -1]
for brome_key in brome_keys:
section, option = brome_key.split(':')
value = browser_config[brome_key]
if section not in config:
config[section] = {}
config[section][option] = value
return config | Parse the browser config and look for brome specific config
Args:
browser_config (dict) | Below is the instruction that describes the task:
### Input:
Parse the browser config and look for brome specific config
Args:
browser_config (dict)
### Response:
def parse_brome_config_from_browser_config(browser_config):
    """Parse the browser config and look for brome specific config

    Keys spelled ``"section:option"`` are treated as brome config entries
    and grouped by section; all other keys are ignored.

    Args:
        browser_config (dict)

    Returns:
        dict: mapping of section name to ``{option: value}``
    """
    config = {}
    # Idiom fix: membership test instead of find() != -1.
    brome_keys = [key for key in browser_config if ':' in key]
    for brome_key in brome_keys:
        # Split on the first colon only, so option names may themselves
        # contain colons (the old split(':') raised ValueError on them).
        section, option = brome_key.split(':', 1)
        config.setdefault(section, {})[option] = browser_config[brome_key]
    return config
def selected(script, face=True, vert=True):
""" Delete selected vertices and/or faces
Note: if the mesh has no faces (e.g. a point cloud) you must
set face=False, or the vertices will not be deleted
Args:
script: the FilterScript object or script filename to write
the filter to.
face (bool): if True the selected faces will be deleted. If vert
is also True, then all the vertices surrounded by those faces will
also be deleted. Note that if no faces are selected (only vertices)
then this filter will not do anything. For example, if you want to
delete a point cloud selection, you must set this to False.
vert (bool): if True the selected vertices will be deleted.
Layer stack:
No impacts
MeshLab versions:
2016.12
1.3.4BETA
"""
if face and vert:
filter_xml = ' <filter name="Delete Selected Faces and Vertices"/>\n'
elif face and not vert:
filter_xml = ' <filter name="Delete Selected Faces"/>\n'
elif not face and vert:
filter_xml = ' <filter name="Delete Selected Vertices"/>\n'
util.write_filter(script, filter_xml)
return None | Delete selected vertices and/or faces
Note: if the mesh has no faces (e.g. a point cloud) you must
set face=False, or the vertices will not be deleted
Args:
script: the FilterScript object or script filename to write
the filter to.
face (bool): if True the selected faces will be deleted. If vert
is also True, then all the vertices surrounded by those faces will
also be deleted. Note that if no faces are selected (only vertices)
then this filter will not do anything. For example, if you want to
delete a point cloud selection, you must set this to False.
vert (bool): if True the selected vertices will be deleted.
Layer stack:
No impacts
MeshLab versions:
2016.12
1.3.4BETA | Below is the instruction that describes the task:
### Input:
Delete selected vertices and/or faces
Note: if the mesh has no faces (e.g. a point cloud) you must
set face=False, or the vertices will not be deleted
Args:
script: the FilterScript object or script filename to write
the filter to.
face (bool): if True the selected faces will be deleted. If vert
is also True, then all the vertices surrounded by those faces will
also be deleted. Note that if no faces are selected (only vertices)
then this filter will not do anything. For example, if you want to
delete a point cloud selection, you must set this to False.
vert (bool): if True the selected vertices will be deleted.
Layer stack:
No impacts
MeshLab versions:
2016.12
1.3.4BETA
### Response:
def selected(script, face=True, vert=True):
    """ Delete selected vertices and/or faces

    Note: if the mesh has no faces (e.g. a point cloud) you must
    set face=False, or the vertices will not be deleted

    Args:
        script: the FilterScript object or script filename to write
            the filter to.
        face (bool): if True the selected faces will be deleted. If vert
            is also True, then all the vertices surrounded by those faces will
            also be deleted. Note that if no faces are selected (only vertices)
            then this filter will not do anything. For example, if you want to
            delete a point cloud selection, you must set this to False.
        vert (bool): if True the selected vertices will be deleted.

    Raises:
        ValueError: if both face and vert are False (nothing to delete).

    Layer stack:
        No impacts

    MeshLab versions:
        2016.12
        1.3.4BETA
    """
    if face and vert:
        filter_xml = ' <filter name="Delete Selected Faces and Vertices"/>\n'
    elif face:
        filter_xml = ' <filter name="Delete Selected Faces"/>\n'
    elif vert:
        filter_xml = ' <filter name="Delete Selected Vertices"/>\n'
    else:
        # Bug fix: the original fell through with filter_xml unbound and
        # crashed with NameError; fail with a clear message instead.
        raise ValueError('selected: at least one of "face" or "vert" must be True')
    util.write_filter(script, filter_xml)
    return None
def popState(self):
"""
Restore domain state from the top of the stack
Variables hidden since the last popped state are then available
again.
"""
diff = self._states.pop() - len(self)
if diff:
self.extend(self._hidden[-diff:])
del self._hidden[-diff:] | Restore domain state from the top of the stack
Variables hidden since the last popped state are then available
again. | Below is the instruction that describes the task:
### Input:
Restore domain state from the top of the stack
Variables hidden since the last popped state are then available
again.
### Response:
def popState(self):
    """
    Restore domain state from the top of the stack

    Variables hidden since the last popped state are then available
    again.
    """
    # The stack records the domain's size at push time; the difference to
    # the current size is how many variables were hidden since then.
    diff = self._states.pop() - len(self)
    if diff:
        # NOTE(review): assumes the domain only shrinks between push and
        # pop (diff >= 0) -- confirm against the push/hide call sites.
        self.extend(self._hidden[-diff:])
        del self._hidden[-diff:]
def extract_table(html):
soup = BeautifulSoup(html,'lxml')
table = soup.find("table", attrs={"class":"basic_table"})
if table is None:
return table
return table
'''# The first tr contains the field names.
datasets = []
for row in table.find_all("tr"):
dataset = list((td.get_text().strip(),
td.attrs.get('colspan', 1),
td.attrs.get('rowspan', 1))
for td in row.find_all("td"))
datasets.append(dataset)'''
return datasets | # The first tr contains the field names.
datasets = []
for row in table.find_all("tr"):
dataset = list((td.get_text().strip(),
td.attrs.get('colspan', 1),
td.attrs.get('rowspan', 1))
for td in row.find_all("td"))
datasets.append(dataset) | Below is the instruction that describes the task:
### Input:
# The first tr contains the field names.
datasets = []
for row in table.find_all("tr"):
dataset = list((td.get_text().strip(),
td.attrs.get('colspan', 1),
td.attrs.get('rowspan', 1))
for td in row.find_all("td"))
datasets.append(dataset)
### Response:
def extract_table(html):
    """Return the first ``<table class="basic_table">`` element of *html*.

    :param html: HTML document as a string or bytes.
    :return: the matching ``bs4`` Tag, or None when no such table exists.
    """
    soup = BeautifulSoup(html, 'lxml')
    # find() already returns None when the table is missing, so the old
    # "if table is None: return table / return table" double return was
    # redundant.  Also removed: an unreachable triple-quoted block of
    # commented-out code and a dead "return datasets" that referenced an
    # undefined name.
    return soup.find("table", attrs={"class": "basic_table"})
def classify_elements(self,
file,
file_content_type=None,
model=None,
**kwargs):
"""
Classify the elements of a document.
Analyzes the structural and semantic elements of a document.
:param file file: The document to classify.
:param str file_content_type: The content type of file.
:param str model: The analysis model to be used by the service. For the **Element
classification** and **Compare two documents** methods, the default is
`contracts`. For the **Extract tables** method, the default is `tables`. These
defaults apply to the standalone methods as well as to the methods' use in
batch-processing requests.
:param dict headers: A `dict` containing the request headers
:return: A `DetailedResponse` containing the result, headers and HTTP status code.
:rtype: DetailedResponse
"""
if file is None:
raise ValueError('file must be provided')
headers = {}
if 'headers' in kwargs:
headers.update(kwargs.get('headers'))
sdk_headers = get_sdk_headers('compare-comply', 'V1',
'classify_elements')
headers.update(sdk_headers)
params = {'version': self.version, 'model': model}
form_data = {}
form_data['file'] = (None, file, file_content_type or
'application/octet-stream')
url = '/v1/element_classification'
response = self.request(
method='POST',
url=url,
headers=headers,
params=params,
files=form_data,
accept_json=True)
return response | Classify the elements of a document.
Analyzes the structural and semantic elements of a document.
:param file file: The document to classify.
:param str file_content_type: The content type of file.
:param str model: The analysis model to be used by the service. For the **Element
classification** and **Compare two documents** methods, the default is
`contracts`. For the **Extract tables** method, the default is `tables`. These
defaults apply to the standalone methods as well as to the methods' use in
batch-processing requests.
:param dict headers: A `dict` containing the request headers
:return: A `DetailedResponse` containing the result, headers and HTTP status code.
:rtype: DetailedResponse | Below is the instruction that describes the task:
### Input:
Classify the elements of a document.
Analyzes the structural and semantic elements of a document.
:param file file: The document to classify.
:param str file_content_type: The content type of file.
:param str model: The analysis model to be used by the service. For the **Element
classification** and **Compare two documents** methods, the default is
`contracts`. For the **Extract tables** method, the default is `tables`. These
defaults apply to the standalone methods as well as to the methods' use in
batch-processing requests.
:param dict headers: A `dict` containing the request headers
:return: A `DetailedResponse` containing the result, headers and HTTP status code.
:rtype: DetailedResponse
### Response:
def classify_elements(self,
                      file,
                      file_content_type=None,
                      model=None,
                      **kwargs):
    """
    Classify the elements of a document.

    Analyzes the structural and semantic elements of a document.

    :param file file: The document to classify.
    :param str file_content_type: The content type of file.
    :param str model: The analysis model to be used by the service. For the **Element
    classification** and **Compare two documents** methods, the default is
    `contracts`. For the **Extract tables** method, the default is `tables`. These
    defaults apply to the standalone methods as well as to the methods' use in
    batch-processing requests.
    :param dict headers: A `dict` containing the request headers
    :return: A `DetailedResponse` containing the result, headers and HTTP status code.
    :rtype: DetailedResponse
    """
    if file is None:
        raise ValueError('file must be provided')

    # Merge caller-supplied headers with the SDK analytics headers.
    headers = {}
    if 'headers' in kwargs:
        headers.update(kwargs.get('headers'))
    headers.update(get_sdk_headers('compare-comply', 'V1', 'classify_elements'))

    params = {'version': self.version, 'model': model}
    form_data = {
        'file': (None, file, file_content_type or 'application/octet-stream'),
    }

    return self.request(
        method='POST',
        url='/v1/element_classification',
        headers=headers,
        params=params,
        files=form_data,
        accept_json=True)
def CountHuntResultsByType(self, hunt_id, cursor=None):
"""Counts number of hunts results per type."""
hunt_id_int = db_utils.HuntIDToInt(hunt_id)
query = ("SELECT type, COUNT(*) FROM flow_results "
"WHERE hunt_id = %s GROUP BY type")
cursor.execute(query, [hunt_id_int])
return dict(cursor.fetchall()) | Counts number of hunts results per type. | Below is the the instruction that describes the task:
### Input:
Counts number of hunts results per type.
### Response:
def CountHuntResultsByType(self, hunt_id, cursor=None):
    """Counts number of hunts results per type."""
    query = ("SELECT type, COUNT(*) FROM flow_results "
             "WHERE hunt_id = %s GROUP BY type")
    # Hunt ids are stored as integers in the table.
    cursor.execute(query, [db_utils.HuntIDToInt(hunt_id)])
    return dict(cursor.fetchall())
def colorize(self, colormap):
    """Colorize the current image using `colormap`.

    .. note::
        Works only on "L" or "LA" images.

    Replaces ``self.data`` in place: the single luminance band is expanded
    into the RGB (or RGBA) channels produced by ``colormap``.  An existing
    alpha band ("LA" mode) is carried over unchanged, unless the colormap
    itself already supplies an alpha channel.

    Raises:
        ValueError: if the image mode is not "L" or "LA".
    """
    if self.mode not in ("L", "LA"):
        raise ValueError("Image should be grayscale to colorize")
    # Keep the alpha band aside so it can be re-attached afterwards.
    if self.mode == "LA":
        alpha = self.data.sel(bands=['A'])
    else:
        alpha = None
    l_data = self.data.sel(bands=['L'])

    def _colorize(l_data, colormap):
        # 'l_data' is (1, rows, cols)
        # 'channels' will be a list of 3 (RGB) or 4 (RGBA) arrays
        channels = colormap.colorize(l_data)
        return np.concatenate(channels, axis=0)

    # Apply the colormap lazily, one dask block at a time.  The bands axis
    # grows from 1 to colormap.colors.shape[1] (3 or 4), hence the explicit
    # output chunks and dtype.
    new_data = l_data.data.map_blocks(_colorize, colormap,
                                      chunks=(colormap.colors.shape[1],) + l_data.data.chunks[1:],
                                      dtype=np.float64)
    if colormap.colors.shape[1] == 4:
        # The colormap already produced an alpha channel.
        # NOTE(review): any original "LA" alpha is ignored on this branch --
        # presumably intentional; confirm upstream.
        mode = "RGBA"
    elif alpha is not None:
        # Re-attach the original alpha band after the new RGB bands.
        new_data = da.concatenate([new_data, alpha.data], axis=0)
        mode = "RGBA"
    else:
        mode = "RGB"
    # copy the coordinates so we don't affect the original
    coords = dict(self.data.coords)
    coords['bands'] = list(mode)
    attrs = self.data.attrs
    dims = self.data.dims
    self.data = xr.DataArray(new_data, coords=coords, attrs=attrs, dims=dims) | Colorize the current image using `colormap`.
.. note::
Works only on "L" or "LA" images. | Below is the instruction that describes the task:
### Input:
Colorize the current image using `colormap`.
.. note::
Works only on "L" or "LA" images.
### Response:
def colorize(self, colormap):
    """Colorize the current image using `colormap`.

    .. note::
        Works only on "L" or "LA" images.

    Expands the "L" band of ``self.data`` into RGB/RGBA channels via
    ``colormap`` (in place); a pre-existing alpha band is re-attached
    unless the colormap already yields 4 channels.
    """
    if self.mode not in ("L", "LA"):
        raise ValueError("Image should be grayscale to colorize")
    if self.mode == "LA":
        # Preserve the alpha band for re-attachment below.
        alpha = self.data.sel(bands=['A'])
    else:
        alpha = None
    l_data = self.data.sel(bands=['L'])

    def _colorize(l_data, colormap):
        # 'l_data' is (1, rows, cols)
        # 'channels' will be a list of 3 (RGB) or 4 (RGBA) arrays
        channels = colormap.colorize(l_data)
        return np.concatenate(channels, axis=0)

    # Lazy, per-block application; output bands axis becomes 3 or 4 wide.
    new_data = l_data.data.map_blocks(_colorize, colormap,
                                      chunks=(colormap.colors.shape[1],) + l_data.data.chunks[1:],
                                      dtype=np.float64)
    if colormap.colors.shape[1] == 4:
        # Colormap supplies its own alpha channel.
        mode = "RGBA"
    elif alpha is not None:
        new_data = da.concatenate([new_data, alpha.data], axis=0)
        mode = "RGBA"
    else:
        mode = "RGB"
    # copy the coordinates so we don't affect the original
    coords = dict(self.data.coords)
    coords['bands'] = list(mode)
    attrs = self.data.attrs
    dims = self.data.dims
    self.data = xr.DataArray(new_data, coords=coords, attrs=attrs, dims=dims) |
def location(self):
    """The location of the repository that contains :attr:`revision` (a string or :data:`None`)."""
    # Split "location#revision" on the first '#'.  Both halves must be
    # non-empty for the expression to denote a location; otherwise None.
    location, _, revision = self.expression.partition('#')
    return location if location and revision else None | The location of the repository that contains :attr:`revision` (a string or :data:`None`). | Below is the the instruction that describes the task:
### Input:
The location of the repository that contains :attr:`revision` (a string or :data:`None`).
### Response:
def location(self):
    """The location of the repository that contains :attr:`revision` (a string or :data:`None`).

    Parses ``self.expression`` of the form ``"<location>#<revision>"``;
    returns the part before the first '#', or None when either part is empty.
    """
    location, _, revision = self.expression.partition('#')
    return location if location and revision else None |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.