code stringlengths 75 104k | docstring stringlengths 1 46.9k | text stringlengths 164 112k |
|---|---|---|
def os_requires_version(ostack_release, pkg):
"""
Decorator for hook to specify minimum supported release
"""
def wrap(f):
@wraps(f)
def wrapped_f(*args):
if os_release(pkg) < ostack_release:
raise Exception("This hook is not supported on releases"
" before %s" % ostack_release)
f(*args)
return wrapped_f
return wrap | Decorator for hook to specify minimum supported release | Below is the the instruction that describes the task:
### Input:
Decorator for hook to specify minimum supported release
### Response:
def os_requires_version(ostack_release, pkg):
"""
Decorator for hook to specify minimum supported release
"""
def wrap(f):
@wraps(f)
def wrapped_f(*args):
if os_release(pkg) < ostack_release:
raise Exception("This hook is not supported on releases"
" before %s" % ostack_release)
f(*args)
return wrapped_f
return wrap |
def patch_modules():
"""Patch modules to work better with :meth:`ExampleRunner.invoke`.
``subprocess.call` output is redirected to ``click.echo`` so it
shows up in the example output.
"""
old_call = subprocess.call
def dummy_call(*args, **kwargs):
with tempfile.TemporaryFile("wb+") as f:
kwargs["stdout"] = f
kwargs["stderr"] = f
rv = subprocess.Popen(*args, **kwargs).wait()
f.seek(0)
click.echo(f.read().decode("utf-8", "replace").rstrip())
return rv
subprocess.call = dummy_call
try:
yield
finally:
subprocess.call = old_call | Patch modules to work better with :meth:`ExampleRunner.invoke`.
``subprocess.call` output is redirected to ``click.echo`` so it
shows up in the example output. | Below is the the instruction that describes the task:
### Input:
Patch modules to work better with :meth:`ExampleRunner.invoke`.
``subprocess.call` output is redirected to ``click.echo`` so it
shows up in the example output.
### Response:
def patch_modules():
"""Patch modules to work better with :meth:`ExampleRunner.invoke`.
``subprocess.call` output is redirected to ``click.echo`` so it
shows up in the example output.
"""
old_call = subprocess.call
def dummy_call(*args, **kwargs):
with tempfile.TemporaryFile("wb+") as f:
kwargs["stdout"] = f
kwargs["stderr"] = f
rv = subprocess.Popen(*args, **kwargs).wait()
f.seek(0)
click.echo(f.read().decode("utf-8", "replace").rstrip())
return rv
subprocess.call = dummy_call
try:
yield
finally:
subprocess.call = old_call |
def compile_insert(self, query, values):
"""
Compile an insert SQL statement
:param query: A QueryBuilder instance
:type query: QueryBuilder
:param values: The values to insert
:type values: dict or list
:return: The compiled statement
:rtype: str
"""
# Essentially we will force every insert to be treated as a batch insert which
# simply makes creating the SQL easier for us since we can utilize the same
# basic routine regardless of an amount of records given to us to insert.
table = self.wrap_table(query.from__)
if not isinstance(values, list):
values = [values]
columns = self.columnize(values[0].keys())
# We need to build a list of parameter place-holders of values that are bound
# to the query. Each insert should have the exact same amount of parameter
# bindings so we can just go off the first list of values in this array.
parameters = self.parameterize(values[0].values())
value = ['(%s)' % parameters] * len(values)
parameters = ', '.join(value)
return 'INSERT INTO %s (%s) VALUES %s' % (table, columns, parameters) | Compile an insert SQL statement
:param query: A QueryBuilder instance
:type query: QueryBuilder
:param values: The values to insert
:type values: dict or list
:return: The compiled statement
:rtype: str | Below is the the instruction that describes the task:
### Input:
Compile an insert SQL statement
:param query: A QueryBuilder instance
:type query: QueryBuilder
:param values: The values to insert
:type values: dict or list
:return: The compiled statement
:rtype: str
### Response:
def compile_insert(self, query, values):
"""
Compile an insert SQL statement
:param query: A QueryBuilder instance
:type query: QueryBuilder
:param values: The values to insert
:type values: dict or list
:return: The compiled statement
:rtype: str
"""
# Essentially we will force every insert to be treated as a batch insert which
# simply makes creating the SQL easier for us since we can utilize the same
# basic routine regardless of an amount of records given to us to insert.
table = self.wrap_table(query.from__)
if not isinstance(values, list):
values = [values]
columns = self.columnize(values[0].keys())
# We need to build a list of parameter place-holders of values that are bound
# to the query. Each insert should have the exact same amount of parameter
# bindings so we can just go off the first list of values in this array.
parameters = self.parameterize(values[0].values())
value = ['(%s)' % parameters] * len(values)
parameters = ', '.join(value)
return 'INSERT INTO %s (%s) VALUES %s' % (table, columns, parameters) |
def get_model_choices():
"""
Get the select options for the model selector
:return:
"""
result = []
for ct in ContentType.objects.order_by('app_label', 'model'):
try:
if issubclass(ct.model_class(), TranslatableModel):
result.append(
('{} - {}'.format(ct.app_label, ct.model.lower()),
'{} - {}'.format(ct.app_label.capitalize(), ct.model_class()._meta.verbose_name_plural))
)
except TypeError:
continue
return result | Get the select options for the model selector
:return: | Below is the the instruction that describes the task:
### Input:
Get the select options for the model selector
:return:
### Response:
def get_model_choices():
"""
Get the select options for the model selector
:return:
"""
result = []
for ct in ContentType.objects.order_by('app_label', 'model'):
try:
if issubclass(ct.model_class(), TranslatableModel):
result.append(
('{} - {}'.format(ct.app_label, ct.model.lower()),
'{} - {}'.format(ct.app_label.capitalize(), ct.model_class()._meta.verbose_name_plural))
)
except TypeError:
continue
return result |
def show(self):
"""
Plot the result of the simulation once it's been intialized
"""
from matplotlib import pyplot as plt
if self.already_run:
for ref in self.volts.keys():
plt.plot(self.t, self.volts[ref], label=ref)
plt.title("Simulation voltage vs time")
plt.legend()
plt.xlabel("Time [ms]")
plt.ylabel("Voltage [mV]")
else:
pynml.print_comment("First you have to 'go()' the simulation.", True)
plt.show() | Plot the result of the simulation once it's been intialized | Below is the the instruction that describes the task:
### Input:
Plot the result of the simulation once it's been intialized
### Response:
def show(self):
"""
Plot the result of the simulation once it's been intialized
"""
from matplotlib import pyplot as plt
if self.already_run:
for ref in self.volts.keys():
plt.plot(self.t, self.volts[ref], label=ref)
plt.title("Simulation voltage vs time")
plt.legend()
plt.xlabel("Time [ms]")
plt.ylabel("Voltage [mV]")
else:
pynml.print_comment("First you have to 'go()' the simulation.", True)
plt.show() |
def evolve_genomes(rng, pop, params, recorder=None):
"""
Evolve a population without tree sequence recordings. In other words,
complete genomes must be simulated and tracked.
:param rng: random number generator
:type rng: :class:`fwdpy11.GSLrng`
:param pop: A population
:type pop: :class:`fwdpy11.DiploidPopulation`
:param params: simulation parameters
:type params: :class:`fwdpy11.ModelParams`
:param recorder: (None) A temporal sampler/data recorder.
:type recorder: callable
.. note::
If recorder is None,
then :class:`fwdpy11.RecordNothing` will be used.
"""
import warnings
# Test parameters while suppressing warnings
with warnings.catch_warnings():
warnings.simplefilter("ignore")
# Will throw exception if anything is wrong:
params.validate()
from ._fwdpy11 import MutationRegions
from ._fwdpy11 import evolve_without_tree_sequences
from ._fwdpy11 import dispatch_create_GeneticMap
pneutral = params.mutrate_n/(params.mutrate_n+params.mutrate_s)
mm = MutationRegions.create(pneutral, params.nregions, params.sregions)
rm = dispatch_create_GeneticMap(params.recrate, params.recregions)
if recorder is None:
from ._fwdpy11 import RecordNothing
recorder = RecordNothing()
evolve_without_tree_sequences(rng, pop, params.demography,
params.mutrate_n, params.mutrate_s,
params.recrate, mm, rm, params.gvalue,
recorder, params.pself, params.prune_selected) | Evolve a population without tree sequence recordings. In other words,
complete genomes must be simulated and tracked.
:param rng: random number generator
:type rng: :class:`fwdpy11.GSLrng`
:param pop: A population
:type pop: :class:`fwdpy11.DiploidPopulation`
:param params: simulation parameters
:type params: :class:`fwdpy11.ModelParams`
:param recorder: (None) A temporal sampler/data recorder.
:type recorder: callable
.. note::
If recorder is None,
then :class:`fwdpy11.RecordNothing` will be used. | Below is the the instruction that describes the task:
### Input:
Evolve a population without tree sequence recordings. In other words,
complete genomes must be simulated and tracked.
:param rng: random number generator
:type rng: :class:`fwdpy11.GSLrng`
:param pop: A population
:type pop: :class:`fwdpy11.DiploidPopulation`
:param params: simulation parameters
:type params: :class:`fwdpy11.ModelParams`
:param recorder: (None) A temporal sampler/data recorder.
:type recorder: callable
.. note::
If recorder is None,
then :class:`fwdpy11.RecordNothing` will be used.
### Response:
def evolve_genomes(rng, pop, params, recorder=None):
"""
Evolve a population without tree sequence recordings. In other words,
complete genomes must be simulated and tracked.
:param rng: random number generator
:type rng: :class:`fwdpy11.GSLrng`
:param pop: A population
:type pop: :class:`fwdpy11.DiploidPopulation`
:param params: simulation parameters
:type params: :class:`fwdpy11.ModelParams`
:param recorder: (None) A temporal sampler/data recorder.
:type recorder: callable
.. note::
If recorder is None,
then :class:`fwdpy11.RecordNothing` will be used.
"""
import warnings
# Test parameters while suppressing warnings
with warnings.catch_warnings():
warnings.simplefilter("ignore")
# Will throw exception if anything is wrong:
params.validate()
from ._fwdpy11 import MutationRegions
from ._fwdpy11 import evolve_without_tree_sequences
from ._fwdpy11 import dispatch_create_GeneticMap
pneutral = params.mutrate_n/(params.mutrate_n+params.mutrate_s)
mm = MutationRegions.create(pneutral, params.nregions, params.sregions)
rm = dispatch_create_GeneticMap(params.recrate, params.recregions)
if recorder is None:
from ._fwdpy11 import RecordNothing
recorder = RecordNothing()
evolve_without_tree_sequences(rng, pop, params.demography,
params.mutrate_n, params.mutrate_s,
params.recrate, mm, rm, params.gvalue,
recorder, params.pself, params.prune_selected) |
def setField(self, new_value):
"""
Adjust the field of the magnet by the value of ``scalingFactor``. The adjustment
is multiplicative, so a value of ``scalingFactor = 1.0`` will result in no change
of the field.
"""
self.field_strength = self.field_strength._replace(
val=new_value
) | Adjust the field of the magnet by the value of ``scalingFactor``. The adjustment
is multiplicative, so a value of ``scalingFactor = 1.0`` will result in no change
of the field. | Below is the the instruction that describes the task:
### Input:
Adjust the field of the magnet by the value of ``scalingFactor``. The adjustment
is multiplicative, so a value of ``scalingFactor = 1.0`` will result in no change
of the field.
### Response:
def setField(self, new_value):
"""
Adjust the field of the magnet by the value of ``scalingFactor``. The adjustment
is multiplicative, so a value of ``scalingFactor = 1.0`` will result in no change
of the field.
"""
self.field_strength = self.field_strength._replace(
val=new_value
) |
def derive_key(self,
derivation_method,
derivation_length,
derivation_data=None,
key_material=None,
hash_algorithm=None,
salt=None,
iteration_count=None,
encryption_algorithm=None,
cipher_mode=None,
padding_method=None,
iv_nonce=None):
"""
Derive key data using a variety of key derivation functions.
Args:
derivation_method (DerivationMethod): An enumeration specifying
the key derivation method to use. Required.
derivation_length (int): An integer specifying the size of the
derived key data in bytes. Required.
derivation_data (bytes): The non-cryptographic bytes to be used
in the key derivation process (e.g., the data to be encrypted,
hashed, HMACed). Required in the general case. Optional if the
derivation method is Hash and the key material is provided.
Optional, defaults to None.
key_material (bytes): The bytes of the key material to use for
key derivation. Required in the general case. Optional if
the derivation_method is HASH and derivation_data is provided.
Optional, defaults to None.
hash_algorithm (HashingAlgorithm): An enumeration specifying the
hashing algorithm to use with the key derivation method.
Required in the general case, optional if the derivation
method specifies encryption. Optional, defaults to None.
salt (bytes): Bytes representing a randomly generated salt.
Required if the derivation method is PBKDF2. Optional,
defaults to None.
iteration_count (int): An integer representing the number of
iterations to use when deriving key material. Required if
the derivation method is PBKDF2. Optional, defaults to None.
encryption_algorithm (CryptographicAlgorithm): An enumeration
specifying the symmetric encryption algorithm to use for
encryption-based key derivation. Required if the derivation
method specifies encryption. Optional, defaults to None.
cipher_mode (BlockCipherMode): An enumeration specifying the
block cipher mode to use with the encryption algorithm.
Required in in the general case if the derivation method
specifies encryption and the encryption algorithm is
specified. Optional if the encryption algorithm is RC4 (aka
ARC4). Optional, defaults to None.
padding_method (PaddingMethod): An enumeration specifying the
padding method to use on the data before encryption. Required
in in the general case if the derivation method specifies
encryption and the encryption algorithm is specified. Required
if the cipher mode is for block ciphers (e.g., CBC, ECB).
Optional otherwise, defaults to None.
iv_nonce (bytes): The IV/nonce value to use to initialize the mode
of the encryption algorithm. Required in the general case if
the derivation method specifies encryption and the encryption
algorithm is specified. Optional, defaults to None. If
required and not provided, it will be autogenerated.
Returns:
bytes: the bytes of the derived data
Raises:
InvalidField: Raised when cryptographic data and/or settings are
unsupported or incompatible with the derivation method.
Example:
>>> engine = CryptographyEngine()
>>> result = engine.derive_key(
... derivation_method=enums.DerivationMethod.HASH,
... derivation_length=16,
... derivation_data=b'abc',
... hash_algorithm=enums.HashingAlgorithm.MD5
... )
>>> result
b'\x90\x01P\x98<\xd2O\xb0\xd6\x96?}(\xe1\x7fr'
"""
if derivation_method == enums.DerivationMethod.ENCRYPT:
result = self.encrypt(
encryption_algorithm=encryption_algorithm,
encryption_key=key_material,
plain_text=derivation_data,
cipher_mode=cipher_mode,
padding_method=padding_method,
iv_nonce=iv_nonce
)
return result.get('cipher_text')
else:
# Handle key derivation functions that use hash algorithms
# Set up the hashing algorithm
if hash_algorithm is None:
raise exceptions.InvalidField("Hash algorithm is required.")
hashing_algorithm = self._encryption_hash_algorithms.get(
hash_algorithm,
None
)
if hashing_algorithm is None:
raise exceptions.InvalidField(
"Hash algorithm '{0}' is not a supported hashing "
"algorithm.".format(hash_algorithm)
)
if derivation_method == enums.DerivationMethod.HMAC:
df = hkdf.HKDF(
algorithm=hashing_algorithm(),
length=derivation_length,
salt=salt,
info=derivation_data,
backend=default_backend()
)
derived_data = df.derive(key_material)
return derived_data
elif derivation_method == enums.DerivationMethod.HASH:
if None not in [derivation_data, key_material]:
raise exceptions.InvalidField(
"For hash-based key derivation, specify only "
"derivation data or key material, not both."
)
elif derivation_data is not None:
hashing_data = derivation_data
elif key_material is not None:
hashing_data = key_material
else:
raise exceptions.InvalidField(
"For hash-based key derivation, derivation data or "
"key material must be specified."
)
df = hashes.Hash(
algorithm=hashing_algorithm(),
backend=default_backend()
)
df.update(hashing_data)
derived_data = df.finalize()
return derived_data
elif derivation_method == enums.DerivationMethod.PBKDF2:
if salt is None:
raise exceptions.InvalidField(
"For PBKDF2 key derivation, salt must be specified."
)
if iteration_count is None:
raise exceptions.InvalidField(
"For PBKDF2 key derivation, iteration count must be "
"specified."
)
df = pbkdf2.PBKDF2HMAC(
algorithm=hashing_algorithm(),
length=derivation_length,
salt=salt,
iterations=iteration_count,
backend=default_backend()
)
derived_data = df.derive(key_material)
return derived_data
elif derivation_method == enums.DerivationMethod.NIST800_108_C:
df = kbkdf.KBKDFHMAC(
algorithm=hashing_algorithm(),
mode=kbkdf.Mode.CounterMode,
length=derivation_length,
rlen=4,
llen=None,
location=kbkdf.CounterLocation.BeforeFixed,
label=None,
context=None,
fixed=derivation_data,
backend=default_backend()
)
derived_data = df.derive(key_material)
return derived_data
else:
raise exceptions.InvalidField(
"Derivation method '{0}' is not a supported key "
"derivation method.".format(derivation_method)
) | Derive key data using a variety of key derivation functions.
Args:
derivation_method (DerivationMethod): An enumeration specifying
the key derivation method to use. Required.
derivation_length (int): An integer specifying the size of the
derived key data in bytes. Required.
derivation_data (bytes): The non-cryptographic bytes to be used
in the key derivation process (e.g., the data to be encrypted,
hashed, HMACed). Required in the general case. Optional if the
derivation method is Hash and the key material is provided.
Optional, defaults to None.
key_material (bytes): The bytes of the key material to use for
key derivation. Required in the general case. Optional if
the derivation_method is HASH and derivation_data is provided.
Optional, defaults to None.
hash_algorithm (HashingAlgorithm): An enumeration specifying the
hashing algorithm to use with the key derivation method.
Required in the general case, optional if the derivation
method specifies encryption. Optional, defaults to None.
salt (bytes): Bytes representing a randomly generated salt.
Required if the derivation method is PBKDF2. Optional,
defaults to None.
iteration_count (int): An integer representing the number of
iterations to use when deriving key material. Required if
the derivation method is PBKDF2. Optional, defaults to None.
encryption_algorithm (CryptographicAlgorithm): An enumeration
specifying the symmetric encryption algorithm to use for
encryption-based key derivation. Required if the derivation
method specifies encryption. Optional, defaults to None.
cipher_mode (BlockCipherMode): An enumeration specifying the
block cipher mode to use with the encryption algorithm.
Required in in the general case if the derivation method
specifies encryption and the encryption algorithm is
specified. Optional if the encryption algorithm is RC4 (aka
ARC4). Optional, defaults to None.
padding_method (PaddingMethod): An enumeration specifying the
padding method to use on the data before encryption. Required
in in the general case if the derivation method specifies
encryption and the encryption algorithm is specified. Required
if the cipher mode is for block ciphers (e.g., CBC, ECB).
Optional otherwise, defaults to None.
iv_nonce (bytes): The IV/nonce value to use to initialize the mode
of the encryption algorithm. Required in the general case if
the derivation method specifies encryption and the encryption
algorithm is specified. Optional, defaults to None. If
required and not provided, it will be autogenerated.
Returns:
bytes: the bytes of the derived data
Raises:
InvalidField: Raised when cryptographic data and/or settings are
unsupported or incompatible with the derivation method.
Example:
>>> engine = CryptographyEngine()
>>> result = engine.derive_key(
... derivation_method=enums.DerivationMethod.HASH,
... derivation_length=16,
... derivation_data=b'abc',
... hash_algorithm=enums.HashingAlgorithm.MD5
... )
>>> result
b'\x90\x01P\x98<\xd2O\xb0\xd6\x96?}(\xe1\x7fr' | Below is the the instruction that describes the task:
### Input:
Derive key data using a variety of key derivation functions.
Args:
derivation_method (DerivationMethod): An enumeration specifying
the key derivation method to use. Required.
derivation_length (int): An integer specifying the size of the
derived key data in bytes. Required.
derivation_data (bytes): The non-cryptographic bytes to be used
in the key derivation process (e.g., the data to be encrypted,
hashed, HMACed). Required in the general case. Optional if the
derivation method is Hash and the key material is provided.
Optional, defaults to None.
key_material (bytes): The bytes of the key material to use for
key derivation. Required in the general case. Optional if
the derivation_method is HASH and derivation_data is provided.
Optional, defaults to None.
hash_algorithm (HashingAlgorithm): An enumeration specifying the
hashing algorithm to use with the key derivation method.
Required in the general case, optional if the derivation
method specifies encryption. Optional, defaults to None.
salt (bytes): Bytes representing a randomly generated salt.
Required if the derivation method is PBKDF2. Optional,
defaults to None.
iteration_count (int): An integer representing the number of
iterations to use when deriving key material. Required if
the derivation method is PBKDF2. Optional, defaults to None.
encryption_algorithm (CryptographicAlgorithm): An enumeration
specifying the symmetric encryption algorithm to use for
encryption-based key derivation. Required if the derivation
method specifies encryption. Optional, defaults to None.
cipher_mode (BlockCipherMode): An enumeration specifying the
block cipher mode to use with the encryption algorithm.
Required in in the general case if the derivation method
specifies encryption and the encryption algorithm is
specified. Optional if the encryption algorithm is RC4 (aka
ARC4). Optional, defaults to None.
padding_method (PaddingMethod): An enumeration specifying the
padding method to use on the data before encryption. Required
in in the general case if the derivation method specifies
encryption and the encryption algorithm is specified. Required
if the cipher mode is for block ciphers (e.g., CBC, ECB).
Optional otherwise, defaults to None.
iv_nonce (bytes): The IV/nonce value to use to initialize the mode
of the encryption algorithm. Required in the general case if
the derivation method specifies encryption and the encryption
algorithm is specified. Optional, defaults to None. If
required and not provided, it will be autogenerated.
Returns:
bytes: the bytes of the derived data
Raises:
InvalidField: Raised when cryptographic data and/or settings are
unsupported or incompatible with the derivation method.
Example:
>>> engine = CryptographyEngine()
>>> result = engine.derive_key(
... derivation_method=enums.DerivationMethod.HASH,
... derivation_length=16,
... derivation_data=b'abc',
... hash_algorithm=enums.HashingAlgorithm.MD5
... )
>>> result
b'\x90\x01P\x98<\xd2O\xb0\xd6\x96?}(\xe1\x7fr'
### Response:
def derive_key(self,
derivation_method,
derivation_length,
derivation_data=None,
key_material=None,
hash_algorithm=None,
salt=None,
iteration_count=None,
encryption_algorithm=None,
cipher_mode=None,
padding_method=None,
iv_nonce=None):
"""
Derive key data using a variety of key derivation functions.
Args:
derivation_method (DerivationMethod): An enumeration specifying
the key derivation method to use. Required.
derivation_length (int): An integer specifying the size of the
derived key data in bytes. Required.
derivation_data (bytes): The non-cryptographic bytes to be used
in the key derivation process (e.g., the data to be encrypted,
hashed, HMACed). Required in the general case. Optional if the
derivation method is Hash and the key material is provided.
Optional, defaults to None.
key_material (bytes): The bytes of the key material to use for
key derivation. Required in the general case. Optional if
the derivation_method is HASH and derivation_data is provided.
Optional, defaults to None.
hash_algorithm (HashingAlgorithm): An enumeration specifying the
hashing algorithm to use with the key derivation method.
Required in the general case, optional if the derivation
method specifies encryption. Optional, defaults to None.
salt (bytes): Bytes representing a randomly generated salt.
Required if the derivation method is PBKDF2. Optional,
defaults to None.
iteration_count (int): An integer representing the number of
iterations to use when deriving key material. Required if
the derivation method is PBKDF2. Optional, defaults to None.
encryption_algorithm (CryptographicAlgorithm): An enumeration
specifying the symmetric encryption algorithm to use for
encryption-based key derivation. Required if the derivation
method specifies encryption. Optional, defaults to None.
cipher_mode (BlockCipherMode): An enumeration specifying the
block cipher mode to use with the encryption algorithm.
Required in in the general case if the derivation method
specifies encryption and the encryption algorithm is
specified. Optional if the encryption algorithm is RC4 (aka
ARC4). Optional, defaults to None.
padding_method (PaddingMethod): An enumeration specifying the
padding method to use on the data before encryption. Required
in in the general case if the derivation method specifies
encryption and the encryption algorithm is specified. Required
if the cipher mode is for block ciphers (e.g., CBC, ECB).
Optional otherwise, defaults to None.
iv_nonce (bytes): The IV/nonce value to use to initialize the mode
of the encryption algorithm. Required in the general case if
the derivation method specifies encryption and the encryption
algorithm is specified. Optional, defaults to None. If
required and not provided, it will be autogenerated.
Returns:
bytes: the bytes of the derived data
Raises:
InvalidField: Raised when cryptographic data and/or settings are
unsupported or incompatible with the derivation method.
Example:
>>> engine = CryptographyEngine()
>>> result = engine.derive_key(
... derivation_method=enums.DerivationMethod.HASH,
... derivation_length=16,
... derivation_data=b'abc',
... hash_algorithm=enums.HashingAlgorithm.MD5
... )
>>> result
b'\x90\x01P\x98<\xd2O\xb0\xd6\x96?}(\xe1\x7fr'
"""
if derivation_method == enums.DerivationMethod.ENCRYPT:
result = self.encrypt(
encryption_algorithm=encryption_algorithm,
encryption_key=key_material,
plain_text=derivation_data,
cipher_mode=cipher_mode,
padding_method=padding_method,
iv_nonce=iv_nonce
)
return result.get('cipher_text')
else:
# Handle key derivation functions that use hash algorithms
# Set up the hashing algorithm
if hash_algorithm is None:
raise exceptions.InvalidField("Hash algorithm is required.")
hashing_algorithm = self._encryption_hash_algorithms.get(
hash_algorithm,
None
)
if hashing_algorithm is None:
raise exceptions.InvalidField(
"Hash algorithm '{0}' is not a supported hashing "
"algorithm.".format(hash_algorithm)
)
if derivation_method == enums.DerivationMethod.HMAC:
df = hkdf.HKDF(
algorithm=hashing_algorithm(),
length=derivation_length,
salt=salt,
info=derivation_data,
backend=default_backend()
)
derived_data = df.derive(key_material)
return derived_data
elif derivation_method == enums.DerivationMethod.HASH:
if None not in [derivation_data, key_material]:
raise exceptions.InvalidField(
"For hash-based key derivation, specify only "
"derivation data or key material, not both."
)
elif derivation_data is not None:
hashing_data = derivation_data
elif key_material is not None:
hashing_data = key_material
else:
raise exceptions.InvalidField(
"For hash-based key derivation, derivation data or "
"key material must be specified."
)
df = hashes.Hash(
algorithm=hashing_algorithm(),
backend=default_backend()
)
df.update(hashing_data)
derived_data = df.finalize()
return derived_data
elif derivation_method == enums.DerivationMethod.PBKDF2:
if salt is None:
raise exceptions.InvalidField(
"For PBKDF2 key derivation, salt must be specified."
)
if iteration_count is None:
raise exceptions.InvalidField(
"For PBKDF2 key derivation, iteration count must be "
"specified."
)
df = pbkdf2.PBKDF2HMAC(
algorithm=hashing_algorithm(),
length=derivation_length,
salt=salt,
iterations=iteration_count,
backend=default_backend()
)
derived_data = df.derive(key_material)
return derived_data
elif derivation_method == enums.DerivationMethod.NIST800_108_C:
df = kbkdf.KBKDFHMAC(
algorithm=hashing_algorithm(),
mode=kbkdf.Mode.CounterMode,
length=derivation_length,
rlen=4,
llen=None,
location=kbkdf.CounterLocation.BeforeFixed,
label=None,
context=None,
fixed=derivation_data,
backend=default_backend()
)
derived_data = df.derive(key_material)
return derived_data
else:
raise exceptions.InvalidField(
"Derivation method '{0}' is not a supported key "
"derivation method.".format(derivation_method)
) |
def event(self, event):
"""Reimplementation of QWidget.event
The widget is closed, when the window is deactivated.
The widget is closed after the set interval if the mouse leaves the widget.
The timer is stops when the mouse enters the widget before the interval ends.
On show, the added widgets are rendered for the tooltip into buttons. The buttons
are used to set the widget in focus.
"""
if event.type() == QtCore.QEvent.WindowDeactivate: # hide the tooltip
self.cyatimer.stop()
self.hide()
return True
if event.type() == QtCore.QEvent.Leave: # start timer
self.cyatimer.start(self._interval)
return True
if event.type() == QtCore.QEvent.Enter: # reset/stop timer
self.cyatimer.stop()
return True
if event.type() == QtCore.QEvent.Show: # render the widgets
self.cyatimer.stop()
return True
return super(WidgetToolTip, self).event(event) | Reimplementation of QWidget.event
The widget is closed, when the window is deactivated.
The widget is closed after the set interval if the mouse leaves the widget.
The timer is stops when the mouse enters the widget before the interval ends.
On show, the added widgets are rendered for the tooltip into buttons. The buttons
are used to set the widget in focus. | Below is the the instruction that describes the task:
### Input:
Reimplementation of QWidget.event
The widget is closed, when the window is deactivated.
The widget is closed after the set interval if the mouse leaves the widget.
The timer is stops when the mouse enters the widget before the interval ends.
On show, the added widgets are rendered for the tooltip into buttons. The buttons
are used to set the widget in focus.
### Response:
def event(self, event):
"""Reimplementation of QWidget.event
The widget is closed, when the window is deactivated.
The widget is closed after the set interval if the mouse leaves the widget.
The timer is stops when the mouse enters the widget before the interval ends.
On show, the added widgets are rendered for the tooltip into buttons. The buttons
are used to set the widget in focus.
"""
if event.type() == QtCore.QEvent.WindowDeactivate: # hide the tooltip
self.cyatimer.stop()
self.hide()
return True
if event.type() == QtCore.QEvent.Leave: # start timer
self.cyatimer.start(self._interval)
return True
if event.type() == QtCore.QEvent.Enter: # reset/stop timer
self.cyatimer.stop()
return True
if event.type() == QtCore.QEvent.Show: # render the widgets
self.cyatimer.stop()
return True
return super(WidgetToolTip, self).event(event) |
def prune(containers=False, networks=False, images=False,
build=False, volumes=False, system=None, **filters):
'''
.. versionadded:: 2019.2.0
Prune Docker's various subsystems
.. note::
This requires docker-py version 2.1.0 or later.
containers : False
If ``True``, prunes stopped containers (documentation__)
.. __: https://docs.docker.com/engine/reference/commandline/container_prune/#filtering
images : False
If ``True``, prunes unused images (documentation__)
.. __: https://docs.docker.com/engine/reference/commandline/image_prune/#filtering
networks : False
If ``False``, prunes unreferenced networks (documentation__)
.. __: https://docs.docker.com/engine/reference/commandline/network_prune/#filtering)
build : False
If ``True``, clears the builder cache
.. note::
Only supported in Docker 17.07.x and newer. Additionally, filters
do not apply to this argument.
volumes : False
If ``True``, prunes unreferenced volumes (documentation__)
.. __: https://docs.docker.com/engine/reference/commandline/volume_prune/
system
If ``True``, prunes containers, images, networks, and builder cache.
Assumed to be ``True`` if none of ``containers``, ``images``,
``networks``, or ``build`` are set to ``True``.
.. note::
``volumes=True`` must still be used to prune volumes
filters
- ``dangling=True`` (images only) - remove only dangling images
- ``until=<timestamp>`` - only remove objects created before given
timestamp. Not applicable to volumes. See the documentation links
above for examples of valid time expressions.
- ``label`` - only remove objects matching the label expression. Valid
expressions include ``labelname`` or ``labelname=value``.
CLI Examples:
.. code-block:: bash
salt myminion docker.prune system=True
salt myminion docker.prune system=True until=12h
salt myminion docker.prune images=True dangling=True
salt myminion docker.prune images=True label=foo,bar=baz
'''
if system is None and not any((containers, images, networks, build)):
system = True
filters = __utils__['args.clean_kwargs'](**filters)
for fname in list(filters):
if not isinstance(filters[fname], bool):
# support comma-separated values
filters[fname] = salt.utils.args.split_input(filters[fname])
ret = {}
if system or containers:
ret['containers'] = _client_wrapper(
'prune_containers', filters=filters)
if system or images:
ret['images'] = _client_wrapper('prune_images', filters=filters)
if system or networks:
ret['networks'] = _client_wrapper('prune_networks', filters=filters)
if system or build:
try:
# Doesn't exist currently in docker-py as of 3.0.1
ret['build'] = _client_wrapper('prune_build', filters=filters)
except SaltInvocationError:
# It's not in docker-py yet, POST directly to the API endpoint
ret['build'] = _client_wrapper(
'_result',
_client_wrapper(
'_post',
_client_wrapper('_url', '/build/prune')
),
True
)
if volumes:
ret['volumes'] = _client_wrapper('prune_volumes', filters=filters)
return ret | .. versionadded:: 2019.2.0
Prune Docker's various subsystems
.. note::
This requires docker-py version 2.1.0 or later.
containers : False
If ``True``, prunes stopped containers (documentation__)
.. __: https://docs.docker.com/engine/reference/commandline/container_prune/#filtering
images : False
If ``True``, prunes unused images (documentation__)
.. __: https://docs.docker.com/engine/reference/commandline/image_prune/#filtering
networks : False
If ``False``, prunes unreferenced networks (documentation__)
.. __: https://docs.docker.com/engine/reference/commandline/network_prune/#filtering)
build : False
If ``True``, clears the builder cache
.. note::
Only supported in Docker 17.07.x and newer. Additionally, filters
do not apply to this argument.
volumes : False
If ``True``, prunes unreferenced volumes (documentation__)
.. __: https://docs.docker.com/engine/reference/commandline/volume_prune/
system
If ``True``, prunes containers, images, networks, and builder cache.
Assumed to be ``True`` if none of ``containers``, ``images``,
``networks``, or ``build`` are set to ``True``.
.. note::
``volumes=True`` must still be used to prune volumes
filters
- ``dangling=True`` (images only) - remove only dangling images
- ``until=<timestamp>`` - only remove objects created before given
timestamp. Not applicable to volumes. See the documentation links
above for examples of valid time expressions.
- ``label`` - only remove objects matching the label expression. Valid
expressions include ``labelname`` or ``labelname=value``.
CLI Examples:
.. code-block:: bash
salt myminion docker.prune system=True
salt myminion docker.prune system=True until=12h
salt myminion docker.prune images=True dangling=True
salt myminion docker.prune images=True label=foo,bar=baz | Below is the instruction that describes the task:
### Input:
.. versionadded:: 2019.2.0
Prune Docker's various subsystems
.. note::
This requires docker-py version 2.1.0 or later.
containers : False
If ``True``, prunes stopped containers (documentation__)
.. __: https://docs.docker.com/engine/reference/commandline/container_prune/#filtering
images : False
If ``True``, prunes unused images (documentation__)
.. __: https://docs.docker.com/engine/reference/commandline/image_prune/#filtering
networks : False
If ``False``, prunes unreferenced networks (documentation__)
.. __: https://docs.docker.com/engine/reference/commandline/network_prune/#filtering)
build : False
If ``True``, clears the builder cache
.. note::
Only supported in Docker 17.07.x and newer. Additionally, filters
do not apply to this argument.
volumes : False
If ``True``, prunes unreferenced volumes (documentation__)
.. __: https://docs.docker.com/engine/reference/commandline/volume_prune/
system
If ``True``, prunes containers, images, networks, and builder cache.
Assumed to be ``True`` if none of ``containers``, ``images``,
``networks``, or ``build`` are set to ``True``.
.. note::
``volumes=True`` must still be used to prune volumes
filters
- ``dangling=True`` (images only) - remove only dangling images
- ``until=<timestamp>`` - only remove objects created before given
timestamp. Not applicable to volumes. See the documentation links
above for examples of valid time expressions.
- ``label`` - only remove objects matching the label expression. Valid
expressions include ``labelname`` or ``labelname=value``.
CLI Examples:
.. code-block:: bash
salt myminion docker.prune system=True
salt myminion docker.prune system=True until=12h
salt myminion docker.prune images=True dangling=True
salt myminion docker.prune images=True label=foo,bar=baz
### Response:
def prune(containers=False, networks=False, images=False,
          build=False, volumes=False, system=None, **filters):
    '''
    .. versionadded:: 2019.2.0
    Prune Docker's various subsystems
    .. note::
        This requires docker-py version 2.1.0 or later.
    containers : False
        If ``True``, prunes stopped containers (documentation__)
    .. __: https://docs.docker.com/engine/reference/commandline/container_prune/#filtering
    images : False
        If ``True``, prunes unused images (documentation__)
    .. __: https://docs.docker.com/engine/reference/commandline/image_prune/#filtering
    networks : False
        If ``False``, prunes unreferenced networks (documentation__)
    .. __: https://docs.docker.com/engine/reference/commandline/network_prune/#filtering)
    build : False
        If ``True``, clears the builder cache
        .. note::
            Only supported in Docker 17.07.x and newer. Additionally, filters
            do not apply to this argument.
    volumes : False
        If ``True``, prunes unreferenced volumes (documentation__)
    .. __: https://docs.docker.com/engine/reference/commandline/volume_prune/
    system
        If ``True``, prunes containers, images, networks, and builder cache.
        Assumed to be ``True`` if none of ``containers``, ``images``,
        ``networks``, or ``build`` are set to ``True``.
        .. note::
            ``volumes=True`` must still be used to prune volumes
    filters
        - ``dangling=True`` (images only) - remove only dangling images
        - ``until=<timestamp>`` - only remove objects created before given
          timestamp. Not applicable to volumes. See the documentation links
          above for examples of valid time expressions.
        - ``label`` - only remove objects matching the label expression. Valid
          expressions include ``labelname`` or ``labelname=value``.
    CLI Examples:
    .. code-block:: bash
        salt myminion docker.prune system=True
        salt myminion docker.prune system=True until=12h
        salt myminion docker.prune images=True dangling=True
        salt myminion docker.prune images=True label=foo,bar=baz
    '''
    # No explicit subsystem requested -> behave like ``docker system prune``.
    if system is None and not any((containers, images, networks, build)):
        system = True
    # clean_kwargs strips Salt-internal (dunder) kwargs -- presumably the
    # CLI-injected ones; what remains is used as the prune filter set.
    filters = __utils__['args.clean_kwargs'](**filters)
    # Iterate over a snapshot of the keys since values are reassigned below.
    for fname in list(filters):
        if not isinstance(filters[fname], bool):
            # support comma-separated values
            filters[fname] = salt.utils.args.split_input(filters[fname])
    ret = {}
    if system or containers:
        ret['containers'] = _client_wrapper(
            'prune_containers', filters=filters)
    if system or images:
        ret['images'] = _client_wrapper('prune_images', filters=filters)
    if system or networks:
        ret['networks'] = _client_wrapper('prune_networks', filters=filters)
    if system or build:
        try:
            # Doesn't exist currently in docker-py as of 3.0.1
            ret['build'] = _client_wrapper('prune_build', filters=filters)
        except SaltInvocationError:
            # It's not in docker-py yet, POST directly to the API endpoint
            ret['build'] = _client_wrapper(
                '_result',
                _client_wrapper(
                    '_post',
                    _client_wrapper('_url', '/build/prune')
                ),
                True
            )
    if volumes:
        # Volumes are deliberately NOT covered by ``system=True``.
        ret['volumes'] = _client_wrapper('prune_volumes', filters=filters)
    return ret |
def get_int(self, key: str) -> Optional[int]:
"""
Returns an optional configuration value, as an int, by its key, or None if it doesn't exist.
If the configuration value isn't a legal int, this function will throw an error.
:param str key: The requested configuration key.
:return: The configuration key's value, or None if one does not exist.
:rtype: Optional[int]
:raises ConfigTypeError: The configuration value existed but couldn't be coerced to int.
"""
v = self.get(key)
if v is None:
return None
try:
return int(v)
except:
raise ConfigTypeError(self.full_key(key), v, 'int') | Returns an optional configuration value, as an int, by its key, or None if it doesn't exist.
If the configuration value isn't a legal int, this function will throw an error.
:param str key: The requested configuration key.
:return: The configuration key's value, or None if one does not exist.
:rtype: Optional[int]
:raises ConfigTypeError: The configuration value existed but couldn't be coerced to int. | Below is the instruction that describes the task:
### Input:
Returns an optional configuration value, as an int, by its key, or None if it doesn't exist.
If the configuration value isn't a legal int, this function will throw an error.
:param str key: The requested configuration key.
:return: The configuration key's value, or None if one does not exist.
:rtype: Optional[int]
:raises ConfigTypeError: The configuration value existed but couldn't be coerced to int.
### Response:
def get_int(self, key: str) -> Optional[int]:
"""
Returns an optional configuration value, as an int, by its key, or None if it doesn't exist.
If the configuration value isn't a legal int, this function will throw an error.
:param str key: The requested configuration key.
:return: The configuration key's value, or None if one does not exist.
:rtype: Optional[int]
:raises ConfigTypeError: The configuration value existed but couldn't be coerced to int.
"""
v = self.get(key)
if v is None:
return None
try:
return int(v)
except:
raise ConfigTypeError(self.full_key(key), v, 'int') |
def draw(self, time: float, frametime: float, target: moderngl.Framebuffer):
"""
Draw function called by the system every frame when the effect is active.
This method raises ``NotImplementedError`` unless implemented.
Args:
time (float): The current time in seconds.
frametime (float): The time the previous frame used to render in seconds.
target (``moderngl.Framebuffer``): The target FBO for the effect.
"""
raise NotImplementedError("draw() is not implemented") | Draw function called by the system every frame when the effect is active.
This method raises ``NotImplementedError`` unless implemented.
Args:
time (float): The current time in seconds.
frametime (float): The time the previous frame used to render in seconds.
target (``moderngl.Framebuffer``): The target FBO for the effect. | Below is the instruction that describes the task:
### Input:
Draw function called by the system every frame when the effect is active.
This method raises ``NotImplementedError`` unless implemented.
Args:
time (float): The current time in seconds.
frametime (float): The time the previous frame used to render in seconds.
target (``moderngl.Framebuffer``): The target FBO for the effect.
### Response:
def draw(self, time: float, frametime: float, target: moderngl.Framebuffer):
    """
    Draw function called by the system every frame when the effect is active.
    This method raises ``NotImplementedError`` unless implemented.
    Args:
        time (float): The current time in seconds.
        frametime (float): The time the previous frame used to render in seconds.
        target (``moderngl.Framebuffer``): The target FBO for the effect.
    Raises:
        NotImplementedError: Always, until a subclass overrides this method.
    """
    raise NotImplementedError("draw() is not implemented") |
def create_ipsec_tunnel(cls, name, local_endpoint, remote_endpoint,
preshared_key=None, monitoring_group=None,
vpn_profile=None, mtu=0, pmtu_discovery=True,
ttl=0, enabled=True, comment=None):
"""
The VPN tunnel type negotiates IPsec tunnels in the same way
as policy-based VPNs, but traffic is selected to be sent into
the tunnel based on routing.
:param str name: name of VPN
:param TunnelEndpoint local_endpoint: the local side endpoint for
this VPN.
:param TunnelEndpoint remote_endpoint: the remote side endpoint for
this VPN.
:param str preshared_key: required if remote endpoint is an ExternalGateway
:param TunnelMonitoringGroup monitoring_group: the group to place
this VPN in for monitoring. Default: 'Uncategorized'.
:param VPNProfile vpn_profile: VPN profile for this VPN.
(default: VPN-A Suite)
:param int mtu: Set MTU for this VPN tunnel (default: 0)
:param boolean pmtu_discovery: enable pmtu discovery (default: True)
:param int ttl: ttl for connections on the VPN (default: 0)
:param bool enabled: enable the RBVPN or leave it disabled
:param str comment: optional comment
:raises CreateVPNFailed: failed to create the VPN with reason
:rtype: RouteVPN
"""
group = monitoring_group or TunnelMonitoringGroup('Uncategorized')
profile = vpn_profile or VPNProfile('VPN-A Suite')
json = {
'name': name,
'mtu': mtu,
'ttl': ttl,
'enabled': enabled,
'monitoring_group_ref': group.href,
'pmtu_discovery': pmtu_discovery,
'preshared_key': preshared_key,
'rbvpn_tunnel_side_a': local_endpoint.data,
'rbvpn_tunnel_side_b': remote_endpoint.data,
'tunnel_mode': 'vpn',
'comment': comment,
'vpn_profile_ref': profile.href
}
try:
return ElementCreator(cls, json)
except CreateElementFailed as err:
raise CreateVPNFailed(err) | The VPN tunnel type negotiates IPsec tunnels in the same way
as policy-based VPNs, but traffic is selected to be sent into
the tunnel based on routing.
:param str name: name of VPN
:param TunnelEndpoint local_endpoint: the local side endpoint for
this VPN.
:param TunnelEndpoint remote_endpoint: the remote side endpoint for
this VPN.
:param str preshared_key: required if remote endpoint is an ExternalGateway
:param TunnelMonitoringGroup monitoring_group: the group to place
this VPN in for monitoring. Default: 'Uncategorized'.
:param VPNProfile vpn_profile: VPN profile for this VPN.
(default: VPN-A Suite)
:param int mtu: Set MTU for this VPN tunnel (default: 0)
:param boolean pmtu_discovery: enable pmtu discovery (default: True)
:param int ttl: ttl for connections on the VPN (default: 0)
:param bool enabled: enable the RBVPN or leave it disabled
:param str comment: optional comment
:raises CreateVPNFailed: failed to create the VPN with reason
:rtype: RouteVPN | Below is the instruction that describes the task:
### Input:
The VPN tunnel type negotiates IPsec tunnels in the same way
as policy-based VPNs, but traffic is selected to be sent into
the tunnel based on routing.
:param str name: name of VPN
:param TunnelEndpoint local_endpoint: the local side endpoint for
this VPN.
:param TunnelEndpoint remote_endpoint: the remote side endpoint for
this VPN.
:param str preshared_key: required if remote endpoint is an ExternalGateway
:param TunnelMonitoringGroup monitoring_group: the group to place
this VPN in for monitoring. Default: 'Uncategorized'.
:param VPNProfile vpn_profile: VPN profile for this VPN.
(default: VPN-A Suite)
:param int mtu: Set MTU for this VPN tunnel (default: 0)
:param boolean pmtu_discovery: enable pmtu discovery (default: True)
:param int ttl: ttl for connections on the VPN (default: 0)
:param bool enabled: enable the RBVPN or leave it disabled
:param str comment: optional comment
:raises CreateVPNFailed: failed to create the VPN with reason
:rtype: RouteVPN
### Response:
def create_ipsec_tunnel(cls, name, local_endpoint, remote_endpoint,
preshared_key=None, monitoring_group=None,
vpn_profile=None, mtu=0, pmtu_discovery=True,
ttl=0, enabled=True, comment=None):
"""
The VPN tunnel type negotiates IPsec tunnels in the same way
as policy-based VPNs, but traffic is selected to be sent into
the tunnel based on routing.
:param str name: name of VPN
:param TunnelEndpoint local_endpoint: the local side endpoint for
this VPN.
:param TunnelEndpoint remote_endpoint: the remote side endpoint for
this VPN.
:param str preshared_key: required if remote endpoint is an ExternalGateway
:param TunnelMonitoringGroup monitoring_group: the group to place
this VPN in for monitoring. Default: 'Uncategorized'.
:param VPNProfile vpn_profile: VPN profile for this VPN.
(default: VPN-A Suite)
:param int mtu: Set MTU for this VPN tunnel (default: 0)
:param boolean pmtu_discovery: enable pmtu discovery (default: True)
:param int ttl: ttl for connections on the VPN (default: 0)
:param bool enabled: enable the RBVPN or leave it disabled
:param str comment: optional comment
:raises CreateVPNFailed: failed to create the VPN with reason
:rtype: RouteVPN
"""
group = monitoring_group or TunnelMonitoringGroup('Uncategorized')
profile = vpn_profile or VPNProfile('VPN-A Suite')
json = {
'name': name,
'mtu': mtu,
'ttl': ttl,
'enabled': enabled,
'monitoring_group_ref': group.href,
'pmtu_discovery': pmtu_discovery,
'preshared_key': preshared_key,
'rbvpn_tunnel_side_a': local_endpoint.data,
'rbvpn_tunnel_side_b': remote_endpoint.data,
'tunnel_mode': 'vpn',
'comment': comment,
'vpn_profile_ref': profile.href
}
try:
return ElementCreator(cls, json)
except CreateElementFailed as err:
raise CreateVPNFailed(err) |
def container_file_get(name, src, dst, overwrite=False,
mode=None, uid=None, gid=None, remote_addr=None,
cert=None, key=None, verify_cert=True):
'''
Get a file from a container
name :
Name of the container
src :
The source file or directory
dst :
The destination file or directory
mode :
Set file mode to octal number
uid :
Set file uid (owner)
gid :
Set file gid (group)
remote_addr :
An URL to a remote Server, you also have to give cert and key if
you provide remote_addr and its a TCP Address!
Examples:
https://myserver.lan:8443
/var/lib/mysocket.sock
cert :
PEM Formatted SSL Certificate.
Examples:
~/.config/lxc/client.crt
key :
PEM Formatted SSL Key.
Examples:
~/.config/lxc/client.key
verify_cert : True
Wherever to verify the cert, this is by default True
but in the most cases you want to set it off as LXD
normaly uses self-signed certificates.
'''
# Fix mode. Salt commandline doesn't use octals, so 0600 will be
# the decimal integer 600 (and not the octal 0600). So, it it's
# and integer, handle it as if it where a octal representation.
# Do only if mode is not None, otherwise we get 0None
if mode is not None:
mode = six.text_type(mode)
if not mode.startswith('0'):
mode = '0{0}'.format(mode)
container = container_get(
name, remote_addr, cert, key, verify_cert, _raw=True
)
dst = os.path.expanduser(dst)
if not os.path.isabs(dst):
raise SaltInvocationError('File path must be absolute.')
if os.path.isdir(dst):
dst = os.path.join(dst, os.path.basename(src))
elif not os.path.isdir(os.path.dirname(dst)):
raise SaltInvocationError(
"Parent directory for destination doesn't exist."
)
if os.path.exists(dst):
if not overwrite:
raise SaltInvocationError(
'Destination exists and overwrite is false.'
)
if not os.path.isfile(dst):
raise SaltInvocationError(
'Destination exists but is not a file.'
)
else:
dst_path = os.path.dirname(dst)
if not os.path.isdir(dst_path):
raise CommandExecutionError(
'No such file or directory \'{0}\''.format(dst_path)
)
# Seems to be duplicate of line 1794, produces /path/file_name/file_name
#dst = os.path.join(dst, os.path.basename(src))
with salt.utils.files.fopen(dst, 'wb') as df:
df.write(container.files.get(src))
if mode:
os.chmod(dst, mode)
if uid or uid is '0':
uid = int(uid)
else:
uid = -1
if gid or gid is '0':
gid = int(gid)
else:
gid = -1
if uid != -1 or gid != -1:
os.chown(dst, uid, gid)
return True | Get a file from a container
name :
Name of the container
src :
The source file or directory
dst :
The destination file or directory
mode :
Set file mode to octal number
uid :
Set file uid (owner)
gid :
Set file gid (group)
remote_addr :
An URL to a remote Server, you also have to give cert and key if
you provide remote_addr and its a TCP Address!
Examples:
https://myserver.lan:8443
/var/lib/mysocket.sock
cert :
PEM Formatted SSL Certificate.
Examples:
~/.config/lxc/client.crt
key :
PEM Formatted SSL Key.
Examples:
~/.config/lxc/client.key
verify_cert : True
Wherever to verify the cert, this is by default True
but in the most cases you want to set it off as LXD
normaly uses self-signed certificates. | Below is the instruction that describes the task:
### Input:
Get a file from a container
name :
Name of the container
src :
The source file or directory
dst :
The destination file or directory
mode :
Set file mode to octal number
uid :
Set file uid (owner)
gid :
Set file gid (group)
remote_addr :
An URL to a remote Server, you also have to give cert and key if
you provide remote_addr and its a TCP Address!
Examples:
https://myserver.lan:8443
/var/lib/mysocket.sock
cert :
PEM Formatted SSL Certificate.
Examples:
~/.config/lxc/client.crt
key :
PEM Formatted SSL Key.
Examples:
~/.config/lxc/client.key
verify_cert : True
Wherever to verify the cert, this is by default True
but in the most cases you want to set it off as LXD
normaly uses self-signed certificates.
### Response:
def container_file_get(name, src, dst, overwrite=False,
mode=None, uid=None, gid=None, remote_addr=None,
cert=None, key=None, verify_cert=True):
'''
Get a file from a container
name :
Name of the container
src :
The source file or directory
dst :
The destination file or directory
mode :
Set file mode to octal number
uid :
Set file uid (owner)
gid :
Set file gid (group)
remote_addr :
An URL to a remote Server, you also have to give cert and key if
you provide remote_addr and its a TCP Address!
Examples:
https://myserver.lan:8443
/var/lib/mysocket.sock
cert :
PEM Formatted SSL Certificate.
Examples:
~/.config/lxc/client.crt
key :
PEM Formatted SSL Key.
Examples:
~/.config/lxc/client.key
verify_cert : True
Wherever to verify the cert, this is by default True
but in the most cases you want to set it off as LXD
normaly uses self-signed certificates.
'''
# Fix mode. Salt commandline doesn't use octals, so 0600 will be
# the decimal integer 600 (and not the octal 0600). So, it it's
# and integer, handle it as if it where a octal representation.
# Do only if mode is not None, otherwise we get 0None
if mode is not None:
mode = six.text_type(mode)
if not mode.startswith('0'):
mode = '0{0}'.format(mode)
container = container_get(
name, remote_addr, cert, key, verify_cert, _raw=True
)
dst = os.path.expanduser(dst)
if not os.path.isabs(dst):
raise SaltInvocationError('File path must be absolute.')
if os.path.isdir(dst):
dst = os.path.join(dst, os.path.basename(src))
elif not os.path.isdir(os.path.dirname(dst)):
raise SaltInvocationError(
"Parent directory for destination doesn't exist."
)
if os.path.exists(dst):
if not overwrite:
raise SaltInvocationError(
'Destination exists and overwrite is false.'
)
if not os.path.isfile(dst):
raise SaltInvocationError(
'Destination exists but is not a file.'
)
else:
dst_path = os.path.dirname(dst)
if not os.path.isdir(dst_path):
raise CommandExecutionError(
'No such file or directory \'{0}\''.format(dst_path)
)
# Seems to be duplicate of line 1794, produces /path/file_name/file_name
#dst = os.path.join(dst, os.path.basename(src))
with salt.utils.files.fopen(dst, 'wb') as df:
df.write(container.files.get(src))
if mode:
os.chmod(dst, mode)
if uid or uid is '0':
uid = int(uid)
else:
uid = -1
if gid or gid is '0':
gid = int(gid)
else:
gid = -1
if uid != -1 or gid != -1:
os.chown(dst, uid, gid)
return True |
def execute(self):
""" Execute a bundle of operations
"""
self.bitshares.blocking = "head"
r = self.bitshares.txbuffer.broadcast()
self.bitshares.blocking = False
return r | Execute a bundle of operations | Below is the instruction that describes the task:
### Input:
Execute a bundle of operations
### Response:
def execute(self):
""" Execute a bundle of operations
"""
self.bitshares.blocking = "head"
r = self.bitshares.txbuffer.broadcast()
self.bitshares.blocking = False
return r |
def _pdf(self, x, dist, cache):
"""Probability density function."""
return evaluation.evaluate_density(
dist, numpy.arcsinh(x), cache=cache)/numpy.sqrt(1+x*x) | Probability density function. | Below is the instruction that describes the task:
### Input:
Probability density function.
### Response:
def _pdf(self, x, dist, cache):
    """Probability density function."""
    # Change of variables for Y = sinh(X):
    # f_Y(y) = f_X(arcsinh(y)) * |d/dy arcsinh(y)|, and
    # d/dy arcsinh(y) = 1/sqrt(1 + y**2).
    return evaluation.evaluate_density(
        dist, numpy.arcsinh(x), cache=cache)/numpy.sqrt(1+x*x) |
def _validate(self, request):
"""Validate an L{HTTPRequest} before executing it.
The following conditions are checked:
- The request contains all the generic parameters.
- The action specified in the request is a supported one.
- The signature mechanism is a supported one.
- The provided signature matches the one calculated using the locally
stored secret access key for the user.
- The signature hasn't expired.
@return: The validated L{Call}, set with its default arguments and the
the principal of the accessing L{User}.
"""
call_arguments = self.get_call_arguments(request)
args = call_arguments["transport_args"]
rest = call_arguments["handler_args"]
params = call_arguments["raw_args"]
self._validate_generic_parameters(args)
def create_call(principal):
self._validate_principal(principal, args)
self._validate_signature(request, principal, args, params)
return Call(raw_params=rest,
principal=principal,
action=args["action"],
version=args["version"],
id=request.id)
deferred = maybeDeferred(self.get_principal, args["access_key_id"])
deferred.addCallback(create_call)
return deferred | Validate an L{HTTPRequest} before executing it.
The following conditions are checked:
- The request contains all the generic parameters.
- The action specified in the request is a supported one.
- The signature mechanism is a supported one.
- The provided signature matches the one calculated using the locally
stored secret access key for the user.
- The signature hasn't expired.
@return: The validated L{Call}, set with its default arguments and the
the principal of the accessing L{User}. | Below is the instruction that describes the task:
### Input:
Validate an L{HTTPRequest} before executing it.
The following conditions are checked:
- The request contains all the generic parameters.
- The action specified in the request is a supported one.
- The signature mechanism is a supported one.
- The provided signature matches the one calculated using the locally
stored secret access key for the user.
- The signature hasn't expired.
@return: The validated L{Call}, set with its default arguments and the
the principal of the accessing L{User}.
### Response:
def _validate(self, request):
    """Validate an L{HTTPRequest} before executing it.
    The following conditions are checked:
    - The request contains all the generic parameters.
    - The action specified in the request is a supported one.
    - The signature mechanism is a supported one.
    - The provided signature matches the one calculated using the locally
      stored secret access key for the user.
    - The signature hasn't expired.
    @return: The validated L{Call}, set with its default arguments and
        the principal of the accessing L{User}.
    """
    # Split the request into transport-level args (action, version,
    # signature fields), handler args, and the raw parameter mapping.
    call_arguments = self.get_call_arguments(request)
    args = call_arguments["transport_args"]
    rest = call_arguments["handler_args"]
    params = call_arguments["raw_args"]
    # Raises synchronously if a required generic parameter is missing.
    self._validate_generic_parameters(args)

    def create_call(principal):
        # Callback fired once the principal lookup resolves; validation
        # errors raised here propagate as a failure on the Deferred.
        self._validate_principal(principal, args)
        self._validate_signature(request, principal, args, params)
        return Call(raw_params=rest,
                    principal=principal,
                    action=args["action"],
                    version=args["version"],
                    id=request.id)
    # get_principal may return a plain value or a Deferred; maybeDeferred
    # normalizes both cases into a Deferred.
    deferred = maybeDeferred(self.get_principal, args["access_key_id"])
    deferred.addCallback(create_call)
    return deferred |
def assess_component(model, reaction, side, flux_coefficient_cutoff=0.001,
                     solver=None):
    """Assesses the ability of the model to provide sufficient precursors,
    or absorb products, for a reaction operating at, or beyond,
    the specified cutoff.
    Parameters
    ----------
    model : cobra.Model
        The cobra model to assess production capacity for
    reaction : reaction identifier or cobra.Reaction
        The reaction to assess
    side : basestring
        Side of the reaction, 'products' or 'reactants'
    flux_coefficient_cutoff : float
        The minimum flux that reaction must carry to be considered active.
    solver : basestring
        Solver name. If None, the default solver will be used.
    Returns
    -------
    bool or dict
        True if the precursors can be simultaneously produced at the
        specified cutoff. False, if the model has the capacity to produce
        each individual precursor at the specified threshold but not all
        precursors at the required level simultaneously. Otherwise a
        dictionary of the required and the produced fluxes for each reactant
        that is not produced in sufficient quantities.
    """
    # Accept either a reaction id or an actual Reaction object.
    reaction = model.reactions.get_by_any(reaction)[0]
    # Result key for per-metabolite shortfall reporting; also implicitly
    # validates ``side`` (any other value raises KeyError here).
    result_key = dict(reactants='produced', products='capacity')[side]
    get_components = attrgetter(side)
    # All modifications inside the context are rolled back on exit.
    with model as m:
        m.objective = reaction
        # Fast path: the reaction itself can already carry the cutoff flux.
        if _optimize_or_value(m, solver=solver) >= flux_coefficient_cutoff:
            return True
        simulation_results = {}
        # build the demand reactions and add all at once
        demand_reactions = {}
        for component in get_components(reaction):
            coeff = reaction.metabolites[component]
            demand = m.add_boundary(component, type='demand')
            demand.metabolites[component] = coeff
            demand_reactions[demand] = (component, coeff)
        # First assess whether all precursors can be produced simultaneously
        joint_demand = Reaction("joint_demand")
        for demand_reaction in demand_reactions:
            joint_demand += demand_reaction
        m.add_reactions([joint_demand])
        m.objective = joint_demand
        if _optimize_or_value(m, solver=solver) >= flux_coefficient_cutoff:
            return True
        # Otherwise assess the ability of the model to produce each precursor
        # individually. Now assess the ability of the model to produce each
        # reactant for a reaction
        for demand_reaction, (component, coeff) in iteritems(demand_reactions):
            # Calculate the maximum amount of the
            with m:
                m.objective = demand_reaction
                flux = _optimize_or_value(m, solver=solver)
            # metabolite that can be produced.
            if flux_coefficient_cutoff > flux:
                # Scale the results to a single unit
                simulation_results.update({
                    component: {
                        'required': flux_coefficient_cutoff / abs(coeff),
                        result_key: flux / abs(coeff)
                    }})
        if len(simulation_results) == 0:
            # No individual shortfall found, so only the *joint* requirement
            # was infeasible; signal that with False, not an empty dict.
            simulation_results = False
        return simulation_results | Assesses the ability of the model to provide sufficient precursors,
or absorb products, for a reaction operating at, or beyond,
the specified cutoff.
Parameters
----------
model : cobra.Model
The cobra model to assess production capacity for
reaction : reaction identifier or cobra.Reaction
The reaction to assess
side : basestring
Side of the reaction, 'products' or 'reactants'
flux_coefficient_cutoff : float
The minimum flux that reaction must carry to be considered active.
solver : basestring
Solver name. If None, the default solver will be used.
Returns
-------
bool or dict
True if the precursors can be simultaneously produced at the
specified cutoff. False, if the model has the capacity to produce
each individual precursor at the specified threshold but not all
precursors at the required level simultaneously. Otherwise a
dictionary of the required and the produced fluxes for each reactant
that is not produced in sufficient quantities. | Below is the instruction that describes the task:
### Input:
Assesses the ability of the model to provide sufficient precursors,
or absorb products, for a reaction operating at, or beyond,
the specified cutoff.
Parameters
----------
model : cobra.Model
The cobra model to assess production capacity for
reaction : reaction identifier or cobra.Reaction
The reaction to assess
side : basestring
Side of the reaction, 'products' or 'reactants'
flux_coefficient_cutoff : float
The minimum flux that reaction must carry to be considered active.
solver : basestring
Solver name. If None, the default solver will be used.
Returns
-------
bool or dict
True if the precursors can be simultaneously produced at the
specified cutoff. False, if the model has the capacity to produce
each individual precursor at the specified threshold but not all
precursors at the required level simultaneously. Otherwise a
dictionary of the required and the produced fluxes for each reactant
that is not produced in sufficient quantities.
### Response:
def assess_component(model, reaction, side, flux_coefficient_cutoff=0.001,
                     solver=None):
    """Assesses the ability of the model to provide sufficient precursors,
    or absorb products, for a reaction operating at, or beyond,
    the specified cutoff.

    Parameters
    ----------
    model : cobra.Model
        The cobra model to assess production capacity for
    reaction : reaction identifier or cobra.Reaction
        The reaction to assess
    side : basestring
        Side of the reaction, 'products' or 'reactants'
    flux_coefficient_cutoff : float
        The minimum flux that reaction must carry to be considered active.
    solver : basestring
        Solver name. If None, the default solver will be used.

    Returns
    -------
    bool or dict
        True if the precursors can be simultaneously produced at the
        specified cutoff. False, if the model has the capacity to produce
        each individual precursor at the specified threshold but not all
        precursors at the required level simultaneously. Otherwise a
        dictionary of the required and the produced fluxes for each reactant
        that is not produced in sufficient quantities.
    """
    # Accept either a Reaction object or a reaction identifier.
    reaction = model.reactions.get_by_any(reaction)[0]
    # Field name in the result dict depends on which side is assessed.
    result_key = dict(reactants='produced', products='capacity')[side]
    get_components = attrgetter(side)
    # All model modifications below are reverted on leaving this context.
    with model as m:
        m.objective = reaction
        # If the reaction can already carry the cutoff flux, the model
        # supplies (or absorbs) everything required.
        if _optimize_or_value(m, solver=solver) >= flux_coefficient_cutoff:
            return True
        simulation_results = {}
        # build the demand reactions and add all at once
        demand_reactions = {}
        for component in get_components(reaction):
            coeff = reaction.metabolites[component]
            demand = m.add_boundary(component, type='demand')
            # Scale each demand by the reaction's stoichiometric coefficient.
            demand.metabolites[component] = coeff
            demand_reactions[demand] = (component, coeff)
        # First assess whether all precursors can be produced simultaneously
        joint_demand = Reaction("joint_demand")
        for demand_reaction in demand_reactions:
            joint_demand += demand_reaction
        m.add_reactions([joint_demand])
        m.objective = joint_demand
        if _optimize_or_value(m, solver=solver) >= flux_coefficient_cutoff:
            return True
        # Otherwise assess the ability of the model to produce each precursor
        # individually. Now assess the ability of the model to produce each
        # reactant for a reaction
        for demand_reaction, (component, coeff) in iteritems(demand_reactions):
            # Calculate the maximum amount of the
            with m:
                m.objective = demand_reaction
                flux = _optimize_or_value(m, solver=solver)
            # metabolite that can be produced.
            if flux_coefficient_cutoff > flux:
                # Scale the results to a single unit
                simulation_results.update({
                    component: {
                        'required': flux_coefficient_cutoff / abs(coeff),
                        result_key: flux / abs(coeff)
                    }})
        # Empty dict: every individual precursor met the cutoff but the
        # joint demand did not; signal that case with False.
        if len(simulation_results) == 0:
            simulation_results = False
return simulation_results |
def read_dict_from_file(file_path):
"""
Read a dictionary of strings from a file
"""
with open(file_path) as file:
lines = file.read().splitlines()
obj = {}
for line in lines:
key, value = line.split(':', maxsplit=1)
obj[key] = eval(value)
    return obj | Read a dictionary of strings from a file | Below is the instruction that describes the task:
### Input:
Read a dictionary of strings from a file
### Response:
def read_dict_from_file(file_path):
    """Read a dictionary of strings from a file.

    Each line must look like ``key:<python-literal>``; everything after
    the first ``:`` is parsed as a Python literal.

    Parameters
    ----------
    file_path : str
        Path of the file to read.

    Returns
    -------
    dict
        Mapping of each key to its parsed value.
    """
    # SECURITY: the previous implementation used eval(), which executes
    # arbitrary code embedded in the file.  literal_eval only accepts
    # Python literals (strings, numbers, tuples, lists, dicts, ...),
    # which matches the documented "dictionary of strings" contract.
    from ast import literal_eval
    with open(file_path) as file:
        lines = file.read().splitlines()
    obj = {}
    for line in lines:
        # Split on the first ':' only, so values may themselves contain ':'.
        key, value = line.split(':', maxsplit=1)
        obj[key] = literal_eval(value)
    return obj
return obj |
def __remove_alias(type_):
"""
Implementation detail.
Args:
type_ (type_t): type
Returns:
type_t: the type associated to the inputted type
"""
if isinstance(type_, cpptypes.declarated_t) and \
isinstance(type_.declaration, typedef.typedef_t):
return __remove_alias(type_.declaration.decl_type)
if isinstance(type_, cpptypes.compound_t):
type_.base = __remove_alias(type_.base)
return type_
return type_ | Implementation detail.
Args:
type_ (type_t): type
Returns:
        type_t: the type associated to the inputted type | Below is the instruction that describes the task:
### Input:
Implementation detail.
Args:
type_ (type_t): type
Returns:
type_t: the type associated to the inputted type
### Response:
def __remove_alias(type_):
    """
    Implementation detail.
    Args:
        type_ (type_t): type
    Returns:
        type_t: the type associated to the inputted type
    """
    # Unwrap chains of typedef aliases iteratively rather than recursing.
    while isinstance(type_, cpptypes.declarated_t) and \
            isinstance(type_.declaration, typedef.typedef_t):
        type_ = type_.declaration.decl_type
    if isinstance(type_, cpptypes.compound_t):
        # Strip aliases from the wrapped type of pointers/references/etc.
        type_.base = __remove_alias(type_.base)
        return type_
return type_ |
def _imported_symbol(import_path):
"""Resolve a dotted path into a symbol, and return that.
For example...
>>> _imported_symbol('django.db.models.Model')
<class 'django.db.models.base.Model'>
Raise ImportError if there's no such module, AttributeError if no
such symbol.
"""
module_name, symbol_name = import_path.rsplit('.', 1)
module = import_module(module_name)
return getattr(module, symbol_name) | Resolve a dotted path into a symbol, and return that.
For example...
>>> _imported_symbol('django.db.models.Model')
<class 'django.db.models.base.Model'>
Raise ImportError if there's no such module, AttributeError if no
such symbol. | Below is the instruction that describes the task:
### Input:
Resolve a dotted path into a symbol, and return that.
For example...
>>> _imported_symbol('django.db.models.Model')
<class 'django.db.models.base.Model'>
Raise ImportError if there's no such module, AttributeError if no
such symbol.
### Response:
def _imported_symbol(import_path):
    """Resolve a dotted path into a symbol, and return that.
    For example...
    >>> _imported_symbol('django.db.models.Model')
    <class 'django.db.models.base.Model'>
    Raise ImportError if there's no such module, AttributeError if no
    such symbol.
    """
    # Split "pkg.mod.attr" into module path and trailing symbol name;
    # a path without any dot raises ValueError from the unpacking.
    module_name, symbol_name = import_path.rsplit('.', 1)
    module = import_module(module_name)
return getattr(module, symbol_name) |
def chromedriver_element_center_patch():
"""
Patch move_to_element on ActionChains to work around a bug present
in Chromedriver 2.14 to 2.20.
Calling this function multiple times in the same process will
install the patch once, and just once.
"""
patch_name = "_selenic_chromedriver_element_center_patched"
if getattr(ActionChains, patch_name, None):
return # We've patched ActionChains already!!
# This is the patched method, which uses getBoundingClientRect
# to get the location of the center.
def move_to_element(self, el):
pos = self._driver.execute_script("""
var rect = arguments[0].getBoundingClientRect();
return { x: rect.width / 2, y: rect.height / 2};
""", el)
self.move_to_element_with_offset(el, pos["x"], pos["y"])
return self
old_init = ActionChains.__init__
def init(self, driver):
old_init(self, driver)
# Patch the instance, only if the driver needs it.
if getattr(driver, CHROMEDRIVER_ELEMENT_CENTER_PATCH_FLAG, None):
self.move_to_element = types.MethodType(move_to_element, self)
ActionChains.__init__ = init
# Mark ActionChains as patched!
setattr(ActionChains, patch_name, True) | Patch move_to_element on ActionChains to work around a bug present
in Chromedriver 2.14 to 2.20.
Calling this function multiple times in the same process will
install the patch once, and just once. | Below is the instruction that describes the task:
### Input:
Patch move_to_element on ActionChains to work around a bug present
in Chromedriver 2.14 to 2.20.
Calling this function multiple times in the same process will
install the patch once, and just once.
### Response:
def chromedriver_element_center_patch():
    """
    Patch move_to_element on ActionChains to work around a bug present
    in Chromedriver 2.14 to 2.20.
    Calling this function multiple times in the same process will
    install the patch once, and just once.
    """
    patch_name = "_selenic_chromedriver_element_center_patched"
    if getattr(ActionChains, patch_name, None):
        return  # We've patched ActionChains already!!
    # This is the patched method, which uses getBoundingClientRect
    # to get the location of the center.
    def move_to_element(self, el):
        # Compute the element's center inside the browser itself, then
        # move relative to the element with that explicit offset.
        pos = self._driver.execute_script("""
        var rect = arguments[0].getBoundingClientRect();
        return { x: rect.width / 2, y: rect.height / 2};
        """, el)
        self.move_to_element_with_offset(el, pos["x"], pos["y"])
        return self
    # Keep a reference to the original constructor so the wrapper below
    # can delegate to it before deciding whether to patch the instance.
    old_init = ActionChains.__init__
    def init(self, driver):
        old_init(self, driver)
        # Patch the instance, only if the driver needs it.
        if getattr(driver, CHROMEDRIVER_ELEMENT_CENTER_PATCH_FLAG, None):
            self.move_to_element = types.MethodType(move_to_element, self)
    ActionChains.__init__ = init
    # Mark ActionChains as patched!
setattr(ActionChains, patch_name, True) |
def _redistribute_builder(self, afi='ipv4', source=None):
"""Build BGP redistribute method.
Do not use this method directly. You probably want ``redistribute``.
Args:
source (str): Source for redistributing. (connected)
afi (str): Address family to configure. (ipv4, ipv6)
Returns:
Method to redistribute desired source.
Raises:
KeyError: if `source` is not specified.
Examples:
>>> import pynos.device
>>> conn = ('10.24.39.203', '22')
>>> auth = ('admin', 'password')
>>> with pynos.device.Device(conn=conn, auth=auth) as dev:
... output = dev.bgp._redistribute_builder(source='connected',
... afi='ipv4')
... dev.bgp._redistribute_builder(source='hodor',
... afi='ipv4') # doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
AttributeError
"""
if source == 'connected':
return getattr(self._rbridge,
'rbridge_id_router_router_bgp_address_family_{0}_'
'{0}_unicast_default_vrf_af_{0}_uc_and_vrf_cmds_'
'call_point_holder_redistribute_connected_'
'redistribute_connected'.format(afi))
# TODO: Add support for 'static' and 'ospf'
else:
raise AttributeError('Invalid source.') | Build BGP redistribute method.
Do not use this method directly. You probably want ``redistribute``.
Args:
source (str): Source for redistributing. (connected)
afi (str): Address family to configure. (ipv4, ipv6)
Returns:
Method to redistribute desired source.
Raises:
KeyError: if `source` is not specified.
Examples:
>>> import pynos.device
>>> conn = ('10.24.39.203', '22')
>>> auth = ('admin', 'password')
>>> with pynos.device.Device(conn=conn, auth=auth) as dev:
... output = dev.bgp._redistribute_builder(source='connected',
... afi='ipv4')
... dev.bgp._redistribute_builder(source='hodor',
... afi='ipv4') # doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
    AttributeError | Below is the instruction that describes the task:
### Input:
Build BGP redistribute method.
Do not use this method directly. You probably want ``redistribute``.
Args:
source (str): Source for redistributing. (connected)
afi (str): Address family to configure. (ipv4, ipv6)
Returns:
Method to redistribute desired source.
Raises:
KeyError: if `source` is not specified.
Examples:
>>> import pynos.device
>>> conn = ('10.24.39.203', '22')
>>> auth = ('admin', 'password')
>>> with pynos.device.Device(conn=conn, auth=auth) as dev:
... output = dev.bgp._redistribute_builder(source='connected',
... afi='ipv4')
... dev.bgp._redistribute_builder(source='hodor',
... afi='ipv4') # doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
AttributeError
### Response:
def _redistribute_builder(self, afi='ipv4', source=None):
    """Build BGP redistribute method.
    Do not use this method directly. You probably want ``redistribute``.
    Args:
        source (str): Source for redistributing. (connected)
        afi (str): Address family to configure. (ipv4, ipv6)
    Returns:
        Method to redistribute desired source.
    Raises:
        AttributeError: if `source` is not a supported source
            (currently only 'connected').
    Examples:
        >>> import pynos.device
        >>> conn = ('10.24.39.203', '22')
        >>> auth = ('admin', 'password')
        >>> with pynos.device.Device(conn=conn, auth=auth) as dev:
        ...     output = dev.bgp._redistribute_builder(source='connected',
        ...     afi='ipv4')
        ...     dev.bgp._redistribute_builder(source='hodor',
        ...     afi='ipv4') # doctest: +IGNORE_EXCEPTION_DETAIL
        Traceback (most recent call last):
        AttributeError
    """
    if source == 'connected':
        # The YANG-derived callable name embeds the AFI in several path
        # segments, hence the repeated '{0}' placeholders.
        return getattr(self._rbridge,
                       'rbridge_id_router_router_bgp_address_family_{0}_'
                       '{0}_unicast_default_vrf_af_{0}_uc_and_vrf_cmds_'
                       'call_point_holder_redistribute_connected_'
                       'redistribute_connected'.format(afi))
    # TODO: Add support for 'static' and 'ospf'
    else:
raise AttributeError('Invalid source.') |
def as_money(self, number, **options):
"""Format a number into currency.
Usage: accounting.formatMoney(number, symbol, precision, thousandsSep,
decimalSep, format)
defaults: (0, "$", 2, ",", ".", "%s%v")
Localise by overriding the symbol, precision,
thousand / decimal separators and format
Second param can be an object matching `settings.currency`
which is the easiest way.
Args:
number (TYPE): Description
precision (TYPE): Description
thousand (TYPE): Description
decimal (TYPE): Description
Returns:
name (TYPE): Description
"""
# Resursively format arrays
if isinstance(number, list):
return map(lambda val: self.as_money(val, **options))
# Clean up number
decimal = options.get('decimal')
number = self.parse(number, decimal)
# Build options object from second param (if object) or all params,
# extending defaults
if check_type(options, 'dict'):
options = (self.settings['currency'].update(options))
# Check format (returns object with pos, neg and zero)
formats = self._check_currency_format(options['format'])
# Choose which format to use for this value
use_format = (lambda num: formats['pos'] if num > 0 else formats[
'neg'] if num < 0 else formats['zero'])(number)
precision = self._change_precision(number, options['precision'])
thousands = options['thousand']
decimal = options['decimal']
formater = self.format(abs(number), precision, thousands, decimal)
# Return with currency symbol added
amount = use_format.replace(
'%s', options['symbol']).replace('%v', formater)
return amount | Format a number into currency.
Usage: accounting.formatMoney(number, symbol, precision, thousandsSep,
decimalSep, format)
defaults: (0, "$", 2, ",", ".", "%s%v")
Localise by overriding the symbol, precision,
thousand / decimal separators and format
Second param can be an object matching `settings.currency`
which is the easiest way.
Args:
number (TYPE): Description
precision (TYPE): Description
thousand (TYPE): Description
decimal (TYPE): Description
Returns:
        name (TYPE): Description | Below is the instruction that describes the task:
### Input:
Format a number into currency.
Usage: accounting.formatMoney(number, symbol, precision, thousandsSep,
decimalSep, format)
defaults: (0, "$", 2, ",", ".", "%s%v")
Localise by overriding the symbol, precision,
thousand / decimal separators and format
Second param can be an object matching `settings.currency`
which is the easiest way.
Args:
number (TYPE): Description
precision (TYPE): Description
thousand (TYPE): Description
decimal (TYPE): Description
Returns:
name (TYPE): Description
### Response:
def as_money(self, number, **options):
    """Format a number into currency.

    Localise by overriding the symbol, precision, thousand / decimal
    separators and format via keyword arguments; they extend the
    defaults found in ``self.settings['currency']``.

    Args:
        number: Number (or list of numbers) to format.
        **options: Overrides for ``symbol``, ``precision``,
            ``thousand``, ``decimal`` and ``format``.

    Returns:
        The formatted amount as a string (or a list of strings when
        `number` is a list).
    """
    # Recursively format lists of values.
    if isinstance(number, list):
        # BUG FIX: map() was previously called without its iterable
        # argument, which raised TypeError for any list input.
        return [self.as_money(val, **options) for val in number]
    # Clean up number
    decimal = options.get('decimal')
    number = self.parse(number, decimal)
    # Build options object from the keyword params, extending defaults.
    if check_type(options, 'dict'):
        # BUG FIX: dict.update() returns None, so the old code assigned
        # None to `options` (and mutated the shared defaults).  Merge
        # into a copy instead.
        merged = dict(self.settings['currency'])
        merged.update(options)
        options = merged
    # Check format (returns object with pos, neg and zero)
    formats = self._check_currency_format(options['format'])
    # Choose which format to use for this value.
    if number > 0:
        use_format = formats['pos']
    elif number < 0:
        use_format = formats['neg']
    else:
        use_format = formats['zero']
    precision = self._change_precision(number, options['precision'])
    thousands = options['thousand']
    decimal = options['decimal']
    formater = self.format(abs(number), precision, thousands, decimal)
    # Return with currency symbol added
    amount = use_format.replace(
        '%s', options['symbol']).replace('%v', formater)
return amount |
def _apply_serial_port(serial_device_spec, key, operation='add'):
'''
Returns a vim.vm.device.VirtualSerialPort representing a serial port
component
serial_device_spec
Serial device properties
key
Unique key of the device
operation
Add or edit the given device
.. code-block:: bash
serial_ports:
adapter: 'Serial port 1'
backing:
type: uri
uri: 'telnet://something:port'
direction: <client|server>
filename: 'service_uri'
connectable:
allow_guest_control: True
start_connected: True
yield: False
'''
log.trace('Creating serial port adapter=%s type=%s connectable=%s yield=%s',
serial_device_spec['adapter'], serial_device_spec['type'],
serial_device_spec['connectable'], serial_device_spec['yield'])
device_spec = vim.vm.device.VirtualDeviceSpec()
device_spec.device = vim.vm.device.VirtualSerialPort()
if operation == 'add':
device_spec.operation = vim.vm.device.VirtualDeviceSpec.Operation.add
elif operation == 'edit':
device_spec.operation = vim.vm.device.VirtualDeviceSpec.Operation.edit
connect_info = vim.vm.device.VirtualDevice.ConnectInfo()
type_backing = None
if serial_device_spec['type'] == 'network':
type_backing = vim.vm.device.VirtualSerialPort.URIBackingInfo()
if 'uri' not in serial_device_spec['backing'].keys():
raise ValueError('vSPC proxy URI not specified in config')
if 'uri' not in serial_device_spec['backing'].keys():
raise ValueError('vSPC Direction not specified in config')
if 'filename' not in serial_device_spec['backing'].keys():
raise ValueError('vSPC Filename not specified in config')
type_backing.proxyURI = serial_device_spec['backing']['uri']
type_backing.direction = serial_device_spec['backing']['direction']
type_backing.serviceURI = serial_device_spec['backing']['filename']
if serial_device_spec['type'] == 'pipe':
type_backing = vim.vm.device.VirtualSerialPort.PipeBackingInfo()
if serial_device_spec['type'] == 'file':
type_backing = vim.vm.device.VirtualSerialPort.FileBackingInfo()
if serial_device_spec['type'] == 'device':
type_backing = vim.vm.device.VirtualSerialPort.DeviceBackingInfo()
connect_info.allowGuestControl = \
serial_device_spec['connectable']['allow_guest_control']
connect_info.startConnected = \
serial_device_spec['connectable']['start_connected']
device_spec.device.backing = type_backing
device_spec.device.connectable = connect_info
device_spec.device.unitNumber = 1
device_spec.device.key = key
device_spec.device.yieldOnPoll = serial_device_spec['yield']
return device_spec | Returns a vim.vm.device.VirtualSerialPort representing a serial port
component
serial_device_spec
Serial device properties
key
Unique key of the device
operation
Add or edit the given device
.. code-block:: bash
serial_ports:
adapter: 'Serial port 1'
backing:
type: uri
uri: 'telnet://something:port'
direction: <client|server>
filename: 'service_uri'
connectable:
allow_guest_control: True
start_connected: True
          yield: False | Below is the instruction that describes the task:
### Input:
Returns a vim.vm.device.VirtualSerialPort representing a serial port
component
serial_device_spec
Serial device properties
key
Unique key of the device
operation
Add or edit the given device
.. code-block:: bash
serial_ports:
adapter: 'Serial port 1'
backing:
type: uri
uri: 'telnet://something:port'
direction: <client|server>
filename: 'service_uri'
connectable:
allow_guest_control: True
start_connected: True
yield: False
### Response:
def _apply_serial_port(serial_device_spec, key, operation='add'):
    '''
    Returns a vim.vm.device.VirtualDeviceSpec representing a serial port
    component
    serial_device_spec
        Serial device properties
    key
        Unique key of the device
    operation
        Add or edit the given device
    .. code-block:: bash
        serial_ports:
          adapter: 'Serial port 1'
          backing:
            type: uri
            uri: 'telnet://something:port'
            direction: <client|server>
            filename: 'service_uri'
          connectable:
            allow_guest_control: True
            start_connected: True
          yield: False
    '''
    log.trace('Creating serial port adapter=%s type=%s connectable=%s yield=%s',
              serial_device_spec['adapter'], serial_device_spec['type'],
              serial_device_spec['connectable'], serial_device_spec['yield'])
    device_spec = vim.vm.device.VirtualDeviceSpec()
    device_spec.device = vim.vm.device.VirtualSerialPort()
    if operation == 'add':
        device_spec.operation = vim.vm.device.VirtualDeviceSpec.Operation.add
    elif operation == 'edit':
        device_spec.operation = vim.vm.device.VirtualDeviceSpec.Operation.edit
    connect_info = vim.vm.device.VirtualDevice.ConnectInfo()
    type_backing = None
    if serial_device_spec['type'] == 'network':
        type_backing = vim.vm.device.VirtualSerialPort.URIBackingInfo()
        if 'uri' not in serial_device_spec['backing'].keys():
            raise ValueError('vSPC proxy URI not specified in config')
        # BUG FIX: this check previously re-tested 'uri' instead of
        # 'direction', so a missing direction slipped past validation and
        # failed later with a KeyError.
        if 'direction' not in serial_device_spec['backing'].keys():
            raise ValueError('vSPC Direction not specified in config')
        if 'filename' not in serial_device_spec['backing'].keys():
            raise ValueError('vSPC Filename not specified in config')
        type_backing.proxyURI = serial_device_spec['backing']['uri']
        type_backing.direction = serial_device_spec['backing']['direction']
        type_backing.serviceURI = serial_device_spec['backing']['filename']
    # The backing types are mutually exclusive, so chain them with elif.
    elif serial_device_spec['type'] == 'pipe':
        type_backing = vim.vm.device.VirtualSerialPort.PipeBackingInfo()
    elif serial_device_spec['type'] == 'file':
        type_backing = vim.vm.device.VirtualSerialPort.FileBackingInfo()
    elif serial_device_spec['type'] == 'device':
        type_backing = vim.vm.device.VirtualSerialPort.DeviceBackingInfo()
    connect_info.allowGuestControl = \
        serial_device_spec['connectable']['allow_guest_control']
    connect_info.startConnected = \
        serial_device_spec['connectable']['start_connected']
    device_spec.device.backing = type_backing
    device_spec.device.connectable = connect_info
    device_spec.device.unitNumber = 1
    device_spec.device.key = key
    device_spec.device.yieldOnPoll = serial_device_spec['yield']
return device_spec |
def _get_repeat_masker_header(pairwise_alignment):
"""generate header string of repeatmasker formated repr of self."""
res = ""
res += str(pairwise_alignment.meta[ALIG_SCORE_KEY]) + " "
res += "{:.2f}".format(pairwise_alignment.meta[PCENT_SUBS_KEY]) + " "
res += "{:.2f}".format(pairwise_alignment.meta[PCENT_S1_INDELS_KEY]) + " "
res += "{:.2f}".format(pairwise_alignment.meta[PCENT_S2_INDELS_KEY]) + " "
res += (pairwise_alignment.s1.name
if (pairwise_alignment.s1.name != "" and
pairwise_alignment.s1.name is not None)
else UNKNOWN_SEQ_NAME) + " "
res += str(pairwise_alignment.s1.start) + " "
res += str(pairwise_alignment.s1.end - 1) + " "
res += "(" + str(pairwise_alignment.s1.remaining) + ") "
res += ("C " if not pairwise_alignment.s2.is_positive_strand() else "")
res += (pairwise_alignment.s2.name
if (pairwise_alignment.s2.name != "" and
pairwise_alignment.s2.name is not None)
else UNKNOWN_SEQ_NAME) + " "
res += ("(" + str(pairwise_alignment.s2.remaining) + ")"
if not pairwise_alignment.s2.is_positive_strand()
else str(pairwise_alignment.s2.start))
res += " "
# Note here that we need to convert between our internal representation
# for coordinates and the repeat-masker one; internally, we always store
# coordinates as exclusive of the final value with start < end;
# repeatmasker gives the larger coordinate as the 'start' when the match
# is to the reverse complement, so we have to swap start/end, and its
# coordinates are inclusive of end, so we have to subtract 1 from end.
res += str(pairwise_alignment.s2.end - 1) + " "
res += (str(pairwise_alignment.s2.start)
if not pairwise_alignment.s2.is_positive_strand()
else "(" + str(pairwise_alignment.s2.remaining) + ")") + " "
res += pairwise_alignment.meta[UNKNOWN_RM_HEADER_FIELD_KEY] + " "
res += str(pairwise_alignment.meta[RM_ID_KEY])
    return res | generate header string of repeatmasker formatted repr of self. | Below is the instruction that describes the task:
### Input:
generate header string of repeatmasker formatted repr of self.
### Response:
def _get_repeat_masker_header(pairwise_alignment):
    """generate header string of repeatmasker formatted repr of self.

    :param pairwise_alignment: alignment whose ``meta`` entries and two
        sequences (``s1``, ``s2``) supply the header fields.
    :return: the header as a single space-separated string.
    """
    res = ""
    # Score and percentage fields come straight from alignment metadata.
    res += str(pairwise_alignment.meta[ALIG_SCORE_KEY]) + " "
    res += "{:.2f}".format(pairwise_alignment.meta[PCENT_SUBS_KEY]) + " "
    res += "{:.2f}".format(pairwise_alignment.meta[PCENT_S1_INDELS_KEY]) + " "
    res += "{:.2f}".format(pairwise_alignment.meta[PCENT_S2_INDELS_KEY]) + " "
    # Fall back to a placeholder when s1 has no usable name.
    res += (pairwise_alignment.s1.name
            if (pairwise_alignment.s1.name != "" and
                pairwise_alignment.s1.name is not None)
            else UNKNOWN_SEQ_NAME) + " "
    res += str(pairwise_alignment.s1.start) + " "
    res += str(pairwise_alignment.s1.end - 1) + " "
    res += "(" + str(pairwise_alignment.s1.remaining) + ") "
    # 'C' marks a match against the reverse-complement strand of s2.
    res += ("C " if not pairwise_alignment.s2.is_positive_strand() else "")
    res += (pairwise_alignment.s2.name
            if (pairwise_alignment.s2.name != "" and
                pairwise_alignment.s2.name is not None)
            else UNKNOWN_SEQ_NAME) + " "
    res += ("(" + str(pairwise_alignment.s2.remaining) + ")"
            if not pairwise_alignment.s2.is_positive_strand()
            else str(pairwise_alignment.s2.start))
    res += " "
    # Note here that we need to convert between our internal representation
    # for coordinates and the repeat-masker one; internally, we always store
    # coordinates as exclusive of the final value with start < end;
    # repeatmasker gives the larger coordinate as the 'start' when the match
    # is to the reverse complement, so we have to swap start/end, and its
    # coordinates are inclusive of end, so we have to subtract 1 from end.
    res += str(pairwise_alignment.s2.end - 1) + " "
    res += (str(pairwise_alignment.s2.start)
            if not pairwise_alignment.s2.is_positive_strand()
            else "(" + str(pairwise_alignment.s2.remaining) + ")") + " "
    res += pairwise_alignment.meta[UNKNOWN_RM_HEADER_FIELD_KEY] + " "
    res += str(pairwise_alignment.meta[RM_ID_KEY])
return res |
def window_cauchy(N, alpha=3):
r"""Cauchy tapering window
:param int N: window length
:param float alpha: parameter of the poisson window
.. math:: w(n) = \frac{1}{1+\left(\frac{\alpha*n}{N/2}\right)**2}
.. plot::
:width: 80%
:include-source:
from spectrum import window_visu
window_visu(64, 'cauchy', alpha=3)
window_visu(64, 'cauchy', alpha=4)
window_visu(64, 'cauchy', alpha=5)
.. seealso:: :func:`window_poisson`, :func:`window_hann`
"""
n = linspace(-N/2., (N)/2., N)
w = 1./(1.+ (alpha*n/(N/2.))**2)
return w | r"""Cauchy tapering window
:param int N: window length
:param float alpha: parameter of the poisson window
.. math:: w(n) = \frac{1}{1+\left(\frac{\alpha*n}{N/2}\right)**2}
.. plot::
:width: 80%
:include-source:
from spectrum import window_visu
window_visu(64, 'cauchy', alpha=3)
window_visu(64, 'cauchy', alpha=4)
window_visu(64, 'cauchy', alpha=5)
    .. seealso:: :func:`window_poisson`, :func:`window_hann` | Below is the instruction that describes the task:
### Input:
r"""Cauchy tapering window
:param int N: window length
:param float alpha: parameter of the poisson window
.. math:: w(n) = \frac{1}{1+\left(\frac{\alpha*n}{N/2}\right)**2}
.. plot::
:width: 80%
:include-source:
from spectrum import window_visu
window_visu(64, 'cauchy', alpha=3)
window_visu(64, 'cauchy', alpha=4)
window_visu(64, 'cauchy', alpha=5)
.. seealso:: :func:`window_poisson`, :func:`window_hann`
### Response:
def window_cauchy(N, alpha=3):
    r"""Cauchy tapering window
    :param int N: window length
    :param float alpha: parameter of the poisson window
    .. math:: w(n) = \frac{1}{1+\left(\frac{\alpha*n}{N/2}\right)**2}
    .. plot::
        :width: 80%
        :include-source:
        from spectrum import window_visu
        window_visu(64, 'cauchy', alpha=3)
        window_visu(64, 'cauchy', alpha=4)
        window_visu(64, 'cauchy', alpha=5)
    .. seealso:: :func:`window_poisson`, :func:`window_hann`
    """
    # Sample positions centred on zero, then apply the Cauchy taper.
    half_width = N / 2.
    grid = linspace(-half_width, half_width, N)
    w = 1. / (1. + (alpha * grid / half_width) ** 2)
return w |
def docpie(doc, argv=None, help=True, version=None,
stdopt=True, attachopt=True, attachvalue=True,
helpstyle='python',
auto2dashes=True, name=None, case_sensitive=False,
optionsfirst=False, appearedonly=False, namedoptions=False,
extra=None):
"""
Parse `argv` based on command-line interface described in `doc`.
`docpie` creates your command-line interface based on its
description that you pass as `doc`. Such description can contain
--options, <positional-argument>, commands, which could be
[optional], (required), (mutually | exclusive) or repeated...
Parameters
----------
doc : str
Description of your command-line interface.
argv : list of str, optional
Argument vector to be parsed. sys.argv is used if not
provided.
help : bool (default: True)
Set to False to disable automatic help on -h or --help
options.
version : any object but None
If passed, the object will be printed if --version is in
`argv`.
stdopt : bool (default: True)
When it's True, long flag should only starts with --
attachopt: bool (default: True)
write/pass several short flag into one, e.g. -abc can mean -a -b -c.
This only works when stdopt=True
attachvalue: bool (default: True)
allow you to write short flag and its value together,
e.g. -abc can mean -a bc
auto2dashes: bool (default: True)
automaticly handle -- (which means "end of command line flag")
name: str (default: None)
the "name" of your program. In each of your "usage" the "name" will be
ignored. By default docpie will ignore the first element of your
"usage".
case_sensitive: bool (deprecated / default: False)
specifies if it need case sensitive when matching
"Usage:" and "Options:"
optionsfirst: bool (default: False)
everything after first positional argument will be interpreted as
positional argument
appearedonly: bool (default: False)
when set True, the options that never appear in argv will not
be put in result. Note this only affect options
extra: dict
customize pre-handled options. See
http://docpie.comes.today/document/advanced-apis/
for more infomation.
Returns
-------
args : dict
A dictionary, where keys are names of command-line elements
such as e.g. "--verbose" and "<path>", and values are the
parsed values of those elements.
Example
-------
>>> from docpie import docpie
>>> doc = '''
... Usage:
... my_program tcp <host> <port> [--timeout=<seconds>]
... my_program serial <port> [--baud=<n>] [--timeout=<seconds>]
... my_program (-h | --help | --version)
...
... Options:
... -h, --help Show this screen and exit.
... --baud=<n> Baudrate [default: 9600]
... '''
>>> argv = ['my_program', 'tcp', '127.0.0.1', '80', '--timeout', '30']
>>> docpie(doc, argv)
{
'--': False,
'-h': False,
'--baud': '9600',
'--help': False,
'--timeout': '30',
'--version': False,
'<host>': '127.0.0.1',
'<port>': '80',
'serial': False,
'tcp': True}
See also
--------
* Full documentation is available in README.md as well as online
at http://docpie.comes.today/document/quick-start/
"""
if case_sensitive:
warnings.warn('`case_sensitive` is deprecated, `docpie` is always '
'case insensitive')
kwargs = locals()
argv = kwargs.pop('argv')
pie = Docpie(**kwargs)
pie.docpie(argv)
return pie | Parse `argv` based on command-line interface described in `doc`.
`docpie` creates your command-line interface based on its
description that you pass as `doc`. Such description can contain
--options, <positional-argument>, commands, which could be
[optional], (required), (mutually | exclusive) or repeated...
Parameters
----------
doc : str
Description of your command-line interface.
argv : list of str, optional
Argument vector to be parsed. sys.argv is used if not
provided.
help : bool (default: True)
Set to False to disable automatic help on -h or --help
options.
version : any object but None
If passed, the object will be printed if --version is in
`argv`.
stdopt : bool (default: True)
When it's True, long flag should only starts with --
attachopt: bool (default: True)
write/pass several short flag into one, e.g. -abc can mean -a -b -c.
This only works when stdopt=True
attachvalue: bool (default: True)
allow you to write short flag and its value together,
e.g. -abc can mean -a bc
auto2dashes: bool (default: True)
automaticly handle -- (which means "end of command line flag")
name: str (default: None)
the "name" of your program. In each of your "usage" the "name" will be
ignored. By default docpie will ignore the first element of your
"usage".
case_sensitive: bool (deprecated / default: False)
specifies if it need case sensitive when matching
"Usage:" and "Options:"
optionsfirst: bool (default: False)
everything after first positional argument will be interpreted as
positional argument
appearedonly: bool (default: False)
when set True, the options that never appear in argv will not
be put in result. Note this only affect options
extra: dict
customize pre-handled options. See
http://docpie.comes.today/document/advanced-apis/
for more infomation.
Returns
-------
args : dict
A dictionary, where keys are names of command-line elements
such as e.g. "--verbose" and "<path>", and values are the
parsed values of those elements.
Example
-------
>>> from docpie import docpie
>>> doc = '''
... Usage:
... my_program tcp <host> <port> [--timeout=<seconds>]
... my_program serial <port> [--baud=<n>] [--timeout=<seconds>]
... my_program (-h | --help | --version)
...
... Options:
... -h, --help Show this screen and exit.
... --baud=<n> Baudrate [default: 9600]
... '''
>>> argv = ['my_program', 'tcp', '127.0.0.1', '80', '--timeout', '30']
>>> docpie(doc, argv)
{
'--': False,
'-h': False,
'--baud': '9600',
'--help': False,
'--timeout': '30',
'--version': False,
'<host>': '127.0.0.1',
'<port>': '80',
'serial': False,
'tcp': True}
See also
--------
* Full documentation is available in README.md as well as online
at http://docpie.comes.today/document/quick-start/ | Below is the the instruction that describes the task:
### Input:
Parse `argv` based on command-line interface described in `doc`.
`docpie` creates your command-line interface based on its
description that you pass as `doc`. Such description can contain
--options, <positional-argument>, commands, which could be
[optional], (required), (mutually | exclusive) or repeated...
Parameters
----------
doc : str
Description of your command-line interface.
argv : list of str, optional
Argument vector to be parsed. sys.argv is used if not
provided.
help : bool (default: True)
Set to False to disable automatic help on -h or --help
options.
version : any object but None
If passed, the object will be printed if --version is in
`argv`.
stdopt : bool (default: True)
When it's True, long flag should only starts with --
attachopt: bool (default: True)
write/pass several short flag into one, e.g. -abc can mean -a -b -c.
This only works when stdopt=True
attachvalue: bool (default: True)
allow you to write short flag and its value together,
e.g. -abc can mean -a bc
auto2dashes: bool (default: True)
automaticly handle -- (which means "end of command line flag")
name: str (default: None)
the "name" of your program. In each of your "usage" the "name" will be
ignored. By default docpie will ignore the first element of your
"usage".
case_sensitive: bool (deprecated / default: False)
specifies if it need case sensitive when matching
"Usage:" and "Options:"
optionsfirst: bool (default: False)
everything after first positional argument will be interpreted as
positional argument
appearedonly: bool (default: False)
when set True, the options that never appear in argv will not
be put in result. Note this only affect options
extra: dict
customize pre-handled options. See
http://docpie.comes.today/document/advanced-apis/
for more infomation.
Returns
-------
args : dict
A dictionary, where keys are names of command-line elements
such as e.g. "--verbose" and "<path>", and values are the
parsed values of those elements.
Example
-------
>>> from docpie import docpie
>>> doc = '''
... Usage:
... my_program tcp <host> <port> [--timeout=<seconds>]
... my_program serial <port> [--baud=<n>] [--timeout=<seconds>]
... my_program (-h | --help | --version)
...
... Options:
... -h, --help Show this screen and exit.
... --baud=<n> Baudrate [default: 9600]
... '''
>>> argv = ['my_program', 'tcp', '127.0.0.1', '80', '--timeout', '30']
>>> docpie(doc, argv)
{
'--': False,
'-h': False,
'--baud': '9600',
'--help': False,
'--timeout': '30',
'--version': False,
'<host>': '127.0.0.1',
'<port>': '80',
'serial': False,
'tcp': True}
See also
--------
* Full documentation is available in README.md as well as online
at http://docpie.comes.today/document/quick-start/
### Response:
def docpie(doc, argv=None, help=True, version=None,
stdopt=True, attachopt=True, attachvalue=True,
helpstyle='python',
auto2dashes=True, name=None, case_sensitive=False,
optionsfirst=False, appearedonly=False, namedoptions=False,
extra=None):
"""
Parse `argv` based on command-line interface described in `doc`.
`docpie` creates your command-line interface based on its
description that you pass as `doc`. Such description can contain
--options, <positional-argument>, commands, which could be
[optional], (required), (mutually | exclusive) or repeated...
Parameters
----------
doc : str
Description of your command-line interface.
argv : list of str, optional
Argument vector to be parsed. sys.argv is used if not
provided.
help : bool (default: True)
Set to False to disable automatic help on -h or --help
options.
version : any object but None
If passed, the object will be printed if --version is in
`argv`.
stdopt : bool (default: True)
When it's True, long flag should only starts with --
attachopt: bool (default: True)
write/pass several short flag into one, e.g. -abc can mean -a -b -c.
This only works when stdopt=True
attachvalue: bool (default: True)
allow you to write short flag and its value together,
e.g. -abc can mean -a bc
auto2dashes: bool (default: True)
automaticly handle -- (which means "end of command line flag")
name: str (default: None)
the "name" of your program. In each of your "usage" the "name" will be
ignored. By default docpie will ignore the first element of your
"usage".
case_sensitive: bool (deprecated / default: False)
specifies if it need case sensitive when matching
"Usage:" and "Options:"
optionsfirst: bool (default: False)
everything after first positional argument will be interpreted as
positional argument
appearedonly: bool (default: False)
when set True, the options that never appear in argv will not
be put in result. Note this only affect options
extra: dict
customize pre-handled options. See
http://docpie.comes.today/document/advanced-apis/
for more infomation.
Returns
-------
args : dict
A dictionary, where keys are names of command-line elements
such as e.g. "--verbose" and "<path>", and values are the
parsed values of those elements.
Example
-------
>>> from docpie import docpie
>>> doc = '''
... Usage:
... my_program tcp <host> <port> [--timeout=<seconds>]
... my_program serial <port> [--baud=<n>] [--timeout=<seconds>]
... my_program (-h | --help | --version)
...
... Options:
... -h, --help Show this screen and exit.
... --baud=<n> Baudrate [default: 9600]
... '''
>>> argv = ['my_program', 'tcp', '127.0.0.1', '80', '--timeout', '30']
>>> docpie(doc, argv)
{
'--': False,
'-h': False,
'--baud': '9600',
'--help': False,
'--timeout': '30',
'--version': False,
'<host>': '127.0.0.1',
'<port>': '80',
'serial': False,
'tcp': True}
See also
--------
* Full documentation is available in README.md as well as online
at http://docpie.comes.today/document/quick-start/
"""
if case_sensitive:
warnings.warn('`case_sensitive` is deprecated, `docpie` is always '
'case insensitive')
kwargs = locals()
argv = kwargs.pop('argv')
pie = Docpie(**kwargs)
pie.docpie(argv)
return pie |
def rfind(self, bs, start=None, end=None, bytealigned=None):
"""Find final occurrence of substring bs.
Returns a single item tuple with the bit position if found, or an
empty tuple if not found. The bit position (pos property) will
also be set to the start of the substring if it is found.
bs -- The bitstring to find.
start -- The bit position to end the reverse search. Defaults to 0.
end -- The bit position one past the first bit to reverse search.
Defaults to self.len.
bytealigned -- If True the bitstring will only be found on byte
boundaries.
Raises ValueError if bs is empty, if start < 0, if end > self.len or
if end < start.
"""
bs = Bits(bs)
start, end = self._validate_slice(start, end)
if bytealigned is None:
bytealigned = globals()['bytealigned']
if not bs.len:
raise ValueError("Cannot find an empty bitstring.")
# Search chunks starting near the end and then moving back
# until we find bs.
increment = max(8192, bs.len * 80)
buffersize = min(increment + bs.len, end - start)
pos = max(start, end - buffersize)
while True:
found = list(self.findall(bs, start=pos, end=pos + buffersize,
bytealigned=bytealigned))
if not found:
if pos == start:
return ()
pos = max(start, pos - increment)
continue
return (found[-1],) | Find final occurrence of substring bs.
Returns a single item tuple with the bit position if found, or an
empty tuple if not found. The bit position (pos property) will
also be set to the start of the substring if it is found.
bs -- The bitstring to find.
start -- The bit position to end the reverse search. Defaults to 0.
end -- The bit position one past the first bit to reverse search.
Defaults to self.len.
bytealigned -- If True the bitstring will only be found on byte
boundaries.
Raises ValueError if bs is empty, if start < 0, if end > self.len or
if end < start. | Below is the the instruction that describes the task:
### Input:
Find final occurrence of substring bs.
Returns a single item tuple with the bit position if found, or an
empty tuple if not found. The bit position (pos property) will
also be set to the start of the substring if it is found.
bs -- The bitstring to find.
start -- The bit position to end the reverse search. Defaults to 0.
end -- The bit position one past the first bit to reverse search.
Defaults to self.len.
bytealigned -- If True the bitstring will only be found on byte
boundaries.
Raises ValueError if bs is empty, if start < 0, if end > self.len or
if end < start.
### Response:
def rfind(self, bs, start=None, end=None, bytealigned=None):
"""Find final occurrence of substring bs.
Returns a single item tuple with the bit position if found, or an
empty tuple if not found. The bit position (pos property) will
also be set to the start of the substring if it is found.
bs -- The bitstring to find.
start -- The bit position to end the reverse search. Defaults to 0.
end -- The bit position one past the first bit to reverse search.
Defaults to self.len.
bytealigned -- If True the bitstring will only be found on byte
boundaries.
Raises ValueError if bs is empty, if start < 0, if end > self.len or
if end < start.
"""
bs = Bits(bs)
start, end = self._validate_slice(start, end)
if bytealigned is None:
bytealigned = globals()['bytealigned']
if not bs.len:
raise ValueError("Cannot find an empty bitstring.")
# Search chunks starting near the end and then moving back
# until we find bs.
increment = max(8192, bs.len * 80)
buffersize = min(increment + bs.len, end - start)
pos = max(start, end - buffersize)
while True:
found = list(self.findall(bs, start=pos, end=pos + buffersize,
bytealigned=bytealigned))
if not found:
if pos == start:
return ()
pos = max(start, pos - increment)
continue
return (found[-1],) |
def url(**attributes):
"""Parses an URL and validates its attributes."""
def check_url(value):
validate(text, value)
parsed = urlparse(value)
if not parsed.netloc:
raise ValueError("'{0}' is not a valid URL".format(value))
for name, schema in attributes.items():
if not _hasattr(parsed, name):
raise ValueError("Invalid URL attribute '{0}'".format(name))
try:
validate(schema, _getattr(parsed, name))
except ValueError as err:
raise ValueError(
"Unable to validate URL attribute '{0}': {1}".format(
name, err
)
)
return True
# Convert "http" to be either any("http", "https") for convenience
if attributes.get("scheme") == "http":
attributes["scheme"] = any("http", "https")
return check_url | Parses an URL and validates its attributes. | Below is the the instruction that describes the task:
### Input:
Parses an URL and validates its attributes.
### Response:
def url(**attributes):
"""Parses an URL and validates its attributes."""
def check_url(value):
validate(text, value)
parsed = urlparse(value)
if not parsed.netloc:
raise ValueError("'{0}' is not a valid URL".format(value))
for name, schema in attributes.items():
if not _hasattr(parsed, name):
raise ValueError("Invalid URL attribute '{0}'".format(name))
try:
validate(schema, _getattr(parsed, name))
except ValueError as err:
raise ValueError(
"Unable to validate URL attribute '{0}': {1}".format(
name, err
)
)
return True
# Convert "http" to be either any("http", "https") for convenience
if attributes.get("scheme") == "http":
attributes["scheme"] = any("http", "https")
return check_url |
def build_char_states(self, char_embed, is_training, reuse, char_ids, char_lengths):
"""Build char embedding network for the QA model."""
max_char_length = self.cfg.max_char_length
inputs = dropout(tf.nn.embedding_lookup(char_embed, char_ids),
self.cfg.dropout, is_training)
inputs = tf.reshape(
inputs, shape=[max_char_length, -1, self.cfg.char_embed_dim])
char_lengths = tf.reshape(char_lengths, shape=[-1])
with tf.variable_scope('char_encoding', reuse=reuse):
cell_fw = XGRUCell(hidden_dim=self.cfg.char_embed_dim)
cell_bw = XGRUCell(hidden_dim=self.cfg.char_embed_dim)
_, (left_right, right_left) = tf.nn.bidirectional_dynamic_rnn(
cell_fw=cell_fw,
cell_bw=cell_bw,
sequence_length=char_lengths,
inputs=inputs,
time_major=True,
dtype=tf.float32
)
left_right = tf.reshape(left_right, shape=[-1, self.cfg.char_embed_dim])
right_left = tf.reshape(right_left, shape=[-1, self.cfg.char_embed_dim])
states = tf.concat([left_right, right_left], axis=1)
out_shape = tf.shape(char_ids)[1:3]
out_shape = tf.concat([out_shape, tf.constant(
value=[self.cfg.char_embed_dim * 2], dtype=tf.int32)], axis=0)
return tf.reshape(states, shape=out_shape) | Build char embedding network for the QA model. | Below is the the instruction that describes the task:
### Input:
Build char embedding network for the QA model.
### Response:
def build_char_states(self, char_embed, is_training, reuse, char_ids, char_lengths):
"""Build char embedding network for the QA model."""
max_char_length = self.cfg.max_char_length
inputs = dropout(tf.nn.embedding_lookup(char_embed, char_ids),
self.cfg.dropout, is_training)
inputs = tf.reshape(
inputs, shape=[max_char_length, -1, self.cfg.char_embed_dim])
char_lengths = tf.reshape(char_lengths, shape=[-1])
with tf.variable_scope('char_encoding', reuse=reuse):
cell_fw = XGRUCell(hidden_dim=self.cfg.char_embed_dim)
cell_bw = XGRUCell(hidden_dim=self.cfg.char_embed_dim)
_, (left_right, right_left) = tf.nn.bidirectional_dynamic_rnn(
cell_fw=cell_fw,
cell_bw=cell_bw,
sequence_length=char_lengths,
inputs=inputs,
time_major=True,
dtype=tf.float32
)
left_right = tf.reshape(left_right, shape=[-1, self.cfg.char_embed_dim])
right_left = tf.reshape(right_left, shape=[-1, self.cfg.char_embed_dim])
states = tf.concat([left_right, right_left], axis=1)
out_shape = tf.shape(char_ids)[1:3]
out_shape = tf.concat([out_shape, tf.constant(
value=[self.cfg.char_embed_dim * 2], dtype=tf.int32)], axis=0)
return tf.reshape(states, shape=out_shape) |
def _repair(record: Dict[str, Any]) -> Dict[str, Any]:
"""Repair a corrupted IterationRecord with a specific known issue."""
output_records = record.get("output_records")
if record.get("_type", None) == "IterationRecord" and output_records is not None:
birdsite_record = output_records.get("birdsite")
# check for the bug
if isinstance(birdsite_record, dict) and birdsite_record.get("_type") == "IterationRecord":
# get to the bottom of the corrupted record
failed = False
while birdsite_record.get("_type") == "IterationRecord":
sub_record = birdsite_record.get("output_records")
if sub_record is None:
failed = True
break
birdsite_record = sub_record.get("birdsite")
if birdsite_record is None:
failed = True
break
if failed:
return record
# add type
birdsite_record["_type"] = TweetRecord.__name__
# lift extra keys, just in case
if "extra_keys" in birdsite_record:
record_extra_values = record.get("extra_keys", {})
for key, value in birdsite_record["extra_keys"].items():
if key not in record_extra_values:
record_extra_values[key] = value
record["extra_keys"] = record_extra_values
del birdsite_record["extra_keys"]
output_records["birdsite"] = birdsite_record
# pull that correct record up to the top level, fixing corruption
record["output_records"] = output_records
return record | Repair a corrupted IterationRecord with a specific known issue. | Below is the the instruction that describes the task:
### Input:
Repair a corrupted IterationRecord with a specific known issue.
### Response:
def _repair(record: Dict[str, Any]) -> Dict[str, Any]:
"""Repair a corrupted IterationRecord with a specific known issue."""
output_records = record.get("output_records")
if record.get("_type", None) == "IterationRecord" and output_records is not None:
birdsite_record = output_records.get("birdsite")
# check for the bug
if isinstance(birdsite_record, dict) and birdsite_record.get("_type") == "IterationRecord":
# get to the bottom of the corrupted record
failed = False
while birdsite_record.get("_type") == "IterationRecord":
sub_record = birdsite_record.get("output_records")
if sub_record is None:
failed = True
break
birdsite_record = sub_record.get("birdsite")
if birdsite_record is None:
failed = True
break
if failed:
return record
# add type
birdsite_record["_type"] = TweetRecord.__name__
# lift extra keys, just in case
if "extra_keys" in birdsite_record:
record_extra_values = record.get("extra_keys", {})
for key, value in birdsite_record["extra_keys"].items():
if key not in record_extra_values:
record_extra_values[key] = value
record["extra_keys"] = record_extra_values
del birdsite_record["extra_keys"]
output_records["birdsite"] = birdsite_record
# pull that correct record up to the top level, fixing corruption
record["output_records"] = output_records
return record |
def load(self, wishlist, calibration=None, resolution=None,
polarization=None, level=None, generate=True, unload=True,
**kwargs):
"""Read and generate requested datasets.
When the `wishlist` contains `DatasetID` objects they can either be
fully-specified `DatasetID` objects with every parameter specified
or they can not provide certain parameters and the "best" parameter
will be chosen. For example, if a dataset is available in multiple
resolutions and no resolution is specified in the wishlist's DatasetID
then the highest (smallest number) resolution will be chosen.
Loaded `DataArray` objects are created and stored in the Scene object.
Args:
wishlist (iterable): Names (str), wavelengths (float), or
DatasetID objects of the requested datasets
to load. See `available_dataset_ids()` for
what datasets are available.
calibration (list, str): Calibration levels to limit available
datasets. This is a shortcut to
having to list each DatasetID in
`wishlist`.
resolution (list | float): Resolution to limit available datasets.
This is a shortcut similar to
calibration.
polarization (list | str): Polarization ('V', 'H') to limit
available datasets. This is a shortcut
similar to calibration.
level (list | str): Pressure level to limit available datasets.
Pressure should be in hPa or mb. If an
altitude is used it should be specified in
inverse meters (1/m). The units of this
parameter ultimately depend on the reader.
generate (bool): Generate composites from the loaded datasets
(default: True)
unload (bool): Unload datasets that were required to generate
the requested datasets (composite dependencies)
but are no longer needed.
"""
dataset_keys = set(wishlist)
needed_datasets = (self.wishlist | dataset_keys) - \
set(self.datasets.keys())
unknown = self.dep_tree.find_dependencies(needed_datasets,
calibration=calibration,
polarization=polarization,
resolution=resolution,
level=level)
self.wishlist |= needed_datasets
if unknown:
unknown_str = ", ".join(map(str, unknown))
raise KeyError("Unknown datasets: {}".format(unknown_str))
self.read(**kwargs)
if generate:
keepables = self.generate_composites()
else:
# don't lose datasets we loaded to try to generate composites
keepables = set(self.datasets.keys()) | self.wishlist
if self.missing_datasets:
# copy the set of missing datasets because they won't be valid
# after they are removed in the next line
missing = self.missing_datasets.copy()
self._remove_failed_datasets(keepables)
missing_str = ", ".join(str(x) for x in missing)
LOG.warning("The following datasets were not created and may require "
"resampling to be generated: {}".format(missing_str))
if unload:
self.unload(keepables=keepables) | Read and generate requested datasets.
When the `wishlist` contains `DatasetID` objects they can either be
fully-specified `DatasetID` objects with every parameter specified
or they can not provide certain parameters and the "best" parameter
will be chosen. For example, if a dataset is available in multiple
resolutions and no resolution is specified in the wishlist's DatasetID
then the highest (smallest number) resolution will be chosen.
Loaded `DataArray` objects are created and stored in the Scene object.
Args:
wishlist (iterable): Names (str), wavelengths (float), or
DatasetID objects of the requested datasets
to load. See `available_dataset_ids()` for
what datasets are available.
calibration (list, str): Calibration levels to limit available
datasets. This is a shortcut to
having to list each DatasetID in
`wishlist`.
resolution (list | float): Resolution to limit available datasets.
This is a shortcut similar to
calibration.
polarization (list | str): Polarization ('V', 'H') to limit
available datasets. This is a shortcut
similar to calibration.
level (list | str): Pressure level to limit available datasets.
Pressure should be in hPa or mb. If an
altitude is used it should be specified in
inverse meters (1/m). The units of this
parameter ultimately depend on the reader.
generate (bool): Generate composites from the loaded datasets
(default: True)
unload (bool): Unload datasets that were required to generate
the requested datasets (composite dependencies)
but are no longer needed. | Below is the the instruction that describes the task:
### Input:
Read and generate requested datasets.
When the `wishlist` contains `DatasetID` objects they can either be
fully-specified `DatasetID` objects with every parameter specified
or they can not provide certain parameters and the "best" parameter
will be chosen. For example, if a dataset is available in multiple
resolutions and no resolution is specified in the wishlist's DatasetID
then the highest (smallest number) resolution will be chosen.
Loaded `DataArray` objects are created and stored in the Scene object.
Args:
wishlist (iterable): Names (str), wavelengths (float), or
DatasetID objects of the requested datasets
to load. See `available_dataset_ids()` for
what datasets are available.
calibration (list, str): Calibration levels to limit available
datasets. This is a shortcut to
having to list each DatasetID in
`wishlist`.
resolution (list | float): Resolution to limit available datasets.
This is a shortcut similar to
calibration.
polarization (list | str): Polarization ('V', 'H') to limit
available datasets. This is a shortcut
similar to calibration.
level (list | str): Pressure level to limit available datasets.
Pressure should be in hPa or mb. If an
altitude is used it should be specified in
inverse meters (1/m). The units of this
parameter ultimately depend on the reader.
generate (bool): Generate composites from the loaded datasets
(default: True)
unload (bool): Unload datasets that were required to generate
the requested datasets (composite dependencies)
but are no longer needed.
### Response:
def load(self, wishlist, calibration=None, resolution=None,
polarization=None, level=None, generate=True, unload=True,
**kwargs):
"""Read and generate requested datasets.
When the `wishlist` contains `DatasetID` objects they can either be
fully-specified `DatasetID` objects with every parameter specified
or they can not provide certain parameters and the "best" parameter
will be chosen. For example, if a dataset is available in multiple
resolutions and no resolution is specified in the wishlist's DatasetID
then the highest (smallest number) resolution will be chosen.
Loaded `DataArray` objects are created and stored in the Scene object.
Args:
wishlist (iterable): Names (str), wavelengths (float), or
DatasetID objects of the requested datasets
to load. See `available_dataset_ids()` for
what datasets are available.
calibration (list, str): Calibration levels to limit available
datasets. This is a shortcut to
having to list each DatasetID in
`wishlist`.
resolution (list | float): Resolution to limit available datasets.
This is a shortcut similar to
calibration.
polarization (list | str): Polarization ('V', 'H') to limit
available datasets. This is a shortcut
similar to calibration.
level (list | str): Pressure level to limit available datasets.
Pressure should be in hPa or mb. If an
altitude is used it should be specified in
inverse meters (1/m). The units of this
parameter ultimately depend on the reader.
generate (bool): Generate composites from the loaded datasets
(default: True)
unload (bool): Unload datasets that were required to generate
the requested datasets (composite dependencies)
but are no longer needed.
"""
dataset_keys = set(wishlist)
needed_datasets = (self.wishlist | dataset_keys) - \
set(self.datasets.keys())
unknown = self.dep_tree.find_dependencies(needed_datasets,
calibration=calibration,
polarization=polarization,
resolution=resolution,
level=level)
self.wishlist |= needed_datasets
if unknown:
unknown_str = ", ".join(map(str, unknown))
raise KeyError("Unknown datasets: {}".format(unknown_str))
self.read(**kwargs)
if generate:
keepables = self.generate_composites()
else:
# don't lose datasets we loaded to try to generate composites
keepables = set(self.datasets.keys()) | self.wishlist
if self.missing_datasets:
# copy the set of missing datasets because they won't be valid
# after they are removed in the next line
missing = self.missing_datasets.copy()
self._remove_failed_datasets(keepables)
missing_str = ", ".join(str(x) for x in missing)
LOG.warning("The following datasets were not created and may require "
"resampling to be generated: {}".format(missing_str))
if unload:
self.unload(keepables=keepables) |
def get_handlers(self, component_context, instance):
"""
Sets up service providers for the given component
:param component_context: The ComponentContext bean
:param instance: The component instance
:return: The list of handlers associated to the given component
"""
# Extract information from the context
requirements = component_context.get_handler(
ipopo_constants.HANDLER_REQUIRES
)
requires_filters = component_context.properties.get(
ipopo_constants.IPOPO_REQUIRES_FILTERS, None
)
# Prepare requirements
requirements = self._prepare_requirements(
requirements, requires_filters
)
# Set up the runtime dependency handlers
handlers = []
for field, requirement in requirements.items():
# Construct the handler
if requirement.aggregate:
handlers.append(AggregateDependency(field, requirement))
else:
handlers.append(SimpleDependency(field, requirement))
return handlers | Sets up service providers for the given component
:param component_context: The ComponentContext bean
:param instance: The component instance
:return: The list of handlers associated to the given component | Below is the the instruction that describes the task:
### Input:
Sets up service providers for the given component
:param component_context: The ComponentContext bean
:param instance: The component instance
:return: The list of handlers associated to the given component
### Response:
def get_handlers(self, component_context, instance):
"""
Sets up service providers for the given component
:param component_context: The ComponentContext bean
:param instance: The component instance
:return: The list of handlers associated to the given component
"""
# Extract information from the context
requirements = component_context.get_handler(
ipopo_constants.HANDLER_REQUIRES
)
requires_filters = component_context.properties.get(
ipopo_constants.IPOPO_REQUIRES_FILTERS, None
)
# Prepare requirements
requirements = self._prepare_requirements(
requirements, requires_filters
)
# Set up the runtime dependency handlers
handlers = []
for field, requirement in requirements.items():
# Construct the handler
if requirement.aggregate:
handlers.append(AggregateDependency(field, requirement))
else:
handlers.append(SimpleDependency(field, requirement))
return handlers |
def __extractProperties(self):
    """
    Query the graph for all properties and build OntoProperty wrappers.

    2015-06-04: removed sparql 1.1 queries
    2015-06-03: analogous to get classes
    # instantiate properties making sure duplicates are pruned
    # but the most specific rdftype is kept
    # eg OWL:ObjectProperty over RDF:property
    """
    self.properties = [] # @todo: keep adding?
    self.annotationProperties = []
    self.objectProperties = []
    self.datatypeProperties = []
    qres = self.queryHelper.getAllProperties()
    # First pass: each query row is (uri, rdftype); dedupe by URI, keeping
    # the most specific rdftype seen for an already-known property.
    for candidate in qres:
        test_existing_prop = self.getProperty(uri=candidate[0])
        if not test_existing_prop:
            # create it
            self.properties += [OntoProperty(candidate[0], candidate[1], self.namespaces)]
        else:
            # update it: only upgrade from the generic rdf:Property type
            if candidate[1] and (test_existing_prop.rdftype == rdflib.RDF.Property):
                test_existing_prop.rdftype = inferMainPropertyType(candidate[1])
    #add more data
    # Second pass: bucket by rdftype, then enrich each property with its
    # triples, mini graph, owning ontology, domain/range and hierarchy.
    for aProp in self.properties:
        if aProp.rdftype == rdflib.OWL.DatatypeProperty:
            self.datatypeProperties += [aProp]
        elif aProp.rdftype == rdflib.OWL.AnnotationProperty:
            self.annotationProperties += [aProp]
        elif aProp.rdftype == rdflib.OWL.ObjectProperty:
            self.objectProperties += [aProp]
        else:
            # generic rdf:Property — not bucketed, but still enriched below
            pass
        aProp.triples = self.queryHelper.entityTriples(aProp.uri)
        aProp._buildGraph() # force construction of mini graph
        # attach to an ontology [2015-06-15: no property type distinction yet]
        for uri in aProp.getValuesForProperty(rdflib.RDFS.isDefinedBy):
            onto = self.getOntology(str(uri))
            if onto:
                onto.properties += [aProp]
                aProp.ontology = onto
        self.__buildDomainRanges(aProp)
        # add direct Supers
        directSupers = self.queryHelper.getPropDirectSupers(aProp.uri)
        for x in directSupers:
            superprop = self.getProperty(uri=x[0])
            if superprop:
                aProp._parents.append(superprop)
                # add inverse relationships (= direct subs for superprop)
                if aProp not in superprop.children():
                    superprop._children.append(aProp)
2015-06-03: analogous to get classes
# instantiate properties making sure duplicates are pruned
# but the most specific rdftype is kept
# eg OWL:ObjectProperty over RDF:property | Below is the instruction that describes the task:
### Input:
2015-06-04: removed sparql 1.1 queries
2015-06-03: analogous to get classes
# instantiate properties making sure duplicates are pruned
# but the most specific rdftype is kept
# eg OWL:ObjectProperty over RDF:property
### Response:
def __extractProperties(self):
"""
2015-06-04: removed sparql 1.1 queries
2015-06-03: analogous to get classes
# instantiate properties making sure duplicates are pruned
# but the most specific rdftype is kept
# eg OWL:ObjectProperty over RDF:property
"""
self.properties = [] # @todo: keep adding?
self.annotationProperties = []
self.objectProperties = []
self.datatypeProperties = []
qres = self.queryHelper.getAllProperties()
for candidate in qres:
test_existing_prop = self.getProperty(uri=candidate[0])
if not test_existing_prop:
# create it
self.properties += [OntoProperty(candidate[0], candidate[1], self.namespaces)]
else:
# update it
if candidate[1] and (test_existing_prop.rdftype == rdflib.RDF.Property):
test_existing_prop.rdftype = inferMainPropertyType(candidate[1])
#add more data
for aProp in self.properties:
if aProp.rdftype == rdflib.OWL.DatatypeProperty:
self.datatypeProperties += [aProp]
elif aProp.rdftype == rdflib.OWL.AnnotationProperty:
self.annotationProperties += [aProp]
elif aProp.rdftype == rdflib.OWL.ObjectProperty:
self.objectProperties += [aProp]
else:
pass
aProp.triples = self.queryHelper.entityTriples(aProp.uri)
aProp._buildGraph() # force construction of mini graph
# attach to an ontology [2015-06-15: no property type distinction yet]
for uri in aProp.getValuesForProperty(rdflib.RDFS.isDefinedBy):
onto = self.getOntology(str(uri))
if onto:
onto.properties += [aProp]
aProp.ontology = onto
self.__buildDomainRanges(aProp)
# add direct Supers
directSupers = self.queryHelper.getPropDirectSupers(aProp.uri)
for x in directSupers:
superprop = self.getProperty(uri=x[0])
if superprop:
aProp._parents.append(superprop)
# add inverse relationships (= direct subs for superprop)
if aProp not in superprop.children():
superprop._children.append(aProp) |
def decode(self, key):
    """
    Given a bucket key, compute the parameters used to compute that key.

    Note: Deprecated. Use BucketKey.decode() instead.

    :param key: The bucket key. Note that the UUID must match the
                UUID of this limit; a ValueError will be raised if
                this is not the case.
    """
    decoded = BucketKey.decode(key)
    # Reject keys belonging to a different limit
    if decoded.uuid != self.uuid:
        raise ValueError("%s is not a bucket corresponding to this limit" %
                         decoded)
    return decoded.params
that key.
Note: Deprecated. Use BucketKey.decode() instead.
:param key: The bucket key. Note that the UUID must match the
UUID of this limit; a ValueError will be raised if
this is not the case. | Below is the the instruction that describes the task:
### Input:
Given a bucket key, compute the parameters used to compute
that key.
Note: Deprecated. Use BucketKey.decode() instead.
:param key: The bucket key. Note that the UUID must match the
UUID of this limit; a ValueError will be raised if
this is not the case.
### Response:
def decode(self, key):
"""
Given a bucket key, compute the parameters used to compute
that key.
Note: Deprecated. Use BucketKey.decode() instead.
:param key: The bucket key. Note that the UUID must match the
UUID of this limit; a ValueError will be raised if
this is not the case.
"""
# Parse the bucket key
key = BucketKey.decode(key)
# Make sure the uuids match
if key.uuid != self.uuid:
raise ValueError("%s is not a bucket corresponding to this limit" %
key)
return key.params |
def get_minimum(self):
    '''
    Return the (t1, t2, value, data) tuple holding the minimal value.
    '''
    return (
        self._min_value_t1,
        self._min_value_t2,
        self._min_value,
        self._min_value_data,
    )
(t1, t2, value) triple where the value is the minimal one. | Below is the the instruction that describes the task:
### Input:
Return
(t1, t2, value) triple where the value is the minimal one.
### Response:
def get_minimum(self):
'''
Return
(t1, t2, value) triple where the value is the minimal one.
'''
return (self._min_value_t1, self._min_value_t2,
self._min_value, self._min_value_data) |
def get_requests_request_name(self, request_name):
    """GetRequestsRequestName.
    [Preview API] Get a symbol request by request name.
    :param str request_name:
    :rtype: :class:`<Request> <azure.devops.v5_0.symbol.models.Request>`
    """
    # Serialize the optional query parameter
    params = {}
    if request_name is not None:
        params['requestName'] = self._serialize.query(
            'request_name', request_name, 'str')
    raw_response = self._send(http_method='GET',
                              location_id='ebc09fe3-1b20-4667-abc5-f2b60fe8de52',
                              version='5.0-preview.1',
                              query_parameters=params)
    return self._deserialize('Request', raw_response)
[Preview API] Get a symbol request by request name.
:param str request_name:
:rtype: :class:`<Request> <azure.devops.v5_0.symbol.models.Request>` | Below is the the instruction that describes the task:
### Input:
GetRequestsRequestName.
[Preview API] Get a symbol request by request name.
:param str request_name:
:rtype: :class:`<Request> <azure.devops.v5_0.symbol.models.Request>`
### Response:
def get_requests_request_name(self, request_name):
"""GetRequestsRequestName.
[Preview API] Get a symbol request by request name.
:param str request_name:
:rtype: :class:`<Request> <azure.devops.v5_0.symbol.models.Request>`
"""
query_parameters = {}
if request_name is not None:
query_parameters['requestName'] = self._serialize.query('request_name', request_name, 'str')
response = self._send(http_method='GET',
location_id='ebc09fe3-1b20-4667-abc5-f2b60fe8de52',
version='5.0-preview.1',
query_parameters=query_parameters)
return self._deserialize('Request', response) |
def do_GET(self):
    # check here request header to identify the type of req, if http or ws
    # if this is a ws req, instance a ws handler, add it to App's ws list, return
    if "Upgrade" in self.headers:
        if self.headers['Upgrade'] == 'websocket':
            #passing arguments to websocket handler, otherwise it will lost the last message,
            # and will be unable to handshake
            # NOTE(review): the handler presumably registers itself in its
            # constructor; the local binding `ws` is otherwise unused — confirm.
            ws = WebSocketsHandler(self.headers, self.request, self.client_address, self.server)
            return
    """Handler for the GET requests."""
    do_process = False
    # No auth configured: always serve the request.
    if self.server.auth is None:
        do_process = True
    else:
        # HTTP Basic auth: challenge when the header is missing, compare
        # against the configured credentials otherwise.
        if not ('Authorization' in self.headers) or self.headers['Authorization'] is None:
            self._log.info("Authenticating")
            self.do_AUTHHEAD()
            self.wfile.write(encode_text('no auth header received'))
        elif self.headers['Authorization'] == 'Basic ' + self.server.auth.decode():
            do_process = True
        else:
            # Wrong credentials: re-challenge and echo the rejected header.
            self.do_AUTHHEAD()
            self.wfile.write(encode_text(self.headers['Authorization']))
            self.wfile.write(encode_text('not authenticated'))
    if do_process:
        path = str(unquote(self.path))
        # noinspection PyBroadException
        try:
            self._instance()
            # build the page (call main()) in user code, if not built yet
            with self.update_lock:
                # build the root page once if necessary
                if not 'root' in self.page.children['body'].children.keys():
                    self._log.info('built UI (path=%s)' % path)
                    self.set_root_widget(self.main(*self.server.userdata))
                self._process_all(path)
        except:
            # Bare except is deliberate here: never let a rendering failure
            # kill the HTTP handler thread; log with traceback instead.
            self._log.error('error processing GET request', exc_info=True)
### Input:
Handler for the GET requests.
### Response:
def do_GET(self):
# check here request header to identify the type of req, if http or ws
# if this is a ws req, instance a ws handler, add it to App's ws list, return
if "Upgrade" in self.headers:
if self.headers['Upgrade'] == 'websocket':
#passing arguments to websocket handler, otherwise it will lost the last message,
# and will be unable to handshake
ws = WebSocketsHandler(self.headers, self.request, self.client_address, self.server)
return
"""Handler for the GET requests."""
do_process = False
if self.server.auth is None:
do_process = True
else:
if not ('Authorization' in self.headers) or self.headers['Authorization'] is None:
self._log.info("Authenticating")
self.do_AUTHHEAD()
self.wfile.write(encode_text('no auth header received'))
elif self.headers['Authorization'] == 'Basic ' + self.server.auth.decode():
do_process = True
else:
self.do_AUTHHEAD()
self.wfile.write(encode_text(self.headers['Authorization']))
self.wfile.write(encode_text('not authenticated'))
if do_process:
path = str(unquote(self.path))
# noinspection PyBroadException
try:
self._instance()
# build the page (call main()) in user code, if not built yet
with self.update_lock:
# build the root page once if necessary
if not 'root' in self.page.children['body'].children.keys():
self._log.info('built UI (path=%s)' % path)
self.set_root_widget(self.main(*self.server.userdata))
self._process_all(path)
except:
self._log.error('error processing GET request', exc_info=True) |
def get_SZ(self, psd, geometry):
    """
    Compute the scattering matrices for the given PSD and geometries.

    Returns:
        The new amplitude (S) and phase (Z) matrices.
    """
    if (self._S_table is None) or (self._Z_table is None):
        raise AttributeError(
            "Initialize or load the scattering table first.")

    # Recompute the cached per-geometry integrals only when the PSD changed
    # (or when the argument is not a PSD instance at all).
    cache_stale = (not isinstance(psd, PSD)) or self._previous_psd != psd
    if cache_stale:
        weights = psd(self._psd_D)
        self._S_dict = {}
        self._Z_dict = {}
        for geom in self.geometries:
            self._S_dict[geom] = trapz(
                self._S_table[geom] * weights, self._psd_D)
            self._Z_dict[geom] = trapz(
                self._Z_table[geom] * weights, self._psd_D)
        self._previous_psd = psd

    return (self._S_dict[geometry], self._Z_dict[geometry])
Returns:
The new amplitude (S) and phase (Z) matrices. | Below is the the instruction that describes the task:
### Input:
Compute the scattering matrices for the given PSD and geometries.
Returns:
The new amplitude (S) and phase (Z) matrices.
### Response:
def get_SZ(self, psd, geometry):
"""
Compute the scattering matrices for the given PSD and geometries.
Returns:
The new amplitude (S) and phase (Z) matrices.
"""
if (self._S_table is None) or (self._Z_table is None):
raise AttributeError(
"Initialize or load the scattering table first.")
if (not isinstance(psd, PSD)) or self._previous_psd != psd:
self._S_dict = {}
self._Z_dict = {}
psd_w = psd(self._psd_D)
for geom in self.geometries:
self._S_dict[geom] = \
trapz(self._S_table[geom] * psd_w, self._psd_D)
self._Z_dict[geom] = \
trapz(self._Z_table[geom] * psd_w, self._psd_D)
self._previous_psd = psd
return (self._S_dict[geometry], self._Z_dict[geometry]) |
def setReturnParameter(self, name, type, namespace=None, element_type=0):
    """Set the return parameter description for the call info."""
    # Build the descriptor and remember it as this call's return value.
    self.retval = ParameterInfo(name, type, namespace, element_type)
    return self.retval
### Input:
Set the return parameter description for the call info.
### Response:
def setReturnParameter(self, name, type, namespace=None, element_type=0):
"""Set the return parameter description for the call info."""
parameter = ParameterInfo(name, type, namespace, element_type)
self.retval = parameter
return parameter |
def get_string(self, key):
    """Get a string from all_options"""
    # Massive hack, lol: rewrite shorthand keys onto their nested location
    for prefix in ("lambda.", "apigateway."):
        if key.startswith(prefix):
            key = "{0}items.{1}".format(prefix, key[len(prefix):])
    # Make sure key is in all_options
    if key not in self.all_options:
        kwargs = {}
        if len(self.chain) > 1:
            kwargs['source'] = Meta(self.all_options, self.chain[-2]).source
        raise BadOptionFormat("Can't find key in options", key=key, chain=self.chain, **kwargs)
    return super(MergedOptionStringFormatter, self).get_string(key)
### Input:
Get a string from all_options
### Response:
def get_string(self, key):
"""Get a string from all_options"""
# Massive hack, lol
if key.startswith("lambda."):
key = "lambda.items.{0}".format(key[7:])
if key.startswith("apigateway."):
key = "apigateway.items.{0}".format(key[11:])
# Make sure key is in all_options
if key not in self.all_options:
kwargs = {}
if len(self.chain) > 1:
kwargs['source'] = Meta(self.all_options, self.chain[-2]).source
raise BadOptionFormat("Can't find key in options", key=key, chain=self.chain, **kwargs)
return super(MergedOptionStringFormatter, self).get_string(key) |
def get_works(self):
    """Returns a list of the names of all works in the corpus.

    :rtype: `list` of `str`
    """
    # Works are exactly the directories directly under the corpus path.
    names = []
    for entry in glob.glob(os.path.join(self._path, '*')):
        if os.path.isdir(entry):
            names.append(os.path.split(entry)[1])
    return names
:rtype: `list` of `str` | Below is the the instruction that describes the task:
### Input:
Returns a list of the names of all works in the corpus.
:rtype: `list` of `str`
### Response:
def get_works(self):
"""Returns a list of the names of all works in the corpus.
:rtype: `list` of `str`
"""
return [os.path.split(filepath)[1] for filepath in
glob.glob(os.path.join(self._path, '*'))
if os.path.isdir(filepath)] |
def points(points, T_points_world=None, color=np.array([0,1,0]), scale=0.01, n_cuts=20, subsample=None, random=False, name=None):
    """Scatter a point cloud in pose T_points_world.
    Parameters
    ----------
    points : autolab_core.BagOfPoints or (n,3) float
        The point set to visualize.
    T_points_world : autolab_core.RigidTransform
        Pose of points, specified as a transformation from point frame to world frame.
    color : (3,) or (n,3) float
        Color of whole cloud or per-point colors
    scale : float
        Radius of each point.
    n_cuts : int
        Number of longitude/latitude lines on sphere points.
    subsample : int
        Parameter of subsampling to display fewer points.
    random : bool
        Whether subsampling picks points at random.
    name : str
        A name for the object to be added.
    """
    # Normalize the input to a PointCloud: accept either a BagOfPoints of
    # dimension 3 or a raw (n,3) numpy array.
    if isinstance(points, BagOfPoints):
        if points.dim != 3:
            raise ValueError('BagOfPoints must have dimension 3xN!')
    else:
        if type(points) is not np.ndarray:
            raise ValueError('Points visualizer expects BagOfPoints or numpy array!')
        if len(points.shape) == 1:
            # A single point: promote to a (1,3) row.
            points = points[:,np.newaxis].T
        if len(points.shape) != 2 or points.shape[1] != 3:
            raise ValueError('Numpy array of points must have dimension (N,3)')
        frame = 'points'
        if T_points_world:
            frame = T_points_world.from_frame
        points = PointCloud(points.T, frame=frame)
    color = np.array(color)
    if subsample is not None:
        num_points = points.num_points
        points, inds = points.subsample(subsample, random=random)
        # If per-point colors were given, keep only the surviving rows.
        if color.shape[0] == num_points and color.shape[1] == 3:
            color = color[inds,:]
    # transform into world frame
    if points.frame != 'world':
        if T_points_world is None:
            T_points_world = RigidTransform(from_frame=points.frame, to_frame='world')
        points_world = T_points_world * points
    else:
        points_world = points
    point_data = points_world.data
    if len(point_data.shape) == 1:
        point_data = point_data[:,np.newaxis]
    point_data = point_data.T
    # Material color uses the single color, or the first row of per-point colors.
    mpcolor = color
    if len(color.shape) > 1:
        mpcolor = color[0]
    mp = MaterialProperties(
        color = np.array(mpcolor),
        k_a = 0.5,
        k_d = 0.3,
        k_s = 0.0,
        alpha = 10.0,
        smooth=True
    )
    # For each point, create a sphere of the specified color and size.
    sphere = trimesh.creation.uv_sphere(scale, [n_cuts, n_cuts])
    # One identity 4x4 pose per point, stacked row-wise; the slice below
    # writes each point into its pose's translation row.
    # NOTE(review): assumes InstancedSceneObject expects this row-stacked,
    # transposed pose layout — confirm against its API.
    raw_pose_data = np.tile(np.eye(4), (points.num_points, 1))
    raw_pose_data[3::4, :3] = point_data
    instcolor = None
    if color.ndim == 2 and color.shape[0] == points.num_points and color.shape[1] == 3:
        instcolor = color
    obj = InstancedSceneObject(sphere, raw_pose_data=raw_pose_data, colors=instcolor, material=mp)
    if name is None:
        name = str(uuid.uuid4())
    Visualizer3D._scene.add_object(name, obj)
Parameters
----------
points : autolab_core.BagOfPoints or (n,3) float
The point set to visualize.
T_points_world : autolab_core.RigidTransform
Pose of points, specified as a transformation from point frame to world frame.
color : (3,) or (n,3) float
Color of whole cloud or per-point colors
scale : float
Radius of each point.
n_cuts : int
Number of longitude/latitude lines on sphere points.
subsample : int
Parameter of subsampling to display fewer points.
name : str
A name for the object to be added. | Below is the the instruction that describes the task:
### Input:
Scatter a point cloud in pose T_points_world.
Parameters
----------
points : autolab_core.BagOfPoints or (n,3) float
The point set to visualize.
T_points_world : autolab_core.RigidTransform
Pose of points, specified as a transformation from point frame to world frame.
color : (3,) or (n,3) float
Color of whole cloud or per-point colors
scale : float
Radius of each point.
n_cuts : int
Number of longitude/latitude lines on sphere points.
subsample : int
Parameter of subsampling to display fewer points.
name : str
A name for the object to be added.
### Response:
def points(points, T_points_world=None, color=np.array([0,1,0]), scale=0.01, n_cuts=20, subsample=None, random=False, name=None):
"""Scatter a point cloud in pose T_points_world.
Parameters
----------
points : autolab_core.BagOfPoints or (n,3) float
The point set to visualize.
T_points_world : autolab_core.RigidTransform
Pose of points, specified as a transformation from point frame to world frame.
color : (3,) or (n,3) float
Color of whole cloud or per-point colors
scale : float
Radius of each point.
n_cuts : int
Number of longitude/latitude lines on sphere points.
subsample : int
Parameter of subsampling to display fewer points.
name : str
A name for the object to be added.
"""
if isinstance(points, BagOfPoints):
if points.dim != 3:
raise ValueError('BagOfPoints must have dimension 3xN!')
else:
if type(points) is not np.ndarray:
raise ValueError('Points visualizer expects BagOfPoints or numpy array!')
if len(points.shape) == 1:
points = points[:,np.newaxis].T
if len(points.shape) != 2 or points.shape[1] != 3:
raise ValueError('Numpy array of points must have dimension (N,3)')
frame = 'points'
if T_points_world:
frame = T_points_world.from_frame
points = PointCloud(points.T, frame=frame)
color = np.array(color)
if subsample is not None:
num_points = points.num_points
points, inds = points.subsample(subsample, random=random)
if color.shape[0] == num_points and color.shape[1] == 3:
color = color[inds,:]
# transform into world frame
if points.frame != 'world':
if T_points_world is None:
T_points_world = RigidTransform(from_frame=points.frame, to_frame='world')
points_world = T_points_world * points
else:
points_world = points
point_data = points_world.data
if len(point_data.shape) == 1:
point_data = point_data[:,np.newaxis]
point_data = point_data.T
mpcolor = color
if len(color.shape) > 1:
mpcolor = color[0]
mp = MaterialProperties(
color = np.array(mpcolor),
k_a = 0.5,
k_d = 0.3,
k_s = 0.0,
alpha = 10.0,
smooth=True
)
# For each point, create a sphere of the specified color and size.
sphere = trimesh.creation.uv_sphere(scale, [n_cuts, n_cuts])
raw_pose_data = np.tile(np.eye(4), (points.num_points, 1))
raw_pose_data[3::4, :3] = point_data
instcolor = None
if color.ndim == 2 and color.shape[0] == points.num_points and color.shape[1] == 3:
instcolor = color
obj = InstancedSceneObject(sphere, raw_pose_data=raw_pose_data, colors=instcolor, material=mp)
if name is None:
name = str(uuid.uuid4())
Visualizer3D._scene.add_object(name, obj) |
def end_day_to_datetime(end_day, config):
    """
    Convert a given end day to its proper datetime.

    This is non trivial because of variable ``day_start``. We want to make sure
    that even if an 'end day' is specified the actual point in time may reach into the following
    day.

    Args:
        end_day (datetime.date): Raw end date that is to be adjusted.
        config: Controller config containing information on when a workday starts.

    Returns:
        datetime.datetime: The end day as an adjusted datetime object.

    Example:
        Given a ``day_start`` of ``5:30`` and end date of ``2015-04-01`` we actually want to
        consider even points in time up to ``2015-04-02 5:29``. That is to represent that a
        *work day*
        does not match *calendar days*.

    Note:
        An alternative implementation for the similar problem in legacy hamster:
        ``hamster.storage.db.Storage.__get_todays_facts``.
    """
    day_start_time = config['day_start']
    day_end_time = get_day_end(config)
    # Midnight day start: the work day coincides with the calendar day.
    if day_start_time == datetime.time(0, 0, 0):
        end = datetime.datetime.combine(end_day, day_end_time)
    else:
        # Otherwise the work day spills over midnight, so the end point
        # lands on the following calendar date.
        end = datetime.datetime.combine(end_day, day_end_time) + datetime.timedelta(days=1)
    return end
This is non trivial because of variable ``day_start``. We want to make sure
that even if an 'end day' is specified the actual point in time may reach into the following
day.
Args:
end (datetime.date): Raw end date that is to be adjusted.
config: Controller config containing information on when a workday starts.
Returns:
datetime.datetime: The endday as a adjusted datetime object.
Example:
Given a ``day_start`` of ``5:30`` and end date of ``2015-04-01`` we actually want to
consider even points in time up to ``2015-04-02 5:29``. That is to represent that a
*work day*
does not match *calendar days*.
Note:
An alternative implementation for the similar problem in legacy hamster:
``hamster.storage.db.Storage.__get_todays_facts``. | Below is the the instruction that describes the task:
### Input:
Convert a given end day to its proper datetime.
This is non trivial because of variable ``day_start``. We want to make sure
that even if an 'end day' is specified the actual point in time may reach into the following
day.
Args:
end (datetime.date): Raw end date that is to be adjusted.
config: Controller config containing information on when a workday starts.
Returns:
datetime.datetime: The endday as a adjusted datetime object.
Example:
Given a ``day_start`` of ``5:30`` and end date of ``2015-04-01`` we actually want to
consider even points in time up to ``2015-04-02 5:29``. That is to represent that a
*work day*
does not match *calendar days*.
Note:
An alternative implementation for the similar problem in legacy hamster:
``hamster.storage.db.Storage.__get_todays_facts``.
### Response:
def end_day_to_datetime(end_day, config):
"""
Convert a given end day to its proper datetime.
This is non trivial because of variable ``day_start``. We want to make sure
that even if an 'end day' is specified the actual point in time may reach into the following
day.
Args:
end (datetime.date): Raw end date that is to be adjusted.
config: Controller config containing information on when a workday starts.
Returns:
datetime.datetime: The endday as a adjusted datetime object.
Example:
Given a ``day_start`` of ``5:30`` and end date of ``2015-04-01`` we actually want to
consider even points in time up to ``2015-04-02 5:29``. That is to represent that a
*work day*
does not match *calendar days*.
Note:
An alternative implementation for the similar problem in legacy hamster:
``hamster.storage.db.Storage.__get_todays_facts``.
"""
day_start_time = config['day_start']
day_end_time = get_day_end(config)
if day_start_time == datetime.time(0, 0, 0):
end = datetime.datetime.combine(end_day, day_end_time)
else:
end = datetime.datetime.combine(end_day, day_end_time) + datetime.timedelta(days=1)
return end |
def mark_in_progress(self, rr_id: str, rr_size: int) -> None:
    """
    Prepare sentinel directory for revocation registry construction.

    :param rr_id: revocation registry identifier
    :param rr_size: size of revocation registry to build
    """
    sentinel_dir = join(self._dir_tails_sentinel, rr_id)
    try:
        makedirs(sentinel_dir, exist_ok=False)
    except FileExistsError:
        # Directory already present: another build is under way.
        LOGGER.warning('Rev reg %s construction already in progress', rr_id)
        return
    # Record the requested registry size as a hidden marker file.
    with open(join(sentinel_dir, '.{}'.format(rr_size)), 'w'):
        pass
open(join(self._dir_tails_sentinel, rr_id, '.{}'.format(rr_size)), 'w').close() | Prepare sentinel directory for revocation registry construction.
:param rr_id: revocation registry identifier
:rr_size: size of revocation registry to build | Below is the the instruction that describes the task:
### Input:
Prepare sentinel directory for revocation registry construction.
:param rr_id: revocation registry identifier
:rr_size: size of revocation registry to build
### Response:
def mark_in_progress(self, rr_id: str, rr_size: int) -> None:
"""
Prepare sentinel directory for revocation registry construction.
:param rr_id: revocation registry identifier
:rr_size: size of revocation registry to build
"""
try:
makedirs(join(self._dir_tails_sentinel, rr_id), exist_ok=False)
except FileExistsError:
LOGGER.warning('Rev reg %s construction already in progress', rr_id)
else:
open(join(self._dir_tails_sentinel, rr_id, '.{}'.format(rr_size)), 'w').close() |
def commit_config(self):
    """
    Commits existing configuration

    Example:
    .. code-block:: python
        from pyJunosManager import JunosDevice
        dev = JunosDevice(host="1.2.3.4",username="root",password="Juniper")
        dev.open()
        dev.open_config()
        dev.commit_config()
        dev.close_config()
        dev.close()
    """
    try:
        self.dev.rpc.commit_configuration()
    except Exception as err:
        # Fix: `print err` is Python-2-only syntax and a SyntaxError on
        # Python 3; use the print() function (valid on both). The
        # best-effort swallow-and-report behavior is kept as-is.
        print(err)
Example:
.. code-block:: python
from pyJunosManager import JunosDevice
dev = JunosDevice(host="1.2.3.4",username="root",password="Juniper")
dev.open()
dev.open_config()
dev.commit_config()
dev.close_config()
dev.close() | Below is the the instruction that describes the task:
### Input:
Commits exiting configuration
Example:
.. code-block:: python
from pyJunosManager import JunosDevice
dev = JunosDevice(host="1.2.3.4",username="root",password="Juniper")
dev.open()
dev.open_config()
dev.commit_config()
dev.close_config()
dev.close()
### Response:
def commit_config(self):
"""
Commits exiting configuration
Example:
.. code-block:: python
from pyJunosManager import JunosDevice
dev = JunosDevice(host="1.2.3.4",username="root",password="Juniper")
dev.open()
dev.open_config()
dev.commit_config()
dev.close_config()
dev.close()
"""
try:
self.dev.rpc.commit_configuration()
except Exception as err:
print err |
def set_property(self, name, value):
    """set_property(property_name: str, value: object)
    Set property *property_name* to *value*.
    """
    # Only properties already declared on self.props may be assigned.
    if hasattr(self.props, name):
        setattr(self.props, name, value)
    else:
        raise TypeError("Unknown property: %r" % name)
setattr(self.props, name, value) | set_property(property_name: str, value: object)
Set property *property_name* to *value*. | Below is the the instruction that describes the task:
### Input:
set_property(property_name: str, value: object)
Set property *property_name* to *value*.
### Response:
def set_property(self, name, value):
"""set_property(property_name: str, value: object)
Set property *property_name* to *value*.
"""
if not hasattr(self.props, name):
raise TypeError("Unknown property: %r" % name)
setattr(self.props, name, value) |
def equation(self):
    """Mix-in class that returns matrix rows for mscreen condition.
    Mscreen condition applied at each control point separately (so not like in ditch).
    Returns matrix part (nunknowns,neq)
    Returns rhs part (nunknowns)
    """
    mat = np.zeros((self.nunknowns, self.model.neq))
    rhs = np.zeros(self.nunknowns)  # Needs to be initialized to zero
    # First nlayers-1 rows: equal-head (difference) conditions; last row:
    # total discharge must equal Qc.
    rhs[0:self.nlayers - 1] = 0.0
    rhs[self.nlayers - 1] = self.Qc
    ieq = 0
    for e in self.model.elementlist:
        if e.nunknowns > 0:
            # Head influence of element e's unknowns at this control point,
            # per layer; rows are head differences between adjacent layers.
            head = e.potinflayers(self.xc, self.yc, self.layers) / self.aq.Tcol[self.layers, :]
            mat[0:self.nlayers - 1, ieq:ieq + e.nunknowns] = head[:-1] - head[1:]
            if e == self:
                # This element's own unknowns also carry the resistance terms
                # and the unit coefficients of the total-discharge row.
                for i in range(self.nlayers - 1):
                    mat[i, ieq + i] -= self.resfac[i]
                    mat[i, ieq + i + 1] += self.resfac[i + 1]
                mat[self.nlayers - 1, ieq:ieq + self.nlayers] = 1.0
            ieq += e.nunknowns
        else:
            # Known elements contribute directly to the right-hand side.
            head = e.potentiallayers(self.xc, self.yc, self.layers) / self.aq.T[self.layers]
            rhs[0:self.nlayers - 1] -= head[:-1] - head[1:]
    return mat, rhs
Mscreen condition applied at each control point separately (so not like in ditch).
Returns matrix part (nunknowns,neq)
Returns rhs part (nunknowns) | Below is the the instruction that describes the task:
### Input:
Mix-in class that returns matrix rows for mscreen condition.
Mscreen condition applied at each control point separately (so not like in ditch).
Returns matrix part (nunknowns,neq)
Returns rhs part (nunknowns)
### Response:
def equation(self):
"""Mix-in class that returns matrix rows for mscreen condition.
Mscreen condition applied at each control point separately (so not like in ditch).
Returns matrix part (nunknowns,neq)
Returns rhs part (nunknowns)
"""
mat = np.zeros((self.nunknowns, self.model.neq))
rhs = np.zeros(self.nunknowns) # Needs to be initialized to zero
rhs[0:self.nlayers - 1] = 0.0
rhs[self.nlayers - 1] = self.Qc
ieq = 0
for e in self.model.elementlist:
if e.nunknowns > 0:
head = e.potinflayers(self.xc, self.yc, self.layers) / self.aq.Tcol[self.layers, :]
mat[0:self.nlayers - 1, ieq:ieq + e.nunknowns] = head[:-1] - head[1:]
if e == self:
for i in range(self.nlayers - 1):
mat[i, ieq + i] -= self.resfac[i]
mat[i, ieq + i + 1] += self.resfac[i + 1]
mat[self.nlayers - 1, ieq:ieq + self.nlayers] = 1.0
ieq += e.nunknowns
else:
head = e.potentiallayers(self.xc, self.yc, self.layers) / self.aq.T[self.layers]
rhs[0:self.nlayers - 1] -= head[:-1] - head[1:]
return mat, rhs |
def transformToNative(obj):
"""
Turn obj.value into a datetime.timedelta.
"""
if obj.isNative:
return obj
obj.isNative = True
obj.value=obj.value
if obj.value == '':
return obj
else:
deltalist=stringToDurations(obj.value)
# When can DURATION have multiple durations? For now:
if len(deltalist) == 1:
obj.value = deltalist[0]
return obj
else:
raise ParseError("DURATION must have a single duration string.") | Turn obj.value into a datetime.timedelta. | Below is the the instruction that describes the task:
### Input:
Turn obj.value into a datetime.timedelta.
### Response:
def transformToNative(obj):
"""
Turn obj.value into a datetime.timedelta.
"""
if obj.isNative:
return obj
obj.isNative = True
obj.value=obj.value
if obj.value == '':
return obj
else:
deltalist=stringToDurations(obj.value)
# When can DURATION have multiple durations? For now:
if len(deltalist) == 1:
obj.value = deltalist[0]
return obj
else:
raise ParseError("DURATION must have a single duration string.") |
def euclidean_distances(a, b, squared=False, to_numpy=True):
"""
Compute the pairwise euclidean distance between matrices a and b.
If the input matrix are in numpy format, they will be uploaded to the
GPU first which can incur significant time overhead.
Parameters
----------
a : np.ndarray (n, f)
first matrix
b : np.ndarray (m, f)
second matrix
to_numpy : boolean, optional (default True)
If true convert back the GPU array result to numpy format.
squared : boolean, optional (default False)
if True, return squared euclidean distance matrix
Returns
-------
c : (n x m) np.ndarray or cupy.ndarray
pairwise euclidean distance distance matrix
"""
a, b = to_gpu(a, b)
a2 = np.sum(np.square(a), 1)
b2 = np.sum(np.square(b), 1)
c = -2 * np.dot(a, b.T)
c += a2[:, None]
c += b2[None, :]
if not squared:
np.sqrt(c, out=c)
if to_numpy:
return to_np(c)
else:
return c | Compute the pairwise euclidean distance between matrices a and b.
If the input matrix are in numpy format, they will be uploaded to the
GPU first which can incur significant time overhead.
Parameters
----------
a : np.ndarray (n, f)
first matrix
b : np.ndarray (m, f)
second matrix
to_numpy : boolean, optional (default True)
If true convert back the GPU array result to numpy format.
squared : boolean, optional (default False)
if True, return squared euclidean distance matrix
Returns
-------
c : (n x m) np.ndarray or cupy.ndarray
pairwise euclidean distance distance matrix | Below is the the instruction that describes the task:
### Input:
Compute the pairwise euclidean distance between matrices a and b.
If the input matrix are in numpy format, they will be uploaded to the
GPU first which can incur significant time overhead.
Parameters
----------
a : np.ndarray (n, f)
first matrix
b : np.ndarray (m, f)
second matrix
to_numpy : boolean, optional (default True)
If true convert back the GPU array result to numpy format.
squared : boolean, optional (default False)
if True, return squared euclidean distance matrix
Returns
-------
c : (n x m) np.ndarray or cupy.ndarray
pairwise euclidean distance distance matrix
### Response:
def euclidean_distances(a, b, squared=False, to_numpy=True):
"""
Compute the pairwise euclidean distance between matrices a and b.
If the input matrix are in numpy format, they will be uploaded to the
GPU first which can incur significant time overhead.
Parameters
----------
a : np.ndarray (n, f)
first matrix
b : np.ndarray (m, f)
second matrix
to_numpy : boolean, optional (default True)
If true convert back the GPU array result to numpy format.
squared : boolean, optional (default False)
if True, return squared euclidean distance matrix
Returns
-------
c : (n x m) np.ndarray or cupy.ndarray
pairwise euclidean distance distance matrix
"""
a, b = to_gpu(a, b)
a2 = np.sum(np.square(a), 1)
b2 = np.sum(np.square(b), 1)
c = -2 * np.dot(a, b.T)
c += a2[:, None]
c += b2[None, :]
if not squared:
np.sqrt(c, out=c)
if to_numpy:
return to_np(c)
else:
return c |
def extract_formats(config_handle):
"""Get application formats.
See :class:`gogoutils.Formats` for available options.
Args:
config_handle (configparser.ConfigParser): Instance of configurations.
Returns:
dict: Formats in ``{$format_type: $format_pattern}``.
"""
configurations = dict(config_handle)
formats = dict(configurations.get('formats', {}))
return formats | Get application formats.
See :class:`gogoutils.Formats` for available options.
Args:
config_handle (configparser.ConfigParser): Instance of configurations.
Returns:
dict: Formats in ``{$format_type: $format_pattern}``. | Below is the the instruction that describes the task:
### Input:
Get application formats.
See :class:`gogoutils.Formats` for available options.
Args:
config_handle (configparser.ConfigParser): Instance of configurations.
Returns:
dict: Formats in ``{$format_type: $format_pattern}``.
### Response:
def extract_formats(config_handle):
"""Get application formats.
See :class:`gogoutils.Formats` for available options.
Args:
config_handle (configparser.ConfigParser): Instance of configurations.
Returns:
dict: Formats in ``{$format_type: $format_pattern}``.
"""
configurations = dict(config_handle)
formats = dict(configurations.get('formats', {}))
return formats |
def make_placeholders(seq, start=1):
"""
Generate placeholders for the given sequence.
"""
if len(seq) == 0:
raise ValueError('Sequence must have at least one element.')
param_style = Context.current().param_style
placeholders = None
if isinstance(seq, dict):
if param_style in ('named', 'pyformat'):
template = ':%s' if param_style == 'named' else '%%(%s)s'
placeholders = (template % key
for key in six.iterkeys(seq))
elif isinstance(seq, (list, tuple)):
if param_style == 'numeric':
placeholders = (':%d' % i
for i in xrange(start, start + len(seq)))
elif param_style in ('qmark', 'format', 'pyformat'):
placeholders = itertools.repeat(
'?' if param_style == 'qmark' else '%s',
len(seq))
if placeholders is None:
raise NotSupported(
"Param style '%s' does not support sequence type '%s'" % (
param_style, seq.__class__.__name__))
return ', '.join(placeholders) | Generate placeholders for the given sequence. | Below is the the instruction that describes the task:
### Input:
Generate placeholders for the given sequence.
### Response:
def make_placeholders(seq, start=1):
"""
Generate placeholders for the given sequence.
"""
if len(seq) == 0:
raise ValueError('Sequence must have at least one element.')
param_style = Context.current().param_style
placeholders = None
if isinstance(seq, dict):
if param_style in ('named', 'pyformat'):
template = ':%s' if param_style == 'named' else '%%(%s)s'
placeholders = (template % key
for key in six.iterkeys(seq))
elif isinstance(seq, (list, tuple)):
if param_style == 'numeric':
placeholders = (':%d' % i
for i in xrange(start, start + len(seq)))
elif param_style in ('qmark', 'format', 'pyformat'):
placeholders = itertools.repeat(
'?' if param_style == 'qmark' else '%s',
len(seq))
if placeholders is None:
raise NotSupported(
"Param style '%s' does not support sequence type '%s'" % (
param_style, seq.__class__.__name__))
return ', '.join(placeholders) |
def exists(name, path=None):
'''
Returns whether the named container exists.
path
path to the container parent directory (default: /var/lib/lxc)
.. versionadded:: 2015.8.0
CLI Example:
.. code-block:: bash
salt '*' lxc.exists name
'''
_exists = name in ls_(path=path)
# container may be just created but we did cached earlier the
# lxc-ls results
if not _exists:
_exists = name in ls_(cache=False, path=path)
return _exists | Returns whether the named container exists.
path
path to the container parent directory (default: /var/lib/lxc)
.. versionadded:: 2015.8.0
CLI Example:
.. code-block:: bash
salt '*' lxc.exists name | Below is the the instruction that describes the task:
### Input:
Returns whether the named container exists.
path
path to the container parent directory (default: /var/lib/lxc)
.. versionadded:: 2015.8.0
CLI Example:
.. code-block:: bash
salt '*' lxc.exists name
### Response:
def exists(name, path=None):
'''
Returns whether the named container exists.
path
path to the container parent directory (default: /var/lib/lxc)
.. versionadded:: 2015.8.0
CLI Example:
.. code-block:: bash
salt '*' lxc.exists name
'''
_exists = name in ls_(path=path)
# container may be just created but we did cached earlier the
# lxc-ls results
if not _exists:
_exists = name in ls_(cache=False, path=path)
return _exists |
def get_deposit_address(self, currency):
"""Get deposit address for a currency
https://docs.kucoin.com/#get-deposit-address
:param currency: Name of currency
:type currency: string
.. code:: python
address = client.get_deposit_address('NEO')
:returns: ApiResponse
.. code:: python
{
"address": "0x78d3ad1c0aa1bf068e19c94a2d7b16c9c0fcd8b1",
"memo": "5c247c8a03aa677cea2a251d"
}
:raises: KucoinResponseException, KucoinAPIException
"""
data = {
'currency': currency
}
return self._get('deposit-addresses', True, data=data) | Get deposit address for a currency
https://docs.kucoin.com/#get-deposit-address
:param currency: Name of currency
:type currency: string
.. code:: python
address = client.get_deposit_address('NEO')
:returns: ApiResponse
.. code:: python
{
"address": "0x78d3ad1c0aa1bf068e19c94a2d7b16c9c0fcd8b1",
"memo": "5c247c8a03aa677cea2a251d"
}
:raises: KucoinResponseException, KucoinAPIException | Below is the the instruction that describes the task:
### Input:
Get deposit address for a currency
https://docs.kucoin.com/#get-deposit-address
:param currency: Name of currency
:type currency: string
.. code:: python
address = client.get_deposit_address('NEO')
:returns: ApiResponse
.. code:: python
{
"address": "0x78d3ad1c0aa1bf068e19c94a2d7b16c9c0fcd8b1",
"memo": "5c247c8a03aa677cea2a251d"
}
:raises: KucoinResponseException, KucoinAPIException
### Response:
def get_deposit_address(self, currency):
"""Get deposit address for a currency
https://docs.kucoin.com/#get-deposit-address
:param currency: Name of currency
:type currency: string
.. code:: python
address = client.get_deposit_address('NEO')
:returns: ApiResponse
.. code:: python
{
"address": "0x78d3ad1c0aa1bf068e19c94a2d7b16c9c0fcd8b1",
"memo": "5c247c8a03aa677cea2a251d"
}
:raises: KucoinResponseException, KucoinAPIException
"""
data = {
'currency': currency
}
return self._get('deposit-addresses', True, data=data) |
def make_update(model, docs, optimizer, drop=0.0, objective="L2"):
"""Perform an update over a single batch of documents.
docs (iterable): A batch of `Doc` objects.
drop (float): The droput rate.
optimizer (callable): An optimizer.
RETURNS loss: A float for the loss.
"""
predictions, backprop = model.begin_update(docs, drop=drop)
loss, gradients = get_vectors_loss(model.ops, docs, predictions, objective)
backprop(gradients, sgd=optimizer)
# Don't want to return a cupy object here
# The gradients are modified in-place by the BERT MLM,
# so we get an accurate loss
return float(loss) | Perform an update over a single batch of documents.
docs (iterable): A batch of `Doc` objects.
drop (float): The droput rate.
optimizer (callable): An optimizer.
RETURNS loss: A float for the loss. | Below is the the instruction that describes the task:
### Input:
Perform an update over a single batch of documents.
docs (iterable): A batch of `Doc` objects.
drop (float): The droput rate.
optimizer (callable): An optimizer.
RETURNS loss: A float for the loss.
### Response:
def make_update(model, docs, optimizer, drop=0.0, objective="L2"):
"""Perform an update over a single batch of documents.
docs (iterable): A batch of `Doc` objects.
drop (float): The droput rate.
optimizer (callable): An optimizer.
RETURNS loss: A float for the loss.
"""
predictions, backprop = model.begin_update(docs, drop=drop)
loss, gradients = get_vectors_loss(model.ops, docs, predictions, objective)
backprop(gradients, sgd=optimizer)
# Don't want to return a cupy object here
# The gradients are modified in-place by the BERT MLM,
# so we get an accurate loss
return float(loss) |
def worker_failed():
"""Fail worker. Used by bots only for now."""
participant_id = request.args.get("participant_id")
if not participant_id:
return error_response(
error_type="bad request", error_text="participantId parameter is required"
)
try:
_worker_failed(participant_id)
except KeyError:
return error_response(
error_type="ParticipantId not found: {}".format(participant_id)
)
return success_response(
field="status", data="success", request_type="worker failed"
) | Fail worker. Used by bots only for now. | Below is the the instruction that describes the task:
### Input:
Fail worker. Used by bots only for now.
### Response:
def worker_failed():
"""Fail worker. Used by bots only for now."""
participant_id = request.args.get("participant_id")
if not participant_id:
return error_response(
error_type="bad request", error_text="participantId parameter is required"
)
try:
_worker_failed(participant_id)
except KeyError:
return error_response(
error_type="ParticipantId not found: {}".format(participant_id)
)
return success_response(
field="status", data="success", request_type="worker failed"
) |
def VerifyGitkitToken(self, jwt):
"""Verifies a Gitkit token string.
Args:
jwt: string, the token to be checked
Returns:
GitkitUser, if the token is valid. None otherwise.
"""
certs = self.rpc_helper.GetPublicCert()
crypt.MAX_TOKEN_LIFETIME_SECS = 30 * 86400 # 30 days
parsed = None
for aud in filter(lambda x: x is not None, [self.project_id, self.client_id]):
try:
parsed = crypt.verify_signed_jwt_with_certs(jwt, certs, aud)
except crypt.AppIdentityError as e:
if "Wrong recipient" not in e.message:
return None
if parsed:
return GitkitUser.FromToken(parsed)
return None | Verifies a Gitkit token string.
Args:
jwt: string, the token to be checked
Returns:
GitkitUser, if the token is valid. None otherwise. | Below is the the instruction that describes the task:
### Input:
Verifies a Gitkit token string.
Args:
jwt: string, the token to be checked
Returns:
GitkitUser, if the token is valid. None otherwise.
### Response:
def VerifyGitkitToken(self, jwt):
"""Verifies a Gitkit token string.
Args:
jwt: string, the token to be checked
Returns:
GitkitUser, if the token is valid. None otherwise.
"""
certs = self.rpc_helper.GetPublicCert()
crypt.MAX_TOKEN_LIFETIME_SECS = 30 * 86400 # 30 days
parsed = None
for aud in filter(lambda x: x is not None, [self.project_id, self.client_id]):
try:
parsed = crypt.verify_signed_jwt_with_certs(jwt, certs, aud)
except crypt.AppIdentityError as e:
if "Wrong recipient" not in e.message:
return None
if parsed:
return GitkitUser.FromToken(parsed)
return None |
def cnst_A0(self, X, Xf=None):
r"""Compute :math:`A_0 \mathbf{x}` component of ADMM problem
constraint.
"""
# This calculation involves non-negligible computational cost
# when Xf is None (i.e. the function is not being applied to
# self.X).
if Xf is None:
Xf = sl.rfftn(X, None, self.cri.axisN)
return sl.irfftn(sl.inner(self.Df, Xf, axis=self.cri.axisM),
self.cri.Nv, self.cri.axisN) | r"""Compute :math:`A_0 \mathbf{x}` component of ADMM problem
constraint. | Below is the the instruction that describes the task:
### Input:
r"""Compute :math:`A_0 \mathbf{x}` component of ADMM problem
constraint.
### Response:
def cnst_A0(self, X, Xf=None):
r"""Compute :math:`A_0 \mathbf{x}` component of ADMM problem
constraint.
"""
# This calculation involves non-negligible computational cost
# when Xf is None (i.e. the function is not being applied to
# self.X).
if Xf is None:
Xf = sl.rfftn(X, None, self.cri.axisN)
return sl.irfftn(sl.inner(self.Df, Xf, axis=self.cri.axisM),
self.cri.Nv, self.cri.axisN) |
def to_global(s):
"""
Format a global variable name.
"""
if s.startswith('GPSTime'):
s = 'Gps' + s[3:]
if '_' in s:
s = "".join([i.capitalize() for i in s.split("_")])
return s[0].lower() + s[1:] | Format a global variable name. | Below is the the instruction that describes the task:
### Input:
Format a global variable name.
### Response:
def to_global(s):
"""
Format a global variable name.
"""
if s.startswith('GPSTime'):
s = 'Gps' + s[3:]
if '_' in s:
s = "".join([i.capitalize() for i in s.split("_")])
return s[0].lower() + s[1:] |
def parse_datetime(value: Union[datetime, StrIntFloat]) -> datetime:
"""
Parse a datetime/int/float/string and return a datetime.datetime.
This function supports time zone offsets. When the input contains one,
the output uses a timezone with a fixed offset from UTC.
Raise ValueError if the input is well formatted but not a valid datetime.
Raise ValueError if the input isn't well formatted.
"""
if isinstance(value, datetime):
return value
number = get_numeric(value)
if number is not None:
return from_unix_seconds(number)
match = datetime_re.match(cast(str, value))
if not match:
raise errors.DateTimeError()
kw = match.groupdict()
if kw['microsecond']:
kw['microsecond'] = kw['microsecond'].ljust(6, '0')
tzinfo_str = kw.pop('tzinfo')
if tzinfo_str == 'Z':
tzinfo = timezone.utc
elif tzinfo_str is not None:
offset_mins = int(tzinfo_str[-2:]) if len(tzinfo_str) > 3 else 0
offset = 60 * int(tzinfo_str[1:3]) + offset_mins
if tzinfo_str[0] == '-':
offset = -offset
tzinfo = timezone(timedelta(minutes=offset))
else:
tzinfo = None
kw_: Dict[str, Union[int, timezone]] = {k: int(v) for k, v in kw.items() if v is not None}
kw_['tzinfo'] = tzinfo
with change_exception(errors.DateTimeError, ValueError):
return datetime(**kw_) | Parse a datetime/int/float/string and return a datetime.datetime.
This function supports time zone offsets. When the input contains one,
the output uses a timezone with a fixed offset from UTC.
Raise ValueError if the input is well formatted but not a valid datetime.
Raise ValueError if the input isn't well formatted. | Below is the the instruction that describes the task:
### Input:
Parse a datetime/int/float/string and return a datetime.datetime.
This function supports time zone offsets. When the input contains one,
the output uses a timezone with a fixed offset from UTC.
Raise ValueError if the input is well formatted but not a valid datetime.
Raise ValueError if the input isn't well formatted.
### Response:
def parse_datetime(value: Union[datetime, StrIntFloat]) -> datetime:
"""
Parse a datetime/int/float/string and return a datetime.datetime.
This function supports time zone offsets. When the input contains one,
the output uses a timezone with a fixed offset from UTC.
Raise ValueError if the input is well formatted but not a valid datetime.
Raise ValueError if the input isn't well formatted.
"""
if isinstance(value, datetime):
return value
number = get_numeric(value)
if number is not None:
return from_unix_seconds(number)
match = datetime_re.match(cast(str, value))
if not match:
raise errors.DateTimeError()
kw = match.groupdict()
if kw['microsecond']:
kw['microsecond'] = kw['microsecond'].ljust(6, '0')
tzinfo_str = kw.pop('tzinfo')
if tzinfo_str == 'Z':
tzinfo = timezone.utc
elif tzinfo_str is not None:
offset_mins = int(tzinfo_str[-2:]) if len(tzinfo_str) > 3 else 0
offset = 60 * int(tzinfo_str[1:3]) + offset_mins
if tzinfo_str[0] == '-':
offset = -offset
tzinfo = timezone(timedelta(minutes=offset))
else:
tzinfo = None
kw_: Dict[str, Union[int, timezone]] = {k: int(v) for k, v in kw.items() if v is not None}
kw_['tzinfo'] = tzinfo
with change_exception(errors.DateTimeError, ValueError):
return datetime(**kw_) |
def check_configuration_tokens(self):
'''Ensure the spec file is 'correct'.
'''
self.info('Check configuration tokens')
self.migrate_configuration_tokens()
get = self.config.getdefault
errors = []
adderror = errors.append
if not get('app', 'title', ''):
adderror('[app] "title" is missing')
if not get('app', 'source.dir', ''):
adderror('[app] "source.dir" is missing')
package_name = get('app', 'package.name', '')
if not package_name:
adderror('[app] "package.name" is missing')
elif package_name[0] in map(str, range(10)):
adderror('[app] "package.name" may not start with a number.')
version = get('app', 'version', '')
version_regex = get('app', 'version.regex', '')
if not version and not version_regex:
adderror('[app] One of "version" or "version.regex" must be set')
if version and version_regex:
adderror('[app] Conflict between "version" and "version.regex"'
', only one can be used.')
if version_regex and not get('app', 'version.filename', ''):
adderror('[app] "version.filename" is missing'
', required by "version.regex"')
orientation = get('app', 'orientation', 'landscape')
if orientation not in ('landscape', 'portrait', 'all', 'sensorLandscape'):
adderror('[app] "orientation" have an invalid value')
if errors:
self.error('{0} error(s) found in the buildozer.spec'.format(
len(errors)))
for error in errors:
print(error)
exit(1) | Ensure the spec file is 'correct'. | Below is the the instruction that describes the task:
### Input:
Ensure the spec file is 'correct'.
### Response:
def check_configuration_tokens(self):
'''Ensure the spec file is 'correct'.
'''
self.info('Check configuration tokens')
self.migrate_configuration_tokens()
get = self.config.getdefault
errors = []
adderror = errors.append
if not get('app', 'title', ''):
adderror('[app] "title" is missing')
if not get('app', 'source.dir', ''):
adderror('[app] "source.dir" is missing')
package_name = get('app', 'package.name', '')
if not package_name:
adderror('[app] "package.name" is missing')
elif package_name[0] in map(str, range(10)):
adderror('[app] "package.name" may not start with a number.')
version = get('app', 'version', '')
version_regex = get('app', 'version.regex', '')
if not version and not version_regex:
adderror('[app] One of "version" or "version.regex" must be set')
if version and version_regex:
adderror('[app] Conflict between "version" and "version.regex"'
', only one can be used.')
if version_regex and not get('app', 'version.filename', ''):
adderror('[app] "version.filename" is missing'
', required by "version.regex"')
orientation = get('app', 'orientation', 'landscape')
if orientation not in ('landscape', 'portrait', 'all', 'sensorLandscape'):
adderror('[app] "orientation" have an invalid value')
if errors:
self.error('{0} error(s) found in the buildozer.spec'.format(
len(errors)))
for error in errors:
print(error)
exit(1) |
def extendManager(mixinClass):
'''
Use as a class decorator to add extra methods to your model manager.
Example usage:
class Article(django.db.models.Model):
published = models.DateTimeField()
...
@extendManager
class objects(object):
def getPublished(self):
return self.filter(published__lte = django.utils.timezone.now()).order_by('-published')
...
publishedArticles = Article.objects.getPublished()
'''
class MixinManager(models.Manager, mixinClass):
class MixinQuerySet(models.query.QuerySet, mixinClass):
pass
def get_queryset(self):
return self.MixinQuerySet(self.model, using = self._db)
return MixinManager() | Use as a class decorator to add extra methods to your model manager.
Example usage:
class Article(django.db.models.Model):
published = models.DateTimeField()
...
@extendManager
class objects(object):
def getPublished(self):
return self.filter(published__lte = django.utils.timezone.now()).order_by('-published')
...
publishedArticles = Article.objects.getPublished() | Below is the the instruction that describes the task:
### Input:
Use as a class decorator to add extra methods to your model manager.
Example usage:
class Article(django.db.models.Model):
published = models.DateTimeField()
...
@extendManager
class objects(object):
def getPublished(self):
return self.filter(published__lte = django.utils.timezone.now()).order_by('-published')
...
publishedArticles = Article.objects.getPublished()
### Response:
def extendManager(mixinClass):
'''
Use as a class decorator to add extra methods to your model manager.
Example usage:
class Article(django.db.models.Model):
published = models.DateTimeField()
...
@extendManager
class objects(object):
def getPublished(self):
return self.filter(published__lte = django.utils.timezone.now()).order_by('-published')
...
publishedArticles = Article.objects.getPublished()
'''
class MixinManager(models.Manager, mixinClass):
class MixinQuerySet(models.query.QuerySet, mixinClass):
pass
def get_queryset(self):
return self.MixinQuerySet(self.model, using = self._db)
return MixinManager() |
def get_deployments(self, prefix=""):
""" This endpoint lists all deployments.
https://www.nomadproject.io/docs/http/deployments.html
optional_arguments:
- prefix, (default "") Specifies a string to filter deployments on based on an index prefix.
This is specified as a querystring parameter.
returns: list of dicts
raises:
- nomad.api.exceptions.BaseNomadException
- nomad.api.exceptions.URLNotFoundNomadException
"""
params = {"prefix": prefix}
return self.request(params=params, method="get").json() | This endpoint lists all deployments.
https://www.nomadproject.io/docs/http/deployments.html
optional_arguments:
- prefix, (default "") Specifies a string to filter deployments on based on an index prefix.
This is specified as a querystring parameter.
returns: list of dicts
raises:
- nomad.api.exceptions.BaseNomadException
- nomad.api.exceptions.URLNotFoundNomadException | Below is the the instruction that describes the task:
### Input:
This endpoint lists all deployments.
https://www.nomadproject.io/docs/http/deployments.html
optional_arguments:
- prefix, (default "") Specifies a string to filter deployments on based on an index prefix.
This is specified as a querystring parameter.
returns: list of dicts
raises:
- nomad.api.exceptions.BaseNomadException
- nomad.api.exceptions.URLNotFoundNomadException
### Response:
def get_deployments(self, prefix=""):
""" This endpoint lists all deployments.
https://www.nomadproject.io/docs/http/deployments.html
optional_arguments:
- prefix, (default "") Specifies a string to filter deployments on based on an index prefix.
This is specified as a querystring parameter.
returns: list of dicts
raises:
- nomad.api.exceptions.BaseNomadException
- nomad.api.exceptions.URLNotFoundNomadException
"""
params = {"prefix": prefix}
return self.request(params=params, method="get").json() |
def readbdfv1(filename, bdfext='.bdf', bhfext='.bhf'):
"""Read bdf file (Bessy Data Format v1)
Input
-----
filename: string
the name of the file
Output
------
the BDF structure in a dict
Notes
-----
This is an adaptation of the bdf_read.m macro of Sylvio Haas.
"""
return header.readbhfv1(filename, True, bdfext, bhfext) | Read bdf file (Bessy Data Format v1)
Input
-----
filename: string
the name of the file
Output
------
the BDF structure in a dict
Notes
-----
This is an adaptation of the bdf_read.m macro of Sylvio Haas. | Below is the the instruction that describes the task:
### Input:
Read bdf file (Bessy Data Format v1)
Input
-----
filename: string
the name of the file
Output
------
the BDF structure in a dict
Notes
-----
This is an adaptation of the bdf_read.m macro of Sylvio Haas.
### Response:
def readbdfv1(filename, bdfext='.bdf', bhfext='.bhf'):
"""Read bdf file (Bessy Data Format v1)
Input
-----
filename: string
the name of the file
Output
------
the BDF structure in a dict
Notes
-----
This is an adaptation of the bdf_read.m macro of Sylvio Haas.
"""
return header.readbhfv1(filename, True, bdfext, bhfext) |
def rewrite_paths(self, local_path, remote_path):
"""
Rewrite references to `local_path` with `remote_path` in job inputs.
"""
self.__rewrite_command_line(local_path, remote_path)
self.__rewrite_config_files(local_path, remote_path) | Rewrite references to `local_path` with `remote_path` in job inputs. | Below is the the instruction that describes the task:
### Input:
Rewrite references to `local_path` with `remote_path` in job inputs.
### Response:
def rewrite_paths(self, local_path, remote_path):
"""
Rewrite references to `local_path` with `remote_path` in job inputs.
"""
self.__rewrite_command_line(local_path, remote_path)
self.__rewrite_config_files(local_path, remote_path) |
def call_at_most_every(seconds, count=1):
"""Call the decorated function at most count times every seconds seconds.
The decorated function will sleep to ensure that at most count invocations
occur within any 'seconds' second window.
"""
def decorator(func):
try:
call_history = getattr(func, '_call_history')
except AttributeError:
call_history = collections.deque(maxlen=count)
setattr(func, '_call_history', call_history)
@functools.wraps(func)
def _wrapper(*args, **kwargs):
current_time = time.time()
window_count = sum(ts > current_time - seconds for ts in call_history)
if window_count >= count:
# We need to sleep until the relevant call is outside the window. This
# should only ever be the the first entry in call_history, but if we
# somehow ended up with extra calls in the window, this recovers.
time.sleep(call_history[window_count - count] - current_time + seconds)
# Append this call, deque will automatically trim old calls using maxlen.
call_history.append(time.time())
return func(*args, **kwargs)
return _wrapper
return decorator | Call the decorated function at most count times every seconds seconds.
The decorated function will sleep to ensure that at most count invocations
occur within any 'seconds' second window. | Below is the the instruction that describes the task:
### Input:
Call the decorated function at most count times every seconds seconds.
The decorated function will sleep to ensure that at most count invocations
occur within any 'seconds' second window.
### Response:
def call_at_most_every(seconds, count=1):
"""Call the decorated function at most count times every seconds seconds.
The decorated function will sleep to ensure that at most count invocations
occur within any 'seconds' second window.
"""
def decorator(func):
try:
call_history = getattr(func, '_call_history')
except AttributeError:
call_history = collections.deque(maxlen=count)
setattr(func, '_call_history', call_history)
@functools.wraps(func)
def _wrapper(*args, **kwargs):
current_time = time.time()
window_count = sum(ts > current_time - seconds for ts in call_history)
if window_count >= count:
# We need to sleep until the relevant call is outside the window. This
# should only ever be the the first entry in call_history, but if we
# somehow ended up with extra calls in the window, this recovers.
time.sleep(call_history[window_count - count] - current_time + seconds)
# Append this call, deque will automatically trim old calls using maxlen.
call_history.append(time.time())
return func(*args, **kwargs)
return _wrapper
return decorator |
def delete_agent(self, agent_id):
"""Delete an agent.
:param str agent_id: The id of the agent to delete. It must
be an str containing only characters in "a-zA-Z0-9_-" and
must be between 1 and 36 characters.
:return: agent deleted.
:rtype: dict.
"""
# Raises an error when agent_id is invalid
self._check_agent_id(agent_id)
req_url = "{}/agents/{}".format(self._base_url, agent_id)
resp = self._requests_session.delete(req_url)
decoded_resp = self._decode_response(resp)
return decoded_resp | Delete an agent.
:param str agent_id: The id of the agent to delete. It must
be an str containing only characters in "a-zA-Z0-9_-" and
must be between 1 and 36 characters.
:return: agent deleted.
:rtype: dict. | Below is the the instruction that describes the task:
### Input:
Delete an agent.
:param str agent_id: The id of the agent to delete. It must
be an str containing only characters in "a-zA-Z0-9_-" and
must be between 1 and 36 characters.
:return: agent deleted.
:rtype: dict.
### Response:
def delete_agent(self, agent_id):
"""Delete an agent.
:param str agent_id: The id of the agent to delete. It must
be an str containing only characters in "a-zA-Z0-9_-" and
must be between 1 and 36 characters.
:return: agent deleted.
:rtype: dict.
"""
# Raises an error when agent_id is invalid
self._check_agent_id(agent_id)
req_url = "{}/agents/{}".format(self._base_url, agent_id)
resp = self._requests_session.delete(req_url)
decoded_resp = self._decode_response(resp)
return decoded_resp |
def file_copy(src=None, dest=None):
'''
Copies the file from the local device to the junos device
src
The source path where the file is kept.
dest
The destination path on the where the file will be copied
CLI Example:
.. code-block:: bash
salt 'device_name' junos.file_copy /home/m2/info.txt info_copy.txt
'''
conn = __proxy__['junos.conn']()
ret = {}
ret['out'] = True
if src is None:
ret['message'] = \
'Please provide the absolute path of the file to be copied.'
ret['out'] = False
return ret
if not os.path.isfile(src):
ret['message'] = 'Invalid source file path'
ret['out'] = False
return ret
if dest is None:
ret['message'] = \
'Please provide the absolute path of the destination where the file is to be copied.'
ret['out'] = False
return ret
try:
with SCP(conn, progress=True) as scp:
scp.put(src, dest)
ret['message'] = 'Successfully copied file from {0} to {1}'.format(
src, dest)
except Exception as exception:
ret['message'] = 'Could not copy file : "{0}"'.format(exception)
ret['out'] = False
return ret | Copies the file from the local device to the junos device
src
The source path where the file is kept.
dest
The destination path on the where the file will be copied
CLI Example:
.. code-block:: bash
salt 'device_name' junos.file_copy /home/m2/info.txt info_copy.txt | Below is the instruction that describes the task:
### Input:
Copies the file from the local device to the junos device
src
The source path where the file is kept.
dest
The destination path on the where the file will be copied
CLI Example:
.. code-block:: bash
salt 'device_name' junos.file_copy /home/m2/info.txt info_copy.txt
### Response:
def file_copy(src=None, dest=None):
    '''
    Copies the file from the local device to the junos device
    src
        The source path where the file is kept.
    dest
        The destination path on the device where the file will be copied
    CLI Example:
    .. code-block:: bash
        salt 'device_name' junos.file_copy /home/m2/info.txt info_copy.txt
    '''
    # PyEZ device handle supplied by the junos proxy minion.
    conn = __proxy__['junos.conn']()
    ret = {}
    ret['out'] = True
    # Argument validation: every failure returns early with out=False.
    if src is None:
        ret['message'] = \
            'Please provide the absolute path of the file to be copied.'
        ret['out'] = False
        return ret
    if not os.path.isfile(src):
        ret['message'] = 'Invalid source file path'
        ret['out'] = False
        return ret
    if dest is None:
        ret['message'] = \
            'Please provide the absolute path of the destination where the file is to be copied.'
        ret['out'] = False
        return ret
    try:
        # SCP the file over the already-established SSH session.
        with SCP(conn, progress=True) as scp:
            scp.put(src, dest)
        ret['message'] = 'Successfully copied file from {0} to {1}'.format(
            src, dest)
    except Exception as exception:
        # Best-effort semantics: report the transfer failure instead of raising.
        ret['message'] = 'Could not copy file : "{0}"'.format(exception)
        ret['out'] = False
    return ret
def init():
""" Initializes the preprocessor
"""
global OUTPUT
global INCLUDED
global CURRENT_DIR
global ENABLED
global INCLUDEPATH
global IFDEFS
global ID_TABLE
global CURRENT_FILE
global_.FILENAME = '(stdin)'
OUTPUT = ''
INCLUDED = {}
CURRENT_DIR = ''
pwd = get_include_path()
INCLUDEPATH = [os.path.join(pwd, 'library'), os.path.join(pwd, 'library-asm')]
ENABLED = True
IFDEFS = []
global_.has_errors = 0
global_.error_msg_cache.clear()
parser.defaulted_states = {}
ID_TABLE = DefinesTable()
del CURRENT_FILE[:] | Initializes the preprocessor | Below is the instruction that describes the task:
### Input:
Initializes the preprocessor
### Response:
def init():
    """ Initializes the preprocessor

    Resets all module-level preprocessor state (output buffer, include
    bookkeeping, conditional-compilation stack and macro table) so a new
    translation unit can be processed from scratch.
    """
    global OUTPUT
    global INCLUDED
    global CURRENT_DIR
    global ENABLED
    global INCLUDEPATH
    global IFDEFS
    global ID_TABLE
    global CURRENT_FILE
    global_.FILENAME = '(stdin)'
    OUTPUT = ''
    INCLUDED = {}
    CURRENT_DIR = ''
    # Default include search path: the bundled library directories.
    pwd = get_include_path()
    INCLUDEPATH = [os.path.join(pwd, 'library'), os.path.join(pwd, 'library-asm')]
    ENABLED = True
    IFDEFS = []
    global_.has_errors = 0
    global_.error_msg_cache.clear()
    # Clear the parser's memoized default states (stale between runs).
    parser.defaulted_states = {}
    ID_TABLE = DefinesTable()
    # Clear in place: other modules may hold a reference to this list.
    del CURRENT_FILE[:]
def read(self):
"""
Read a given file path and return its content.
:return: The content of the given file path.
:rtype: str
"""
try:
with open(self.file, "r", encoding="utf-8") as file:
# We open and read a file.
# We get the file content.
funilrys = file.read()
except UnicodeDecodeError: # pragma: no cover
with open(self.file, "r") as file:
# We open and read a file.
# We get the file content.
funilrys = file.read()
# We return the file content.
return funilrys | Read a given file path and return its content.
:return: The content of the given file path.
:rtype: str | Below is the instruction that describes the task:
### Input:
Read a given file path and return its content.
:return: The content of the given file path.
:rtype: str
### Response:
def read(self):
    """
    Read a given file path and return its content.
    :return: The content of the given file path.
    :rtype: str
    """
    try:
        # First attempt assumes the file is UTF-8 encoded.
        with open(self.file, "r", encoding="utf-8") as stream:
            content = stream.read()
    except UnicodeDecodeError:  # pragma: no cover
        # Fall back to the platform default encoding when UTF-8 fails.
        with open(self.file, "r") as stream:
            content = stream.read()
    return content
def _load_file_from_gcs(gcs_file_path, credentials=None):
"""Load context from a text file in gcs.
Args:
gcs_file_path: The target file path; should have the 'gs://' prefix.
credentials: Optional credential to be used to load the file from gcs.
Returns:
The content of the text file as a string.
"""
gcs_service = _get_storage_service(credentials)
bucket_name, object_name = gcs_file_path[len('gs://'):].split('/', 1)
request = gcs_service.objects().get_media(
bucket=bucket_name, object=object_name)
file_handle = io.BytesIO()
downloader = MediaIoBaseDownload(file_handle, request, chunksize=1024 * 1024)
done = False
while not done:
_, done = _downloader_next_chunk(downloader)
filevalue = file_handle.getvalue()
if not isinstance(filevalue, six.string_types):
filevalue = filevalue.decode()
return six.StringIO(filevalue) | Load context from a text file in gcs.
Args:
gcs_file_path: The target file path; should have the 'gs://' prefix.
credentials: Optional credential to be used to load the file from gcs.
Returns:
The content of the text file as a string. | Below is the instruction that describes the task:
### Input:
Load context from a text file in gcs.
Args:
gcs_file_path: The target file path; should have the 'gs://' prefix.
credentials: Optional credential to be used to load the file from gcs.
Returns:
The content of the text file as a string.
### Response:
def _load_file_from_gcs(gcs_file_path, credentials=None):
  """Load context from a text file in gcs.

  Args:
    gcs_file_path: The target file path; should have the 'gs://' prefix.
    credentials: Optional credential to be used to load the file from gcs.

  Returns:
    A six.StringIO buffer holding the decoded content of the text file.
  """
  gcs_service = _get_storage_service(credentials)
  bucket_name, object_name = gcs_file_path[len('gs://'):].split('/', 1)
  request = gcs_service.objects().get_media(
      bucket=bucket_name, object=object_name)
  file_handle = io.BytesIO()
  # Stream the object down in 1 MiB chunks.
  downloader = MediaIoBaseDownload(file_handle, request, chunksize=1024 * 1024)
  done = False
  while not done:
    _, done = _downloader_next_chunk(downloader)
  filevalue = file_handle.getvalue()
  # Python 2/3 compatibility: decode bytes to text before wrapping.
  if not isinstance(filevalue, six.string_types):
    filevalue = filevalue.decode()
  return six.StringIO(filevalue)
def _binary(space,const,name1,name2):
"""
reduce the domain of variable name1 to be two-consistent (arc-consistent)
with this constraint, i.e. remove those values for the variable name1,
for which no values for name2 exist such that this pair is consistent
with the constraint
returns True if the domain of name1 was modified
"""
if not (name1 in const.vnames and name2 in const.vnames):
return False
remove = set([])
for v1 in space.domains[name1].iter_members():
for v2 in space.domains[name2].iter_members():
if const.consistent({name1 : v1, name2 : v2}):
break
else:
remove.add(v1)
if len(remove) > 0:
if space.variables[name1].discrete:
remove = DiscreteSet(remove)
else:
remove = IntervalSet.from_values(remove)
space.domains[name1] = space.domains[name1].difference(remove)
return True
else:
return False | reduce the domain of variable name1 to be two-consistent (arc-consistent)
with this constraint, i.e. remove those values for the variable name1,
for which no values for name2 exist such that this pair is consistent
with the constraint
returns True if the domain of name1 was modified | Below is the instruction that describes the task:
### Input:
reduce the domain of variable name1 to be two-consistent (arc-consistent)
with this constraint, i.e. remove those values for the variable name1,
for which no values for name2 exist such that this pair is consistent
with the constraint
returns True if the domain of name1 was modified
### Response:
def _binary(space,const,name1,name2):
    """
    reduce the domain of variable name1 to be two-consistent (arc-consistent)
    with this constraint, i.e. remove those values for the variable name1,
    for which no values for name2 exist such that this pair is consistent
    with the constraint
    returns True if the domain of name1 was modified
    """
    # The constraint must actually mention both variables.
    if not (name1 in const.vnames and name2 in const.vnames):
        return False
    remove = set([])
    for v1 in space.domains[name1].iter_members():
        for v2 in space.domains[name2].iter_members():
            if const.consistent({name1 : v1, name2 : v2}):
                break
        else:
            # for-else: no supporting v2 was found, so v1 is unsupported
            # and must be pruned from name1's domain.
            remove.add(v1)
    if len(remove) > 0:
        # Wrap the raw value set in the domain representation matching the
        # variable kind before subtracting it.
        if space.variables[name1].discrete:
            remove = DiscreteSet(remove)
        else:
            remove = IntervalSet.from_values(remove)
        space.domains[name1] = space.domains[name1].difference(remove)
        return True
    else:
        return False
def samples(self, gp, Y_metadata=None):
"""
Returns a set of samples of observations conditioned on a given value of latent variable f.
:param gp: latent variable
"""
orig_shape = gp.shape
gp = gp.flatten()
weibull_samples = np.array([sp.stats.weibull_min.rvs(self.r, loc=0, scale=self.gp_link.transf(f)) for f in gp])
return weibull_samples.reshape(orig_shape) | Returns a set of samples of observations conditioned on a given value of latent variable f.
:param gp: latent variable | Below is the instruction that describes the task:
### Input:
Returns a set of samples of observations conditioned on a given value of latent variable f.
:param gp: latent variable
### Response:
def samples(self, gp, Y_metadata=None):
    """
    Returns a set of samples of observations conditioned on a given value of latent variable f.
    :param gp: latent variable
    """
    shape = gp.shape
    flat = gp.flatten()
    # One Weibull draw per latent value; the scale comes from the link transform.
    draws = [sp.stats.weibull_min.rvs(self.r, loc=0, scale=self.gp_link.transf(f))
             for f in flat]
    return np.array(draws).reshape(shape)
def init_publisher(app):
"""Calling this with your flask app as argument is required for the
publisher decorator to work.
"""
@app.context_processor
def inject_links():
return {
'websub_self_url': stack.top.websub_self_url,
'websub_hub_url': stack.top.websub_hub_url,
'websub_self_link': stack.top.websub_self_link,
'websub_hub_link': stack.top.websub_hub_link,
} | Calling this with your flask app as argument is required for the
publisher decorator to work. | Below is the instruction that describes the task:
### Input:
Calling this with your flask app as argument is required for the
publisher decorator to work.
### Response:
def init_publisher(app):
    """Calling this with your flask app as argument is required for the
    publisher decorator to work.

    Registers a template context processor exposing the WebSub self/hub
    URLs and <link> tags for the current request.
    """
    @app.context_processor
    def inject_links():
        # Evaluated per request: the values come from the request-bound
        # stack top, set up by the publisher decorator.
        return {
            'websub_self_url': stack.top.websub_self_url,
            'websub_hub_url': stack.top.websub_hub_url,
            'websub_self_link': stack.top.websub_self_link,
            'websub_hub_link': stack.top.websub_hub_link,
        }
def validate(self, data):
"""
Validated data using defined regex.
:param data: data to be validated
:return: return validated data.
"""
e = self._error
try:
if self._pattern.search(data):
return data
else:
raise SchemaError("%r does not match %r" % (self, data), e)
except TypeError:
raise SchemaError("%r is not string nor buffer" % data, e) | Validated data using defined regex.
:param data: data to be validated
:return: return validated data. | Below is the instruction that describes the task:
### Input:
Validated data using defined regex.
:param data: data to be validated
:return: return validated data.
### Response:
def validate(self, data):
    """
    Validated data using defined regex.
    :param data: data to be validated
    :return: return validated data.
    """
    error = self._error
    try:
        # A non-string argument makes re raise TypeError below.
        if self._pattern.search(data) is not None:
            return data
        raise SchemaError("%r does not match %r" % (self, data), error)
    except TypeError:
        raise SchemaError("%r is not string nor buffer" % data, error)
def sina_download_by_vkey(vkey, title=None, output_dir='.', merge=True, info_only=False):
"""Downloads a Sina video by its unique vkey.
http://video.sina.com/
"""
url = 'http://video.sina.com/v/flvideo/%s_0.flv' % vkey
type, ext, size = url_info(url)
print_info(site_info, title, 'flv', size)
if not info_only:
download_urls([url], title, 'flv', size, output_dir = output_dir, merge = merge) | Downloads a Sina video by its unique vkey.
http://video.sina.com/ | Below is the instruction that describes the task:
### Input:
Downloads a Sina video by its unique vkey.
http://video.sina.com/
### Response:
def sina_download_by_vkey(vkey, title=None, output_dir='.', merge=True, info_only=False):
    """Downloads a Sina video by its unique vkey.
    http://video.sina.com/

    :param vkey: unique video key used to build the flv URL.
    :param title: optional display/output title.
    :param info_only: when True, only print the info line; skip the download.
    """
    url = 'http://video.sina.com/v/flvideo/%s_0.flv' % vkey
    # url_info returns (mime_type, extension, size); only size is needed here —
    # the container for this endpoint is always FLV. Avoid shadowing the
    # builtin `type` with the unused first element.
    _, _, size = url_info(url)
    print_info(site_info, title, 'flv', size)
    if not info_only:
        download_urls([url], title, 'flv', size, output_dir=output_dir, merge=merge)
def normalize_uri_result(uri):
"""
Normalize a URI (And return a URIResult)
"""
ref = uri_reference(uri).normalize()
return ref._replace(
authority=normalize_uri_authority(ref),
query=normalize_uri_query(ref),
path=normalize_uri_path(ref),
) | Normalize a URI (And return a URIResult) | Below is the instruction that describes the task:
### Input:
Normalize a URI (And return a URIResult)
### Response:
def normalize_uri_result(uri):
    """
    Normalize a URI (And return a URIResult)
    """
    # Syntactic normalization first, then component-wise cleanup.
    normalized = uri_reference(uri).normalize()
    authority = normalize_uri_authority(normalized)
    query = normalize_uri_query(normalized)
    path = normalize_uri_path(normalized)
    return normalized._replace(authority=authority, query=query, path=path)
def _recv_callback(self, msg):
"""
Method is called when there is a message coming from a Mongrel2 server.
This message should be a valid Request String.
"""
m2req = MongrelRequest.parse(msg[0])
MongrelConnection(m2req, self._sending_stream, self.request_callback,
no_keep_alive=self.no_keep_alive, xheaders=self.xheaders) | Method is called when there is a message coming from a Mongrel2 server.
This message should be a valid Request String. | Below is the instruction that describes the task:
### Input:
Method is called when there is a message coming from a Mongrel2 server.
This message should be a valid Request String.
### Response:
def _recv_callback(self, msg):
    """
    Method is called when there is a message coming from a Mongrel2 server.
    This message should be a valid Request String.

    :param msg: multipart ZeroMQ message; only the first frame is parsed.
    """
    m2req = MongrelRequest.parse(msg[0])
    # The connection object wires itself into the IO loop on construction,
    # so no reference needs to be kept here.
    MongrelConnection(m2req, self._sending_stream, self.request_callback,
        no_keep_alive=self.no_keep_alive, xheaders=self.xheaders)
def iter_chain(cur):
"""Iterate over all of the chains in the database.
Args:
cur (:class:`sqlite3.Cursor`):
An sqlite3 cursor. This function is meant to be run within a :obj:`with` statement.
Yields:
list: The chain.
"""
select = "SELECT nodes FROM chain"
for nodes, in cur.execute(select):
yield json.loads(nodes) | Iterate over all of the chains in the database.
Args:
cur (:class:`sqlite3.Cursor`):
An sqlite3 cursor. This function is meant to be run within a :obj:`with` statement.
Yields:
list: The chain. | Below is the instruction that describes the task:
### Input:
Iterate over all of the chains in the database.
Args:
cur (:class:`sqlite3.Cursor`):
An sqlite3 cursor. This function is meant to be run within a :obj:`with` statement.
Yields:
list: The chain.
### Response:
def iter_chain(cur):
    """Iterate over all of the chains in the database.

    Args:
        cur (:class:`sqlite3.Cursor`):
            An sqlite3 cursor. This function is meant to be run within a :obj:`with` statement.

    Yields:
        list: The chain.
    """
    query = "SELECT nodes FROM chain"
    # Each row holds a JSON-encoded list of nodes.
    for row in cur.execute(query):
        yield json.loads(row[0])
def distance_to_nearest_place(feature, parent):
"""If the impact layer has a distance field, it will return the distance to
the nearest place in metres.
e.g. distance_to_nearest_place() -> 1234
"""
_ = feature, parent # NOQA
layer = exposure_summary_layer()
if not layer:
return None
index = layer.fields().lookupField(
distance_field['field_name'])
if index < 0:
return None
feature = next(layer.getFeatures())
return feature[index] | If the impact layer has a distance field, it will return the distance to
the nearest place in metres.
e.g. distance_to_nearest_place() -> 1234 | Below is the instruction that describes the task:
### Input:
If the impact layer has a distance field, it will return the distance to
the nearest place in metres.
e.g. distance_to_nearest_place() -> 1234
### Response:
def distance_to_nearest_place(feature, parent):
    """If the impact layer has a distance field, it will return the distance to
    the nearest place in metres.
    e.g. distance_to_nearest_place() -> 1234
    """
    _ = feature, parent  # NOQA
    layer = exposure_summary_layer()
    if not layer:
        # No exposure summary layer registered yet.
        return None
    index = layer.fields().lookupField(
        distance_field['field_name'])
    if index < 0:
        # The layer carries no distance field.
        return None
    # NOTE(review): this rebinds the `feature` parameter to the layer's first
    # feature — presumably intentional (the expression engine's arguments are
    # ignored above), but worth confirming.
    feature = next(layer.getFeatures())
    return feature[index]
def _import_matplotlib():
"""Import matplotlib safely."""
# make sure that the Agg backend is set before importing any
# matplotlib
import matplotlib
matplotlib.use('agg')
matplotlib_backend = matplotlib.get_backend().lower()
if matplotlib_backend != 'agg':
raise ValueError(
"Sphinx-Gallery relies on the matplotlib 'agg' backend to "
"render figures and write them to files. You are "
"currently using the {} backend. Sphinx-Gallery will "
"terminate the build now, because changing backends is "
"not well supported by matplotlib. We advise you to move "
"sphinx_gallery imports before any matplotlib-dependent "
"import. Moving sphinx_gallery imports at the top of "
"your conf.py file should fix this issue"
.format(matplotlib_backend))
import matplotlib.pyplot as plt
return matplotlib, plt | Import matplotlib safely. | Below is the instruction that describes the task:
### Input:
Import matplotlib safely.
### Response:
def _import_matplotlib():
    """Import matplotlib safely.

    Forces the non-interactive Agg backend (needed for writing figures to
    files during the docs build) and aborts with a clear error if another
    backend was already selected by an earlier import.
    """
    # make sure that the Agg backend is set before importing any
    # matplotlib
    import matplotlib
    matplotlib.use('agg')
    # If pyplot was imported earlier elsewhere, the backend switch above may
    # have been ignored; detect that and refuse to continue.
    matplotlib_backend = matplotlib.get_backend().lower()
    if matplotlib_backend != 'agg':
        raise ValueError(
            "Sphinx-Gallery relies on the matplotlib 'agg' backend to "
            "render figures and write them to files. You are "
            "currently using the {} backend. Sphinx-Gallery will "
            "terminate the build now, because changing backends is "
            "not well supported by matplotlib. We advise you to move "
            "sphinx_gallery imports before any matplotlib-dependent "
            "import. Moving sphinx_gallery imports at the top of "
            "your conf.py file should fix this issue"
            .format(matplotlib_backend))
    import matplotlib.pyplot as plt
    return matplotlib, plt
def _structure_set(self, obj, cl):
"""Convert an iterable into a potentially generic set."""
if is_bare(cl) or cl.__args__[0] is Any:
return set(obj)
else:
elem_type = cl.__args__[0]
return {
self._structure_func.dispatch(elem_type)(e, elem_type)
for e in obj
} | Convert an iterable into a potentially generic set. | Below is the instruction that describes the task:
### Input:
Convert an iterable into a potentially generic set.
### Response:
def _structure_set(self, obj, cl):
    """Convert an iterable into a potentially generic set.

    :param obj: the iterable to convert.
    :param cl: the (possibly parameterized) set type to structure into.
    """
    if is_bare(cl) or cl.__args__[0] is Any:
        # No element type to enforce; a shallow conversion is enough.
        return set(obj)
    elem_type = cl.__args__[0]
    # Resolve the element-structuring function once instead of re-dispatching
    # on every element (the dispatch result is invariant across the loop).
    structure = self._structure_func.dispatch(elem_type)
    return {structure(e, elem_type) for e in obj}
def send_metrics_to_cloudwatch(self, rule, metric, dimensions):
"""
Send metrics to CloudWatch for the given dimensions
"""
timestamp = datetime.datetime.utcfromtimestamp(metric.timestamp)
self.log.debug(
"CloudWatch: Attempting to publish metric: %s to %s "
"with value (%s) for dimensions %s @%s",
rule['name'],
rule['namespace'],
str(metric.value),
str(dimensions),
str(metric.timestamp)
)
try:
self.connection.put_metric_data(
str(rule['namespace']),
str(rule['name']),
str(metric.value),
timestamp, str(rule['unit']),
dimensions)
self.log.debug(
"CloudWatch: Successfully published metric: %s to"
" %s with value (%s) for dimensions %s",
rule['name'],
rule['namespace'],
str(metric.value),
str(dimensions))
except AttributeError as e:
self.log.error(
"CloudWatch: Failed publishing - %s ", str(e))
except Exception as e: # Rough connection re-try logic.
self.log.error(
"CloudWatch: Failed publishing - %s\n%s ",
str(e),
str(sys.exc_info()[0]))
self._bind() | Send metrics to CloudWatch for the given dimensions | Below is the instruction that describes the task:
### Input:
Send metrics to CloudWatch for the given dimensions
### Response:
def send_metrics_to_cloudwatch(self, rule, metric, dimensions):
    """
    Send metrics to CloudWatch for the given dimensions

    :param rule: mapping holding the 'namespace', 'name' and 'unit' to
        publish under.
    :param metric: object exposing ``value`` and a Unix ``timestamp``.
    :param dimensions: CloudWatch dimensions attached to the datapoint.
    """
    timestamp = datetime.datetime.utcfromtimestamp(metric.timestamp)
    self.log.debug(
        "CloudWatch: Attempting to publish metric: %s to %s "
        "with value (%s) for dimensions %s @%s",
        rule['name'],
        rule['namespace'],
        str(metric.value),
        str(dimensions),
        str(metric.timestamp)
    )
    try:
        self.connection.put_metric_data(
            str(rule['namespace']),
            str(rule['name']),
            str(metric.value),
            timestamp, str(rule['unit']),
            dimensions)
        self.log.debug(
            "CloudWatch: Successfully published metric: %s to"
            " %s with value (%s) for dimensions %s",
            rule['name'],
            rule['namespace'],
            str(metric.value),
            str(dimensions))
    except AttributeError as e:
        # No usable connection object yet (e.g. the bind failed earlier).
        self.log.error(
            "CloudWatch: Failed publishing - %s ", str(e))
    except Exception as e:  # Rough connection re-try logic.
        self.log.error(
            "CloudWatch: Failed publishing - %s\n%s ",
            str(e),
            str(sys.exc_info()[0]))
        # Re-establish the connection for the next publish; this datapoint
        # is dropped.
        self._bind()
def run_query(self, query, new_line=True):
"""Runs *query*."""
if (self.destructive_warning and
confirm_destructive_query(query) is False):
message = 'Wise choice. Command execution stopped.'
click.echo(message)
return
results = self.sqlexecute.run(query)
for result in results:
title, rows, headers, _ = result
self.formatter.query = query
output = self.format_output(title, rows, headers)
for line in output:
click.echo(line, nl=new_line) | Runs *query*. | Below is the instruction that describes the task:
### Input:
Runs *query*.
### Response:
def run_query(self, query, new_line=True):
    """Runs *query*.

    When destructive-statement warnings are enabled, asks for confirmation
    first; then executes the query and echoes every formatted result set.
    """
    # Abort entirely if the user declines to run a destructive query.
    if (self.destructive_warning and
            confirm_destructive_query(query) is False):
        message = 'Wise choice. Command execution stopped.'
        click.echo(message)
        return
    results = self.sqlexecute.run(query)
    # A single input may produce several result sets.
    for result in results:
        title, rows, headers, _ = result
        # Some output formats need the original query text.
        self.formatter.query = query
        output = self.format_output(title, rows, headers)
        for line in output:
            click.echo(line, nl=new_line)
def awsRetry(f):
"""
This decorator retries the wrapped function if aws throws unexpected errors
errors.
It should wrap any function that makes use of boto
"""
@wraps(f)
def wrapper(*args, **kwargs):
for attempt in retry(delays=truncExpBackoff(),
timeout=300,
predicate=awsRetryPredicate):
with attempt:
return f(*args, **kwargs)
return wrapper | This decorator retries the wrapped function if aws throws unexpected errors
errors.
It should wrap any function that makes use of boto | Below is the instruction that describes the task:
### Input:
This decorator retries the wrapped function if aws throws unexpected errors
errors.
It should wrap any function that makes use of boto
### Response:
def awsRetry(f):
    """
    This decorator retries the wrapped function if aws throws unexpected
    errors.
    It should wrap any function that makes use of boto

    Retries use a truncated exponential backoff, give up after 300 seconds,
    and only fire for errors accepted by ``awsRetryPredicate``.
    """
    @wraps(f)
    def wrapper(*args, **kwargs):
        for attempt in retry(delays=truncExpBackoff(),
                             timeout=300,
                             predicate=awsRetryPredicate):
            with attempt:
                return f(*args, **kwargs)
    return wrapper
def read_remote(self):
'''Send a message back to the server (in contrast to
the local user output channel).'''
coded_line = self.inout.read_msg()
if isinstance(coded_line, bytes):
coded_line = coded_line.decode("utf-8")
control = coded_line[0]
remote_line = coded_line[1:]
return (control, remote_line) | Send a message back to the server (in contrast to
the local user output channel). | Below is the instruction that describes the task:
### Input:
Send a message back to the server (in contrast to
the local user output channel).
### Response:
def read_remote(self):
    '''Receive one message from the server (in contrast to
    the local user output channel) and split off its control character.

    :return: tuple ``(control, remote_line)`` where ``control`` is the
        first character of the decoded message.
    '''
    coded_line = self.inout.read_msg()
    # read_msg may hand back bytes; normalise to text first.
    if isinstance(coded_line, bytes):
        coded_line = coded_line.decode("utf-8")
    control = coded_line[0]
    remote_line = coded_line[1:]
    return (control, remote_line)
def parse_ref(self, field: Field) -> str:
"""
Parse the reference type for nested fields, if any.
"""
ref_name = type_name(name_for(field.schema))
return f"#/definitions/{ref_name}" | Parse the reference type for nested fields, if any. | Below is the instruction that describes the task:
### Input:
Parse the reference type for nested fields, if any.
### Response:
def parse_ref(self, field: Field) -> str:
    """
    Parse the reference type for nested fields, if any.
    """
    # JSON-schema pointer into the shared definitions section.
    definition = type_name(name_for(field.schema))
    return "#/definitions/" + definition
def decompose(self, frequency, window=None, periodic=False):
'''Use STL to decompose the time series into seasonal, trend, and
residual components.'''
R = LazyImport.rpy2()
if periodic:
window = 'periodic'
elif window is None:
window = frequency
timestamps = self.timestamps
series = LazyImport.numpy().array(self.values)
length = len(series)
series = R.ts(series, frequency=frequency)
kwargs = { 's.window': window }
decomposed = R.robjects.r['stl'](series, **kwargs).rx2('time.series')
decomposed = [ row for row in decomposed ]
seasonal = decomposed[0:length]
trend = decomposed[length:2*length]
residual = decomposed[2*length:3*length]
seasonal = TimeSeries(zip(timestamps, seasonal))
trend = TimeSeries(zip(timestamps, trend))
residual = TimeSeries(zip(timestamps, residual))
return DataFrame(seasonal=seasonal, trend=trend, residual=residual) | Use STL to decompose the time series into seasonal, trend, and
residual components. | Below is the instruction that describes the task:
### Input:
Use STL to decompose the time series into seasonal, trend, and
residual components.
### Response:
def decompose(self, frequency, window=None, periodic=False):
    '''Use STL to decompose the time series into seasonal, trend, and
    residual components.

    :param frequency: number of observations per seasonal cycle.
    :param window: span of the seasonal smoother (defaults to *frequency*).
    :param periodic: if True, force a strictly periodic seasonal component.
    :return: DataFrame with 'seasonal', 'trend' and 'residual' series.
    '''
    R = LazyImport.rpy2()
    if periodic:
        window = 'periodic'
    elif window is None:
        window = frequency
    timestamps = self.timestamps
    series = LazyImport.numpy().array(self.values)
    length = len(series)
    series = R.ts(series, frequency=frequency)
    # 's.window' is not a valid Python identifier, so pass it via **kwargs.
    kwargs = { 's.window': window }
    decomposed = R.robjects.r['stl'](series, **kwargs).rx2('time.series')
    # The R result is column-major: seasonal, trend, then residual, each
    # `length` values long — slice it back apart.
    decomposed = [ row for row in decomposed ]
    seasonal = decomposed[0:length]
    trend = decomposed[length:2*length]
    residual = decomposed[2*length:3*length]
    seasonal = TimeSeries(zip(timestamps, seasonal))
    trend = TimeSeries(zip(timestamps, trend))
    residual = TimeSeries(zip(timestamps, residual))
    return DataFrame(seasonal=seasonal, trend=trend, residual=residual)
def update_database(self, instance_id, database_id, ddl_statements,
project_id=None,
operation_id=None):
"""
Updates DDL of a database in Cloud Spanner.
:type project_id: str
:param instance_id: The ID of the Cloud Spanner instance.
:type instance_id: str
:param database_id: The ID of the database in Cloud Spanner.
:type database_id: str
:param ddl_statements: The string list containing DDL for the new database.
:type ddl_statements: list[str]
:param project_id: Optional, the ID of the GCP project that owns the Cloud Spanner
database. If set to None or missing, the default project_id from the GCP connection is used.
:param operation_id: (Optional) The unique per database operation ID that can be
specified to implement idempotency check.
:type operation_id: str
:return: None
"""
instance = self._get_client(project_id=project_id).instance(
instance_id=instance_id)
if not instance.exists():
raise AirflowException("The instance {} does not exist in project {} !".
format(instance_id, project_id))
database = instance.database(database_id=database_id)
try:
operation = database.update_ddl(
ddl_statements=ddl_statements, operation_id=operation_id)
if operation:
result = operation.result()
self.log.info(result)
return
except AlreadyExists as e:
if e.code == 409 and operation_id in e.message:
self.log.info("Replayed update_ddl message - the operation id %s "
"was already done before.", operation_id)
return
except GoogleAPICallError as e:
self.log.error('An error occurred: %s. Exiting.', e.message)
raise e | Updates DDL of a database in Cloud Spanner.
:type project_id: str
:param instance_id: The ID of the Cloud Spanner instance.
:type instance_id: str
:param database_id: The ID of the database in Cloud Spanner.
:type database_id: str
:param ddl_statements: The string list containing DDL for the new database.
:type ddl_statements: list[str]
:param project_id: Optional, the ID of the GCP project that owns the Cloud Spanner
database. If set to None or missing, the default project_id from the GCP connection is used.
:param operation_id: (Optional) The unique per database operation ID that can be
specified to implement idempotency check.
:type operation_id: str
:return: None | Below is the instruction that describes the task:
### Input:
Updates DDL of a database in Cloud Spanner.
:type project_id: str
:param instance_id: The ID of the Cloud Spanner instance.
:type instance_id: str
:param database_id: The ID of the database in Cloud Spanner.
:type database_id: str
:param ddl_statements: The string list containing DDL for the new database.
:type ddl_statements: list[str]
:param project_id: Optional, the ID of the GCP project that owns the Cloud Spanner
database. If set to None or missing, the default project_id from the GCP connection is used.
:param operation_id: (Optional) The unique per database operation ID that can be
specified to implement idempotency check.
:type operation_id: str
:return: None
### Response:
def update_database(self, instance_id, database_id, ddl_statements,
                    project_id=None,
                    operation_id=None):
    """
    Updates DDL of a database in Cloud Spanner.

    :type project_id: str
    :param instance_id: The ID of the Cloud Spanner instance.
    :type instance_id: str
    :param database_id: The ID of the database in Cloud Spanner.
    :type database_id: str
    :param ddl_statements: The string list containing DDL for the new database.
    :type ddl_statements: list[str]
    :param project_id: Optional, the ID of the GCP project that owns the Cloud Spanner
        database. If set to None or missing, the default project_id from the GCP connection is used.
    :param operation_id: (Optional) The unique per database operation ID that can be
        specified to implement idempotency check.
    :type operation_id: str
    :return: None
    """
    instance = self._get_client(project_id=project_id).instance(
        instance_id=instance_id)
    if not instance.exists():
        raise AirflowException("The instance {} does not exist in project {} !".
                               format(instance_id, project_id))
    database = instance.database(database_id=database_id)
    try:
        operation = database.update_ddl(
            ddl_statements=ddl_statements, operation_id=operation_id)
        if operation:
            # Block until the long-running DDL operation completes.
            result = operation.result()
            self.log.info(result)
        return
    except AlreadyExists as e:
        # Idempotency: replaying the same operation_id means the DDL was
        # already applied, which is fine. Guard operation_id so a None value
        # cannot raise TypeError in the `in` test; any other conflict is a
        # genuine error and must propagate instead of being swallowed.
        if e.code == 409 and operation_id and operation_id in e.message:
            self.log.info("Replayed update_ddl message - the operation id %s "
                          "was already done before.", operation_id)
            return
        raise
    except GoogleAPICallError as e:
        self.log.error('An error occurred: %s. Exiting.', e.message)
        raise e
def install_cub(mb_inc_path):
    """Downloads and installs cub into mb_inc_path.

    A pinned release (cub 1.6.4) is used. If a valid install already
    exists under ``mb_inc_path``/cub nothing is done; otherwise the
    archive is downloaded (or a previously downloaded archive is reused
    after SHA verification), unzipped and renamed into place.

    :param mb_inc_path: include directory to install cub under
    :raises InstallCubException: on hash mismatch or a failed
        post-install check
    """
    cub_url = 'https://github.com/NVlabs/cub/archive/1.6.4.zip'
    cub_sha_hash = '0d5659200132c2576be0b3959383fa756de6105d'
    cub_version_str = 'Current release: v1.6.4 (12/06/2016)'
    cub_zip_file = 'cub.zip'
    cub_zip_dir = 'cub-1.6.4'
    cub_unzipped_path = os.path.join(mb_inc_path, cub_zip_dir)
    cub_new_unzipped_path = os.path.join(mb_inc_path, 'cub')
    cub_header = os.path.join(cub_new_unzipped_path, 'cub', 'cub.cuh')
    cub_readme = os.path.join(cub_new_unzipped_path, 'README.md' )

    # Check for a reasonably valid install
    cub_installed, _ = is_cub_installed(cub_readme, cub_header, cub_version_str)

    if cub_installed:
        log.info("NVIDIA cub installation found "
                 "at '{}'".format(cub_new_unzipped_path))
        return

    log.info("No NVIDIA cub installation found")

    # Do we already have a valid cub zip file
    have_valid_cub_file = (os.path.exists(cub_zip_file) and
                           os.path.isfile(cub_zip_file) and
                           sha_hash_file(cub_zip_file) == cub_sha_hash)

    if have_valid_cub_file:
        log.info("Valid NVIDIA cub archive found '{}'".format(cub_zip_file))
    # Download if we don't have a valid file
    else:
        log.info("Downloading cub archive '{}'".format(cub_url))
        dl_cub(cub_url, cub_zip_file)
        cub_file_sha_hash = sha_hash_file(cub_zip_file)

        # Compare against our supplied hash
        if cub_sha_hash != cub_file_sha_hash:
            msg = ('Hash of file %s downloaded from %s '
                   'is %s and does not match the expected '
                   'hash of %s. Please manually download '
                   'as per the README.md instructions.') % (
                       cub_zip_file, cub_url,
                       cub_file_sha_hash, cub_sha_hash)
            raise InstallCubException(msg)

    # Unzip into montblanc/include/cub
    with zipfile.ZipFile(cub_zip_file, 'r') as zip_file:
        # Remove any existing installs
        shutil.rmtree(cub_unzipped_path, ignore_errors=True)
        shutil.rmtree(cub_new_unzipped_path, ignore_errors=True)

        # Unzip
        zip_file.extractall(mb_inc_path)

        # Rename. cub_unzipped_path is mb_inc_path/cub_zip_dir
        shutil.move(cub_unzipped_path, cub_new_unzipped_path)

    log.info("NVIDIA cub archive unzipped into '{}'".format(
        cub_new_unzipped_path))

    # Verify the freshly unzipped tree before declaring success.
    there, reason = is_cub_installed(cub_readme, cub_header, cub_version_str)

    if not there:
        raise InstallCubException(reason)
### Input:
Downloads and installs cub into mb_inc_path
### Response:
def install_cub(mb_inc_path):
    """Downloads and installs cub into mb_inc_path.

    A pinned release (cub 1.6.4) is used. If a valid install already
    exists under ``mb_inc_path``/cub nothing is done; otherwise the
    archive is downloaded (or a previously downloaded archive is reused
    after SHA verification), unzipped and renamed into place.

    :param mb_inc_path: include directory to install cub under
    :raises InstallCubException: on hash mismatch or a failed
        post-install check
    """
    cub_url = 'https://github.com/NVlabs/cub/archive/1.6.4.zip'
    cub_sha_hash = '0d5659200132c2576be0b3959383fa756de6105d'
    cub_version_str = 'Current release: v1.6.4 (12/06/2016)'
    cub_zip_file = 'cub.zip'
    cub_zip_dir = 'cub-1.6.4'
    cub_unzipped_path = os.path.join(mb_inc_path, cub_zip_dir)
    cub_new_unzipped_path = os.path.join(mb_inc_path, 'cub')
    cub_header = os.path.join(cub_new_unzipped_path, 'cub', 'cub.cuh')
    cub_readme = os.path.join(cub_new_unzipped_path, 'README.md' )

    # Check for a reasonably valid install
    cub_installed, _ = is_cub_installed(cub_readme, cub_header, cub_version_str)

    if cub_installed:
        log.info("NVIDIA cub installation found "
                 "at '{}'".format(cub_new_unzipped_path))
        return

    log.info("No NVIDIA cub installation found")

    # Do we already have a valid cub zip file
    have_valid_cub_file = (os.path.exists(cub_zip_file) and
                           os.path.isfile(cub_zip_file) and
                           sha_hash_file(cub_zip_file) == cub_sha_hash)

    if have_valid_cub_file:
        log.info("Valid NVIDIA cub archive found '{}'".format(cub_zip_file))
    # Download if we don't have a valid file
    else:
        log.info("Downloading cub archive '{}'".format(cub_url))
        dl_cub(cub_url, cub_zip_file)
        cub_file_sha_hash = sha_hash_file(cub_zip_file)

        # Compare against our supplied hash
        if cub_sha_hash != cub_file_sha_hash:
            msg = ('Hash of file %s downloaded from %s '
                   'is %s and does not match the expected '
                   'hash of %s. Please manually download '
                   'as per the README.md instructions.') % (
                       cub_zip_file, cub_url,
                       cub_file_sha_hash, cub_sha_hash)
            raise InstallCubException(msg)

    # Unzip into montblanc/include/cub
    with zipfile.ZipFile(cub_zip_file, 'r') as zip_file:
        # Remove any existing installs
        shutil.rmtree(cub_unzipped_path, ignore_errors=True)
        shutil.rmtree(cub_new_unzipped_path, ignore_errors=True)

        # Unzip
        zip_file.extractall(mb_inc_path)

        # Rename. cub_unzipped_path is mb_inc_path/cub_zip_dir
        shutil.move(cub_unzipped_path, cub_new_unzipped_path)

    log.info("NVIDIA cub archive unzipped into '{}'".format(
        cub_new_unzipped_path))

    # Verify the freshly unzipped tree before declaring success.
    there, reason = is_cub_installed(cub_readme, cub_header, cub_version_str)

    if not there:
        raise InstallCubException(reason)
def library_directories(self) -> typing.List[str]:
    """
    The list of directories to all of the library locations
    """
    raw = self.settings.fetch('library_folders', ['libs'])
    configured = [raw] if isinstance(raw, str) else list(raw)

    # Remote projects receive their dependencies through the shared
    # libraries folder, so parent-relative external folders are dropped
    # when the project is not local.
    keep_external = not self.is_remote_project
    folders = []
    for entry in configured:
        if keep_external or not entry.startswith('..'):
            folders.append(entry)

    # Always include the remote shared library folder and the project
    # source directory itself.
    folders.append('../__cauldron_shared_libs')
    folders.append(self.source_directory)

    cleaned = []
    for entry in folders:
        cleaned.append(
            environ.paths.clean(os.path.join(self.source_directory, entry))
        )
    return cleaned
### Input:
The list of directories to all of the library locations
### Response:
def library_directories(self) -> typing.List[str]:
    """
    The list of directories to all of the library locations
    """
    raw = self.settings.fetch('library_folders', ['libs'])
    configured = [raw] if isinstance(raw, str) else list(raw)

    # Remote projects receive their dependencies through the shared
    # libraries folder, so parent-relative external folders are dropped
    # when the project is not local.
    keep_external = not self.is_remote_project
    folders = []
    for entry in configured:
        if keep_external or not entry.startswith('..'):
            folders.append(entry)

    # Always include the remote shared library folder and the project
    # source directory itself.
    folders.append('../__cauldron_shared_libs')
    folders.append(self.source_directory)

    cleaned = []
    for entry in folders:
        cleaned.append(
            environ.paths.clean(os.path.join(self.source_directory, entry))
        )
    return cleaned
def cmd(send, _, args):
    """Returns a list of admins.

    V = Verified (authed to NickServ), U = Unverified.
    Syntax: {command}
    """
    admins = args['db'].query(Permissions).order_by(Permissions.nick).all()
    # Tag each admin with their NickServ verification status.
    labeled = [
        "%s (V)" % admin.nick if admin.registered else "%s (U)" % admin.nick
        for admin in admins
    ]
    send(", ".join(labeled), target=args['nick'])
V = Verified (authed to NickServ), U = Unverified.
Syntax: {command} | Below is the the instruction that describes the task:
### Input:
Returns a list of admins.
V = Verified (authed to NickServ), U = Unverified.
Syntax: {command}
### Response:
def cmd(send, _, args):
    """Returns a list of admins.

    V = Verified (authed to NickServ), U = Unverified.
    Syntax: {command}
    """
    admins = args['db'].query(Permissions).order_by(Permissions.nick).all()
    # Tag each admin with their NickServ verification status.
    labeled = [
        "%s (V)" % admin.nick if admin.registered else "%s (U)" % admin.nick
        for admin in admins
    ]
    send(", ".join(labeled), target=args['nick'])
def parse_plotCoverage(self):
    """Find plotCoverage output. Both stdout and --outRawCounts

    Populates ``self.deeptools_plotCoverageStdout`` (per-sample summary
    statistics) and ``self.deeptools_plotCoverageOutRawCounts``
    (per-sample coverage histograms), then adds a report section for
    whichever of the two outputs were found.

    :return: tuple of (number of stdout samples, number of raw-count
        samples) parsed
    """
    self.deeptools_plotCoverageStdout = dict()
    for f in self.find_log_files('deeptools/plotCoverageStdout'):
        parsed_data = self.parsePlotCoverageStdout(f)
        for k, v in parsed_data.items():
            if k in self.deeptools_plotCoverageStdout:
                log.warning("Replacing duplicate sample {}.".format(k))
            self.deeptools_plotCoverageStdout[k] = v

        if len(parsed_data) > 0:
            self.add_data_source(f, section='plotCoverage')

    self.deeptools_plotCoverageOutRawCounts= dict()
    for f in self.find_log_files('deeptools/plotCoverageOutRawCounts'):
        parsed_data = self.parsePlotCoverageOutRawCounts(f)
        for k, v in parsed_data.items():
            if k in self.deeptools_plotCoverageOutRawCounts:
                log.warning("Replacing duplicate sample {}.".format(k))
            self.deeptools_plotCoverageOutRawCounts[k] = v

        if len(parsed_data) > 0:
            self.add_data_source(f, section='plotCoverage')

    # Summary-statistics table: one row per sample; all columns share
    # the 'coverage' key so values are formatted on the same scale.
    if len(self.deeptools_plotCoverageStdout) > 0:
        header = OrderedDict()
        header["min"] = {
            'title': 'Min',
            'description': 'Minimum Coverage',
            'shared_key': 'coverage'
        }
        header["25%"] = {
            'rid': 'first_quartile',
            'title': '1st Quartile',
            'description': 'First quartile coverage',
            'shared_key': 'coverage'
        }
        header["50%"] = {
            'rid': 'median',
            'title': 'Median',
            'description': 'Median coverage (second quartile)',
            'shared_key': 'coverage'
        }
        header["mean"] = {
            'title': 'Mean',
            'description': 'Mean coverage',
            'shared_key': 'coverage'
        }
        header["75%"] = {
            'rid': 'third_quartile',
            'title': '3rd Quartile',
            'description': 'Third quartile coverage',
            'shared_key': 'coverage'
        }
        header["max"] = {
            'title': 'Max',
            'description': 'Maximum coverage',
            'shared_key': 'coverage'
        }
        header["std"] = {
            'title': 'Std. Dev.',
            'description': 'Coverage standard deviation',
            'shared_key': 'coverage'
        }
        config = {'namespace': 'deepTools plotCoverage'}
        self.add_section(
            name = "Coverage metrics",
            anchor = "deeptools_coverage_metrics",
            plot = table.plot(self.deeptools_plotCoverageStdout, header, config)
        )

    # Coverage-distribution line graph built from the raw counts.
    if len(self.deeptools_plotCoverageOutRawCounts) > 0:
        config = {
            'id': 'deeptools_coverage_metrics_plot',
            'title': 'deepTools: Coverage distribution',
            'xlab': 'Coverage',
            'ylab': 'Fraction of bases sampled'
        }
        self.add_section(
            name = "Coverage distribution",
            anchor = "deeptools_coverage_distribution",
            description = "The fraction of bases with a given number of read/fragment coverage",
            plot = linegraph.plot(self.deeptools_plotCoverageOutRawCounts, config)
        )

    return len(self.deeptools_plotCoverageStdout), len(self.deeptools_plotCoverageOutRawCounts)
### Input:
Find plotCoverage output. Both stdout and --outRawCounts
### Response:
def parse_plotCoverage(self):
    """Find plotCoverage output. Both stdout and --outRawCounts

    Populates ``self.deeptools_plotCoverageStdout`` (per-sample summary
    statistics) and ``self.deeptools_plotCoverageOutRawCounts``
    (per-sample coverage histograms), then adds a report section for
    whichever of the two outputs were found.

    :return: tuple of (number of stdout samples, number of raw-count
        samples) parsed
    """
    self.deeptools_plotCoverageStdout = dict()
    for f in self.find_log_files('deeptools/plotCoverageStdout'):
        parsed_data = self.parsePlotCoverageStdout(f)
        for k, v in parsed_data.items():
            if k in self.deeptools_plotCoverageStdout:
                log.warning("Replacing duplicate sample {}.".format(k))
            self.deeptools_plotCoverageStdout[k] = v

        if len(parsed_data) > 0:
            self.add_data_source(f, section='plotCoverage')

    self.deeptools_plotCoverageOutRawCounts= dict()
    for f in self.find_log_files('deeptools/plotCoverageOutRawCounts'):
        parsed_data = self.parsePlotCoverageOutRawCounts(f)
        for k, v in parsed_data.items():
            if k in self.deeptools_plotCoverageOutRawCounts:
                log.warning("Replacing duplicate sample {}.".format(k))
            self.deeptools_plotCoverageOutRawCounts[k] = v

        if len(parsed_data) > 0:
            self.add_data_source(f, section='plotCoverage')

    # Summary-statistics table: one row per sample; all columns share
    # the 'coverage' key so values are formatted on the same scale.
    if len(self.deeptools_plotCoverageStdout) > 0:
        header = OrderedDict()
        header["min"] = {
            'title': 'Min',
            'description': 'Minimum Coverage',
            'shared_key': 'coverage'
        }
        header["25%"] = {
            'rid': 'first_quartile',
            'title': '1st Quartile',
            'description': 'First quartile coverage',
            'shared_key': 'coverage'
        }
        header["50%"] = {
            'rid': 'median',
            'title': 'Median',
            'description': 'Median coverage (second quartile)',
            'shared_key': 'coverage'
        }
        header["mean"] = {
            'title': 'Mean',
            'description': 'Mean coverage',
            'shared_key': 'coverage'
        }
        header["75%"] = {
            'rid': 'third_quartile',
            'title': '3rd Quartile',
            'description': 'Third quartile coverage',
            'shared_key': 'coverage'
        }
        header["max"] = {
            'title': 'Max',
            'description': 'Maximum coverage',
            'shared_key': 'coverage'
        }
        header["std"] = {
            'title': 'Std. Dev.',
            'description': 'Coverage standard deviation',
            'shared_key': 'coverage'
        }
        config = {'namespace': 'deepTools plotCoverage'}
        self.add_section(
            name = "Coverage metrics",
            anchor = "deeptools_coverage_metrics",
            plot = table.plot(self.deeptools_plotCoverageStdout, header, config)
        )

    # Coverage-distribution line graph built from the raw counts.
    if len(self.deeptools_plotCoverageOutRawCounts) > 0:
        config = {
            'id': 'deeptools_coverage_metrics_plot',
            'title': 'deepTools: Coverage distribution',
            'xlab': 'Coverage',
            'ylab': 'Fraction of bases sampled'
        }
        self.add_section(
            name = "Coverage distribution",
            anchor = "deeptools_coverage_distribution",
            description = "The fraction of bases with a given number of read/fragment coverage",
            plot = linegraph.plot(self.deeptools_plotCoverageOutRawCounts, config)
        )

    return len(self.deeptools_plotCoverageStdout), len(self.deeptools_plotCoverageOutRawCounts)
def unassign_agent_from_resource(self, agent_id, resource_id):
    """Removes an ``Agent`` from a ``Resource``.

    arg: agent_id (osid.id.Id): the ``Id`` of the ``Agent``
    arg: resource_id (osid.id.Id): the ``Id`` of the ``Resource``
    raise: NotFound - ``agent_id`` or ``resource_id`` not found or
           ``agent_id`` not assigned to ``resource_id``
    raise: NullArgument - ``agent_id`` or ``resource_id`` is
           ``null``
    raise: OperationFailed - unable to complete request
    raise: PermissionDenied - authorization failure
    *compliance: mandatory -- This method must be implemented.*
    """
    collection = JSONClientValidated(
        'resource', collection='Resource', runtime=self._runtime)
    resource = collection.find_one(
        {'_id': ObjectId(resource_id.get_identifier())})

    # A missing 'agentIds' key (KeyError) and an agent id that is not
    # in the list (ValueError) are both reported as NotFound.
    agent_key = str(agent_id)
    try:
        resource['agentIds'].remove(agent_key)
    except (KeyError, ValueError):
        raise errors.NotFound('agent_id not assigned to resource')
    collection.save(resource)
arg: agent_id (osid.id.Id): the ``Id`` of the ``Agent``
arg: resource_id (osid.id.Id): the ``Id`` of the ``Resource``
raise: NotFound - ``agent_id`` or ``resource_id`` not found or
``agent_id`` not assigned to ``resource_id``
raise: NullArgument - ``agent_id`` or ``resource_id`` is
``null``
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure
*compliance: mandatory -- This method must be implemented.* | Below is the the instruction that describes the task:
### Input:
Removes an ``Agent`` from a ``Resource``.
arg: agent_id (osid.id.Id): the ``Id`` of the ``Agent``
arg: resource_id (osid.id.Id): the ``Id`` of the ``Resource``
raise: NotFound - ``agent_id`` or ``resource_id`` not found or
``agent_id`` not assigned to ``resource_id``
raise: NullArgument - ``agent_id`` or ``resource_id`` is
``null``
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure
*compliance: mandatory -- This method must be implemented.*
### Response:
def unassign_agent_from_resource(self, agent_id, resource_id):
    """Removes an ``Agent`` from a ``Resource``.

    arg: agent_id (osid.id.Id): the ``Id`` of the ``Agent``
    arg: resource_id (osid.id.Id): the ``Id`` of the ``Resource``
    raise: NotFound - ``agent_id`` or ``resource_id`` not found or
           ``agent_id`` not assigned to ``resource_id``
    raise: NullArgument - ``agent_id`` or ``resource_id`` is
           ``null``
    raise: OperationFailed - unable to complete request
    raise: PermissionDenied - authorization failure
    *compliance: mandatory -- This method must be implemented.*
    """
    collection = JSONClientValidated(
        'resource', collection='Resource', runtime=self._runtime)
    resource = collection.find_one(
        {'_id': ObjectId(resource_id.get_identifier())})

    # A missing 'agentIds' key (KeyError) and an agent id that is not
    # in the list (ValueError) are both reported as NotFound.
    agent_key = str(agent_id)
    try:
        resource['agentIds'].remove(agent_key)
    except (KeyError, ValueError):
        raise errors.NotFound('agent_id not assigned to resource')
    collection.save(resource)
def parse_config(options):
    """
    Get settings from config file.

    :param options: parsed command line options; ``options.config`` is
        the path to the INI file and ``options.quiet`` suppresses
        diagnostic output.
    :return: dict with 'secrets', 'credentials', 'start', 'end' and
        'message' values read from the config file.
    """
    # NOTE: Python 2 syntax (``except Exception, err``) -- this module
    # is not Python 3 compatible as written.
    if os.path.exists(options.config):
        config = ConfigParser.ConfigParser()
        try:
            config.read(options.config)
        except Exception, err:
            if not options.quiet:
                sys.stderr.write("ERROR: Config file read {config} error. {err}".format(config=options.config, err=err))
            sys.exit(-1)
        try:
            configdata = {
                "secrets": config.get("GOOGLE", "secrets"),
                "credentials": config.get("nagios-notification-google-calendar", "credentials"),
                "start": config.get("nagios-notification-google-calendar", "start"),
                "end": config.get("nagios-notification-google-calendar", "end"),
                "message": config.get("nagios-notification-google-calendar", "message"),
            }
        except ConfigParser.NoOptionError, err:
            if not options.quiet:
                sys.stderr.write("ERROR: Config file missing option error. {err}\n".format(err=err))
            sys.exit(-1)

        # check mandatory config options supplied
        mandatories = ["secrets", "credentials", "start", "end", "message", ]
        if not all(configdata[mandatory] for mandatory in mandatories):
            if not options.quiet:
                sys.stdout.write("Mandatory config option missing\n")
            sys.exit(0)

        return configdata
    else:
        # NOTE(review): this error path exits with status 0 while the
        # read/parse errors above exit with -1 -- confirm whether the
        # inconsistent exit codes are intentional before changing them.
        if not options.quiet:
            sys.stderr.write("ERROR: Config file {config} does not exist\n".format(config=options.config))
        sys.exit(0)
### Input:
Get settings from config file.
### Response:
def parse_config(options):
    """
    Get settings from config file.

    :param options: parsed command line options; ``options.config`` is
        the path to the INI file and ``options.quiet`` suppresses
        diagnostic output.
    :return: dict with 'secrets', 'credentials', 'start', 'end' and
        'message' values read from the config file.
    """
    # NOTE: Python 2 syntax (``except Exception, err``) -- this module
    # is not Python 3 compatible as written.
    if os.path.exists(options.config):
        config = ConfigParser.ConfigParser()
        try:
            config.read(options.config)
        except Exception, err:
            if not options.quiet:
                sys.stderr.write("ERROR: Config file read {config} error. {err}".format(config=options.config, err=err))
            sys.exit(-1)
        try:
            configdata = {
                "secrets": config.get("GOOGLE", "secrets"),
                "credentials": config.get("nagios-notification-google-calendar", "credentials"),
                "start": config.get("nagios-notification-google-calendar", "start"),
                "end": config.get("nagios-notification-google-calendar", "end"),
                "message": config.get("nagios-notification-google-calendar", "message"),
            }
        except ConfigParser.NoOptionError, err:
            if not options.quiet:
                sys.stderr.write("ERROR: Config file missing option error. {err}\n".format(err=err))
            sys.exit(-1)

        # check mandatory config options supplied
        mandatories = ["secrets", "credentials", "start", "end", "message", ]
        if not all(configdata[mandatory] for mandatory in mandatories):
            if not options.quiet:
                sys.stdout.write("Mandatory config option missing\n")
            sys.exit(0)

        return configdata
    else:
        # NOTE(review): this error path exits with status 0 while the
        # read/parse errors above exit with -1 -- confirm whether the
        # inconsistent exit codes are intentional before changing them.
        if not options.quiet:
            sys.stderr.write("ERROR: Config file {config} does not exist\n".format(config=options.config))
        sys.exit(0)
def camel_2_snake(name):
    """Converts CamelCase to camel_case"""
    # Two passes: first split an uppercase-led word from whatever
    # precedes it, then split a trailing lowercase/digit from the next
    # uppercase letter (handles acronym runs like HTTPServer).
    partial = re.sub('(.)([A-Z][a-z]+)', r'\1_\2', name)
    snake = re.sub('([a-z0-9])([A-Z])', r'\1_\2', partial)
    return snake.lower()
### Input:
Converts CamelCase to camel_case
### Response:
def camel_2_snake(name):
    """Converts CamelCase to camel_case"""
    # Two passes: first split an uppercase-led word from whatever
    # precedes it, then split a trailing lowercase/digit from the next
    # uppercase letter (handles acronym runs like HTTPServer).
    partial = re.sub('(.)([A-Z][a-z]+)', r'\1_\2', name)
    snake = re.sub('([a-z0-9])([A-Z])', r'\1_\2', partial)
    return snake.lower()
def list(self, **params):
    """
    Retrieve all sources

    Returns all lead sources available to the user according to the parameters provided

    :calls: ``get /lead_sources``
    :param dict params: (optional) Search options.
    :return: List of dictionaries that support attribute-style access, which represent collection of LeadSources.
    :rtype: list
    """
    # The client returns (status, headers, body); only the body is of
    # interest here.
    response = self.http_client.get("/lead_sources", params=params)
    return response[2]
Returns all lead sources available to the user according to the parameters provided
:calls: ``get /lead_sources``
:param dict params: (optional) Search options.
:return: List of dictionaries that support attribute-style access, which represent collection of LeadSources.
:rtype: list | Below is the the instruction that describes the task:
### Input:
Retrieve all sources
Returns all lead sources available to the user according to the parameters provided
:calls: ``get /lead_sources``
:param dict params: (optional) Search options.
:return: List of dictionaries that support attribute-style access, which represent collection of LeadSources.
:rtype: list
### Response:
def list(self, **params):
    """
    Retrieve all sources

    Returns all lead sources available to the user according to the parameters provided

    :calls: ``get /lead_sources``
    :param dict params: (optional) Search options.
    :return: List of dictionaries that support attribute-style access, which represent collection of LeadSources.
    :rtype: list
    """
    # The client returns (status, headers, body); only the body is of
    # interest here.
    response = self.http_client.get("/lead_sources", params=params)
    return response[2]
def _select_best_remaining_qubit(self, prog_qubit):
"""
Select the best remaining hardware qubit for the next program qubit.
"""
reliab_store = {}
for hw_qubit in self.available_hw_qubits:
reliab = 1
for n in self.prog_graph.neighbors(prog_qubit):
if n in self.prog2hw:
reliab *= self.swap_costs[self.prog2hw[n]][hw_qubit]
reliab *= self.readout_errors[hw_qubit]
reliab_store[hw_qubit] = reliab
max_reliab = 0
best_hw_qubit = None
for hw_qubit in reliab_store:
if reliab_store[hw_qubit] > max_reliab:
max_reliab = reliab_store[hw_qubit]
best_hw_qubit = hw_qubit
return best_hw_qubit | Select the best remaining hardware qubit for the next program qubit. | Below is the the instruction that describes the task:
### Input:
Select the best remaining hardware qubit for the next program qubit.
### Response:
def _select_best_remaining_qubit(self, prog_qubit):
"""
Select the best remaining hardware qubit for the next program qubit.
"""
reliab_store = {}
for hw_qubit in self.available_hw_qubits:
reliab = 1
for n in self.prog_graph.neighbors(prog_qubit):
if n in self.prog2hw:
reliab *= self.swap_costs[self.prog2hw[n]][hw_qubit]
reliab *= self.readout_errors[hw_qubit]
reliab_store[hw_qubit] = reliab
max_reliab = 0
best_hw_qubit = None
for hw_qubit in reliab_store:
if reliab_store[hw_qubit] > max_reliab:
max_reliab = reliab_store[hw_qubit]
best_hw_qubit = hw_qubit
return best_hw_qubit |
def initial_security_hash(self, timestamp):
    """
    Generate the initial security hash from self.content_object
    and a (unix) timestamp.
    """
    # Everything is stringified so the hash input is stable regardless
    # of the underlying types.
    target = self.target_object
    return self.generate_security_hash(
        content_type=str(target._meta),
        object_pk=str(target._get_pk_val()),
        timestamp=str(timestamp),
    )
and a (unix) timestamp. | Below is the the instruction that describes the task:
### Input:
Generate the initial security hash from self.content_object
and a (unix) timestamp.
### Response:
def initial_security_hash(self, timestamp):
    """
    Generate the initial security hash from self.content_object
    and a (unix) timestamp.
    """
    # Everything is stringified so the hash input is stable regardless
    # of the underlying types.
    target = self.target_object
    return self.generate_security_hash(
        content_type=str(target._meta),
        object_pk=str(target._get_pk_val()),
        timestamp=str(timestamp),
    )
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.