code stringlengths 75 104k | docstring stringlengths 1 46.9k | text stringlengths 164 112k |
|---|---|---|
def save_family(self, family_form, *args, **kwargs):
"""Pass through to provider FamilyAdminSession.update_family"""
# Implemented from kitosid template for -
# osid.resource.BinAdminSession.update_bin
if family_form.is_for_update():
return self.update_family(family_form, *args, **kwargs)
else:
        return self.create_family(family_form, *args, **kwargs) | Pass through to provider FamilyAdminSession.update_family | Below is the instruction that describes the task:
### Input:
Pass through to provider FamilyAdminSession.update_family
### Response:
def save_family(self, family_form, *args, **kwargs):
"""Pass through to provider FamilyAdminSession.update_family"""
# Implemented from kitosid template for -
# osid.resource.BinAdminSession.update_bin
if family_form.is_for_update():
return self.update_family(family_form, *args, **kwargs)
else:
return self.create_family(family_form, *args, **kwargs) |
def fill_subparser(subparser):
"""Sets up a subparser to convert the MNIST dataset files.
Parameters
----------
subparser : :class:`argparse.ArgumentParser`
Subparser handling the `mnist` command.
"""
subparser.add_argument(
"--dtype", help="dtype to save to; by default, images will be " +
"returned in their original unsigned byte format",
choices=('float32', 'float64', 'bool'), type=str, default=None)
return convert_mnist | Sets up a subparser to convert the MNIST dataset files.
Parameters
----------
subparser : :class:`argparse.ArgumentParser`
Subparser handling the `mnist` command. | Below is the instruction that describes the task:
### Input:
Sets up a subparser to convert the MNIST dataset files.
Parameters
----------
subparser : :class:`argparse.ArgumentParser`
Subparser handling the `mnist` command.
### Response:
def fill_subparser(subparser):
"""Sets up a subparser to convert the MNIST dataset files.
Parameters
----------
subparser : :class:`argparse.ArgumentParser`
Subparser handling the `mnist` command.
"""
subparser.add_argument(
"--dtype", help="dtype to save to; by default, images will be " +
"returned in their original unsigned byte format",
choices=('float32', 'float64', 'bool'), type=str, default=None)
return convert_mnist |
def _get_registered_executable(exe_name):
"""Windows allow application paths to be registered in the registry."""
registered = None
if sys.platform.startswith('win'):
if os.path.splitext(exe_name)[1].lower() != '.exe':
exe_name += '.exe'
import _winreg # pylint: disable=import-error
try:
key = "SOFTWARE\\Microsoft\\Windows\\CurrentVersion\\App Paths\\" + exe_name
value = _winreg.QueryValue(_winreg.HKEY_LOCAL_MACHINE, key)
registered = (value, "from HKLM\\"+key)
except _winreg.error:
pass
if registered and not os.path.exists(registered[0]):
registered = None
    return registered | Windows allow application paths to be registered in the registry. | Below is the instruction that describes the task:
### Input:
Windows allow application paths to be registered in the registry.
### Response:
def _get_registered_executable(exe_name):
"""Windows allow application paths to be registered in the registry."""
registered = None
if sys.platform.startswith('win'):
if os.path.splitext(exe_name)[1].lower() != '.exe':
exe_name += '.exe'
import _winreg # pylint: disable=import-error
try:
key = "SOFTWARE\\Microsoft\\Windows\\CurrentVersion\\App Paths\\" + exe_name
value = _winreg.QueryValue(_winreg.HKEY_LOCAL_MACHINE, key)
registered = (value, "from HKLM\\"+key)
except _winreg.error:
pass
if registered and not os.path.exists(registered[0]):
registered = None
return registered |
def encode_w_salt(self, salt, key):
"""Encodes a user key with salt into a particular format. The result of
this method will be used internally.
:param salt: Salt for hashing
:param key: User's secret key
:returns: A string representing user credentials
"""
enc_key = '%s%s' % (salt, key)
enc_val = hashlib.sha1(enc_key).hexdigest()
return "sha1:%s$%s" % (salt, enc_val) | Encodes a user key with salt into a particular format. The result of
this method will be used internally.
:param salt: Salt for hashing
:param key: User's secret key
:returns: A string representing user credentials | Below is the instruction that describes the task:
### Input:
Encodes a user key with salt into a particular format. The result of
this method will be used internally.
:param salt: Salt for hashing
:param key: User's secret key
:returns: A string representing user credentials
### Response:
def encode_w_salt(self, salt, key):
"""Encodes a user key with salt into a particular format. The result of
this method will be used internally.
:param salt: Salt for hashing
:param key: User's secret key
:returns: A string representing user credentials
"""
enc_key = '%s%s' % (salt, key)
enc_val = hashlib.sha1(enc_key).hexdigest()
return "sha1:%s$%s" % (salt, enc_val) |
def by_bounding_box(self, tl_lat, tl_long, br_lat, br_long, term=None, num_biz_requested=None, category=None):
"""
Perform a Yelp Review Search based on a map bounding box.
Args:
tl_lat - bounding box top left latitude
tl_long - bounding box top left longitude
br_lat - bounding box bottom right latitude
br_long - bounding box bottom right longitude
term - Search term to filter by (Optional)
num_biz_requested - Maximum number of matching results to return (Optional)
category - '+'-seperated list of categories to filter by. See
http://www.yelp.com/developers/documentation/category_list
for list of valid categories. (Optional)
"""
header, content = self._http_request(
self.BASE_URL,
tl_lat = tl_lat,
tl_long = tl_long,
br_lat = br_lat,
br_long = br_long,
term = term,
category = category,
num_biz_requested = num_biz_requested
)
return json.loads(content) | Perform a Yelp Review Search based on a map bounding box.
Args:
tl_lat - bounding box top left latitude
tl_long - bounding box top left longitude
br_lat - bounding box bottom right latitude
br_long - bounding box bottom right longitude
term - Search term to filter by (Optional)
num_biz_requested - Maximum number of matching results to return (Optional)
category - '+'-seperated list of categories to filter by. See
http://www.yelp.com/developers/documentation/category_list
for list of valid categories. (Optional) | Below is the instruction that describes the task:
### Input:
Perform a Yelp Review Search based on a map bounding box.
Args:
tl_lat - bounding box top left latitude
tl_long - bounding box top left longitude
br_lat - bounding box bottom right latitude
br_long - bounding box bottom right longitude
term - Search term to filter by (Optional)
num_biz_requested - Maximum number of matching results to return (Optional)
category - '+'-seperated list of categories to filter by. See
http://www.yelp.com/developers/documentation/category_list
for list of valid categories. (Optional)
### Response:
def by_bounding_box(self, tl_lat, tl_long, br_lat, br_long, term=None, num_biz_requested=None, category=None):
"""
Perform a Yelp Review Search based on a map bounding box.
Args:
tl_lat - bounding box top left latitude
tl_long - bounding box top left longitude
br_lat - bounding box bottom right latitude
br_long - bounding box bottom right longitude
term - Search term to filter by (Optional)
num_biz_requested - Maximum number of matching results to return (Optional)
category - '+'-seperated list of categories to filter by. See
http://www.yelp.com/developers/documentation/category_list
for list of valid categories. (Optional)
"""
header, content = self._http_request(
self.BASE_URL,
tl_lat = tl_lat,
tl_long = tl_long,
br_lat = br_lat,
br_long = br_long,
term = term,
category = category,
num_biz_requested = num_biz_requested
)
return json.loads(content) |
def from_surface(renderer, surface):
"""Create a texture from an existing surface.
Args:
surface (Surface): The surface containing pixel data used to fill the texture.
Returns:
Texture: A texture containing the pixels from surface.
Raises:
SDLError: If an error is encountered.
"""
texture = object.__new__(Texture)
texture._ptr = check_ptr_err(lib.SDL_CreateTextureFromSurface(renderer._ptr, surface._ptr))
return texture | Create a texture from an existing surface.
Args:
surface (Surface): The surface containing pixel data used to fill the texture.
Returns:
Texture: A texture containing the pixels from surface.
Raises:
SDLError: If an error is encountered. | Below is the instruction that describes the task:
### Input:
Create a texture from an existing surface.
Args:
surface (Surface): The surface containing pixel data used to fill the texture.
Returns:
Texture: A texture containing the pixels from surface.
Raises:
SDLError: If an error is encountered.
### Response:
def from_surface(renderer, surface):
"""Create a texture from an existing surface.
Args:
surface (Surface): The surface containing pixel data used to fill the texture.
Returns:
Texture: A texture containing the pixels from surface.
Raises:
SDLError: If an error is encountered.
"""
texture = object.__new__(Texture)
texture._ptr = check_ptr_err(lib.SDL_CreateTextureFromSurface(renderer._ptr, surface._ptr))
return texture |
def _sync_columns(self, row, ensure, types=None):
"""Create missing columns (or the table) prior to writes.
If automatic schema generation is disabled (``ensure`` is ``False``),
this will remove any keys from the ``row`` for which there is no
matching column.
"""
columns = self.columns
ensure = self._check_ensure(ensure)
types = types or {}
types = {normalize_column_name(k): v for (k, v) in types.items()}
out = {}
sync_columns = []
for name, value in row.items():
name = normalize_column_name(name)
if ensure and name not in columns:
_type = types.get(name)
if _type is None:
_type = self.db.types.guess(value)
sync_columns.append(Column(name, _type))
columns.append(name)
if name in columns:
out[name] = value
self._sync_table(sync_columns)
return out | Create missing columns (or the table) prior to writes.
If automatic schema generation is disabled (``ensure`` is ``False``),
this will remove any keys from the ``row`` for which there is no
matching column. | Below is the instruction that describes the task:
### Input:
Create missing columns (or the table) prior to writes.
If automatic schema generation is disabled (``ensure`` is ``False``),
this will remove any keys from the ``row`` for which there is no
matching column.
### Response:
def _sync_columns(self, row, ensure, types=None):
"""Create missing columns (or the table) prior to writes.
If automatic schema generation is disabled (``ensure`` is ``False``),
this will remove any keys from the ``row`` for which there is no
matching column.
"""
columns = self.columns
ensure = self._check_ensure(ensure)
types = types or {}
types = {normalize_column_name(k): v for (k, v) in types.items()}
out = {}
sync_columns = []
for name, value in row.items():
name = normalize_column_name(name)
if ensure and name not in columns:
_type = types.get(name)
if _type is None:
_type = self.db.types.guess(value)
sync_columns.append(Column(name, _type))
columns.append(name)
if name in columns:
out[name] = value
self._sync_table(sync_columns)
return out |
def move(self, target):
""" Moves this DriveItem to another Folder.
Can't move between different Drives.
:param target: a Folder, Drive item or Item Id string.
If it's a drive the item will be moved to the root folder.
:type target: drive.Folder or DriveItem or str
:return: Success / Failure
:rtype: bool
"""
if isinstance(target, Folder):
target_id = target.object_id
elif isinstance(target, Drive):
# we need the root folder id
root_folder = target.get_root_folder()
if not root_folder:
return False
target_id = root_folder.object_id
elif isinstance(target, str):
target_id = target
else:
raise ValueError('Target must be a Folder or Drive')
if not self.object_id or not target_id:
raise ValueError(
'Both self, and target must have a valid object_id.')
if target_id == 'root':
raise ValueError("When moving, target id can't be 'root'")
url = self.build_url(
self._endpoints.get('item').format(id=self.object_id))
data = {'parentReference': {'id': target_id}}
response = self.con.patch(url, data=data)
if not response:
return False
self.parent_id = target_id
return True | Moves this DriveItem to another Folder.
Can't move between different Drives.
:param target: a Folder, Drive item or Item Id string.
If it's a drive the item will be moved to the root folder.
:type target: drive.Folder or DriveItem or str
:return: Success / Failure
:rtype: bool | Below is the instruction that describes the task:
### Input:
Moves this DriveItem to another Folder.
Can't move between different Drives.
:param target: a Folder, Drive item or Item Id string.
If it's a drive the item will be moved to the root folder.
:type target: drive.Folder or DriveItem or str
:return: Success / Failure
:rtype: bool
### Response:
def move(self, target):
""" Moves this DriveItem to another Folder.
Can't move between different Drives.
:param target: a Folder, Drive item or Item Id string.
If it's a drive the item will be moved to the root folder.
:type target: drive.Folder or DriveItem or str
:return: Success / Failure
:rtype: bool
"""
if isinstance(target, Folder):
target_id = target.object_id
elif isinstance(target, Drive):
# we need the root folder id
root_folder = target.get_root_folder()
if not root_folder:
return False
target_id = root_folder.object_id
elif isinstance(target, str):
target_id = target
else:
raise ValueError('Target must be a Folder or Drive')
if not self.object_id or not target_id:
raise ValueError(
'Both self, and target must have a valid object_id.')
if target_id == 'root':
raise ValueError("When moving, target id can't be 'root'")
url = self.build_url(
self._endpoints.get('item').format(id=self.object_id))
data = {'parentReference': {'id': target_id}}
response = self.con.patch(url, data=data)
if not response:
return False
self.parent_id = target_id
return True |
def connect(self):
""" Todo connect """
        self.transport = Transport(self.token, on_connect=self.on_connect, on_message=self.on_message) | Todo connect | Below is the instruction that describes the task:
### Input:
Todo connect
### Response:
def connect(self):
""" Todo connect """
self.transport = Transport(self.token, on_connect=self.on_connect, on_message=self.on_message) |
def multiply_slow(x, y, prim=0x11b):
'''Another equivalent (but even slower) way to compute multiplication in Galois Fields without using a precomputed look-up table.
This is the form you will most often see in academic literature, by using the standard carry-less multiplication + modular reduction using an irreducible prime polynomial.'''
### Define bitwise carry-less operations as inner functions ###
def cl_mult(x,y):
'''Bitwise carry-less multiplication on integers'''
z = 0
i = 0
while (y>>i) > 0:
if y & (1<<i):
z ^= x<<i
i += 1
return z
def bit_length(n):
'''Compute the position of the most significant bit (1) of an integer. Equivalent to int.bit_length()'''
bits = 0
while n >> bits: bits += 1
return bits
def cl_div(dividend, divisor=None):
'''Bitwise carry-less long division on integers and returns the remainder'''
# Compute the position of the most significant bit for each integers
dl1 = bit_length(dividend)
dl2 = bit_length(divisor)
# If the dividend is smaller than the divisor, just exit
if dl1 < dl2:
return dividend
# Else, align the most significant 1 of the divisor to the most significant 1 of the dividend (by shifting the divisor)
for i in _range(dl1-dl2,-1,-1):
# Check that the dividend is divisible (useless for the first iteration but important for the next ones)
if dividend & (1 << i+dl2-1):
# If divisible, then shift the divisor to align the most significant bits and XOR (carry-less substraction)
dividend ^= divisor << i
return dividend
### Main GF multiplication routine ###
# Multiply the gf numbers
result = cl_mult(x,y)
# Then do a modular reduction (ie, remainder from the division) with an irreducible primitive polynomial so that it stays inside GF bounds
if prim > 0:
result = cl_div(result, prim)
return result | Another equivalent (but even slower) way to compute multiplication in Galois Fields without using a precomputed look-up table.
This is the form you will most often see in academic literature, by using the standard carry-less multiplication + modular reduction using an irreducible prime polynomial. | Below is the instruction that describes the task:
### Input:
Another equivalent (but even slower) way to compute multiplication in Galois Fields without using a precomputed look-up table.
This is the form you will most often see in academic literature, by using the standard carry-less multiplication + modular reduction using an irreducible prime polynomial.
### Response:
def multiply_slow(x, y, prim=0x11b):
'''Another equivalent (but even slower) way to compute multiplication in Galois Fields without using a precomputed look-up table.
This is the form you will most often see in academic literature, by using the standard carry-less multiplication + modular reduction using an irreducible prime polynomial.'''
### Define bitwise carry-less operations as inner functions ###
def cl_mult(x,y):
'''Bitwise carry-less multiplication on integers'''
z = 0
i = 0
while (y>>i) > 0:
if y & (1<<i):
z ^= x<<i
i += 1
return z
def bit_length(n):
'''Compute the position of the most significant bit (1) of an integer. Equivalent to int.bit_length()'''
bits = 0
while n >> bits: bits += 1
return bits
def cl_div(dividend, divisor=None):
'''Bitwise carry-less long division on integers and returns the remainder'''
# Compute the position of the most significant bit for each integers
dl1 = bit_length(dividend)
dl2 = bit_length(divisor)
# If the dividend is smaller than the divisor, just exit
if dl1 < dl2:
return dividend
# Else, align the most significant 1 of the divisor to the most significant 1 of the dividend (by shifting the divisor)
for i in _range(dl1-dl2,-1,-1):
# Check that the dividend is divisible (useless for the first iteration but important for the next ones)
if dividend & (1 << i+dl2-1):
# If divisible, then shift the divisor to align the most significant bits and XOR (carry-less substraction)
dividend ^= divisor << i
return dividend
### Main GF multiplication routine ###
# Multiply the gf numbers
result = cl_mult(x,y)
# Then do a modular reduction (ie, remainder from the division) with an irreducible primitive polynomial so that it stays inside GF bounds
if prim > 0:
result = cl_div(result, prim)
return result |
def merge_dicts(dicts, op=operator.add):
"""Merge a list of dictionaries.
Args:
dicts (list): a list of dictionary objects
op (operator): an operator item used to merge the dictionaries. Defaults to :py:func:`operator.add`.
Returns:
dict: the merged dictionary
"""
a = None
for b in dicts:
if a is None:
a = b.copy()
else:
a = dict(a.items() + b.items() + [(k, op(a[k], b[k])) for k in set(b) & set(a)])
return a | Merge a list of dictionaries.
Args:
dicts (list): a list of dictionary objects
op (operator): an operator item used to merge the dictionaries. Defaults to :py:func:`operator.add`.
Returns:
dict: the merged dictionary | Below is the instruction that describes the task:
### Input:
Merge a list of dictionaries.
Args:
dicts (list): a list of dictionary objects
op (operator): an operator item used to merge the dictionaries. Defaults to :py:func:`operator.add`.
Returns:
dict: the merged dictionary
### Response:
def merge_dicts(dicts, op=operator.add):
"""Merge a list of dictionaries.
Args:
dicts (list): a list of dictionary objects
op (operator): an operator item used to merge the dictionaries. Defaults to :py:func:`operator.add`.
Returns:
dict: the merged dictionary
"""
a = None
for b in dicts:
if a is None:
a = b.copy()
else:
a = dict(a.items() + b.items() + [(k, op(a[k], b[k])) for k in set(b) & set(a)])
return a |
def query(self, value):
"""
Serialize an ORM query, Base-64 encode it and set it to
the b64_query field
"""
if not isinstance(value, Q):
raise Exception('Must only be passed a Django (Q)uery object')
s = QSerializer(base64=True)
self.b64_query = s.dumps(value) | Serialize an ORM query, Base-64 encode it and set it to
the b64_query field | Below is the instruction that describes the task:
### Input:
Serialize an ORM query, Base-64 encode it and set it to
the b64_query field
### Response:
def query(self, value):
"""
Serialize an ORM query, Base-64 encode it and set it to
the b64_query field
"""
if not isinstance(value, Q):
raise Exception('Must only be passed a Django (Q)uery object')
s = QSerializer(base64=True)
self.b64_query = s.dumps(value) |
def set_value(self, value):
"""Set value of the checkbox.
Parameters
----------
value : bool
value for the checkbox
"""
if value:
self.setChecked(Qt.Checked)
else:
self.setChecked(Qt.Unchecked) | Set value of the checkbox.
Parameters
----------
value : bool
value for the checkbox | Below is the instruction that describes the task:
### Input:
Set value of the checkbox.
Parameters
----------
value : bool
value for the checkbox
### Response:
def set_value(self, value):
"""Set value of the checkbox.
Parameters
----------
value : bool
value for the checkbox
"""
if value:
self.setChecked(Qt.Checked)
else:
self.setChecked(Qt.Unchecked) |
def _hook_variable_gradient_stats(self, var, name, log_track):
"""Logs a Variable's gradient's distribution statistics next time backward()
is called on it.
"""
if not isinstance(var, torch.autograd.Variable):
cls = type(var)
raise TypeError('Expected torch.Variable, not {}.{}'.format(
cls.__module__, cls.__name__))
handle = self._hook_handles.get(name)
if handle is not None and self._torch_hook_handle_is_valid(handle):
raise ValueError(
'A hook has already been set under name "{}"'.format(name))
def _callback(grad, log_track):
if not log_track_update(log_track):
return
self.log_tensor_stats(grad.data, name)
handle = var.register_hook(lambda grad: _callback(grad, log_track))
self._hook_handles[name] = handle
return handle | Logs a Variable's gradient's distribution statistics next time backward()
is called on it. | Below is the instruction that describes the task:
### Input:
Logs a Variable's gradient's distribution statistics next time backward()
is called on it.
### Response:
def _hook_variable_gradient_stats(self, var, name, log_track):
"""Logs a Variable's gradient's distribution statistics next time backward()
is called on it.
"""
if not isinstance(var, torch.autograd.Variable):
cls = type(var)
raise TypeError('Expected torch.Variable, not {}.{}'.format(
cls.__module__, cls.__name__))
handle = self._hook_handles.get(name)
if handle is not None and self._torch_hook_handle_is_valid(handle):
raise ValueError(
'A hook has already been set under name "{}"'.format(name))
def _callback(grad, log_track):
if not log_track_update(log_track):
return
self.log_tensor_stats(grad.data, name)
handle = var.register_hook(lambda grad: _callback(grad, log_track))
self._hook_handles[name] = handle
return handle |
def takeSample(self, withReplacement, num, seed=None):
"""
Return a fixed-size sampled subset of this RDD.
.. note:: This method should only be used if the resulting array is expected
to be small, as all the data is loaded into the driver's memory.
>>> rdd = sc.parallelize(range(0, 10))
>>> len(rdd.takeSample(True, 20, 1))
20
>>> len(rdd.takeSample(False, 5, 2))
5
>>> len(rdd.takeSample(False, 15, 3))
10
"""
numStDev = 10.0
if num < 0:
raise ValueError("Sample size cannot be negative.")
elif num == 0:
return []
initialCount = self.count()
if initialCount == 0:
return []
rand = random.Random(seed)
if (not withReplacement) and num >= initialCount:
# shuffle current RDD and return
samples = self.collect()
rand.shuffle(samples)
return samples
maxSampleSize = sys.maxsize - int(numStDev * sqrt(sys.maxsize))
if num > maxSampleSize:
raise ValueError(
"Sample size cannot be greater than %d." % maxSampleSize)
fraction = RDD._computeFractionForSampleSize(
num, initialCount, withReplacement)
samples = self.sample(withReplacement, fraction, seed).collect()
# If the first sample didn't turn out large enough, keep trying to take samples;
# this shouldn't happen often because we use a big multiplier for their initial size.
# See: scala/spark/RDD.scala
while len(samples) < num:
# TODO: add log warning for when more than one iteration was run
seed = rand.randint(0, sys.maxsize)
samples = self.sample(withReplacement, fraction, seed).collect()
rand.shuffle(samples)
return samples[0:num] | Return a fixed-size sampled subset of this RDD.
.. note:: This method should only be used if the resulting array is expected
to be small, as all the data is loaded into the driver's memory.
>>> rdd = sc.parallelize(range(0, 10))
>>> len(rdd.takeSample(True, 20, 1))
20
>>> len(rdd.takeSample(False, 5, 2))
5
>>> len(rdd.takeSample(False, 15, 3))
10 | Below is the instruction that describes the task:
### Input:
Return a fixed-size sampled subset of this RDD.
.. note:: This method should only be used if the resulting array is expected
to be small, as all the data is loaded into the driver's memory.
>>> rdd = sc.parallelize(range(0, 10))
>>> len(rdd.takeSample(True, 20, 1))
20
>>> len(rdd.takeSample(False, 5, 2))
5
>>> len(rdd.takeSample(False, 15, 3))
10
### Response:
def takeSample(self, withReplacement, num, seed=None):
"""
Return a fixed-size sampled subset of this RDD.
.. note:: This method should only be used if the resulting array is expected
to be small, as all the data is loaded into the driver's memory.
>>> rdd = sc.parallelize(range(0, 10))
>>> len(rdd.takeSample(True, 20, 1))
20
>>> len(rdd.takeSample(False, 5, 2))
5
>>> len(rdd.takeSample(False, 15, 3))
10
"""
numStDev = 10.0
if num < 0:
raise ValueError("Sample size cannot be negative.")
elif num == 0:
return []
initialCount = self.count()
if initialCount == 0:
return []
rand = random.Random(seed)
if (not withReplacement) and num >= initialCount:
# shuffle current RDD and return
samples = self.collect()
rand.shuffle(samples)
return samples
maxSampleSize = sys.maxsize - int(numStDev * sqrt(sys.maxsize))
if num > maxSampleSize:
raise ValueError(
"Sample size cannot be greater than %d." % maxSampleSize)
fraction = RDD._computeFractionForSampleSize(
num, initialCount, withReplacement)
samples = self.sample(withReplacement, fraction, seed).collect()
# If the first sample didn't turn out large enough, keep trying to take samples;
# this shouldn't happen often because we use a big multiplier for their initial size.
# See: scala/spark/RDD.scala
while len(samples) < num:
# TODO: add log warning for when more than one iteration was run
seed = rand.randint(0, sys.maxsize)
samples = self.sample(withReplacement, fraction, seed).collect()
rand.shuffle(samples)
return samples[0:num] |
def pad_to_aspect_ratio(self, aspect_ratio, mode="constant", cval=0.0, return_pad_amounts=False):
"""
Pad the heatmaps on their sides so that they match a target aspect ratio.
Depending on which dimension is smaller (height or width), only the corresponding
sides (left/right or top/bottom) will be padded. In each case, both of the sides will
be padded equally.
Parameters
----------
aspect_ratio : float
Target aspect ratio, given as width/height. E.g. 2.0 denotes the image having twice
as much width as height.
mode : str, optional
Padding mode to use. See :func:`numpy.pad` for details.
cval : number, optional
Value to use for padding if `mode` is ``constant``. See :func:`numpy.pad` for details.
return_pad_amounts : bool, optional
If False, then only the padded image will be returned. If True, a tuple with two
entries will be returned, where the first entry is the padded image and the second
entry are the amounts by which each image side was padded. These amounts are again a
tuple of the form (top, right, bottom, left), with each value being an integer.
Returns
-------
heatmaps : imgaug.HeatmapsOnImage
Padded heatmaps as HeatmapsOnImage object.
pad_amounts : tuple of int
Amounts by which the heatmaps were padded on each side, given as a tuple ``(top, right, bottom, left)``.
This tuple is only returned if `return_pad_amounts` was set to True.
"""
arr_0to1_padded, pad_amounts = ia.pad_to_aspect_ratio(self.arr_0to1, aspect_ratio=aspect_ratio, mode=mode,
cval=cval, return_pad_amounts=True)
heatmaps = HeatmapsOnImage.from_0to1(arr_0to1_padded, shape=self.shape, min_value=self.min_value,
max_value=self.max_value)
if return_pad_amounts:
return heatmaps, pad_amounts
else:
return heatmaps | Pad the heatmaps on their sides so that they match a target aspect ratio.
Depending on which dimension is smaller (height or width), only the corresponding
sides (left/right or top/bottom) will be padded. In each case, both of the sides will
be padded equally.
Parameters
----------
aspect_ratio : float
Target aspect ratio, given as width/height. E.g. 2.0 denotes the image having twice
as much width as height.
mode : str, optional
Padding mode to use. See :func:`numpy.pad` for details.
cval : number, optional
Value to use for padding if `mode` is ``constant``. See :func:`numpy.pad` for details.
return_pad_amounts : bool, optional
If False, then only the padded image will be returned. If True, a tuple with two
entries will be returned, where the first entry is the padded image and the second
entry are the amounts by which each image side was padded. These amounts are again a
tuple of the form (top, right, bottom, left), with each value being an integer.
Returns
-------
heatmaps : imgaug.HeatmapsOnImage
Padded heatmaps as HeatmapsOnImage object.
pad_amounts : tuple of int
Amounts by which the heatmaps were padded on each side, given as a tuple ``(top, right, bottom, left)``.
This tuple is only returned if `return_pad_amounts` was set to True. | Below is the instruction that describes the task:
### Input:
Pad the heatmaps on their sides so that they match a target aspect ratio.
Depending on which dimension is smaller (height or width), only the corresponding
sides (left/right or top/bottom) will be padded. In each case, both of the sides will
be padded equally.
Parameters
----------
aspect_ratio : float
Target aspect ratio, given as width/height. E.g. 2.0 denotes the image having twice
as much width as height.
mode : str, optional
Padding mode to use. See :func:`numpy.pad` for details.
cval : number, optional
Value to use for padding if `mode` is ``constant``. See :func:`numpy.pad` for details.
return_pad_amounts : bool, optional
If False, then only the padded image will be returned. If True, a tuple with two
entries will be returned, where the first entry is the padded image and the second
entry are the amounts by which each image side was padded. These amounts are again a
tuple of the form (top, right, bottom, left), with each value being an integer.
Returns
-------
heatmaps : imgaug.HeatmapsOnImage
Padded heatmaps as HeatmapsOnImage object.
pad_amounts : tuple of int
Amounts by which the heatmaps were padded on each side, given as a tuple ``(top, right, bottom, left)``.
This tuple is only returned if `return_pad_amounts` was set to True.
### Response:
def pad_to_aspect_ratio(self, aspect_ratio, mode="constant", cval=0.0, return_pad_amounts=False):
"""
Pad the heatmaps on their sides so that they match a target aspect ratio.
Depending on which dimension is smaller (height or width), only the corresponding
sides (left/right or top/bottom) will be padded. In each case, both of the sides will
be padded equally.
Parameters
----------
aspect_ratio : float
Target aspect ratio, given as width/height. E.g. 2.0 denotes the image having twice
as much width as height.
mode : str, optional
Padding mode to use. See :func:`numpy.pad` for details.
cval : number, optional
Value to use for padding if `mode` is ``constant``. See :func:`numpy.pad` for details.
return_pad_amounts : bool, optional
If False, then only the padded image will be returned. If True, a tuple with two
entries will be returned, where the first entry is the padded image and the second
entry are the amounts by which each image side was padded. These amounts are again a
tuple of the form (top, right, bottom, left), with each value being an integer.
Returns
-------
heatmaps : imgaug.HeatmapsOnImage
Padded heatmaps as HeatmapsOnImage object.
pad_amounts : tuple of int
Amounts by which the heatmaps were padded on each side, given as a tuple ``(top, right, bottom, left)``.
This tuple is only returned if `return_pad_amounts` was set to True.
"""
arr_0to1_padded, pad_amounts = ia.pad_to_aspect_ratio(self.arr_0to1, aspect_ratio=aspect_ratio, mode=mode,
cval=cval, return_pad_amounts=True)
heatmaps = HeatmapsOnImage.from_0to1(arr_0to1_padded, shape=self.shape, min_value=self.min_value,
max_value=self.max_value)
if return_pad_amounts:
return heatmaps, pad_amounts
else:
return heatmaps |
def get_task_indicator(self, task_level=None):
"""
Args:
task_level (int or None): task depth level to get the indicator
for, if None, will use the current tasks depth
Returns:
str: char to prepend to the task logs to indicate it's level
"""
if task_level is None:
task_level = len(self.tasks)
return self.TASK_INDICATORS[task_level % len(self.TASK_INDICATORS)] | Args:
task_level (int or None): task depth level to get the indicator
for, if None, will use the current tasks depth
Returns:
str: char to prepend to the task logs to indicate it's level | Below is the the instruction that describes the task:
### Input:
Args:
task_level (int or None): task depth level to get the indicator
for, if None, will use the current tasks depth
Returns:
str: char to prepend to the task logs to indicate it's level
### Response:
def get_task_indicator(self, task_level=None):
"""
Args:
task_level (int or None): task depth level to get the indicator
for, if None, will use the current tasks depth
Returns:
str: char to prepend to the task logs to indicate it's level
"""
if task_level is None:
task_level = len(self.tasks)
return self.TASK_INDICATORS[task_level % len(self.TASK_INDICATORS)] |
def __clear_bp(self, aThread):
"""
Clears this breakpoint from the debug registers.
@type aThread: L{Thread}
@param aThread: Thread object.
"""
if self.__slot is not None:
aThread.suspend()
try:
ctx = aThread.get_context(win32.CONTEXT_DEBUG_REGISTERS)
DebugRegister.clear_bp(ctx, self.__slot)
aThread.set_context(ctx)
self.__slot = None
finally:
aThread.resume() | Clears this breakpoint from the debug registers.
@type aThread: L{Thread}
@param aThread: Thread object. | Below is the the instruction that describes the task:
### Input:
Clears this breakpoint from the debug registers.
@type aThread: L{Thread}
@param aThread: Thread object.
### Response:
def __clear_bp(self, aThread):
"""
Clears this breakpoint from the debug registers.
@type aThread: L{Thread}
@param aThread: Thread object.
"""
if self.__slot is not None:
aThread.suspend()
try:
ctx = aThread.get_context(win32.CONTEXT_DEBUG_REGISTERS)
DebugRegister.clear_bp(ctx, self.__slot)
aThread.set_context(ctx)
self.__slot = None
finally:
aThread.resume() |
def pretty_duration(seconds):
""" Returns a user-friendly representation of the provided duration in seconds.
For example: 62.8 => "1m2.8s", or 129837.8 => "2d12h4m57.8s"
"""
if seconds is None:
return ''
ret = ''
if seconds >= 86400:
ret += '{:.0f}d'.format(int(seconds / 86400))
seconds = seconds % 86400
if seconds >= 3600:
ret += '{:.0f}h'.format(int(seconds / 3600))
seconds = seconds % 3600
if seconds >= 60:
ret += '{:.0f}m'.format(int(seconds / 60))
seconds = seconds % 60
if seconds > 0:
ret += '{:.1f}s'.format(seconds)
return ret | Returns a user-friendly representation of the provided duration in seconds.
For example: 62.8 => "1m2.8s", or 129837.8 => "2d12h4m57.8s" | Below is the the instruction that describes the task:
### Input:
Returns a user-friendly representation of the provided duration in seconds.
For example: 62.8 => "1m2.8s", or 129837.8 => "2d12h4m57.8s"
### Response:
def pretty_duration(seconds):
""" Returns a user-friendly representation of the provided duration in seconds.
For example: 62.8 => "1m2.8s", or 129837.8 => "2d12h4m57.8s"
"""
if seconds is None:
return ''
ret = ''
if seconds >= 86400:
ret += '{:.0f}d'.format(int(seconds / 86400))
seconds = seconds % 86400
if seconds >= 3600:
ret += '{:.0f}h'.format(int(seconds / 3600))
seconds = seconds % 3600
if seconds >= 60:
ret += '{:.0f}m'.format(int(seconds / 60))
seconds = seconds % 60
if seconds > 0:
ret += '{:.1f}s'.format(seconds)
return ret |
def upgrade(self):
"""
Upgrade packages
"""
for pkg in self.upgrade_all:
check_md5(pkg_checksum(pkg, "slack_patches"), self.patch_path + pkg)
pkg_ver = "{0}-{1}".format(split_package(pkg)[0],
split_package(pkg)[1])
if find_package(split_package(pkg)[0] + self.meta.sp,
self.meta.pkg_path):
print("[ {0}upgrading{1} ] --> {2}".format(
self.meta.color["YELLOW"], self.meta.color["ENDC"],
pkg[:-4]))
PackageManager((self.patch_path + pkg).split()).upgrade(
"--install-new")
self.upgraded.append(pkg_ver)
else:
print("[ {0}installing{1} ] --> {2}".format(
self.meta.color["GREEN"], self.meta.color["ENDC"],
pkg[:-4]))
PackageManager((self.patch_path + pkg).split()).upgrade(
"--install-new")
self.installed.append(pkg_ver) | Upgrade packages | Below is the the instruction that describes the task:
### Input:
Upgrade packages
### Response:
def upgrade(self):
"""
Upgrade packages
"""
for pkg in self.upgrade_all:
check_md5(pkg_checksum(pkg, "slack_patches"), self.patch_path + pkg)
pkg_ver = "{0}-{1}".format(split_package(pkg)[0],
split_package(pkg)[1])
if find_package(split_package(pkg)[0] + self.meta.sp,
self.meta.pkg_path):
print("[ {0}upgrading{1} ] --> {2}".format(
self.meta.color["YELLOW"], self.meta.color["ENDC"],
pkg[:-4]))
PackageManager((self.patch_path + pkg).split()).upgrade(
"--install-new")
self.upgraded.append(pkg_ver)
else:
print("[ {0}installing{1} ] --> {2}".format(
self.meta.color["GREEN"], self.meta.color["ENDC"],
pkg[:-4]))
PackageManager((self.patch_path + pkg).split()).upgrade(
"--install-new")
self.installed.append(pkg_ver) |
def getbugs(self, idlist,
include_fields=None, exclude_fields=None, extra_fields=None,
permissive=True):
"""
Return a list of Bug objects with the full complement of bug data
already loaded. If there's a problem getting the data for a given id,
the corresponding item in the returned list will be None.
"""
data = self._getbugs(idlist, include_fields=include_fields,
exclude_fields=exclude_fields, extra_fields=extra_fields,
permissive=permissive)
return [(b and Bug(self, dict=b,
autorefresh=self.bug_autorefresh)) or None
for b in data] | Return a list of Bug objects with the full complement of bug data
already loaded. If there's a problem getting the data for a given id,
the corresponding item in the returned list will be None. | Below is the the instruction that describes the task:
### Input:
Return a list of Bug objects with the full complement of bug data
already loaded. If there's a problem getting the data for a given id,
the corresponding item in the returned list will be None.
### Response:
def getbugs(self, idlist,
include_fields=None, exclude_fields=None, extra_fields=None,
permissive=True):
"""
Return a list of Bug objects with the full complement of bug data
already loaded. If there's a problem getting the data for a given id,
the corresponding item in the returned list will be None.
"""
data = self._getbugs(idlist, include_fields=include_fields,
exclude_fields=exclude_fields, extra_fields=extra_fields,
permissive=permissive)
return [(b and Bug(self, dict=b,
autorefresh=self.bug_autorefresh)) or None
for b in data] |
def get(self):
"""Reloads the check with its current values."""
new = self.manager.get(self)
if new:
self._add_details(new._info) | Reloads the check with its current values. | Below is the the instruction that describes the task:
### Input:
Reloads the check with its current values.
### Response:
def get(self):
"""Reloads the check with its current values."""
new = self.manager.get(self)
if new:
self._add_details(new._info) |
def _read_sym(ctx: ReaderContext) -> MaybeSymbol:
"""Return a symbol from the input stream.
If a symbol appears in a syntax quoted form, the reader will attempt
to resolve the symbol using the resolver in the ReaderContext `ctx`.
The resolver will look into the current namespace for an alias or
namespace matching the symbol's namespace."""
ns, name = _read_namespaced(ctx, allowed_suffix="#")
if not ctx.is_syntax_quoted and name.endswith("#"):
raise SyntaxError("Gensym may not appear outside syntax quote")
if ns is not None:
if any(map(lambda s: len(s) == 0, ns.split("."))):
raise SyntaxError(
"All '.' separated segments of a namespace "
"must contain at least one character."
)
if name.startswith(".") and ns is not None:
raise SyntaxError("Symbols starting with '.' may not have a namespace")
if ns is None:
if name == "nil":
return None
elif name == "true":
return True
elif name == "false":
return False
if ctx.is_syntax_quoted and not name.endswith("#"):
return ctx.resolve(symbol.symbol(name, ns))
return symbol.symbol(name, ns=ns) | Return a symbol from the input stream.
If a symbol appears in a syntax quoted form, the reader will attempt
to resolve the symbol using the resolver in the ReaderContext `ctx`.
The resolver will look into the current namespace for an alias or
namespace matching the symbol's namespace. | Below is the the instruction that describes the task:
### Input:
Return a symbol from the input stream.
If a symbol appears in a syntax quoted form, the reader will attempt
to resolve the symbol using the resolver in the ReaderContext `ctx`.
The resolver will look into the current namespace for an alias or
namespace matching the symbol's namespace.
### Response:
def _read_sym(ctx: ReaderContext) -> MaybeSymbol:
"""Return a symbol from the input stream.
If a symbol appears in a syntax quoted form, the reader will attempt
to resolve the symbol using the resolver in the ReaderContext `ctx`.
The resolver will look into the current namespace for an alias or
namespace matching the symbol's namespace."""
ns, name = _read_namespaced(ctx, allowed_suffix="#")
if not ctx.is_syntax_quoted and name.endswith("#"):
raise SyntaxError("Gensym may not appear outside syntax quote")
if ns is not None:
if any(map(lambda s: len(s) == 0, ns.split("."))):
raise SyntaxError(
"All '.' separated segments of a namespace "
"must contain at least one character."
)
if name.startswith(".") and ns is not None:
raise SyntaxError("Symbols starting with '.' may not have a namespace")
if ns is None:
if name == "nil":
return None
elif name == "true":
return True
elif name == "false":
return False
if ctx.is_syntax_quoted and not name.endswith("#"):
return ctx.resolve(symbol.symbol(name, ns))
return symbol.symbol(name, ns=ns) |
def create_queues(self):
'''
Updates the in memory list of the redis queues
Creates new throttled queue instances if it does not have them
'''
# new config could have loaded between scrapes
newConf = self.check_config()
self.queue_keys = self.redis_conn.keys(self.spider.name + ":*:queue")
for key in self.queue_keys:
# build final queue key, depending on type and ip bools
throttle_key = ""
if self.add_type:
throttle_key = self.spider.name + ":"
if self.add_ip:
throttle_key = throttle_key + self.my_ip + ":"
# add the tld from the key `type:tld:queue`
the_domain = re.split(':', key)[1]
throttle_key = throttle_key + the_domain
if key not in self.queue_dict or newConf:
self.logger.debug("Added new Throttled Queue {q}"
.format(q=key))
q = RedisPriorityQueue(self.redis_conn, key, encoding=ujson)
# use default window and hits
if the_domain not in self.domain_config:
# this is now a tuple, all access needs to use [0] to get
# the object, use [1] to get the time
self.queue_dict[key] = [RedisThrottledQueue(self.redis_conn,
q, self.window, self.hits, self.moderated, throttle_key,
throttle_key, True), time.time()]
# use custom window and hits
else:
window = self.domain_config[the_domain]['window']
hits = self.domain_config[the_domain]['hits']
# adjust the crawl rate based on the scale if exists
if 'scale' in self.domain_config[the_domain]:
hits = int(hits * self.fit_scale(self.domain_config[the_domain]['scale']))
self.queue_dict[key] = [RedisThrottledQueue(self.redis_conn,
q, window, hits, self.moderated, throttle_key,
throttle_key, True), time.time()] | Updates the in memory list of the redis queues
Creates new throttled queue instances if it does not have them | Below is the the instruction that describes the task:
### Input:
Updates the in memory list of the redis queues
Creates new throttled queue instances if it does not have them
### Response:
def create_queues(self):
'''
Updates the in memory list of the redis queues
Creates new throttled queue instances if it does not have them
'''
# new config could have loaded between scrapes
newConf = self.check_config()
self.queue_keys = self.redis_conn.keys(self.spider.name + ":*:queue")
for key in self.queue_keys:
# build final queue key, depending on type and ip bools
throttle_key = ""
if self.add_type:
throttle_key = self.spider.name + ":"
if self.add_ip:
throttle_key = throttle_key + self.my_ip + ":"
# add the tld from the key `type:tld:queue`
the_domain = re.split(':', key)[1]
throttle_key = throttle_key + the_domain
if key not in self.queue_dict or newConf:
self.logger.debug("Added new Throttled Queue {q}"
.format(q=key))
q = RedisPriorityQueue(self.redis_conn, key, encoding=ujson)
# use default window and hits
if the_domain not in self.domain_config:
# this is now a tuple, all access needs to use [0] to get
# the object, use [1] to get the time
self.queue_dict[key] = [RedisThrottledQueue(self.redis_conn,
q, self.window, self.hits, self.moderated, throttle_key,
throttle_key, True), time.time()]
# use custom window and hits
else:
window = self.domain_config[the_domain]['window']
hits = self.domain_config[the_domain]['hits']
# adjust the crawl rate based on the scale if exists
if 'scale' in self.domain_config[the_domain]:
hits = int(hits * self.fit_scale(self.domain_config[the_domain]['scale']))
self.queue_dict[key] = [RedisThrottledQueue(self.redis_conn,
q, window, hits, self.moderated, throttle_key,
throttle_key, True), time.time()] |
def add_version_info(matrix, version):
"""\
Adds the version information to the matrix, for versions < 7 this is a no-op.
ISO/IEC 18004:2015(E) -- 7.10 Version information (page 58)
"""
#
# module 0 = least significant bit
# module 17 = most significant bit
#
# Figure 27 — Version information positioning (page 58)
#
# Lower left Upper right
# ---------- -----------
# 0 3 6 9 12 15 0 1 2
# 1 4 7 10 13 16 3 4 5
# 2 5 8 11 14 17 6 7 8
# 9 10 11
# 12 13 14
# 15 16 17
#
if version < 7:
return
version_info = consts.VERSION_INFO[version - 7]
for i in range(6):
bit1 = (version_info >> (i * 3)) & 0x01
bit2 = (version_info >> ((i * 3) + 1)) & 0x01
bit3 = (version_info >> ((i * 3) + 2)) & 0x01
# Lower left
matrix[-11][i] = bit1
matrix[-10][i] = bit2
matrix[-9][i] = bit3
# Upper right
matrix[i][-11] = bit1
matrix[i][-10] = bit2
matrix[i][-9] = bit3 | \
Adds the version information to the matrix, for versions < 7 this is a no-op.
ISO/IEC 18004:2015(E) -- 7.10 Version information (page 58) | Below is the the instruction that describes the task:
### Input:
\
Adds the version information to the matrix, for versions < 7 this is a no-op.
ISO/IEC 18004:2015(E) -- 7.10 Version information (page 58)
### Response:
def add_version_info(matrix, version):
"""\
Adds the version information to the matrix, for versions < 7 this is a no-op.
ISO/IEC 18004:2015(E) -- 7.10 Version information (page 58)
"""
#
# module 0 = least significant bit
# module 17 = most significant bit
#
# Figure 27 — Version information positioning (page 58)
#
# Lower left Upper right
# ---------- -----------
# 0 3 6 9 12 15 0 1 2
# 1 4 7 10 13 16 3 4 5
# 2 5 8 11 14 17 6 7 8
# 9 10 11
# 12 13 14
# 15 16 17
#
if version < 7:
return
version_info = consts.VERSION_INFO[version - 7]
for i in range(6):
bit1 = (version_info >> (i * 3)) & 0x01
bit2 = (version_info >> ((i * 3) + 1)) & 0x01
bit3 = (version_info >> ((i * 3) + 2)) & 0x01
# Lower left
matrix[-11][i] = bit1
matrix[-10][i] = bit2
matrix[-9][i] = bit3
# Upper right
matrix[i][-11] = bit1
matrix[i][-10] = bit2
matrix[i][-9] = bit3 |
def pre_validate(self, form):
'''Calls preprocessors before pre_validation'''
for preprocessor in self._preprocessors:
preprocessor(form, self)
super(FieldHelper, self).pre_validate(form) | Calls preprocessors before pre_validation | Below is the the instruction that describes the task:
### Input:
Calls preprocessors before pre_validation
### Response:
def pre_validate(self, form):
'''Calls preprocessors before pre_validation'''
for preprocessor in self._preprocessors:
preprocessor(form, self)
super(FieldHelper, self).pre_validate(form) |
def wipe_cfg_vals_from_git_cfg(*cfg_opts):
"""Remove a set of options from Git config."""
for cfg_key_suffix in cfg_opts:
cfg_key = f'cherry-picker.{cfg_key_suffix.replace("_", "-")}'
cmd = "git", "config", "--local", "--unset-all", cfg_key
subprocess.check_call(cmd, stderr=subprocess.STDOUT) | Remove a set of options from Git config. | Below is the the instruction that describes the task:
### Input:
Remove a set of options from Git config.
### Response:
def wipe_cfg_vals_from_git_cfg(*cfg_opts):
"""Remove a set of options from Git config."""
for cfg_key_suffix in cfg_opts:
cfg_key = f'cherry-picker.{cfg_key_suffix.replace("_", "-")}'
cmd = "git", "config", "--local", "--unset-all", cfg_key
subprocess.check_call(cmd, stderr=subprocess.STDOUT) |
def check_status(self, **kwargs):
"""
Check the status of the works in self.
Args:
show: True to show the status of the flow.
kwargs: keyword arguments passed to show_status
"""
for work in self:
work.check_status()
if kwargs.pop("show", False):
self.show_status(**kwargs) | Check the status of the works in self.
Args:
show: True to show the status of the flow.
kwargs: keyword arguments passed to show_status | Below is the the instruction that describes the task:
### Input:
Check the status of the works in self.
Args:
show: True to show the status of the flow.
kwargs: keyword arguments passed to show_status
### Response:
def check_status(self, **kwargs):
"""
Check the status of the works in self.
Args:
show: True to show the status of the flow.
kwargs: keyword arguments passed to show_status
"""
for work in self:
work.check_status()
if kwargs.pop("show", False):
self.show_status(**kwargs) |
def plotMDS(inputFileName, outPrefix, populationFileName, options):
"""Plots the MDS value.
:param inputFileName: the name of the ``mds`` file.
:param outPrefix: the prefix of the output files.
:param populationFileName: the name of the population file.
:param options: the options
:type inputFileName: str
:type outPrefix: str
:type populationFileName: str
:type options: argparse.Namespace
Plots the ``mds`` value according to the ``inputFileName`` file (``mds``)
and the ``populationFileName`` (the population file).
"""
# The options
plotMDS_options = Dummy()
plotMDS_options.file = inputFileName
plotMDS_options.out = outPrefix
plotMDS_options.format = options.format
plotMDS_options.title = options.title
plotMDS_options.xlabel = options.xlabel
plotMDS_options.ylabel = options.ylabel
plotMDS_options.population_file = populationFileName
try:
# Checking the options
PlotMDS.checkArgs(plotMDS_options)
# Reading the population file
populations = PlotMDS.readPopulations(plotMDS_options.population_file)
# Getting the data
data, labels = PlotMDS.extractData(plotMDS_options.file, populations)
order = [labels.index("CEU"), labels.index("YRI"),
labels.index("JPT-CHB"), labels.index("SOURCE")]
if "HIGHLIGHT" in labels:
order.append(labels.index("HIGHLIGHT"))
color = [(0.215686275, 0.000494118, 0.721568627),
(0.301960784, 0.68627451, 0.290196078),
(0.596078431, 0.305882353, 0.639215686),
(0.894117647, 0.101960784, 0.109803922),
(0.596078431, 0.305882353, 0.639215686)]
sizes = [12, 12, 12, 8, 12]
markers = [".", ".", ".", "+", "D"]
# Plotting the data
PlotMDS.plotMDS(data, order, labels, color, sizes, markers,
plotMDS_options)
except PlotMDS.ProgramError as e:
msg = "PlotMDS: %s" % e
raise ProgramError(msg) | Plots the MDS value.
:param inputFileName: the name of the ``mds`` file.
:param outPrefix: the prefix of the output files.
:param populationFileName: the name of the population file.
:param options: the options
:type inputFileName: str
:type outPrefix: str
:type populationFileName: str
:type options: argparse.Namespace
Plots the ``mds`` value according to the ``inputFileName`` file (``mds``)
and the ``populationFileName`` (the population file). | Below is the the instruction that describes the task:
### Input:
Plots the MDS value.
:param inputFileName: the name of the ``mds`` file.
:param outPrefix: the prefix of the output files.
:param populationFileName: the name of the population file.
:param options: the options
:type inputFileName: str
:type outPrefix: str
:type populationFileName: str
:type options: argparse.Namespace
Plots the ``mds`` value according to the ``inputFileName`` file (``mds``)
and the ``populationFileName`` (the population file).
### Response:
def plotMDS(inputFileName, outPrefix, populationFileName, options):
"""Plots the MDS value.
:param inputFileName: the name of the ``mds`` file.
:param outPrefix: the prefix of the output files.
:param populationFileName: the name of the population file.
:param options: the options
:type inputFileName: str
:type outPrefix: str
:type populationFileName: str
:type options: argparse.Namespace
Plots the ``mds`` value according to the ``inputFileName`` file (``mds``)
and the ``populationFileName`` (the population file).
"""
# The options
plotMDS_options = Dummy()
plotMDS_options.file = inputFileName
plotMDS_options.out = outPrefix
plotMDS_options.format = options.format
plotMDS_options.title = options.title
plotMDS_options.xlabel = options.xlabel
plotMDS_options.ylabel = options.ylabel
plotMDS_options.population_file = populationFileName
try:
# Checking the options
PlotMDS.checkArgs(plotMDS_options)
# Reading the population file
populations = PlotMDS.readPopulations(plotMDS_options.population_file)
# Getting the data
data, labels = PlotMDS.extractData(plotMDS_options.file, populations)
order = [labels.index("CEU"), labels.index("YRI"),
labels.index("JPT-CHB"), labels.index("SOURCE")]
if "HIGHLIGHT" in labels:
order.append(labels.index("HIGHLIGHT"))
color = [(0.215686275, 0.000494118, 0.721568627),
(0.301960784, 0.68627451, 0.290196078),
(0.596078431, 0.305882353, 0.639215686),
(0.894117647, 0.101960784, 0.109803922),
(0.596078431, 0.305882353, 0.639215686)]
sizes = [12, 12, 12, 8, 12]
markers = [".", ".", ".", "+", "D"]
# Plotting the data
PlotMDS.plotMDS(data, order, labels, color, sizes, markers,
plotMDS_options)
except PlotMDS.ProgramError as e:
msg = "PlotMDS: %s" % e
raise ProgramError(msg) |
def _get_preferred_host(self, result: ResolveResult) -> Tuple[str, str]:
'''Get preferred host from DNS results.'''
host_1 = result.first_ipv4.ip_address if result.first_ipv4 else None
host_2 = result.first_ipv6.ip_address if result.first_ipv6 else None
if not host_2:
return host_1, None
elif not host_1:
return host_2, None
preferred_host = self._happy_eyeballs_table.get_preferred(
host_1, host_2)
if preferred_host:
return preferred_host, None
else:
return host_1, host_2 | Get preferred host from DNS results. | Below is the the instruction that describes the task:
### Input:
Get preferred host from DNS results.
### Response:
def _get_preferred_host(self, result: ResolveResult) -> Tuple[str, str]:
'''Get preferred host from DNS results.'''
host_1 = result.first_ipv4.ip_address if result.first_ipv4 else None
host_2 = result.first_ipv6.ip_address if result.first_ipv6 else None
if not host_2:
return host_1, None
elif not host_1:
return host_2, None
preferred_host = self._happy_eyeballs_table.get_preferred(
host_1, host_2)
if preferred_host:
return preferred_host, None
else:
return host_1, host_2 |
def _get_scaling_term(self, C, rrup):
"""
Applies the Cascadia correction factor from Table 2 and the positive
correction factor given on Page 567
"""
a_f = 0.15 + 0.0007 * rrup
a_f[a_f > 0.35] = 0.35
return C["af"] + a_f | Applies the Cascadia correction factor from Table 2 and the positive
correction factor given on Page 567 | Below is the the instruction that describes the task:
### Input:
Applies the Cascadia correction factor from Table 2 and the positive
correction factor given on Page 567
### Response:
def _get_scaling_term(self, C, rrup):
"""
Applies the Cascadia correction factor from Table 2 and the positive
correction factor given on Page 567
"""
a_f = 0.15 + 0.0007 * rrup
a_f[a_f > 0.35] = 0.35
return C["af"] + a_f |
def write(self, log, rotate=False):
"""Write the output of all finished processes to a compiled log file."""
# Get path for logfile
if rotate:
timestamp = time.strftime('-%Y%m%d-%H%M')
logPath = os.path.join(self.log_dir, 'queue{}.log'.format(timestamp))
else:
logPath = os.path.join(self.log_dir, 'queue.log')
# Remove existing Log
if os.path.exists(logPath):
os.remove(logPath)
log_file = open(logPath, 'w')
log_file.write('Pueue log for executed Commands: \n \n')
# Format, color and write log
for key, logentry in log.items():
if logentry.get('returncode') is not None:
try:
# Get returncode color:
returncode = logentry['returncode']
if returncode == 0:
returncode = Color('{autogreen}' + '{}'.format(returncode) + '{/autogreen}')
else:
returncode = Color('{autored}' + '{}'.format(returncode) + '{/autored}')
# Write command id with returncode and actual command
log_file.write(
Color('{autoyellow}' + 'Command #{} '.format(key) + '{/autoyellow}') +
'exited with returncode {}: \n'.format(returncode) +
'"{}" \n'.format(logentry['command'])
)
# Write path
log_file.write('Path: {} \n'.format(logentry['path']))
# Write times
log_file.write('Start: {}, End: {} \n'
.format(logentry['start'], logentry['end']))
# Write STDERR
if logentry['stderr']:
log_file.write(Color('{autored}Stderr output: {/autored}\n ') + logentry['stderr'])
# Write STDOUT
if len(logentry['stdout']) > 0:
log_file.write(Color('{autogreen}Stdout output: {/autogreen}\n ') + logentry['stdout'])
log_file.write('\n')
except Exception as a:
print('Failed while writing to log file. Wrong file permissions?')
print('Exception: {}'.format(str(a)))
log_file.close() | Write the output of all finished processes to a compiled log file. | Below is the the instruction that describes the task:
### Input:
Write the output of all finished processes to a compiled log file.
### Response:
def write(self, log, rotate=False):
"""Write the output of all finished processes to a compiled log file."""
# Get path for logfile
if rotate:
timestamp = time.strftime('-%Y%m%d-%H%M')
logPath = os.path.join(self.log_dir, 'queue{}.log'.format(timestamp))
else:
logPath = os.path.join(self.log_dir, 'queue.log')
# Remove existing Log
if os.path.exists(logPath):
os.remove(logPath)
log_file = open(logPath, 'w')
log_file.write('Pueue log for executed Commands: \n \n')
# Format, color and write log
for key, logentry in log.items():
if logentry.get('returncode') is not None:
try:
# Get returncode color:
returncode = logentry['returncode']
if returncode == 0:
returncode = Color('{autogreen}' + '{}'.format(returncode) + '{/autogreen}')
else:
returncode = Color('{autored}' + '{}'.format(returncode) + '{/autored}')
# Write command id with returncode and actual command
log_file.write(
Color('{autoyellow}' + 'Command #{} '.format(key) + '{/autoyellow}') +
'exited with returncode {}: \n'.format(returncode) +
'"{}" \n'.format(logentry['command'])
)
# Write path
log_file.write('Path: {} \n'.format(logentry['path']))
# Write times
log_file.write('Start: {}, End: {} \n'
.format(logentry['start'], logentry['end']))
# Write STDERR
if logentry['stderr']:
log_file.write(Color('{autored}Stderr output: {/autored}\n ') + logentry['stderr'])
# Write STDOUT
if len(logentry['stdout']) > 0:
log_file.write(Color('{autogreen}Stdout output: {/autogreen}\n ') + logentry['stdout'])
log_file.write('\n')
except Exception as a:
print('Failed while writing to log file. Wrong file permissions?')
print('Exception: {}'.format(str(a)))
log_file.close() |
def get_config(self):
"""Return configurations of BoltzmannQPolicy
# Returns
Dict of config
"""
config = super(BoltzmannQPolicy, self).get_config()
config['tau'] = self.tau
config['clip'] = self.clip
return config | Return configurations of BoltzmannQPolicy
# Returns
Dict of config | Below is the the instruction that describes the task:
### Input:
Return configurations of BoltzmannQPolicy
# Returns
Dict of config
### Response:
def get_config(self):
"""Return configurations of BoltzmannQPolicy
# Returns
Dict of config
"""
config = super(BoltzmannQPolicy, self).get_config()
config['tau'] = self.tau
config['clip'] = self.clip
return config |
def _sync_content_metadata(self, serialized_data):
"""
Create/update/delete content metadata records using the SuccessFactors OCN Course Import API endpoint.
Arguments:
serialized_data: Serialized JSON string representing a list of content metadata items.
Raises:
ClientError: If SuccessFactors API call fails.
"""
url = self.enterprise_configuration.sapsf_base_url + self.global_sap_config.course_api_path
try:
status_code, response_body = self._call_post_with_session(url, serialized_data)
except requests.exceptions.RequestException as exc:
raise ClientError(
'SAPSuccessFactorsAPIClient request failed: {error} {message}'.format(
error=exc.__class__.__name__,
message=str(exc)
)
)
if status_code >= 400:
raise ClientError(
'SAPSuccessFactorsAPIClient request failed with status {status_code}: {message}'.format(
status_code=status_code,
message=response_body
)
) | Create/update/delete content metadata records using the SuccessFactors OCN Course Import API endpoint.
Arguments:
serialized_data: Serialized JSON string representing a list of content metadata items.
Raises:
ClientError: If SuccessFactors API call fails. | Below is the the instruction that describes the task:
### Input:
Create/update/delete content metadata records using the SuccessFactors OCN Course Import API endpoint.
Arguments:
serialized_data: Serialized JSON string representing a list of content metadata items.
Raises:
ClientError: If SuccessFactors API call fails.
### Response:
def _sync_content_metadata(self, serialized_data):
"""
Create/update/delete content metadata records using the SuccessFactors OCN Course Import API endpoint.
Arguments:
serialized_data: Serialized JSON string representing a list of content metadata items.
Raises:
ClientError: If SuccessFactors API call fails.
"""
url = self.enterprise_configuration.sapsf_base_url + self.global_sap_config.course_api_path
try:
status_code, response_body = self._call_post_with_session(url, serialized_data)
except requests.exceptions.RequestException as exc:
raise ClientError(
'SAPSuccessFactorsAPIClient request failed: {error} {message}'.format(
error=exc.__class__.__name__,
message=str(exc)
)
)
if status_code >= 400:
raise ClientError(
'SAPSuccessFactorsAPIClient request failed with status {status_code}: {message}'.format(
status_code=status_code,
message=response_body
)
) |
def post(self, request):
"""
Retrieve video IDs that are missing HLS profiles. This endpoint supports 2 types of input data:
1. If we want a batch of video ids which are missing HLS profile irrespective of their courses, the request
data should be in following format:
{
'batch_size': 50,
'offset': 0
}
And response will be in following format:
{
'videos': ['video_id1', 'video_id2', 'video_id3', ... , video_id50],
'total': 300,
'offset': 50,
'batch_size': 50
}
2. If we want all the videos which are missing HLS profiles in a set of specific courses, the request data
should be in following format:
{
'courses': [
'course_id1',
'course_id2',
...
]
}
And response will be in following format:
{
'videos': ['video_id1', 'video_id2', 'video_id3', ...]
}
"""
courses = request.data.get('courses')
batch_size = request.data.get('batch_size', 50)
offset = request.data.get('offset', 0)
if courses:
videos = (CourseVideo.objects.select_related('video')
.prefetch_related('video__encoded_videos', 'video__encoded_videos__profile')
.filter(course_id__in=courses, video__status='file_complete')
.exclude(video__encoded_videos__profile__profile_name='hls')
.values_list('video__edx_video_id', flat=True)
.distinct())
response = Response({'videos': videos}, status=status.HTTP_200_OK)
else:
videos = (Video.objects.prefetch_related('encoded_videos', 'encoded_videos__profile')
.filter(status='file_complete')
.exclude(encoded_videos__profile__profile_name='hls')
.order_by('id')
.values_list('edx_video_id', flat=True)
.distinct())
response = Response(
{
'videos': videos[offset: offset+batch_size],
'total': videos.count(),
'offset': offset,
'batch_size': batch_size,
},
status=status.HTTP_200_OK
)
return response | Retrieve video IDs that are missing HLS profiles. This endpoint supports 2 types of input data:
1. If we want a batch of video ids which are missing HLS profile irrespective of their courses, the request
data should be in following format:
{
'batch_size': 50,
'offset': 0
}
And response will be in following format:
{
'videos': ['video_id1', 'video_id2', 'video_id3', ... , video_id50],
'total': 300,
'offset': 50,
'batch_size': 50
}
2. If we want all the videos which are missing HLS profiles in a set of specific courses, the request data
should be in following format:
{
'courses': [
'course_id1',
'course_id2',
...
]
}
And response will be in following format:
{
'videos': ['video_id1', 'video_id2', 'video_id3', ...]
} | Below is the instruction that describes the task:
### Input:
Retrieve video IDs that are missing HLS profiles. This endpoint supports 2 types of input data:
1. If we want a batch of video ids which are missing HLS profile irrespective of their courses, the request
data should be in following format:
{
'batch_size': 50,
'offset': 0
}
And response will be in following format:
{
'videos': ['video_id1', 'video_id2', 'video_id3', ... , video_id50],
'total': 300,
'offset': 50,
'batch_size': 50
}
2. If we want all the videos which are missing HLS profiles in a set of specific courses, the request data
should be in following format:
{
'courses': [
'course_id1',
'course_id2',
...
]
}
And response will be in following format:
{
'videos': ['video_id1', 'video_id2', 'video_id3', ...]
}
### Response:
def post(self, request):
        """
        Retrieve video IDs that are missing HLS profiles. This endpoint supports 2 types of input data:
        1. If we want a batch of video ids which are missing HLS profile irrespective of their courses, the request
        data should be in following format:
        {
            'batch_size': 50,
            'offset': 0
        }
        And response will be in following format:
        {
            'videos': ['video_id1', 'video_id2', 'video_id3', ... , video_id50],
            'total': 300,
            'offset': 50,
            'batch_size': 50
        }
        2. If we want all the videos which are missing HLS profiles in a set of specific courses, the request data
        should be in following format:
        {
            'courses': [
                'course_id1',
                'course_id2',
                ...
            ]
        }
        And response will be in following format:
        {
            'videos': ['video_id1', 'video_id2', 'video_id3', ...]
        }
        """
        # Presence of 'courses' selects the per-course form; otherwise the
        # paginated batch form below is used.
        courses = request.data.get('courses')
        # Pagination parameters only apply to the batch (course-agnostic) form.
        batch_size = request.data.get('batch_size', 50)
        offset = request.data.get('offset', 0)
        if courses:
            # Per-course form: every matching video id, no pagination.
            # 'file_complete' videos lacking an 'hls' encoded profile.
            videos = (CourseVideo.objects.select_related('video')
                      .prefetch_related('video__encoded_videos', 'video__encoded_videos__profile')
                      .filter(course_id__in=courses, video__status='file_complete')
                      .exclude(video__encoded_videos__profile__profile_name='hls')
                      .values_list('video__edx_video_id', flat=True)
                      .distinct())
            response = Response({'videos': videos}, status=status.HTTP_200_OK)
        else:
            # Batch form: order_by('id') gives a stable ordering so that
            # offset/batch_size pagination is consistent across requests.
            videos = (Video.objects.prefetch_related('encoded_videos', 'encoded_videos__profile')
                      .filter(status='file_complete')
                      .exclude(encoded_videos__profile__profile_name='hls')
                      .order_by('id')
                      .values_list('edx_video_id', flat=True)
                      .distinct())
            response = Response(
                {
                    # Slice lazily evaluates as LIMIT/OFFSET in the database.
                    'videos': videos[offset: offset+batch_size],
                    'total': videos.count(),
                    'offset': offset,
                    'batch_size': batch_size,
                },
                status=status.HTTP_200_OK
            )
        return response
def to_unicode(s, encoding=None, errors='strict'):
"""
Make unicode string from any value
:param s:
:param encoding:
:param errors:
:return: unicode
"""
encoding = encoding or 'utf-8'
if is_unicode(s):
return s
elif is_strlike(s):
return s.decode(encoding, errors)
else:
if six.PY2:
return str(s).decode(encoding, errors)
else:
return str(s) | Make unicode string from any value
:param s:
:param encoding:
:param errors:
:return: unicode | Below is the instruction that describes the task:
### Input:
Make unicode string from any value
:param s:
:param encoding:
:param errors:
:return: unicode
### Response:
def to_unicode(s, encoding=None, errors='strict'):
    """
    Make unicode string from any value

    :param s: value to convert (unicode, byte string, or arbitrary object)
    :param encoding: codec used to decode byte strings; defaults to utf-8
    :param errors: decode error policy handed to ``decode``
    :return: unicode
    """
    codec = encoding if encoding else 'utf-8'
    if is_unicode(s):
        # Already text -- hand it back untouched.
        return s
    if is_strlike(s):
        # Byte string: decode with the requested codec.
        return s.decode(codec, errors)
    # Arbitrary object: stringify first.  On Python 2, str() yields bytes
    # that still need decoding; on Python 3 it is already unicode.
    text = str(s)
    return text.decode(codec, errors) if six.PY2 else text
def datetime_match(data, dts):
"""
matching of datetimes in time columns for data filtering
"""
dts = dts if islistable(dts) else [dts]
if any([not isinstance(i, datetime.datetime) for i in dts]):
error_msg = (
"`time` can only be filtered by datetimes"
)
raise TypeError(error_msg)
    return data.isin(dts) | matching of datetimes in time columns for data filtering | Below is the instruction that describes the task:
### Input:
matching of datetimes in time columns for data filtering
### Response:
def datetime_match(data, dts):
    """
    matching of datetimes in time columns for data filtering

    :param data: pandas object exposing ``isin`` (Series/Index of datetimes)
    :param dts: a ``datetime.datetime`` or a list of them to match against
    :return: boolean mask, True where ``data`` equals one of ``dts``
    :raises TypeError: if any entry of ``dts`` is not a ``datetime.datetime``
    """
    dts = dts if islistable(dts) else [dts]
    # Generator expression avoids materialising a throwaway list inside any().
    if any(not isinstance(i, datetime.datetime) for i in dts):
        raise TypeError("`time` can only be filtered by datetimes")
    return data.isin(dts)
def send(self, request, **kw):
"""
Send a request. Use the request information to see if it
exists in the cache and cache the response if we need to and can.
"""
if request.method == 'GET':
cached_response = self.controller.cached_request(request)
if cached_response:
return self.build_response(request, cached_response,
from_cache=True)
# check for etags and add headers if appropriate
request.headers.update(
self.controller.conditional_headers(request)
)
resp = super(CacheControlAdapter, self).send(request, **kw)
return resp | Send a request. Use the request information to see if it
exists in the cache and cache the response if we need to and can. | Below is the instruction that describes the task:
### Input:
Send a request. Use the request information to see if it
exists in the cache and cache the response if we need to and can.
### Response:
def send(self, request, **kw):
        """
        Send a request. Use the request information to see if it
        exists in the cache and cache the response if we need to and can.
        """
        # Only GET requests are served from / validated against the cache.
        if request.method == 'GET':
            cached_response = self.controller.cached_request(request)
            if cached_response:
                # Fresh cache hit: short-circuit without touching the network.
                return self.build_response(request, cached_response,
                                           from_cache=True)
            # check for etags and add headers if appropriate
            # (conditional revalidation headers, e.g. If-None-Match).
            request.headers.update(
                self.controller.conditional_headers(request)
            )
        # Cache miss or non-GET: defer to the underlying adapter.
        resp = super(CacheControlAdapter, self).send(request, **kw)
        return resp
def factory(cls, object_raw):
"""Return a proper object
"""
if object_raw is None:
return None
if object_raw.type is ObjectRaw.Types.object:
return ObjectObject(object_raw)
elif object_raw.type is ObjectRaw.Types.type:
return ObjectType(object_raw)
elif object_raw.type is ObjectRaw.Types.array:
return ObjectArray(object_raw)
elif object_raw.type is ObjectRaw.Types.dynamic:
return ObjectDynamic(object_raw)
elif object_raw.type is ObjectRaw.Types.const:
return ObjectConst(object_raw)
elif object_raw.type is ObjectRaw.Types.enum:
return ObjectEnum(object_raw)
else:
return Object(object_raw) | Return a proper object | Below is the the instruction that describes the task:
### Input:
Return a proper object
### Response:
def factory(cls, object_raw):
    """Return a proper object

    Picks the wrapper class matching the raw object's declared type,
    falling back to the generic ``Object`` for anything unrecognised.
    """
    if object_raw is None:
        return None
    dispatch = (
        (ObjectRaw.Types.object, ObjectObject),
        (ObjectRaw.Types.type, ObjectType),
        (ObjectRaw.Types.array, ObjectArray),
        (ObjectRaw.Types.dynamic, ObjectDynamic),
        (ObjectRaw.Types.const, ObjectConst),
        (ObjectRaw.Types.enum, ObjectEnum),
    )
    for raw_type, wrapper in dispatch:
        # Identity comparison mirrors the original chain of ``is`` checks.
        if object_raw.type is raw_type:
            return wrapper(object_raw)
    return Object(object_raw)
def kl(samples1, samples2, pdf1=False, pdf2=False,
bins=30, hist_min=None, hist_max=None):
""" Computes the Kullback-Leibler divergence for a single parameter
from two distributions.
Parameters
----------
samples1 : numpy.array
Samples or probability density function (must also set `pdf1=True`).
samples2 : numpy.array
Samples or probability density function (must also set `pdf2=True`).
pdf1 : bool
Set to `True` if `samples1` is a probability density funtion already.
pdf2 : bool
Set to `True` if `samples2` is a probability density funtion already.
bins : int
Number of bins to use when calculating probability density function
from a set of samples of the distribution.
hist_min : numpy.float64
Minimum of the distributions' values to use.
hist_max : numpy.float64
Maximum of the distributions' values to use.
Returns
-------
numpy.float64
The Kullback-Leibler divergence value.
"""
hist_range = (hist_min, hist_max)
if not pdf1:
samples1, _ = numpy.histogram(samples1, bins=bins,
range=hist_range, normed=True)
if not pdf2:
samples2, _ = numpy.histogram(samples2, bins=bins,
range=hist_range, normed=True)
return stats.entropy(samples1, qk=samples2) | Computes the Kullback-Leibler divergence for a single parameter
from two distributions.
Parameters
----------
samples1 : numpy.array
Samples or probability density function (must also set `pdf1=True`).
samples2 : numpy.array
Samples or probability density function (must also set `pdf2=True`).
pdf1 : bool
Set to `True` if `samples1` is a probability density funtion already.
pdf2 : bool
Set to `True` if `samples2` is a probability density funtion already.
bins : int
Number of bins to use when calculating probability density function
from a set of samples of the distribution.
hist_min : numpy.float64
Minimum of the distributions' values to use.
hist_max : numpy.float64
Maximum of the distributions' values to use.
Returns
-------
numpy.float64
The Kullback-Leibler divergence value. | Below is the instruction that describes the task:
### Input:
Computes the Kullback-Leibler divergence for a single parameter
from two distributions.
Parameters
----------
samples1 : numpy.array
Samples or probability density function (must also set `pdf1=True`).
samples2 : numpy.array
Samples or probability density function (must also set `pdf2=True`).
pdf1 : bool
Set to `True` if `samples1` is a probability density funtion already.
pdf2 : bool
Set to `True` if `samples2` is a probability density funtion already.
bins : int
Number of bins to use when calculating probability density function
from a set of samples of the distribution.
hist_min : numpy.float64
Minimum of the distributions' values to use.
hist_max : numpy.float64
Maximum of the distributions' values to use.
Returns
-------
numpy.float64
The Kullback-Leibler divergence value.
### Response:
def kl(samples1, samples2, pdf1=False, pdf2=False,
       bins=30, hist_min=None, hist_max=None):
    """ Computes the Kullback-Leibler divergence for a single parameter
    from two distributions.
    Parameters
    ----------
    samples1 : numpy.array
        Samples or probability density function (must also set `pdf1=True`).
    samples2 : numpy.array
        Samples or probability density function (must also set `pdf2=True`).
    pdf1 : bool
        Set to `True` if `samples1` is a probability density function already.
    pdf2 : bool
        Set to `True` if `samples2` is a probability density function already.
    bins : int
        Number of bins to use when calculating probability density function
        from a set of samples of the distribution.
    hist_min : numpy.float64
        Minimum of the distributions' values to use.
    hist_max : numpy.float64
        Maximum of the distributions' values to use.
    Returns
    -------
    numpy.float64
        The Kullback-Leibler divergence value.
    """
    hist_range = (hist_min, hist_max)
    if not pdf1:
        # ``density=True`` replaces the long-deprecated ``normed`` keyword,
        # which was removed in NumPy 1.24; the computed result is identical.
        samples1, _ = numpy.histogram(samples1, bins=bins,
                                      range=hist_range, density=True)
    if not pdf2:
        samples2, _ = numpy.histogram(samples2, bins=bins,
                                      range=hist_range, density=True)
    # entropy(pk, qk=qk) computes sum(pk * log(pk / qk)), i.e. KL(pk || qk).
    return stats.entropy(samples1, qk=samples2)
def init_stats_history(self):
"""Init the stats history (dict of GlancesAttribute)."""
if self.history_enable():
init_list = [a['name'] for a in self.get_items_history_list()]
logger.debug("Stats history activated for plugin {} (items: {})".format(self.plugin_name, init_list))
return GlancesHistory() | Init the stats history (dict of GlancesAttribute). | Below is the the instruction that describes the task:
### Input:
Init the stats history (dict of GlancesAttribute).
### Response:
def init_stats_history(self):
        """Init the stats history (dict of GlancesAttribute).

        :return: a new GlancesHistory instance
        """
        if self.history_enable():
            # Collect item names only to make the debug log message informative.
            init_list = [a['name'] for a in self.get_items_history_list()]
            logger.debug("Stats history activated for plugin {} (items: {})".format(self.plugin_name, init_list))
        # NOTE(review): a GlancesHistory is returned even when history is
        # disabled -- the guard above only controls the debug log line.
        # Confirm this is intentional before changing it.
        return GlancesHistory()
def plot_gain_offsets(dio_cross,dio_chan_per_coarse=8,feedtype='l',ax1=None,ax2=None,legend=True,**kwargs):
'''
Plots the calculated gain offsets of each coarse channel along with
the time averaged power spectra of the X and Y feeds
'''
#Get ON-OFF ND spectra
Idiff,Qdiff,Udiff,Vdiff,freqs = get_diff(dio_cross,feedtype,**kwargs)
obs = Waterfall(dio_cross,max_load=150)
tsamp = obs.header['tsamp']
data = obs.data
obs = None
I,Q,U,V = get_stokes(data,feedtype)
#Get phase offsets and convert to degrees
coarse_G = gain_offsets(I,Q,U,V,tsamp,dio_chan_per_coarse,feedtype,**kwargs)
coarse_freqs = convert_to_coarse(freqs,dio_chan_per_coarse)
#Get X and Y spectra for the noise diode ON and OFF
#If using circular feeds these correspond to LL and RR
XX_OFF,XX_ON = foldcal(np.expand_dims(data[:,0,:],axis=1),tsamp,**kwargs)
YY_OFF,YY_ON = foldcal(np.expand_dims(data[:,1,:],axis=1),tsamp,**kwargs)
if ax1==None:
plt.subplot(211)
else:
axG = plt.axes(ax1)
plt.setp(axG.get_xticklabels(),visible=False)
plt.plot(coarse_freqs,coarse_G,'ko',markersize=2)
plt.ylabel(r'$\frac{\Delta G}{2}$',rotation=90)
if feedtype=='l':
plt.title('XY Gain Difference')
if feedtype=='c':
plt.title('LR Gain Difference')
plt.grid(True)
if ax2==None:
plt.subplot(212)
else:
axXY = plt.axes(ax2,sharex=axG)
if feedtype=='l':
plt.plot(freqs,XX_OFF,'b-',label='XX')
plt.plot(freqs,YY_OFF,'r-',label='YY')
if feedtype=='c':
plt.plot(freqs,XX_OFF,'b-',label='LL')
plt.plot(freqs,YY_OFF,'r-',label='RR')
plt.xlabel('Frequency (MHz)')
plt.ylabel('Power (Counts)')
if legend==True:
plt.legend() | Plots the calculated gain offsets of each coarse channel along with
the time averaged power spectra of the X and Y feeds | Below is the the instruction that describes the task:
### Input:
Plots the calculated gain offsets of each coarse channel along with
the time averaged power spectra of the X and Y feeds
### Response:
def plot_gain_offsets(dio_cross,dio_chan_per_coarse=8,feedtype='l',ax1=None,ax2=None,legend=True,**kwargs):
    '''
    Plots the calculated gain offsets of each coarse channel along with
    the time averaged power spectra of the X and Y feeds

    Parameters:
        dio_cross: cross-polarization noise-diode observation file
        dio_chan_per_coarse (int): fine channels per coarse channel
        feedtype (str): 'l' for linear feeds (XX/YY), 'c' for circular (LL/RR)
        ax1, ax2: optional pre-existing axes for the gain and spectra panels
        legend (bool): whether to draw the legend on the spectra panel
    '''
    #Get ON-OFF ND spectra
    # (only `freqs` is used below; the Stokes difference arrays are discarded)
    Idiff,Qdiff,Udiff,Vdiff,freqs = get_diff(dio_cross,feedtype,**kwargs)
    obs = Waterfall(dio_cross,max_load=150)
    tsamp = obs.header['tsamp']
    data = obs.data
    # Drop the Waterfall object so its (potentially large) buffer can be freed.
    obs = None
    I,Q,U,V = get_stokes(data,feedtype)
    #Get gain offsets for each coarse channel
    coarse_G = gain_offsets(I,Q,U,V,tsamp,dio_chan_per_coarse,feedtype,**kwargs)
    coarse_freqs = convert_to_coarse(freqs,dio_chan_per_coarse)
    #Get X and Y spectra for the noise diode ON and OFF
    #If using circular feeds these correspond to LL and RR
    XX_OFF,XX_ON = foldcal(np.expand_dims(data[:,0,:],axis=1),tsamp,**kwargs)
    YY_OFF,YY_ON = foldcal(np.expand_dims(data[:,1,:],axis=1),tsamp,**kwargs)
    # NOTE(review): if ax1 is None but ax2 is given, `axG` below is never
    # bound and the sharex=axG reference raises NameError -- confirm callers
    # always pass both axes or neither.
    if ax1==None:
        plt.subplot(211)
    else:
        axG = plt.axes(ax1)
        plt.setp(axG.get_xticklabels(),visible=False)
    plt.plot(coarse_freqs,coarse_G,'ko',markersize=2)
    plt.ylabel(r'$\frac{\Delta G}{2}$',rotation=90)
    if feedtype=='l':
        plt.title('XY Gain Difference')
    if feedtype=='c':
        plt.title('LR Gain Difference')
    plt.grid(True)
    if ax2==None:
        plt.subplot(212)
    else:
        axXY = plt.axes(ax2,sharex=axG)
    if feedtype=='l':
        plt.plot(freqs,XX_OFF,'b-',label='XX')
        plt.plot(freqs,YY_OFF,'r-',label='YY')
    if feedtype=='c':
        plt.plot(freqs,XX_OFF,'b-',label='LL')
        plt.plot(freqs,YY_OFF,'r-',label='RR')
    plt.xlabel('Frequency (MHz)')
    plt.ylabel('Power (Counts)')
    if legend==True:
        plt.legend()
def infer_paths(output_dir, **subdirs):
"""Infers standard paths to policy and model directories.
Example:
>>> infer_paths("/some/output/dir/", policy="", model="custom/path")
{"policy": "/some/output/dir/policy", "model": "custom/path",
"output_dir":"/some/output/dir/"}
Args:
output_dir: output directory.
**subdirs: sub-directories.
Returns:
a dictionary with the directories.
"""
directories = {}
for name, path in six.iteritems(subdirs):
directories[name] = path if path else os.path.join(output_dir, name)
directories["output_dir"] = output_dir
return directories | Infers standard paths to policy and model directories.
Example:
>>> infer_paths("/some/output/dir/", policy="", model="custom/path")
{"policy": "/some/output/dir/policy", "model": "custom/path",
"output_dir":"/some/output/dir/"}
Args:
output_dir: output directory.
**subdirs: sub-directories.
Returns:
a dictionary with the directories. | Below is the instruction that describes the task:
### Input:
Infers standard paths to policy and model directories.
Example:
>>> infer_paths("/some/output/dir/", policy="", model="custom/path")
{"policy": "/some/output/dir/policy", "model": "custom/path",
"output_dir":"/some/output/dir/"}
Args:
output_dir: output directory.
**subdirs: sub-directories.
Returns:
a dictionary with the directories.
### Response:
def infer_paths(output_dir, **subdirs):
    """Infers standard paths to policy and model directories.
    Example:
        >>> infer_paths("/some/output/dir/", policy="", model="custom/path")
        {"policy": "/some/output/dir/policy", "model": "custom/path",
         "output_dir": "/some/output/dir/"}
    Args:
        output_dir: output directory.
        **subdirs: sub-directories.
    Returns:
        a dictionary with the directories.
    """
    directories = {}
    # dict.items() works on both Python 2 and 3, so the six.iteritems shim
    # the original used is unnecessary here.
    for name, path in subdirs.items():
        # An empty/None path means "use the default location under output_dir".
        directories[name] = path if path else os.path.join(output_dir, name)
    directories["output_dir"] = output_dir
    return directories
def from_description(cls, table):
""" Factory method that uses the dynamo3 'describe' return value """
throughput = table.provisioned_throughput
attrs = {}
for data in getattr(table, "attribute_definitions", []):
field = TableField(data["AttributeName"], TYPES_REV[data["AttributeType"]])
attrs[field.name] = field
for data in getattr(table, "key_schema", []):
name = data["AttributeName"]
attrs[name].key_type = data["KeyType"]
for index in getattr(table, "local_secondary_indexes", []):
for data in index["KeySchema"]:
if data["KeyType"] == "RANGE":
name = data["AttributeName"]
index_type = index["Projection"]["ProjectionType"]
includes = index["Projection"].get("NonKeyAttributes")
attrs[name] = attrs[name].to_index(
index_type, index["IndexName"], includes
)
break
global_indexes = {}
for index in getattr(table, "global_secondary_indexes", []):
idx = GlobalIndex.from_description(index, attrs)
global_indexes[idx.name] = idx
return cls(
table,
attrs,
global_indexes,
throughput["ReadCapacityUnits"],
throughput["WriteCapacityUnits"],
throughput["NumberOfDecreasesToday"],
table.table_size_bytes,
) | Factory method that uses the dynamo3 'describe' return value | Below is the the instruction that describes the task:
### Input:
Factory method that uses the dynamo3 'describe' return value
### Response:
def from_description(cls, table):
        """ Factory method that uses the dynamo3 'describe' return value """
        throughput = table.provisioned_throughput
        # Build TableField objects for every declared attribute, keyed by name.
        attrs = {}
        for data in getattr(table, "attribute_definitions", []):
            field = TableField(data["AttributeName"], TYPES_REV[data["AttributeType"]])
            attrs[field.name] = field
        # Attach the key role (HASH/RANGE) to the fields named in the key schema.
        for data in getattr(table, "key_schema", []):
            name = data["AttributeName"]
            attrs[name].key_type = data["KeyType"]
        # Local secondary indexes: promote each LSI's RANGE attribute to an
        # index field carrying the projection details.
        for index in getattr(table, "local_secondary_indexes", []):
            for data in index["KeySchema"]:
                if data["KeyType"] == "RANGE":
                    name = data["AttributeName"]
                    index_type = index["Projection"]["ProjectionType"]
                    includes = index["Projection"].get("NonKeyAttributes")
                    attrs[name] = attrs[name].to_index(
                        index_type, index["IndexName"], includes
                    )
                    # An LSI has exactly one RANGE key, so stop after the first.
                    break
        # Global secondary indexes are parsed by GlobalIndex itself.
        global_indexes = {}
        for index in getattr(table, "global_secondary_indexes", []):
            idx = GlobalIndex.from_description(index, attrs)
            global_indexes[idx.name] = idx
        return cls(
            table,
            attrs,
            global_indexes,
            throughput["ReadCapacityUnits"],
            throughput["WriteCapacityUnits"],
            throughput["NumberOfDecreasesToday"],
            table.table_size_bytes,
        )
def load_unicode(self, resource_path):
"""
Gets the content of a resource
"""
resource_content = pkg_resources.resource_string(self.module_name, resource_path)
return resource_content.decode('utf-8') | Gets the content of a resource | Below is the the instruction that describes the task:
### Input:
Gets the content of a resource
### Response:
def load_unicode(self, resource_path):
        """
        Gets the content of a resource

        :param resource_path: path of the resource, relative to this module
        :return: the resource's contents decoded as UTF-8 text
        """
        raw_bytes = pkg_resources.resource_string(self.module_name, resource_path)
        return raw_bytes.decode('utf-8')
def setup_privnet(self, host=None):
"""
Load settings from the privnet JSON config file
Args:
host (string, optional): if supplied, uses this IP or domain as neo nodes. The host must
use these standard ports: P2P 20333, RPC 30333.
"""
self.setup(FILENAME_SETTINGS_PRIVNET)
if isinstance(host, str):
if ":" in host:
raise Exception("No protocol prefix or port allowed in host, use just the IP or domain.")
print("Using custom privatenet host:", host)
self.SEED_LIST = ["%s:20333" % host]
self.RPC_LIST = ["http://%s:30333" % host]
print("- P2P:", ", ".join(self.SEED_LIST))
print("- RPC:", ", ".join(self.RPC_LIST))
self.check_privatenet() | Load settings from the privnet JSON config file
Args:
host (string, optional): if supplied, uses this IP or domain as neo nodes. The host must
use these standard ports: P2P 20333, RPC 30333. | Below is the the instruction that describes the task:
### Input:
Load settings from the privnet JSON config file
Args:
host (string, optional): if supplied, uses this IP or domain as neo nodes. The host must
use these standard ports: P2P 20333, RPC 30333.
### Response:
def setup_privnet(self, host=None):
        """
        Load settings from the privnet JSON config file

        Args:
            host (string, optional): if supplied, uses this IP or domain as neo nodes. The host must
                use these standard ports: P2P 20333, RPC 30333.
        """
        # Start from the packaged privnet defaults, then optionally override
        # the node host lists below.
        self.setup(FILENAME_SETTINGS_PRIVNET)
        if isinstance(host, str):
            # Only a bare IP/hostname is accepted; protocol and ports are fixed.
            if ":" in host:
                raise Exception("No protocol prefix or port allowed in host, use just the IP or domain.")
            print("Using custom privatenet host:", host)
            self.SEED_LIST = ["%s:20333" % host]
            self.RPC_LIST = ["http://%s:30333" % host]
            print("- P2P:", ", ".join(self.SEED_LIST))
            print("- RPC:", ", ".join(self.RPC_LIST))
        # Sanity-check the privatenet configuration (see check_privatenet).
        self.check_privatenet()
def WheelDown(self, wheelTimes: int = 1, interval: float = 0.05, waitTime: float = OPERATION_WAIT_TIME) -> None:
"""
Make control have focus first, move cursor to center and mouse wheel down.
wheelTimes: int.
interval: float.
waitTime: float.
"""
x, y = GetCursorPos()
self.SetFocus()
self.MoveCursorToMyCenter(False)
WheelDown(wheelTimes, interval, waitTime)
SetCursorPos(x, y) | Make control have focus first, move cursor to center and mouse wheel down.
wheelTimes: int.
interval: float.
waitTime: float. | Below is the the instruction that describes the task:
### Input:
Make control have focus first, move cursor to center and mouse wheel down.
wheelTimes: int.
interval: float.
waitTime: float.
### Response:
def WheelDown(self, wheelTimes: int = 1, interval: float = 0.05, waitTime: float = OPERATION_WAIT_TIME) -> None:
        """
        Make control have focus first, move cursor to center and mouse wheel down.
        wheelTimes: int.
        interval: float.
        waitTime: float.
        """
        # Remember the current cursor position so it can be restored afterwards.
        x, y = GetCursorPos()
        self.SetFocus()
        # NOTE(review): the positional flag here is presumably a
        # "simulateMove=False" -- confirm against MoveCursorToMyCenter's signature.
        self.MoveCursorToMyCenter(False)
        # Delegate the actual scrolling to the module-level WheelDown helper.
        WheelDown(wheelTimes, interval, waitTime)
        # Restore the original cursor position.
        SetCursorPos(x, y)
def create_game(self, map_name):
"""Create a game for the agents to join.
Args:
map_name: The map to use.
"""
map_inst = maps.get(map_name)
map_data = map_inst.data(self._run_config)
if map_name not in self._saved_maps:
for controller in self._controllers:
controller.save_map(map_inst.path, map_data)
self._saved_maps.add(map_name)
# Form the create game message.
create = sc_pb.RequestCreateGame(
local_map=sc_pb.LocalMap(map_path=map_inst.path),
disable_fog=False)
# Set up for two agents.
for _ in range(self._num_agents):
create.player_setup.add(type=sc_pb.Participant)
# Create the game.
self._controllers[0].create_game(create) | Create a game for the agents to join.
Args:
map_name: The map to use. | Below is the the instruction that describes the task:
### Input:
Create a game for the agents to join.
Args:
map_name: The map to use.
### Response:
def create_game(self, map_name):
    """Create a game for the agents to join.
    Args:
      map_name: The map to use.
    """
    map_inst = maps.get(map_name)
    map_data = map_inst.data(self._run_config)
    # Upload the map to every controller, but only once per map per process.
    if map_name not in self._saved_maps:
      for controller in self._controllers:
        controller.save_map(map_inst.path, map_data)
      self._saved_maps.add(map_name)
    # Form the create game message.
    create = sc_pb.RequestCreateGame(
        local_map=sc_pb.LocalMap(map_path=map_inst.path),
        disable_fog=False)
    # Set up for two agents.
    # One Participant slot per agent; self._num_agents determines the count.
    for _ in range(self._num_agents):
      create.player_setup.add(type=sc_pb.Participant)
    # Create the game.
    # The create request is issued through the first controller only.
    self._controllers[0].create_game(create)
def get_bounds(pts):
"""Return the minimum point and maximum point bounding a
set of points."""
pts_t = np.asarray(pts).T
return np.asarray(([np.min(_pts) for _pts in pts_t],
[np.max(_pts) for _pts in pts_t])) | Return the minimum point and maximum point bounding a
set of points. | Below is the instruction that describes the task:
### Input:
Return the minimum point and maximum point bounding a
set of points.
### Response:
def get_bounds(pts):
    """Return the minimum point and maximum point bounding a
    set of points."""
    arr = np.asarray(pts)
    # Reducing over axis 0 gives the per-coordinate min/max across all points.
    return np.asarray((arr.min(axis=0), arr.max(axis=0)))
def _log_failed_submission(self, data):
"""
Log a reasonable representation of an event that should have been sent
to Sentry
"""
message = data.pop('message', '<no message value>')
output = [message]
if 'exception' in data and 'stacktrace' in data['exception']['values'][-1]:
# try to reconstruct a reasonable version of the exception
for frame in data['exception']['values'][-1]['stacktrace'].get('frames', []):
output.append(' File "%(fn)s", line %(lineno)s, in %(func)s' % {
'fn': frame.get('filename', 'unknown_filename'),
'lineno': frame.get('lineno', -1),
'func': frame.get('function', 'unknown_function'),
})
self.uncaught_logger.error(output) | Log a reasonable representation of an event that should have been sent
to Sentry | Below is the the instruction that describes the task:
### Input:
Log a reasonable representation of an event that should have been sent
to Sentry
### Response:
def _log_failed_submission(self, data):
        """
        Log a reasonable representation of an event that should have been sent
        to Sentry

        :param data: event payload dict; note that 'message' is popped from it,
            so the caller's dict is mutated (pre-existing behaviour).
        """
        message = data.pop('message', '<no message value>')
        output = [message]
        if 'exception' in data and 'stacktrace' in data['exception']['values'][-1]:
            # try to reconstruct a reasonable version of the exception
            for frame in data['exception']['values'][-1]['stacktrace'].get('frames', []):
                output.append(' File "%(fn)s", line %(lineno)s, in %(func)s' % {
                    'fn': frame.get('filename', 'unknown_filename'),
                    'lineno': frame.get('lineno', -1),
                    'func': frame.get('function', 'unknown_function'),
                })
        # Join into a single traceback-like string; logging the raw list
        # rendered it as "['msg', ...]", which is much harder to read.
        self.uncaught_logger.error('\n'.join(output))
def is_occupied(self, address):
"""
Check if an address belongs to any segment
:param address: The address to check
:return: True if this address belongs to a segment, False otherwise
"""
idx = self._search(address)
if len(self._list) <= idx:
return False
if self._list[idx].start <= address < self._list[idx].end:
return True
if idx > 0 and address < self._list[idx - 1].end:
# TODO: It seems that this branch is never reached. Should it be removed?
return True
return False | Check if an address belongs to any segment
:param address: The address to check
:return: True if this address belongs to a segment, False otherwise | Below is the the instruction that describes the task:
### Input:
Check if an address belongs to any segment
:param address: The address to check
:return: True if this address belongs to a segment, False otherwise
### Response:
def is_occupied(self, address):
        """
        Check if an address belongs to any segment
        :param address: The address to check
        :return: True if this address belongs to a segment, False otherwise
        """
        # _search returns an index into the sorted segment list -- presumably
        # the bisect-style insertion point for `address`; see _search for details.
        idx = self._search(address)
        if len(self._list) <= idx:
            # Past the last segment: cannot be inside any of them.
            return False
        if self._list[idx].start <= address < self._list[idx].end:
            return True
        if idx > 0 and address < self._list[idx - 1].end:
            # TODO: It seems that this branch is never reached. Should it be removed?
            return True
        return False
def fixed_legend_filter_field(self, fixed_legend_filter_field):
"""Sets the fixed_legend_filter_field of this ChartSettings.
Statistic to use for determining whether a series is displayed on the fixed legend # noqa: E501
:param fixed_legend_filter_field: The fixed_legend_filter_field of this ChartSettings. # noqa: E501
:type: str
"""
allowed_values = ["CURRENT", "MEAN", "MEDIAN", "SUM", "MIN", "MAX", "COUNT"] # noqa: E501
if fixed_legend_filter_field not in allowed_values:
raise ValueError(
"Invalid value for `fixed_legend_filter_field` ({0}), must be one of {1}" # noqa: E501
.format(fixed_legend_filter_field, allowed_values)
)
self._fixed_legend_filter_field = fixed_legend_filter_field | Sets the fixed_legend_filter_field of this ChartSettings.
Statistic to use for determining whether a series is displayed on the fixed legend # noqa: E501
:param fixed_legend_filter_field: The fixed_legend_filter_field of this ChartSettings. # noqa: E501
:type: str | Below is the the instruction that describes the task:
### Input:
Sets the fixed_legend_filter_field of this ChartSettings.
Statistic to use for determining whether a series is displayed on the fixed legend # noqa: E501
:param fixed_legend_filter_field: The fixed_legend_filter_field of this ChartSettings. # noqa: E501
:type: str
### Response:
def fixed_legend_filter_field(self, fixed_legend_filter_field):
"""Sets the fixed_legend_filter_field of this ChartSettings.
Statistic to use for determining whether a series is displayed on the fixed legend # noqa: E501
:param fixed_legend_filter_field: The fixed_legend_filter_field of this ChartSettings. # noqa: E501
:type: str
"""
allowed_values = ["CURRENT", "MEAN", "MEDIAN", "SUM", "MIN", "MAX", "COUNT"] # noqa: E501
if fixed_legend_filter_field not in allowed_values:
raise ValueError(
"Invalid value for `fixed_legend_filter_field` ({0}), must be one of {1}" # noqa: E501
.format(fixed_legend_filter_field, allowed_values)
)
self._fixed_legend_filter_field = fixed_legend_filter_field |
def create_legacy_pad(scope, input_name, output_name, H_in, W_in, k_h, k_w,
s_h, s_w, p_h, p_w, padded_value, container):
'''
This function adds one Pad operator into its last argument, which is a Container object. By feeding the output of
the created Pad operator into Pool operator under valid padding mode, we can achieve the same functionality of
CoreML' pooling under IncludeLastPixel padding mode.
:param scope:
:param input_name:
:param output_name:
:param H_in: input dimension along H-axis
:param W_in: input dimension along W-axis
:param k_h: kernel's H-axis dimension
:param k_w: kernel's W-axis dimension
:param s_h: stride along H-axis
:param s_w: stride along W-axis
:param p_h: padding amount at the beginning and the end of H-axis
:param p_w: padding amount at the beginning and the end of W-axis
:param padded_value: value used to fill padded area
:param container: Container object
'''
# Add a Pad operator to pre-process 4-D tensor
pad_t, pad_b = calculate_legacy_pad_amount(H_in, p_h, k_h, s_h)
pad_l, pad_r = calculate_legacy_pad_amount(W_in, p_w, k_w, s_w)
# CoreML pooling operator pads only their H- and W-axes. Here we assume the shape of the tensor to be padded
# is [N, C, H, W], so we have 8 padding amounts
# pads = [N_begin_index, C_begin_index, H_begin_index, W_begin_index,
# N_end_index, C_end_index, H_end_index, W_end_index]
# Because only H- and W-axes are padded in CoreML, we leave padding amounts of N- and C-axes zeros.
pads = [0, 0, pad_t, pad_l, 0, 0, pad_b, pad_r]
apply_pad(scope, input_name, output_name, container, pads=pads, value=padded_value) | This function adds one Pad operator into its last argument, which is a Container object. By feeding the output of
the created Pad operator into Pool operator under valid padding mode, we can achieve the same functionality of
CoreML' pooling under IncludeLastPixel padding mode.
:param scope:
:param input_name:
:param output_name:
:param H_in: input dimension along H-axis
:param W_in: input dimension along W-axis
:param k_h: kernel's H-axis dimension
:param k_w: kernel's W-axis dimension
:param s_h: stride along H-axis
:param s_w: stride along W-axis
:param p_h: padding amount at the beginning and the end of H-axis
:param p_w: padding amount at the beginning and the end of W-axis
:param padded_value: value used to fill padded area
:param container: Container object | Below is the the instruction that describes the task:
### Input:
This function adds one Pad operator into its last argument, which is a Container object. By feeding the output of
the created Pad operator into Pool operator under valid padding mode, we can achieve the same functionality of
CoreML' pooling under IncludeLastPixel padding mode.
:param scope:
:param input_name:
:param output_name:
:param H_in: input dimension along H-axis
:param W_in: input dimension along W-axis
:param k_h: kernel's H-axis dimension
:param k_w: kernel's W-axis dimension
:param s_h: stride along H-axis
:param s_w: stride along W-axis
:param p_h: padding amount at the beginning and the end of H-axis
:param p_w: padding amount at the beginning and the end of W-axis
:param padded_value: value used to fill padded area
:param container: Container object
### Response:
def create_legacy_pad(scope, input_name, output_name, H_in, W_in, k_h, k_w,
s_h, s_w, p_h, p_w, padded_value, container):
'''
This function adds one Pad operator into its last argument, which is a Container object. By feeding the output of
the created Pad operator into Pool operator under valid padding mode, we can achieve the same functionality of
CoreML' pooling under IncludeLastPixel padding mode.
:param scope:
:param input_name:
:param output_name:
:param H_in: input dimension along H-axis
:param W_in: input dimension along W-axis
:param k_h: kernel's H-axis dimension
:param k_w: kernel's W-axis dimension
:param s_h: stride along H-axis
:param s_w: stride along W-axis
:param p_h: padding amount at the beginning and the end of H-axis
:param p_w: padding amount at the beginning and the end of W-axis
:param padded_value: value used to fill padded area
:param container: Container object
'''
# Add a Pad operator to pre-process 4-D tensor
pad_t, pad_b = calculate_legacy_pad_amount(H_in, p_h, k_h, s_h)
pad_l, pad_r = calculate_legacy_pad_amount(W_in, p_w, k_w, s_w)
# CoreML pooling operator pads only their H- and W-axes. Here we assume the shape of the tensor to be padded
# is [N, C, H, W], so we have 8 padding amounts
# pads = [N_begin_index, C_begin_index, H_begin_index, W_begin_index,
# N_end_index, C_end_index, H_end_index, W_end_index]
# Because only H- and W-axes are padded in CoreML, we leave padding amounts of N- and C-axes zeros.
pads = [0, 0, pad_t, pad_l, 0, 0, pad_b, pad_r]
apply_pad(scope, input_name, output_name, container, pads=pads, value=padded_value) |
def to_fp(self, file_pointer, comments=None):
"""
The method can be used to save a CNF formula into a file pointer.
The file pointer is expected as an argument. Additionally,
supplementary comment lines can be specified in the ``comments``
parameter.
:param fname: a file name where to store the formula.
:param comments: additional comments to put in the file.
:type fname: str
:type comments: list(str)
Example:
.. code-block:: python
>>> from pysat.formula import CNF
>>> cnf = CNF()
...
>>> # the formula is filled with a bunch of clauses
>>> with open('some-file.cnf', 'w') as fp:
... cnf.to_fp(fp) # writing to the file pointer
"""
# saving formula's internal comments
for c in self.comments:
print(c, file=file_pointer)
# saving externally specified comments
if comments:
for c in comments:
print(c, file=file_pointer)
print('p cnf', self.nv, len(self.clauses), file=file_pointer)
for cl in self.clauses:
print(' '.join(str(l) for l in cl), '0', file=file_pointer) | The method can be used to save a CNF formula into a file pointer.
The file pointer is expected as an argument. Additionally,
supplementary comment lines can be specified in the ``comments``
parameter.
:param fname: a file name where to store the formula.
:param comments: additional comments to put in the file.
:type fname: str
:type comments: list(str)
Example:
.. code-block:: python
>>> from pysat.formula import CNF
>>> cnf = CNF()
...
>>> # the formula is filled with a bunch of clauses
>>> with open('some-file.cnf', 'w') as fp:
... cnf.to_fp(fp) # writing to the file pointer | Below is the the instruction that describes the task:
### Input:
The method can be used to save a CNF formula into a file pointer.
The file pointer is expected as an argument. Additionally,
supplementary comment lines can be specified in the ``comments``
parameter.
:param fname: a file name where to store the formula.
:param comments: additional comments to put in the file.
:type fname: str
:type comments: list(str)
Example:
.. code-block:: python
>>> from pysat.formula import CNF
>>> cnf = CNF()
...
>>> # the formula is filled with a bunch of clauses
>>> with open('some-file.cnf', 'w') as fp:
... cnf.to_fp(fp) # writing to the file pointer
### Response:
def to_fp(self, file_pointer, comments=None):
"""
The method can be used to save a CNF formula into a file pointer.
The file pointer is expected as an argument. Additionally,
supplementary comment lines can be specified in the ``comments``
parameter.
:param fname: a file name where to store the formula.
:param comments: additional comments to put in the file.
:type fname: str
:type comments: list(str)
Example:
.. code-block:: python
>>> from pysat.formula import CNF
>>> cnf = CNF()
...
>>> # the formula is filled with a bunch of clauses
>>> with open('some-file.cnf', 'w') as fp:
... cnf.to_fp(fp) # writing to the file pointer
"""
# saving formula's internal comments
for c in self.comments:
print(c, file=file_pointer)
# saving externally specified comments
if comments:
for c in comments:
print(c, file=file_pointer)
print('p cnf', self.nv, len(self.clauses), file=file_pointer)
for cl in self.clauses:
print(' '.join(str(l) for l in cl), '0', file=file_pointer) |
def update_template_version(
self,
name,
subject,
template_id,
version_id,
text='',
html=None,
timeout=None
):
""" API call to update a template version """
if(html):
payload = {
'name': name,
'subject': subject,
'html': html,
'text': text
}
else:
payload = {
'name': name,
'subject': subject,
'text': text
}
return self._api_request(
self.TEMPLATES_VERSION_ENDPOINT % (template_id, version_id),
self.HTTP_PUT,
payload=payload,
timeout=timeout
) | API call to update a template version | Below is the the instruction that describes the task:
### Input:
API call to update a template version
### Response:
def update_template_version(
self,
name,
subject,
template_id,
version_id,
text='',
html=None,
timeout=None
):
""" API call to update a template version """
if(html):
payload = {
'name': name,
'subject': subject,
'html': html,
'text': text
}
else:
payload = {
'name': name,
'subject': subject,
'text': text
}
return self._api_request(
self.TEMPLATES_VERSION_ENDPOINT % (template_id, version_id),
self.HTTP_PUT,
payload=payload,
timeout=timeout
) |
def delete_renditions_if_master_has_changed(sender, instance, **kwargs):
'''if master file as changed delete all renditions'''
try:
obj = sender.objects.get(pk=instance.pk)
except sender.DoesNotExist:
pass # Object is new, so field hasn't technically changed.
else:
if not obj.master == instance.master: # Field has changed
obj.master.delete(save=False)
instance.delete_all_renditions() | if master file as changed delete all renditions | Below is the the instruction that describes the task:
### Input:
if master file as changed delete all renditions
### Response:
def delete_renditions_if_master_has_changed(sender, instance, **kwargs):
'''if master file as changed delete all renditions'''
try:
obj = sender.objects.get(pk=instance.pk)
except sender.DoesNotExist:
pass # Object is new, so field hasn't technically changed.
else:
if not obj.master == instance.master: # Field has changed
obj.master.delete(save=False)
instance.delete_all_renditions() |
def invert_dictset(d):
"""Invert a dictionary with keys matching a set of values, turned into lists."""
# Based on recipe from ASPN
result = {}
for k, c in d.items():
for v in c:
keys = result.setdefault(v, [])
keys.append(k)
return result | Invert a dictionary with keys matching a set of values, turned into lists. | Below is the the instruction that describes the task:
### Input:
Invert a dictionary with keys matching a set of values, turned into lists.
### Response:
def invert_dictset(d):
"""Invert a dictionary with keys matching a set of values, turned into lists."""
# Based on recipe from ASPN
result = {}
for k, c in d.items():
for v in c:
keys = result.setdefault(v, [])
keys.append(k)
return result |
def __getNumberOfFollowers(self, web):
"""Scrap the number of followers from a GitHub profile.
:param web: parsed web.
:type web: BeautifulSoup node.
"""
counters = web.find_all('span', {'class': 'Counter'})
try:
if 'k' not in counters[2].text:
self.followers = int(counters[2].text)
else:
follText = counters[2].text.replace(" ", "")
follText = follText.replace("\n", "").replace("k", "")
if follText and len(follText) > 1:
self.followers = int(follText.split(".")[0])*1000 + \
int(follText.split(".")[1]) * 100
elif follText:
self.followers = int(follText.split(".")[0])*1000
except IndexError as error:
print("There was an error with the user " + self.name)
print(error)
except AttributeError as error:
print("There was an error with the user " + self.name)
print(error) | Scrap the number of followers from a GitHub profile.
:param web: parsed web.
:type web: BeautifulSoup node. | Below is the the instruction that describes the task:
### Input:
Scrap the number of followers from a GitHub profile.
:param web: parsed web.
:type web: BeautifulSoup node.
### Response:
def __getNumberOfFollowers(self, web):
"""Scrap the number of followers from a GitHub profile.
:param web: parsed web.
:type web: BeautifulSoup node.
"""
counters = web.find_all('span', {'class': 'Counter'})
try:
if 'k' not in counters[2].text:
self.followers = int(counters[2].text)
else:
follText = counters[2].text.replace(" ", "")
follText = follText.replace("\n", "").replace("k", "")
if follText and len(follText) > 1:
self.followers = int(follText.split(".")[0])*1000 + \
int(follText.split(".")[1]) * 100
elif follText:
self.followers = int(follText.split(".")[0])*1000
except IndexError as error:
print("There was an error with the user " + self.name)
print(error)
except AttributeError as error:
print("There was an error with the user " + self.name)
print(error) |
def ttl(self):
"""how long you should cache results for cacheable queries"""
ret = 3600
cn = self.get_process()
if "ttl" in cn:
ret = cn["ttl"]
return ret | how long you should cache results for cacheable queries | Below is the the instruction that describes the task:
### Input:
how long you should cache results for cacheable queries
### Response:
def ttl(self):
"""how long you should cache results for cacheable queries"""
ret = 3600
cn = self.get_process()
if "ttl" in cn:
ret = cn["ttl"]
return ret |
def _pop_key(self, key, has_default):
"""Remove an element by key."""
try:
index, value = self._dict.pop(key)
except KeyError:
if has_default:
return None, None
else:
raise
key2, value2 = self._list.pop(index)
assert key is key2
assert value is value2
return index, value | Remove an element by key. | Below is the the instruction that describes the task:
### Input:
Remove an element by key.
### Response:
def _pop_key(self, key, has_default):
"""Remove an element by key."""
try:
index, value = self._dict.pop(key)
except KeyError:
if has_default:
return None, None
else:
raise
key2, value2 = self._list.pop(index)
assert key is key2
assert value is value2
return index, value |
def check_syntax(string):
""" Check syntax of a string of PostgreSQL-dialect SQL """
args = ["ecpg", "-o", "-", "-"]
with open(os.devnull, "w") as devnull:
try:
proc = subprocess.Popen(args, shell=False,
stdout=devnull,
stdin=subprocess.PIPE,
stderr=subprocess.PIPE,
universal_newlines=True)
_, err = proc.communicate(string)
except OSError:
msg = "Unable to execute 'ecpg', you likely need to install it.'"
raise OSError(msg)
if proc.returncode == 0:
return (True, "")
else:
return (False, parse_error(err)) | Check syntax of a string of PostgreSQL-dialect SQL | Below is the the instruction that describes the task:
### Input:
Check syntax of a string of PostgreSQL-dialect SQL
### Response:
def check_syntax(string):
""" Check syntax of a string of PostgreSQL-dialect SQL """
args = ["ecpg", "-o", "-", "-"]
with open(os.devnull, "w") as devnull:
try:
proc = subprocess.Popen(args, shell=False,
stdout=devnull,
stdin=subprocess.PIPE,
stderr=subprocess.PIPE,
universal_newlines=True)
_, err = proc.communicate(string)
except OSError:
msg = "Unable to execute 'ecpg', you likely need to install it.'"
raise OSError(msg)
if proc.returncode == 0:
return (True, "")
else:
return (False, parse_error(err)) |
def _extract_program_from_pyquil_executable_response(response: PyQuilExecutableResponse) -> Program:
"""
Unpacks a rpcq PyQuilExecutableResponse object into a pyQuil Program object.
:param response: PyQuilExecutableResponse object to be unpacked.
:return: Resulting pyQuil Program object.
"""
p = Program(response.program)
for attr, val in response.attributes.items():
setattr(p, attr, val)
return p | Unpacks a rpcq PyQuilExecutableResponse object into a pyQuil Program object.
:param response: PyQuilExecutableResponse object to be unpacked.
:return: Resulting pyQuil Program object. | Below is the the instruction that describes the task:
### Input:
Unpacks a rpcq PyQuilExecutableResponse object into a pyQuil Program object.
:param response: PyQuilExecutableResponse object to be unpacked.
:return: Resulting pyQuil Program object.
### Response:
def _extract_program_from_pyquil_executable_response(response: PyQuilExecutableResponse) -> Program:
"""
Unpacks a rpcq PyQuilExecutableResponse object into a pyQuil Program object.
:param response: PyQuilExecutableResponse object to be unpacked.
:return: Resulting pyQuil Program object.
"""
p = Program(response.program)
for attr, val in response.attributes.items():
setattr(p, attr, val)
return p |
def process(self, metric):
"""
process a single metric
@type metric: diamond.metric.Metric
@param metric: metric to process
@rtype None
"""
for rule in self.rules:
rule.process(metric, self) | process a single metric
@type metric: diamond.metric.Metric
@param metric: metric to process
@rtype None | Below is the the instruction that describes the task:
### Input:
process a single metric
@type metric: diamond.metric.Metric
@param metric: metric to process
@rtype None
### Response:
def process(self, metric):
"""
process a single metric
@type metric: diamond.metric.Metric
@param metric: metric to process
@rtype None
"""
for rule in self.rules:
rule.process(metric, self) |
def _listen(self):
"""Listen for messages passed from parent
This method distributes messages received via stdin to their
corresponding channel. Based on the format of the incoming
message, the message is forwarded to its corresponding channel
to be processed by its corresponding handler.
"""
def _listen():
"""This runs in a thread"""
for line in iter(sys.stdin.readline, b""):
try:
response = json.loads(line)
except Exception as e:
# The parent has passed on a message that
# isn't formatted in any particular way.
# This is likely a bug.
raise e
else:
if response.get("header") == "pyblish-qml:popen.response":
self.channels["response"].put(line)
elif response.get("header") == "pyblish-qml:popen.parent":
self.channels["parent"].put(line)
elif response.get("header") == "pyblish-qml:server.pulse":
self._kill.cancel() # reset timer
self._self_destruct()
else:
# The parent has passed on a message that
# is JSON, but not in any format we recognise.
# This is likely a bug.
raise Exception("Unhandled message "
"passed to Popen, '%s'" % line)
thread = threading.Thread(target=_listen)
thread.daemon = True
thread.start() | Listen for messages passed from parent
This method distributes messages received via stdin to their
corresponding channel. Based on the format of the incoming
message, the message is forwarded to its corresponding channel
to be processed by its corresponding handler. | Below is the the instruction that describes the task:
### Input:
Listen for messages passed from parent
This method distributes messages received via stdin to their
corresponding channel. Based on the format of the incoming
message, the message is forwarded to its corresponding channel
to be processed by its corresponding handler.
### Response:
def _listen(self):
"""Listen for messages passed from parent
This method distributes messages received via stdin to their
corresponding channel. Based on the format of the incoming
message, the message is forwarded to its corresponding channel
to be processed by its corresponding handler.
"""
def _listen():
"""This runs in a thread"""
for line in iter(sys.stdin.readline, b""):
try:
response = json.loads(line)
except Exception as e:
# The parent has passed on a message that
# isn't formatted in any particular way.
# This is likely a bug.
raise e
else:
if response.get("header") == "pyblish-qml:popen.response":
self.channels["response"].put(line)
elif response.get("header") == "pyblish-qml:popen.parent":
self.channels["parent"].put(line)
elif response.get("header") == "pyblish-qml:server.pulse":
self._kill.cancel() # reset timer
self._self_destruct()
else:
# The parent has passed on a message that
# is JSON, but not in any format we recognise.
# This is likely a bug.
raise Exception("Unhandled message "
"passed to Popen, '%s'" % line)
thread = threading.Thread(target=_listen)
thread.daemon = True
thread.start() |
def load_mozlz4(self, path):
"""Load a Mozilla LZ4 file from the user profile.
Mozilla LZ4 is regular LZ4 with a custom string prefix.
"""
with open(self.profile_path(path, must_exist=True), 'rb') as f:
if f.read(8) != b'mozLz40\0':
raise NotMozLz4Error('Not Mozilla LZ4 format.')
data = lz4.block.decompress(f.read())
return data | Load a Mozilla LZ4 file from the user profile.
Mozilla LZ4 is regular LZ4 with a custom string prefix. | Below is the the instruction that describes the task:
### Input:
Load a Mozilla LZ4 file from the user profile.
Mozilla LZ4 is regular LZ4 with a custom string prefix.
### Response:
def load_mozlz4(self, path):
"""Load a Mozilla LZ4 file from the user profile.
Mozilla LZ4 is regular LZ4 with a custom string prefix.
"""
with open(self.profile_path(path, must_exist=True), 'rb') as f:
if f.read(8) != b'mozLz40\0':
raise NotMozLz4Error('Not Mozilla LZ4 format.')
data = lz4.block.decompress(f.read())
return data |
def get_uid(path, follow_symlinks=True):
'''
Return the id of the user that owns a given file
Symlinks are followed by default to mimic Unix behavior. Specify
`follow_symlinks=False` to turn off this behavior.
Args:
path (str): The path to the file or directory
follow_symlinks (bool):
If the object specified by ``path`` is a symlink, get attributes of
the linked file instead of the symlink itself. Default is True
Returns:
str: The uid of the owner
CLI Example:
.. code-block:: bash
salt '*' file.get_uid c:\\temp\\test.txt
salt '*' file.get_uid c:\\temp\\test.txt follow_symlinks=False
'''
if not os.path.exists(path):
raise CommandExecutionError('Path not found: {0}'.format(path))
# Under Windows, if the path is a symlink, the user that owns the symlink is
# returned, not the user that owns the file/directory the symlink is
# pointing to. This behavior is *different* to *nix, therefore the symlink
# is first resolved manually if necessary. Remember symlinks are only
# supported on Windows Vista or later.
if follow_symlinks and sys.getwindowsversion().major >= 6:
path = _resolve_symlink(path)
owner_sid = salt.utils.win_dacl.get_owner(path)
return salt.utils.win_dacl.get_sid_string(owner_sid) | Return the id of the user that owns a given file
Symlinks are followed by default to mimic Unix behavior. Specify
`follow_symlinks=False` to turn off this behavior.
Args:
path (str): The path to the file or directory
follow_symlinks (bool):
If the object specified by ``path`` is a symlink, get attributes of
the linked file instead of the symlink itself. Default is True
Returns:
str: The uid of the owner
CLI Example:
.. code-block:: bash
salt '*' file.get_uid c:\\temp\\test.txt
salt '*' file.get_uid c:\\temp\\test.txt follow_symlinks=False | Below is the the instruction that describes the task:
### Input:
Return the id of the user that owns a given file
Symlinks are followed by default to mimic Unix behavior. Specify
`follow_symlinks=False` to turn off this behavior.
Args:
path (str): The path to the file or directory
follow_symlinks (bool):
If the object specified by ``path`` is a symlink, get attributes of
the linked file instead of the symlink itself. Default is True
Returns:
str: The uid of the owner
CLI Example:
.. code-block:: bash
salt '*' file.get_uid c:\\temp\\test.txt
salt '*' file.get_uid c:\\temp\\test.txt follow_symlinks=False
### Response:
def get_uid(path, follow_symlinks=True):
'''
Return the id of the user that owns a given file
Symlinks are followed by default to mimic Unix behavior. Specify
`follow_symlinks=False` to turn off this behavior.
Args:
path (str): The path to the file or directory
follow_symlinks (bool):
If the object specified by ``path`` is a symlink, get attributes of
the linked file instead of the symlink itself. Default is True
Returns:
str: The uid of the owner
CLI Example:
.. code-block:: bash
salt '*' file.get_uid c:\\temp\\test.txt
salt '*' file.get_uid c:\\temp\\test.txt follow_symlinks=False
'''
if not os.path.exists(path):
raise CommandExecutionError('Path not found: {0}'.format(path))
# Under Windows, if the path is a symlink, the user that owns the symlink is
# returned, not the user that owns the file/directory the symlink is
# pointing to. This behavior is *different* to *nix, therefore the symlink
# is first resolved manually if necessary. Remember symlinks are only
# supported on Windows Vista or later.
if follow_symlinks and sys.getwindowsversion().major >= 6:
path = _resolve_symlink(path)
owner_sid = salt.utils.win_dacl.get_owner(path)
return salt.utils.win_dacl.get_sid_string(owner_sid) |
def run(self, args):
"""Current line number in source file"""
# info line identifier
if not self.proc.curframe:
self.errmsg("No line number information available.")
return
if len(args) == 3:
# lineinfo returns (item, file, lineno) or (None,)
answer = self.lineinfo(args[2])
if answer[0]:
item, filename, lineno = answer
if not os.path.isfile(filename):
filename = Mclifns.search_file(filename,
self.core.search_path,
self.main_dirname)
self.msg('Line %s of "%s" <%s>' %
(lineno, filename, item))
return
filename=self.core.canonic_filename(self.proc.curframe)
if not os.path.isfile(filename):
filename = Mclifns.search_file(filename, self.core.search_path,
self.main_dirname)
pass
filename = self.core.canonic_filename(self.proc.curframe)
msg1 = 'Line %d of \"%s\"' % (inspect.getlineno(self.proc.curframe),
self.core.filename(filename))
msg2 = ('at instruction %d' % self.proc.curframe.f_lasti)
if self.proc.event:
msg2 += ', %s event' % self.proc.event
pass
self.msg(Mmisc.wrapped_lines(msg1, msg2, self.settings['width']))
return False | Current line number in source file | Below is the the instruction that describes the task:
### Input:
Current line number in source file
### Response:
def run(self, args):
"""Current line number in source file"""
# info line identifier
if not self.proc.curframe:
self.errmsg("No line number information available.")
return
if len(args) == 3:
# lineinfo returns (item, file, lineno) or (None,)
answer = self.lineinfo(args[2])
if answer[0]:
item, filename, lineno = answer
if not os.path.isfile(filename):
filename = Mclifns.search_file(filename,
self.core.search_path,
self.main_dirname)
self.msg('Line %s of "%s" <%s>' %
(lineno, filename, item))
return
filename=self.core.canonic_filename(self.proc.curframe)
if not os.path.isfile(filename):
filename = Mclifns.search_file(filename, self.core.search_path,
self.main_dirname)
pass
filename = self.core.canonic_filename(self.proc.curframe)
msg1 = 'Line %d of \"%s\"' % (inspect.getlineno(self.proc.curframe),
self.core.filename(filename))
msg2 = ('at instruction %d' % self.proc.curframe.f_lasti)
if self.proc.event:
msg2 += ', %s event' % self.proc.event
pass
self.msg(Mmisc.wrapped_lines(msg1, msg2, self.settings['width']))
return False |
def send(self, stack: Layers):
"""
Add a message stack to the send list.
"""
if not isinstance(stack, Stack):
stack = Stack(stack)
if not self.platform.accept(stack):
raise UnacceptableStack('The platform does not allow "{}"'
.format(stack.describe()))
self._stacks.append(stack) | Add a message stack to the send list. | Below is the the instruction that describes the task:
### Input:
Add a message stack to the send list.
### Response:
def send(self, stack: Layers):
"""
Add a message stack to the send list.
"""
if not isinstance(stack, Stack):
stack = Stack(stack)
if not self.platform.accept(stack):
raise UnacceptableStack('The platform does not allow "{}"'
.format(stack.describe()))
self._stacks.append(stack) |
def _sample_action_fluent(self,
name: str,
dtype: tf.DType,
size: Sequence[int],
constraints: Dict[str, Constraints],
default_value: tf.Tensor,
prob: float) -> tf.Tensor:
'''Samples the action fluent with given `name`, `dtype`, and `size`.
With probability `prob` it chooses the action fluent `default_value`,
with probability 1-`prob` it samples the fluent w.r.t. its `constraints`.
Args:
name (str): The name of the action fluent.
dtype (tf.DType): The data type of the action fluent.
size (Sequence[int]): The size and shape of the action fluent.
constraints (Dict[str, Tuple[Optional[TensorFluent], Optional[TensorFluent]]]): The bounds for each action fluent.
default_value (tf.Tensor): The default value for the action fluent.
prob (float): A probability measure.
Returns:
tf.Tensor: A tensor for sampling the action fluent.
'''
shape = [self.batch_size] + list(size)
if dtype == tf.float32:
bounds = constraints.get(name)
if bounds is None:
low, high = -self.MAX_REAL_VALUE, self.MAX_REAL_VALUE
dist = tf.distributions.Uniform(low=low, high=high)
sampled_fluent = dist.sample(shape)
else:
low, high = bounds
batch = (low is not None and low.batch) or (high is not None and high.batch)
low = tf.cast(low.tensor, tf.float32) if low is not None else -self.MAX_REAL_VALUE
high = tf.cast(high.tensor, tf.float32) if high is not None else self.MAX_REAL_VALUE
dist = tf.distributions.Uniform(low=low, high=high)
if batch:
sampled_fluent = dist.sample()
elif isinstance(low, tf.Tensor) or isinstance(high, tf.Tensor):
if (low+high).shape.as_list() == list(size):
sampled_fluent = dist.sample([self.batch_size])
else:
raise ValueError('bounds are not compatible with action fluent.')
else:
sampled_fluent = dist.sample(shape)
elif dtype == tf.int32:
logits = [1.0] * self.MAX_INT_VALUE
dist = tf.distributions.Categorical(logits=logits, dtype=tf.int32)
sampled_fluent = dist.sample(shape)
elif dtype == tf.bool:
probs = 0.5
dist = tf.distributions.Bernoulli(probs=probs, dtype=tf.bool)
sampled_fluent = dist.sample(shape)
select_default = tf.distributions.Bernoulli(prob, dtype=tf.bool).sample(self.batch_size)
action_fluent = tf.where(select_default, default_value, sampled_fluent)
return action_fluent | Samples the action fluent with given `name`, `dtype`, and `size`.
With probability `prob` it chooses the action fluent `default_value`,
with probability 1-`prob` it samples the fluent w.r.t. its `constraints`.
Args:
name (str): The name of the action fluent.
dtype (tf.DType): The data type of the action fluent.
size (Sequence[int]): The size and shape of the action fluent.
constraints (Dict[str, Tuple[Optional[TensorFluent], Optional[TensorFluent]]]): The bounds for each action fluent.
default_value (tf.Tensor): The default value for the action fluent.
prob (float): A probability measure.
Returns:
tf.Tensor: A tensor for sampling the action fluent. | Below is the the instruction that describes the task:
### Input:
Samples the action fluent with given `name`, `dtype`, and `size`.
With probability `prob` it chooses the action fluent `default_value`,
with probability 1-`prob` it samples the fluent w.r.t. its `constraints`.
Args:
name (str): The name of the action fluent.
dtype (tf.DType): The data type of the action fluent.
size (Sequence[int]): The size and shape of the action fluent.
constraints (Dict[str, Tuple[Optional[TensorFluent], Optional[TensorFluent]]]): The bounds for each action fluent.
default_value (tf.Tensor): The default value for the action fluent.
prob (float): A probability measure.
Returns:
tf.Tensor: A tensor for sampling the action fluent.
### Response:
def _sample_action_fluent(self,
                          name: str,
                          dtype: tf.DType,
                          size: Sequence[int],
                          constraints: Dict[str, Constraints],
                          default_value: tf.Tensor,
                          prob: float) -> tf.Tensor:
    '''Samples the action fluent with given `name`, `dtype`, and `size`.

    With probability `prob` it chooses the action fluent `default_value`,
    with probability 1-`prob` it samples the fluent w.r.t. its `constraints`.

    Args:
        name (str): The name of the action fluent.
        dtype (tf.DType): The data type of the action fluent.
        size (Sequence[int]): The size and shape of the action fluent.
        constraints (Dict[str, Tuple[Optional[TensorFluent], Optional[TensorFluent]]]): The bounds for each action fluent.
        default_value (tf.Tensor): The default value for the action fluent.
        prob (float): Probability of choosing `default_value` instead of a sample.

    Returns:
        tf.Tensor: A tensor for sampling the action fluent.

    Raises:
        ValueError: If the bounds are incompatible with the fluent shape,
            or if `dtype` is not one of tf.float32, tf.int32, tf.bool.
    '''
    shape = [self.batch_size] + list(size)
    if dtype == tf.float32:
        # Real-valued fluents are sampled uniformly within their bounds
        # (or within [-MAX_REAL_VALUE, MAX_REAL_VALUE] when unbounded).
        bounds = constraints.get(name)
        if bounds is None:
            low, high = -self.MAX_REAL_VALUE, self.MAX_REAL_VALUE
            dist = tf.distributions.Uniform(low=low, high=high)
            sampled_fluent = dist.sample(shape)
        else:
            low, high = bounds
            # `batch` is True when either bound already carries a batch dimension.
            batch = (low is not None and low.batch) or (high is not None and high.batch)
            low = tf.cast(low.tensor, tf.float32) if low is not None else -self.MAX_REAL_VALUE
            high = tf.cast(high.tensor, tf.float32) if high is not None else self.MAX_REAL_VALUE
            dist = tf.distributions.Uniform(low=low, high=high)
            if batch:
                # Bounds already batched: one sample per batched bound.
                sampled_fluent = dist.sample()
            elif isinstance(low, tf.Tensor) or isinstance(high, tf.Tensor):
                # Tensor-valued bounds without a batch dimension must match
                # the fluent's shape; then only the batch axis is sampled.
                if (low + high).shape.as_list() == list(size):
                    sampled_fluent = dist.sample([self.batch_size])
                else:
                    raise ValueError('bounds are not compatible with action fluent.')
            else:
                sampled_fluent = dist.sample(shape)
    elif dtype == tf.int32:
        # Uniform categorical over the supported integer range.
        logits = [1.0] * self.MAX_INT_VALUE
        dist = tf.distributions.Categorical(logits=logits, dtype=tf.int32)
        sampled_fluent = dist.sample(shape)
    elif dtype == tf.bool:
        # Fair coin flip for boolean fluents.
        probs = 0.5
        dist = tf.distributions.Bernoulli(probs=probs, dtype=tf.bool)
        sampled_fluent = dist.sample(shape)
    else:
        # Guard against falling through with `sampled_fluent` undefined,
        # which would previously have raised a confusing NameError below.
        raise ValueError('unsupported action fluent dtype: {}'.format(dtype))
    # BUG FIX: the first positional argument of tf.distributions.Bernoulli
    # is `logits`, not `probs`; `prob` must be passed as `probs=prob` so
    # that `prob` is interpreted as the probability of keeping the default.
    select_default = tf.distributions.Bernoulli(probs=prob, dtype=tf.bool).sample(self.batch_size)
    action_fluent = tf.where(select_default, default_value, sampled_fluent)
    return action_fluent
def taper_rate(p0, p1):
'''Compute the taper rate between points p0 and p1
Args:
p0, p1: iterables with first 4 components containing (x, y, z, r)
Returns:
The taper rate, defined as the absolute value of the difference in
the diameters of p0 and p1 divided by the euclidian distance
between them.
'''
return 2 * abs(p0[COLS.R] - p1[COLS.R]) / point_dist(p0, p1) | Compute the taper rate between points p0 and p1
Args:
p0, p1: iterables with first 4 components containing (x, y, z, r)
Returns:
The taper rate, defined as the absolute value of the difference in
the diameters of p0 and p1 divided by the euclidian distance
between them. | Below is the instruction that describes the task:
### Input:
Compute the taper rate between points p0 and p1
Args:
p0, p1: iterables with first 4 components containing (x, y, z, r)
Returns:
The taper rate, defined as the absolute value of the difference in
the diameters of p0 and p1 divided by the euclidian distance
between them.
### Response:
def taper_rate(p0, p1):
    '''Compute the taper rate between points p0 and p1.

    Args:
        p0, p1: iterables whose first 4 components are (x, y, z, r)

    Returns:
        The taper rate: the absolute difference between the diameters
        (twice the radii) of p0 and p1, divided by the euclidean
        distance between the two points.
    '''
    radius_difference = abs(p0[COLS.R] - p1[COLS.R])
    return 2 * radius_difference / point_dist(p0, p1)
def transform_current_line(self, transform_callback):
"""
Apply the given transformation function to the current line.
:param transform_callback: callable that takes a string and return a new string.
"""
document = self.document
a = document.cursor_position + document.get_start_of_line_position()
b = document.cursor_position + document.get_end_of_line_position()
self.text = (
document.text[:a] +
transform_callback(document.text[a:b]) +
document.text[b:]) | Apply the given transformation function to the current line.
:param transform_callback: callable that takes a string and return a new string. | Below is the instruction that describes the task:
### Input:
Apply the given transformation function to the current line.
:param transform_callback: callable that takes a string and return a new string.
### Response:
def transform_current_line(self, transform_callback):
    """
    Apply the given transformation function to the current line.

    :param transform_callback: callable that takes a string and returns a new string.
    """
    doc = self.document
    # Absolute offsets of the current line within the whole text.
    line_start = doc.cursor_position + doc.get_start_of_line_position()
    line_end = doc.cursor_position + doc.get_end_of_line_position()
    before = doc.text[:line_start]
    current = doc.text[line_start:line_end]
    after = doc.text[line_end:]
    self.text = before + transform_callback(current) + after
def checkArgs(args):
"""Checks the arguments and options.
:param args: an object containing the options of the program.
:type args: argparse.Namespace
:returns: ``True`` if everything was OK.
If there is a problem with an option, an exception is raised using the
:py:class:`ProgramError` class, a message is printed to the
:class:`sys.stderr` and the program exists with code 1.
"""
# Check if we have the tped and the tfam files
for fileName in [args.bfile + i for i in [".bed", ".bim", ".fam"]]:
if not os.path.isfile(fileName):
msg = "%(fileName)s: no such file" % locals()
raise ProgramError(msg)
# Check the indep-pairwise option
# The two first must be int, the last one float
try:
for i in xrange(2):
tmp = int(args.indep_pairwise[i])
tmp = float(args.indep_pairwise[2])
except ValueError:
msg = "indep-pairwise: need INT INT FLOAT"
raise ProgramError(msg)
# Check the maf value
tmpMAF = None
try:
tmpMAF = float(args.maf)
except ValueError:
msg = "maf: must be a float, not %s" % args.maf
raise ProgramError(msg)
if (tmpMAF > 0.5) or (tmpMAF < 0.0):
msg = "maf: must be between 0.0 and 0.5, not %s" % args.maf
raise ProgramError(msg)
# Check the number of line per file
if args.line_per_file_for_sge < 1:
msg = "line-per-file-for-sge: must be above 0, not " \
"%d" % args.line_per_file_for_sge
raise ProgramError(msg)
# Check the minimum number of SNPs
if args.min_nb_snp < 1:
msg = "min-nb-snp: must be above 1"
raise ProgramError(msg)
return True | Checks the arguments and options.
:param args: an object containing the options of the program.
:type args: argparse.Namespace
:returns: ``True`` if everything was OK.
If there is a problem with an option, an exception is raised using the
:py:class:`ProgramError` class, a message is printed to the
:class:`sys.stderr` and the program exists with code 1. | Below is the the instruction that describes the task:
### Input:
Checks the arguments and options.
:param args: an object containing the options of the program.
:type args: argparse.Namespace
:returns: ``True`` if everything was OK.
If there is a problem with an option, an exception is raised using the
:py:class:`ProgramError` class, a message is printed to the
:class:`sys.stderr` and the program exists with code 1.
### Response:
def checkArgs(args):
    """Checks the arguments and options.

    :param args: an object containing the options of the program.
    :type args: argparse.Namespace

    :returns: ``True`` if everything was OK.

    If there is a problem with an option, an exception is raised using the
    :py:class:`ProgramError` class, a message is printed to the
    :class:`sys.stderr` and the program exits with code 1.
    """
    # Check that the binary PLINK fileset (bed, bim and fam files) exists
    for fileName in [args.bfile + i for i in [".bed", ".bim", ".fam"]]:
        if not os.path.isfile(fileName):
            msg = "%(fileName)s: no such file" % locals()
            raise ProgramError(msg)
    # Check the indep-pairwise option
    # The two first must be int, the last one float (window size, step, r^2)
    try:
        for i in xrange(2):
            tmp = int(args.indep_pairwise[i])
        tmp = float(args.indep_pairwise[2])
    except ValueError:
        msg = "indep-pairwise: need INT INT FLOAT"
        raise ProgramError(msg)
    # Check the maf value: must parse as a float in the closed range [0.0, 0.5]
    tmpMAF = None
    try:
        tmpMAF = float(args.maf)
    except ValueError:
        msg = "maf: must be a float, not %s" % args.maf
        raise ProgramError(msg)
    if (tmpMAF > 0.5) or (tmpMAF < 0.0):
        msg = "maf: must be between 0.0 and 0.5, not %s" % args.maf
        raise ProgramError(msg)
    # Check the number of lines per file (used when splitting work for SGE)
    if args.line_per_file_for_sge < 1:
        msg = "line-per-file-for-sge: must be above 0, not " \
              "%d" % args.line_per_file_for_sge
        raise ProgramError(msg)
    # Check the minimum number of SNPs
    if args.min_nb_snp < 1:
        msg = "min-nb-snp: must be above 1"
        raise ProgramError(msg)
    return True
return True |
def file_from_content(content):
"""Provides a real file object from file content
Converts ``FileStorage``, ``FileIntent`` and
``bytes`` to an actual file.
"""
f = content
if isinstance(content, cgi.FieldStorage):
f = content.file
elif isinstance(content, FileIntent):
f = content._fileobj
elif isinstance(content, byte_string):
f = SpooledTemporaryFile(INMEMORY_FILESIZE)
f.write(content)
f.seek(0)
return f | Provides a real file object from file content
Converts ``FileStorage``, ``FileIntent`` and
``bytes`` to an actual file. | Below is the the instruction that describes the task:
### Input:
Provides a real file object from file content
Converts ``FileStorage``, ``FileIntent`` and
``bytes`` to an actual file.
### Response:
def file_from_content(content):
    """Provides a real file object from file content.

    Converts ``cgi.FieldStorage``, ``FileIntent`` and ``bytes`` to an
    actual file-like object; any other value is returned unchanged.
    """
    if isinstance(content, cgi.FieldStorage):
        return content.file
    if isinstance(content, FileIntent):
        return content._fileobj
    if isinstance(content, byte_string):
        # Raw bytes are wrapped in a seekable spooled temporary file.
        spooled = SpooledTemporaryFile(INMEMORY_FILESIZE)
        spooled.write(content)
        spooled.seek(0)
        return spooled
    return content
def violin(adata, keys, groupby=None, log=False, use_raw=None, stripplot=True, jitter=True,
size=1, scale='width', order=None, multi_panel=None, show=None,
xlabel='', rotation=None, save=None, ax=None, **kwds):
"""\
Violin plot.
Wraps `seaborn.violinplot` for :class:`~anndata.AnnData`.
Parameters
----------
adata : :class:`~anndata.AnnData`
Annotated data matrix.
keys : `str` or list of `str`
Keys for accessing variables of `.var_names` or fields of `.obs`.
groupby : `str` or `None`, optional (default: `None`)
The key of the observation grouping to consider.
log : `bool`, optional (default: `False`)
Plot on logarithmic axis.
use_raw : `bool`, optional (default: `None`)
Use `raw` attribute of `adata` if present.
multi_panel : `bool`, optional (default: `False`)
Display keys in multiple panels also when `groupby is not None`.
stripplot : `bool` optional (default: `True`)
Add a stripplot on top of the violin plot.
See `seaborn.stripplot`.
jitter : `float` or `bool`, optional (default: `True`)
Add jitter to the stripplot (only when stripplot is True)
See `seaborn.stripplot`.
size : int, optional (default: 1)
Size of the jitter points.
order : list of str, optional (default: `True`)
Order in which to show the categories.
scale : {{'area', 'count', 'width'}}, optional (default: 'width')
The method used to scale the width of each violin. If 'area', each
violin will have the same area. If 'count', the width of the violins
will be scaled by the number of observations in that bin. If 'width',
each violin will have the same width.
xlabel : `str`, optional (default: `''`)
Label of the x axis. Defaults to `groupby` if `rotation` is `None`,
otherwise, no label is shown.
rotation : `float`, optional (default: `None`)
Rotation of xtick labels.
{show_save_ax}
**kwds : keyword arguments
Are passed to `seaborn.violinplot`.
Returns
-------
A :class:`~matplotlib.axes.Axes` object if `ax` is `None` else `None`.
"""
sanitize_anndata(adata)
if use_raw is None and adata.raw is not None: use_raw = True
if isinstance(keys, str): keys = [keys]
obs_keys = False
for key in keys:
if key in adata.obs_keys(): obs_keys = True
if obs_keys and key not in set(adata.obs_keys()):
raise ValueError(
'Either use observation keys or variable names, but do not mix. '
'Did not find {} in adata.obs_keys().'.format(key))
if obs_keys:
obs_df = adata.obs
else:
if groupby is None: obs_df = pd.DataFrame()
else: obs_df = pd.DataFrame(adata.obs[groupby])
for key in keys:
if adata.raw is not None and use_raw:
X_col = adata.raw[:, key].X
else:
X_col = adata[:, key].X
obs_df[key] = X_col
if groupby is None:
obs_tidy = pd.melt(obs_df, value_vars=keys)
x = 'variable'
ys = ['value']
else:
obs_tidy = obs_df
x = groupby
ys = keys
if multi_panel:
if groupby is None and len(ys) == 1:
# This is a quick and dirty way for adapting scales across several
# keys if groupby is None.
y = ys[0]
g = sns.FacetGrid(obs_tidy, col=x, col_order=keys, sharey=False)
# don't really know why this gives a warning without passing `order`
g = g.map(sns.violinplot, y, inner=None, orient='vertical',
scale=scale, order=keys, **kwds)
if stripplot:
g = g.map(sns.stripplot, y, orient='vertical', jitter=jitter, size=size, order=keys,
color='black')
if log:
g.set(yscale='log')
g.set_titles(col_template='{col_name}').set_xlabels('')
if rotation is not None:
for ax in g.axes[0]:
ax.tick_params(labelrotation=rotation)
else:
if ax is None:
axs, _, _, _ = setup_axes(
ax=ax, panels=['x'] if groupby is None else keys, show_ticks=True, right_margin=0.3)
else:
axs = [ax]
for ax, y in zip(axs, ys):
ax = sns.violinplot(x, y=y, data=obs_tidy, inner=None, order=order,
orient='vertical', scale=scale, ax=ax, **kwds)
if stripplot:
ax = sns.stripplot(x, y=y, data=obs_tidy, order=order,
jitter=jitter, color='black', size=size, ax=ax)
if xlabel == '' and groupby is not None and rotation is None:
xlabel = groupby.replace('_', ' ')
ax.set_xlabel(xlabel)
if log:
ax.set_yscale('log')
if rotation is not None:
ax.tick_params(labelrotation=rotation)
utils.savefig_or_show('violin', show=show, save=save)
if show is False:
if multi_panel:
return g
elif len(axs) == 1:
return axs[0]
else:
return axs | \
Violin plot.
Wraps `seaborn.violinplot` for :class:`~anndata.AnnData`.
Parameters
----------
adata : :class:`~anndata.AnnData`
Annotated data matrix.
keys : `str` or list of `str`
Keys for accessing variables of `.var_names` or fields of `.obs`.
groupby : `str` or `None`, optional (default: `None`)
The key of the observation grouping to consider.
log : `bool`, optional (default: `False`)
Plot on logarithmic axis.
use_raw : `bool`, optional (default: `None`)
Use `raw` attribute of `adata` if present.
multi_panel : `bool`, optional (default: `False`)
Display keys in multiple panels also when `groupby is not None`.
stripplot : `bool` optional (default: `True`)
Add a stripplot on top of the violin plot.
See `seaborn.stripplot`.
jitter : `float` or `bool`, optional (default: `True`)
Add jitter to the stripplot (only when stripplot is True)
See `seaborn.stripplot`.
size : int, optional (default: 1)
Size of the jitter points.
order : list of str, optional (default: `True`)
Order in which to show the categories.
scale : {{'area', 'count', 'width'}}, optional (default: 'width')
The method used to scale the width of each violin. If 'area', each
violin will have the same area. If 'count', the width of the violins
will be scaled by the number of observations in that bin. If 'width',
each violin will have the same width.
xlabel : `str`, optional (default: `''`)
Label of the x axis. Defaults to `groupby` if `rotation` is `None`,
otherwise, no label is shown.
rotation : `float`, optional (default: `None`)
Rotation of xtick labels.
{show_save_ax}
**kwds : keyword arguments
Are passed to `seaborn.violinplot`.
Returns
-------
A :class:`~matplotlib.axes.Axes` object if `ax` is `None` else `None`. | Below is the the instruction that describes the task:
### Input:
\
Violin plot.
Wraps `seaborn.violinplot` for :class:`~anndata.AnnData`.
Parameters
----------
adata : :class:`~anndata.AnnData`
Annotated data matrix.
keys : `str` or list of `str`
Keys for accessing variables of `.var_names` or fields of `.obs`.
groupby : `str` or `None`, optional (default: `None`)
The key of the observation grouping to consider.
log : `bool`, optional (default: `False`)
Plot on logarithmic axis.
use_raw : `bool`, optional (default: `None`)
Use `raw` attribute of `adata` if present.
multi_panel : `bool`, optional (default: `False`)
Display keys in multiple panels also when `groupby is not None`.
stripplot : `bool` optional (default: `True`)
Add a stripplot on top of the violin plot.
See `seaborn.stripplot`.
jitter : `float` or `bool`, optional (default: `True`)
Add jitter to the stripplot (only when stripplot is True)
See `seaborn.stripplot`.
size : int, optional (default: 1)
Size of the jitter points.
order : list of str, optional (default: `True`)
Order in which to show the categories.
scale : {{'area', 'count', 'width'}}, optional (default: 'width')
The method used to scale the width of each violin. If 'area', each
violin will have the same area. If 'count', the width of the violins
will be scaled by the number of observations in that bin. If 'width',
each violin will have the same width.
xlabel : `str`, optional (default: `''`)
Label of the x axis. Defaults to `groupby` if `rotation` is `None`,
otherwise, no label is shown.
rotation : `float`, optional (default: `None`)
Rotation of xtick labels.
{show_save_ax}
**kwds : keyword arguments
Are passed to `seaborn.violinplot`.
Returns
-------
A :class:`~matplotlib.axes.Axes` object if `ax` is `None` else `None`.
### Response:
def violin(adata, keys, groupby=None, log=False, use_raw=None, stripplot=True, jitter=True,
           size=1, scale='width', order=None, multi_panel=None, show=None,
           xlabel='', rotation=None, save=None, ax=None, **kwds):
    """\
    Violin plot.

    Wraps `seaborn.violinplot` for :class:`~anndata.AnnData`.

    Parameters
    ----------
    adata : :class:`~anndata.AnnData`
        Annotated data matrix.
    keys : `str` or list of `str`
        Keys for accessing variables of `.var_names` or fields of `.obs`.
    groupby : `str` or `None`, optional (default: `None`)
        The key of the observation grouping to consider.
    log : `bool`, optional (default: `False`)
        Plot on logarithmic axis.
    use_raw : `bool`, optional (default: `None`)
        Use `raw` attribute of `adata` if present.
    multi_panel : `bool`, optional (default: `False`)
        Display keys in multiple panels also when `groupby is not None`.
    stripplot : `bool` optional (default: `True`)
        Add a stripplot on top of the violin plot.
        See `seaborn.stripplot`.
    jitter : `float` or `bool`, optional (default: `True`)
        Add jitter to the stripplot (only when stripplot is True)
        See `seaborn.stripplot`.
    size : int, optional (default: 1)
        Size of the jitter points.
    order : list of str, optional (default: `True`)
        Order in which to show the categories.
    scale : {{'area', 'count', 'width'}}, optional (default: 'width')
        The method used to scale the width of each violin. If 'area', each
        violin will have the same area. If 'count', the width of the violins
        will be scaled by the number of observations in that bin. If 'width',
        each violin will have the same width.
    xlabel : `str`, optional (default: `''`)
        Label of the x axis. Defaults to `groupby` if `rotation` is `None`,
        otherwise, no label is shown.
    rotation : `float`, optional (default: `None`)
        Rotation of xtick labels.
    {show_save_ax}
    **kwds : keyword arguments
        Are passed to `seaborn.violinplot`.

    Returns
    -------
    A :class:`~matplotlib.axes.Axes` object if `ax` is `None` else `None`.
    """
    sanitize_anndata(adata)
    # Default to the .raw attribute when present and the caller did not decide.
    if use_raw is None and adata.raw is not None: use_raw = True
    if isinstance(keys, str): keys = [keys]
    # Decide whether `keys` refer to .obs columns or to variable names;
    # mixing the two kinds is rejected.
    obs_keys = False
    for key in keys:
        if key in adata.obs_keys(): obs_keys = True
        if obs_keys and key not in set(adata.obs_keys()):
            raise ValueError(
                'Either use observation keys or variable names, but do not mix. '
                'Did not find {} in adata.obs_keys().'.format(key))
    if obs_keys:
        obs_df = adata.obs
    else:
        # Build a DataFrame with one column per requested variable,
        # pulling values from .raw when `use_raw` is set.
        if groupby is None: obs_df = pd.DataFrame()
        else: obs_df = pd.DataFrame(adata.obs[groupby])
        for key in keys:
            if adata.raw is not None and use_raw:
                X_col = adata.raw[:, key].X
            else:
                X_col = adata[:, key].X
            obs_df[key] = X_col
    # Without a grouping, melt to long form so each key becomes a category
    # on the x axis; with a grouping, one panel/axis per key is drawn.
    if groupby is None:
        obs_tidy = pd.melt(obs_df, value_vars=keys)
        x = 'variable'
        ys = ['value']
    else:
        obs_tidy = obs_df
        x = groupby
        ys = keys
    if multi_panel:
        if groupby is None and len(ys) == 1:
            # This is a quick and dirty way for adapting scales across several
            # keys if groupby is None.
            y = ys[0]
            g = sns.FacetGrid(obs_tidy, col=x, col_order=keys, sharey=False)
            # don't really know why this gives a warning without passing `order`
            g = g.map(sns.violinplot, y, inner=None, orient='vertical',
                      scale=scale, order=keys, **kwds)
            if stripplot:
                g = g.map(sns.stripplot, y, orient='vertical', jitter=jitter, size=size, order=keys,
                          color='black')
            if log:
                g.set(yscale='log')
            g.set_titles(col_template='{col_name}').set_xlabels('')
            if rotation is not None:
                for ax in g.axes[0]:
                    ax.tick_params(labelrotation=rotation)
    else:
        # Single-panel (or one-axis-per-key) layout.
        if ax is None:
            axs, _, _, _ = setup_axes(
                ax=ax, panels=['x'] if groupby is None else keys, show_ticks=True, right_margin=0.3)
        else:
            axs = [ax]
        for ax, y in zip(axs, ys):
            ax = sns.violinplot(x, y=y, data=obs_tidy, inner=None, order=order,
                                orient='vertical', scale=scale, ax=ax, **kwds)
            if stripplot:
                ax = sns.stripplot(x, y=y, data=obs_tidy, order=order,
                                   jitter=jitter, color='black', size=size, ax=ax)
            if xlabel == '' and groupby is not None and rotation is None:
                xlabel = groupby.replace('_', ' ')
            ax.set_xlabel(xlabel)
            if log:
                ax.set_yscale('log')
            if rotation is not None:
                ax.tick_params(labelrotation=rotation)
    utils.savefig_or_show('violin', show=show, save=save)
    # Return the drawn object(s) only when the figure is not shown.
    if show is False:
        if multi_panel:
            return g
        elif len(axs) == 1:
            return axs[0]
        else:
            return axs
def get_all_external_link(self, **kwargs): # noqa: E501
"""Get all external links for a customer # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_all_external_link(async_req=True)
>>> result = thread.get()
:param async_req bool
:param int offset:
:param int limit:
:return: ResponseContainerPagedExternalLink
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.get_all_external_link_with_http_info(**kwargs) # noqa: E501
else:
(data) = self.get_all_external_link_with_http_info(**kwargs) # noqa: E501
return data | Get all external links for a customer # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_all_external_link(async_req=True)
>>> result = thread.get()
:param async_req bool
:param int offset:
:param int limit:
:return: ResponseContainerPagedExternalLink
If the method is called asynchronously,
returns the request thread. | Below is the the instruction that describes the task:
### Input:
Get all external links for a customer # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_all_external_link(async_req=True)
>>> result = thread.get()
:param async_req bool
:param int offset:
:param int limit:
:return: ResponseContainerPagedExternalLink
If the method is called asynchronously,
returns the request thread.
### Response:
def get_all_external_link(self, **kwargs):  # noqa: E501
    """Get all external links for a customer  # noqa: E501

    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True
    >>> thread = api.get_all_external_link(async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param int offset:
    :param int limit:
    :return: ResponseContainerPagedExternalLink
             If the method is called asynchronously,
             returns the request thread.
    """
    kwargs['_return_http_data_only'] = True
    # Both modes delegate to the *_with_http_info helper: with
    # async_req=True it returns the request thread, otherwise it
    # returns the response data directly.
    return self.get_all_external_link_with_http_info(**kwargs)  # noqa: E501
def unset(*args, **kw):
"""
Unset IRAF environment variables.
This is not a standard IRAF task, but it is obviously useful. It makes the
resulting variables undefined. It silently ignores variables that are not
defined. It does not change the os environment variables.
"""
if len(kw) != 0:
raise SyntaxError("unset requires a list of variable names")
for arg in args:
if arg in _varDict:
del _varDict[arg] | Unset IRAF environment variables.
This is not a standard IRAF task, but it is obviously useful. It makes the
resulting variables undefined. It silently ignores variables that are not
defined. It does not change the os environment variables. | Below is the the instruction that describes the task:
### Input:
Unset IRAF environment variables.
This is not a standard IRAF task, but it is obviously useful. It makes the
resulting variables undefined. It silently ignores variables that are not
defined. It does not change the os environment variables.
### Response:
def unset(*args, **kw):
    """
    Unset IRAF environment variables.

    This is not a standard IRAF task, but it is obviously useful. It makes
    the named variables undefined, silently ignoring names that are not
    defined. It does not change the os environment variables.
    """
    if kw:
        raise SyntaxError("unset requires a list of variable names")
    for name in args:
        # pop() with a default silently ignores variables that are not defined.
        _varDict.pop(name, None)
def getLogger(name='generalLoggerName',dr='',lvl=20,addFH=True,addSH=True,):
"""This will either return the logging object already
instantiated, or instantiate a new one and return it.
**Use this function to both create and return any logger** to avoid
accidentally adding additional handlers by using the setUpLogger function
instead.
Args:
name (str): The name for the logging object and
name.log will be the output file written to disk.
lvl (int): The severity level of messages printed to the screen with
the stream handler, default = 20.
addFH (boolean): Add a file handler to this logger? Default severity
level for it will be 1, and it will be named following
name+'.log'. Default = True.
addSH (boolean): Add a stream handler to this logger? Severity set with
the lvl argument. Default = True.
Returns:
log (KMlogger object): A KMlogger object that was either
freshly instantiated or determined to
already exist, then returned.
"""
log = False
try:
log = log_dict[name]
except:
log = setUpLogger(name,dr,lvl,addFH,addSH)
return log | This will either return the logging object already
instantiated, or instantiate a new one and return it.
**Use this function to both create and return any logger** to avoid
accidentally adding additional handlers by using the setUpLogger function
instead.
Args:
name (str): The name for the logging object and
name.log will be the output file written to disk.
lvl (int): The severity level of messages printed to the screen with
the stream handler, default = 20.
addFH (boolean): Add a file handler to this logger? Default severity
level for it will be 1, and it will be named following
name+'.log'. Default = True.
addSH (boolean): Add a stream handler to this logger? Severity set with
the lvl argument. Default = True.
Returns:
log (KMlogger object): A KMlogger object that was either
freshly instantiated or determined to
already exist, then returned. | Below is the the instruction that describes the task:
### Input:
This will either return the logging object already
instantiated, or instantiate a new one and return it.
**Use this function to both create and return any logger** to avoid
accidentally adding additional handlers by using the setUpLogger function
instead.
Args:
name (str): The name for the logging object and
name.log will be the output file written to disk.
lvl (int): The severity level of messages printed to the screen with
the stream handler, default = 20.
addFH (boolean): Add a file handler to this logger? Default severity
level for it will be 1, and it will be named following
name+'.log'. Default = True.
addSH (boolean): Add a stream handler to this logger? Severity set with
the lvl argument. Default = True.
Returns:
log (KMlogger object): A KMlogger object that was either
freshly instantiated or determined to
already exist, then returned.
### Response:
def getLogger(name='generalLoggerName', dr='', lvl=20, addFH=True, addSH=True):
    """Return the logging object for `name`, creating it if necessary.

    **Use this function to both create and return any logger** to avoid
    accidentally adding additional handlers by using the setUpLogger
    function instead.

    Args:
        name (str): The name for the logging object; name.log will be the
            output file written to disk.
        dr (str): Directory for the log file.
        lvl (int): The severity level of messages printed to the screen with
            the stream handler, default = 20.
        addFH (boolean): Add a file handler to this logger? Default severity
            level for it will be 1, and it will be named following
            name+'.log'. Default = True.
        addSH (boolean): Add a stream handler to this logger? Severity set
            with the lvl argument. Default = True.

    Returns:
        log (KMlogger object): A KMlogger object that was either freshly
            instantiated or determined to already exist, then returned.
    """
    try:
        # Reuse an already-instantiated logger so that no duplicate
        # handlers get attached to it.
        return log_dict[name]
    except KeyError:
        # Only a missing cache entry triggers creation; the previous bare
        # ``except:`` also silently hid unrelated errors.
        return setUpLogger(name, dr, lvl, addFH, addSH)
return log |
def transform_audio(self, y):
'''Compute HCQT magnitude.
Parameters
----------
y : np.ndarray
the audio buffer
Returns
-------
data : dict
data['mag'] : np.ndarray, shape=(n_frames, n_bins)
The CQT magnitude
'''
data = super(HCQTMag, self).transform_audio(y)
data.pop('phase')
return data | Compute HCQT magnitude.
Parameters
----------
y : np.ndarray
the audio buffer
Returns
-------
data : dict
data['mag'] : np.ndarray, shape=(n_frames, n_bins)
The CQT magnitude | Below is the the instruction that describes the task:
### Input:
Compute HCQT magnitude.
Parameters
----------
y : np.ndarray
the audio buffer
Returns
-------
data : dict
data['mag'] : np.ndarray, shape=(n_frames, n_bins)
The CQT magnitude
### Response:
def transform_audio(self, y):
    '''Compute HCQT magnitude.

    Parameters
    ----------
    y : np.ndarray
        the audio buffer

    Returns
    -------
    data : dict
        data['mag'] : np.ndarray, shape=(n_frames, n_bins)
            The CQT magnitude
    '''
    # The parent transformer computes both magnitude and phase; drop the
    # 'phase' entry so only the magnitude field remains in the result.
    data = super(HCQTMag, self).transform_audio(y)
    data.pop('phase')
    return data
def create_version_model(self, task, releasetype, descriptor):
"""Create and return a new model that represents taskfiles for the given task, releasetpye and descriptor
:param task: the task of the taskfiles
:type task: :class:`djadapter.models.Task`
:param releasetype: the releasetype
:type releasetype: str
:param descriptor: the descirptor
:type descriptor: str|None
:returns: the created tree model
:rtype: :class:`jukeboxcore.gui.treemodel.TreeModel`
:raises: None
"""
rootdata = treemodel.ListItemData(['Version', 'Releasetype', 'Path'])
rootitem = treemodel.TreeItem(rootdata)
for tf in task.taskfile_set.filter(releasetype=releasetype, descriptor=descriptor).order_by('-version'):
tfdata = djitemdata.TaskFileItemData(tf)
tfitem = treemodel.TreeItem(tfdata, rootitem)
for note in tf.notes.all():
notedata = djitemdata.NoteItemData(note)
treemodel.TreeItem(notedata, tfitem)
versionmodel = treemodel.TreeModel(rootitem)
return versionmodel | Create and return a new model that represents taskfiles for the given task, releasetpye and descriptor
:param task: the task of the taskfiles
:type task: :class:`djadapter.models.Task`
:param releasetype: the releasetype
:type releasetype: str
:param descriptor: the descirptor
:type descriptor: str|None
:returns: the created tree model
:rtype: :class:`jukeboxcore.gui.treemodel.TreeModel`
:raises: None | Below is the the instruction that describes the task:
### Input:
Create and return a new model that represents taskfiles for the given task, releasetpye and descriptor
:param task: the task of the taskfiles
:type task: :class:`djadapter.models.Task`
:param releasetype: the releasetype
:type releasetype: str
:param descriptor: the descirptor
:type descriptor: str|None
:returns: the created tree model
:rtype: :class:`jukeboxcore.gui.treemodel.TreeModel`
:raises: None
### Response:
def create_version_model(self, task, releasetype, descriptor):
"""Create and return a new model that represents taskfiles for the given task, releasetpye and descriptor
:param task: the task of the taskfiles
:type task: :class:`djadapter.models.Task`
:param releasetype: the releasetype
:type releasetype: str
:param descriptor: the descirptor
:type descriptor: str|None
:returns: the created tree model
:rtype: :class:`jukeboxcore.gui.treemodel.TreeModel`
:raises: None
"""
rootdata = treemodel.ListItemData(['Version', 'Releasetype', 'Path'])
rootitem = treemodel.TreeItem(rootdata)
for tf in task.taskfile_set.filter(releasetype=releasetype, descriptor=descriptor).order_by('-version'):
tfdata = djitemdata.TaskFileItemData(tf)
tfitem = treemodel.TreeItem(tfdata, rootitem)
for note in tf.notes.all():
notedata = djitemdata.NoteItemData(note)
treemodel.TreeItem(notedata, tfitem)
versionmodel = treemodel.TreeModel(rootitem)
return versionmodel |
def encode_cookie(payload, key=None):
'''
This will encode a ``unicode`` value into a cookie, and sign that cookie
with the app's secret key.
:param payload: The value to encode, as `unicode`.
:type payload: unicode
:param key: The key to use when creating the cookie digest. If not
specified, the SECRET_KEY value from app config will be used.
:type key: str
'''
return u'{0}|{1}'.format(payload, _cookie_digest(payload, key=key)) | This will encode a ``unicode`` value into a cookie, and sign that cookie
with the app's secret key.
:param payload: The value to encode, as `unicode`.
:type payload: unicode
:param key: The key to use when creating the cookie digest. If not
specified, the SECRET_KEY value from app config will be used.
:type key: str | Below is the the instruction that describes the task:
### Input:
This will encode a ``unicode`` value into a cookie, and sign that cookie
with the app's secret key.
:param payload: The value to encode, as `unicode`.
:type payload: unicode
:param key: The key to use when creating the cookie digest. If not
specified, the SECRET_KEY value from app config will be used.
:type key: str
### Response:
def encode_cookie(payload, key=None):
'''
This will encode a ``unicode`` value into a cookie, and sign that cookie
with the app's secret key.
:param payload: The value to encode, as `unicode`.
:type payload: unicode
:param key: The key to use when creating the cookie digest. If not
specified, the SECRET_KEY value from app config will be used.
:type key: str
'''
return u'{0}|{1}'.format(payload, _cookie_digest(payload, key=key)) |
def read_tuple(self):
"""Read a tuple from the pipe to Storm."""
cmd = self.read_command()
source = cmd["comp"]
stream = cmd["stream"]
values = cmd["tuple"]
val_type = self._source_tuple_types[source].get(stream)
return Tuple(
cmd["id"],
source,
stream,
cmd["task"],
tuple(values) if val_type is None else val_type(*values),
) | Read a tuple from the pipe to Storm. | Below is the the instruction that describes the task:
### Input:
Read a tuple from the pipe to Storm.
### Response:
def read_tuple(self):
"""Read a tuple from the pipe to Storm."""
cmd = self.read_command()
source = cmd["comp"]
stream = cmd["stream"]
values = cmd["tuple"]
val_type = self._source_tuple_types[source].get(stream)
return Tuple(
cmd["id"],
source,
stream,
cmd["task"],
tuple(values) if val_type is None else val_type(*values),
) |
def get_month_from_date_str(date_str, lang=DEFAULT_DATE_LANG):
"""Find the month name for the given locale, in the given string.
Returns a tuple ``(number_of_month, abbr_name)``.
"""
date_str = date_str.lower()
with calendar.different_locale(LOCALES[lang]):
month_abbrs = list(calendar.month_abbr)
for seq, abbr in enumerate(month_abbrs):
if abbr and abbr.lower() in date_str:
return seq, abbr
return () | Find the month name for the given locale, in the given string.
Returns a tuple ``(number_of_month, abbr_name)``. | Below is the the instruction that describes the task:
### Input:
Find the month name for the given locale, in the given string.
Returns a tuple ``(number_of_month, abbr_name)``.
### Response:
def get_month_from_date_str(date_str, lang=DEFAULT_DATE_LANG):
"""Find the month name for the given locale, in the given string.
Returns a tuple ``(number_of_month, abbr_name)``.
"""
date_str = date_str.lower()
with calendar.different_locale(LOCALES[lang]):
month_abbrs = list(calendar.month_abbr)
for seq, abbr in enumerate(month_abbrs):
if abbr and abbr.lower() in date_str:
return seq, abbr
return () |
def get_border_index(I, shape, size):
"""
Get flattened indices for the border of the region I.
Parameters
----------
I : np.ndarray(dtype=int)
indices in the flattened region.
size : int
region size (technically computable from shape argument)
shape : tuple(int, int)
region shape
Returns
-------
J : np.ndarray(dtype=int)
indices orthogonally and diagonally bordering I
"""
J = get_adjacent_index(I, shape, size)
# instead of setdiff?
# border = np.zeros(size)
# border[J] = 1
# border[I] = 0
# J, = np.where(border)
return np.setdiff1d(J, I) | Get flattened indices for the border of the region I.
Parameters
----------
I : np.ndarray(dtype=int)
indices in the flattened region.
size : int
region size (technically computable from shape argument)
shape : tuple(int, int)
region shape
Returns
-------
J : np.ndarray(dtype=int)
indices orthogonally and diagonally bordering I | Below is the the instruction that describes the task:
### Input:
Get flattened indices for the border of the region I.
Parameters
----------
I : np.ndarray(dtype=int)
indices in the flattened region.
size : int
region size (technically computable from shape argument)
shape : tuple(int, int)
region shape
Returns
-------
J : np.ndarray(dtype=int)
indices orthogonally and diagonally bordering I
### Response:
def get_border_index(I, shape, size):
"""
Get flattened indices for the border of the region I.
Parameters
----------
I : np.ndarray(dtype=int)
indices in the flattened region.
size : int
region size (technically computable from shape argument)
shape : tuple(int, int)
region shape
Returns
-------
J : np.ndarray(dtype=int)
indices orthogonally and diagonally bordering I
"""
J = get_adjacent_index(I, shape, size)
# instead of setdiff?
# border = np.zeros(size)
# border[J] = 1
# border[I] = 0
# J, = np.where(border)
return np.setdiff1d(J, I) |
def to_dataframe(fit, pars=None, permuted=False, dtypes=None, inc_warmup=False, diagnostics=True, header=True):
"""Extract samples as a pandas dataframe for different parameters.
Parameters
----------
pars : {str, sequence of str}
parameter (or quantile) name(s).
permuted : bool
If True, returned samples are permuted.
If inc_warmup is True, warmup samples have negative order.
dtypes : dict
datatype of parameter(s).
If nothing is passed, float will be used for all parameters.
inc_warmup : bool
If True, warmup samples are kept; otherwise they are
discarded.
diagnostics : bool
If True, include hmc diagnostics in dataframe.
header : bool
If True, include header columns.
Returns
-------
df : pandas dataframe
Returned dataframe contains: [header_df]|[draws_df]|[diagnostics_df],
where all groups are optional.
To exclude draws_df use `pars=[]`.
"""
try:
import pandas as pd
except ImportError:
raise ImportError("Pandas module not found. You can install pandas with: pip install pandas")
fit._verify_has_samples()
pars_original = pars
if pars is None:
pars = fit.sim['pars_oi']
elif isinstance(pars, string_types):
pars = [pars]
if pars:
pars = pystan.misc._remove_empty_pars(pars, fit.sim['pars_oi'], fit.sim['dims_oi'])
allpars = fit.sim['pars_oi'] + fit.sim['fnames_oi']
_check_pars(allpars, pars)
if dtypes is None:
dtypes = {}
n_kept = [s if inc_warmup else s-w for s, w in zip(fit.sim['n_save'], fit.sim['warmup2'])]
chains = len(fit.sim['samples'])
diagnostic_type = {'divergent__':int,
'energy__':float,
'treedepth__':int,
'accept_stat__':float,
'stepsize__':float,
'n_leapfrog__':int}
header_dict = OrderedDict()
if header:
idx = np.concatenate([np.full(n_kept[chain], chain, dtype=int) for chain in range(chains)])
warmup = [np.zeros(n_kept[chain], dtype=np.int64) for chain in range(chains)]
if inc_warmup:
draw = []
for chain, w in zip(range(chains), fit.sim['warmup2']):
warmup[chain][:w] = 1
draw.append(np.arange(n_kept[chain], dtype=np.int64) - w)
draw = np.concatenate(draw)
else:
draw = np.concatenate([np.arange(n_kept[chain], dtype=np.int64) for chain in range(chains)])
warmup = np.concatenate(warmup)
header_dict = OrderedDict(zip(['chain', 'draw', 'warmup'], [idx, draw, warmup]))
if permuted:
if inc_warmup:
chain_permutation = []
chain_permutation_order = []
permutation = []
permutation_order = []
for chain, p, w in zip(range(chains), fit.sim['permutation'], fit.sim['warmup2']):
chain_permutation.append(list(range(-w, 0)) + p)
chain_permutation_order.append(list(range(-w, 0)) + list(np.argsort(p)))
permutation.append(sum(n_kept[:chain])+chain_permutation[-1]+w)
permutation_order.append(sum(n_kept[:chain])+chain_permutation_order[-1]+w)
chain_permutation = np.concatenate(chain_permutation)
chain_permutation_order = np.concatenate(chain_permutation_order)
permutation = np.concatenate(permutation)
permutation_order = np.concatenate(permutation_order)
else:
chain_permutation = np.concatenate(fit.sim['permutation'])
chain_permutation_order = np.concatenate([np.argsort(item) for item in fit.sim['permutation']])
permutation = np.concatenate([sum(n_kept[:chain])+p for chain, p in enumerate(fit.sim['permutation'])])
permutation_order = np.argsort(permutation)
header_dict["permutation"] = permutation
header_dict["chain_permutation"] = chain_permutation
header_dict["permutation_order"] = permutation_order
header_dict["chain_permutation_order"] = chain_permutation_order
if header:
header_df = pd.DataFrame.from_dict(header_dict)
else:
if permuted:
header_df = pd.DataFrame.from_dict({"permutation_order" : header_dict["permutation_order"]})
else:
header_df = pd.DataFrame()
fnames_set = set(fit.sim['fnames_oi'])
pars_set = set(pars)
if pars_original is None or fnames_set == pars_set:
dfs = [pd.DataFrame.from_dict(pyholder.chains).iloc[-n:] for pyholder, n in zip(fit.sim['samples'], n_kept)]
df = pd.concat(dfs, axis=0, sort=False, ignore_index=True)
if dtypes:
if not fnames_set.issuperset(pars_set):
par_keys = OrderedDict([(par, []) for par in fit.sim['pars_oi']])
for key in fit.sim['fnames_oi']:
par = key.split("[")
par = par[0]
par_keys[par].append(key)
for par, dtype in dtypes.items():
if isinstance(dtype, (float, np.float64)):
continue
for key in par_keys.get(par, [par]):
df.loc[:, key] = df.loc[:, key].astype(dtype)
elif pars:
par_keys = dict()
if not fnames_set.issuperset(pars_set):
par_keys = OrderedDict([(par, []) for par in fit.sim['pars_oi']])
for key in fit.sim['fnames_oi']:
par = key.split("[")
par = par[0]
par_keys[par].append(key)
columns = []
for par in pars:
columns.extend(par_keys.get(par, [par]))
columns = list(np.unique(columns))
df = pd.DataFrame(index=np.arange(sum(n_kept)), columns=columns, dtype=float)
for key in columns:
key_values = []
for chain, (pyholder, n) in enumerate(zip(fit.sim['samples'], n_kept)):
key_values.append(pyholder.chains[key][-n:])
df.loc[:, key] = np.concatenate(key_values)
for par, dtype in dtypes.items():
if isinstance(dtype, (float, np.float64)):
continue
for key in par_keys.get(par, [par]):
df.loc[:, key] = df.loc[:, key].astype(dtype)
else:
df = pd.DataFrame()
if diagnostics:
diagnostics_dfs = []
for idx, (pyholder, permutation, n) in enumerate(zip(fit.sim['samples'], fit.sim['permutation'], n_kept), 1):
diagnostics_df = pd.DataFrame(pyholder['sampler_params'], index=pyholder['sampler_param_names']).T
diagnostics_df = diagnostics_df.iloc[-n:, :]
for key, dtype in diagnostic_type.items():
if key in diagnostics_df:
diagnostics_df.loc[:, key] = diagnostics_df.loc[:, key].astype(dtype)
diagnostics_dfs.append(diagnostics_df)
if diagnostics_dfs:
diagnostics_df = pd.concat(diagnostics_dfs, axis=0, sort=False, ignore_index=True)
else:
diagnostics_df = pd.DataFrame()
else:
diagnostics_df = pd.DataFrame()
df = pd.concat((header_df, df, diagnostics_df), axis=1, sort=False)
if permuted:
df.sort_values(by='permutation_order', inplace=True)
if not header:
df.drop(columns='permutation_order', inplace=True)
return df | Extract samples as a pandas dataframe for different parameters.
Parameters
----------
pars : {str, sequence of str}
parameter (or quantile) name(s).
permuted : bool
If True, returned samples are permuted.
If inc_warmup is True, warmup samples have negative order.
dtypes : dict
datatype of parameter(s).
If nothing is passed, float will be used for all parameters.
inc_warmup : bool
If True, warmup samples are kept; otherwise they are
discarded.
diagnostics : bool
If True, include hmc diagnostics in dataframe.
header : bool
If True, include header columns.
Returns
-------
df : pandas dataframe
Returned dataframe contains: [header_df]|[draws_df]|[diagnostics_df],
where all groups are optional.
To exclude draws_df use `pars=[]`. | Below is the the instruction that describes the task:
### Input:
Extract samples as a pandas dataframe for different parameters.
Parameters
----------
pars : {str, sequence of str}
parameter (or quantile) name(s).
permuted : bool
If True, returned samples are permuted.
If inc_warmup is True, warmup samples have negative order.
dtypes : dict
datatype of parameter(s).
If nothing is passed, float will be used for all parameters.
inc_warmup : bool
If True, warmup samples are kept; otherwise they are
discarded.
diagnostics : bool
If True, include hmc diagnostics in dataframe.
header : bool
If True, include header columns.
Returns
-------
df : pandas dataframe
Returned dataframe contains: [header_df]|[draws_df]|[diagnostics_df],
where all groups are optional.
To exclude draws_df use `pars=[]`.
### Response:
def to_dataframe(fit, pars=None, permuted=False, dtypes=None, inc_warmup=False, diagnostics=True, header=True):
"""Extract samples as a pandas dataframe for different parameters.
Parameters
----------
pars : {str, sequence of str}
parameter (or quantile) name(s).
permuted : bool
If True, returned samples are permuted.
If inc_warmup is True, warmup samples have negative order.
dtypes : dict
datatype of parameter(s).
If nothing is passed, float will be used for all parameters.
inc_warmup : bool
If True, warmup samples are kept; otherwise they are
discarded.
diagnostics : bool
If True, include hmc diagnostics in dataframe.
header : bool
If True, include header columns.
Returns
-------
df : pandas dataframe
Returned dataframe contains: [header_df]|[draws_df]|[diagnostics_df],
where all groups are optional.
To exclude draws_df use `pars=[]`.
"""
try:
import pandas as pd
except ImportError:
raise ImportError("Pandas module not found. You can install pandas with: pip install pandas")
fit._verify_has_samples()
pars_original = pars
if pars is None:
pars = fit.sim['pars_oi']
elif isinstance(pars, string_types):
pars = [pars]
if pars:
pars = pystan.misc._remove_empty_pars(pars, fit.sim['pars_oi'], fit.sim['dims_oi'])
allpars = fit.sim['pars_oi'] + fit.sim['fnames_oi']
_check_pars(allpars, pars)
if dtypes is None:
dtypes = {}
n_kept = [s if inc_warmup else s-w for s, w in zip(fit.sim['n_save'], fit.sim['warmup2'])]
chains = len(fit.sim['samples'])
diagnostic_type = {'divergent__':int,
'energy__':float,
'treedepth__':int,
'accept_stat__':float,
'stepsize__':float,
'n_leapfrog__':int}
header_dict = OrderedDict()
if header:
idx = np.concatenate([np.full(n_kept[chain], chain, dtype=int) for chain in range(chains)])
warmup = [np.zeros(n_kept[chain], dtype=np.int64) for chain in range(chains)]
if inc_warmup:
draw = []
for chain, w in zip(range(chains), fit.sim['warmup2']):
warmup[chain][:w] = 1
draw.append(np.arange(n_kept[chain], dtype=np.int64) - w)
draw = np.concatenate(draw)
else:
draw = np.concatenate([np.arange(n_kept[chain], dtype=np.int64) for chain in range(chains)])
warmup = np.concatenate(warmup)
header_dict = OrderedDict(zip(['chain', 'draw', 'warmup'], [idx, draw, warmup]))
if permuted:
if inc_warmup:
chain_permutation = []
chain_permutation_order = []
permutation = []
permutation_order = []
for chain, p, w in zip(range(chains), fit.sim['permutation'], fit.sim['warmup2']):
chain_permutation.append(list(range(-w, 0)) + p)
chain_permutation_order.append(list(range(-w, 0)) + list(np.argsort(p)))
permutation.append(sum(n_kept[:chain])+chain_permutation[-1]+w)
permutation_order.append(sum(n_kept[:chain])+chain_permutation_order[-1]+w)
chain_permutation = np.concatenate(chain_permutation)
chain_permutation_order = np.concatenate(chain_permutation_order)
permutation = np.concatenate(permutation)
permutation_order = np.concatenate(permutation_order)
else:
chain_permutation = np.concatenate(fit.sim['permutation'])
chain_permutation_order = np.concatenate([np.argsort(item) for item in fit.sim['permutation']])
permutation = np.concatenate([sum(n_kept[:chain])+p for chain, p in enumerate(fit.sim['permutation'])])
permutation_order = np.argsort(permutation)
header_dict["permutation"] = permutation
header_dict["chain_permutation"] = chain_permutation
header_dict["permutation_order"] = permutation_order
header_dict["chain_permutation_order"] = chain_permutation_order
if header:
header_df = pd.DataFrame.from_dict(header_dict)
else:
if permuted:
header_df = pd.DataFrame.from_dict({"permutation_order" : header_dict["permutation_order"]})
else:
header_df = pd.DataFrame()
fnames_set = set(fit.sim['fnames_oi'])
pars_set = set(pars)
if pars_original is None or fnames_set == pars_set:
dfs = [pd.DataFrame.from_dict(pyholder.chains).iloc[-n:] for pyholder, n in zip(fit.sim['samples'], n_kept)]
df = pd.concat(dfs, axis=0, sort=False, ignore_index=True)
if dtypes:
if not fnames_set.issuperset(pars_set):
par_keys = OrderedDict([(par, []) for par in fit.sim['pars_oi']])
for key in fit.sim['fnames_oi']:
par = key.split("[")
par = par[0]
par_keys[par].append(key)
for par, dtype in dtypes.items():
if isinstance(dtype, (float, np.float64)):
continue
for key in par_keys.get(par, [par]):
df.loc[:, key] = df.loc[:, key].astype(dtype)
elif pars:
par_keys = dict()
if not fnames_set.issuperset(pars_set):
par_keys = OrderedDict([(par, []) for par in fit.sim['pars_oi']])
for key in fit.sim['fnames_oi']:
par = key.split("[")
par = par[0]
par_keys[par].append(key)
columns = []
for par in pars:
columns.extend(par_keys.get(par, [par]))
columns = list(np.unique(columns))
df = pd.DataFrame(index=np.arange(sum(n_kept)), columns=columns, dtype=float)
for key in columns:
key_values = []
for chain, (pyholder, n) in enumerate(zip(fit.sim['samples'], n_kept)):
key_values.append(pyholder.chains[key][-n:])
df.loc[:, key] = np.concatenate(key_values)
for par, dtype in dtypes.items():
if isinstance(dtype, (float, np.float64)):
continue
for key in par_keys.get(par, [par]):
df.loc[:, key] = df.loc[:, key].astype(dtype)
else:
df = pd.DataFrame()
if diagnostics:
diagnostics_dfs = []
for idx, (pyholder, permutation, n) in enumerate(zip(fit.sim['samples'], fit.sim['permutation'], n_kept), 1):
diagnostics_df = pd.DataFrame(pyholder['sampler_params'], index=pyholder['sampler_param_names']).T
diagnostics_df = diagnostics_df.iloc[-n:, :]
for key, dtype in diagnostic_type.items():
if key in diagnostics_df:
diagnostics_df.loc[:, key] = diagnostics_df.loc[:, key].astype(dtype)
diagnostics_dfs.append(diagnostics_df)
if diagnostics_dfs:
diagnostics_df = pd.concat(diagnostics_dfs, axis=0, sort=False, ignore_index=True)
else:
diagnostics_df = pd.DataFrame()
else:
diagnostics_df = pd.DataFrame()
df = pd.concat((header_df, df, diagnostics_df), axis=1, sort=False)
if permuted:
df.sort_values(by='permutation_order', inplace=True)
if not header:
df.drop(columns='permutation_order', inplace=True)
return df |
def workflow_status(obj, details):
""" Show the status of the workflows. """
show_colors = obj['show_color']
config_cli = obj['config'].cli
if details:
temp_form = '{:>{}} {:20} {:25} {:25} {:38} {}'
else:
temp_form = '{:>{}} {:20} {:25} {} {} {}'
click.echo('\n')
click.echo(temp_form.format(
'Status',
12,
'Name',
'Start Time',
'ID' if details else '',
'Job' if details else '',
'Arguments'
))
click.echo('-' * (138 if details else 75))
def print_jobs(jobs, *, label='', color='green'):
for job in jobs:
start_time = job.start_time if job.start_time is not None else 'unknown'
click.echo(temp_form.format(
_style(show_colors, label, fg=color, bold=True),
25 if show_colors else 12,
job.name,
start_time.replace(tzinfo=pytz.utc).astimezone().strftime(
config_cli['time_format']),
job.workflow_id if details else '',
job.id if details else '',
','.join(['{}={}'.format(k, v) for k, v in job.arguments.items()]))
)
# running jobs
print_jobs(list_jobs(config=obj['config'],
status=JobStatus.Active,
filter_by_type=JobType.Workflow),
label='Running', color='green')
# scheduled jobs
print_jobs(list_jobs(config=obj['config'],
status=JobStatus.Scheduled,
filter_by_type=JobType.Workflow),
label='Scheduled', color='blue')
# registered jobs
print_jobs(list_jobs(config=obj['config'],
status=JobStatus.Registered,
filter_by_type=JobType.Workflow),
label='Registered', color='yellow')
# reserved jobs
print_jobs(list_jobs(config=obj['config'],
status=JobStatus.Reserved,
filter_by_type=JobType.Workflow),
label='Reserved', color='yellow') | Show the status of the workflows. | Below is the the instruction that describes the task:
### Input:
Show the status of the workflows.
### Response:
def workflow_status(obj, details):
""" Show the status of the workflows. """
show_colors = obj['show_color']
config_cli = obj['config'].cli
if details:
temp_form = '{:>{}} {:20} {:25} {:25} {:38} {}'
else:
temp_form = '{:>{}} {:20} {:25} {} {} {}'
click.echo('\n')
click.echo(temp_form.format(
'Status',
12,
'Name',
'Start Time',
'ID' if details else '',
'Job' if details else '',
'Arguments'
))
click.echo('-' * (138 if details else 75))
def print_jobs(jobs, *, label='', color='green'):
for job in jobs:
start_time = job.start_time if job.start_time is not None else 'unknown'
click.echo(temp_form.format(
_style(show_colors, label, fg=color, bold=True),
25 if show_colors else 12,
job.name,
start_time.replace(tzinfo=pytz.utc).astimezone().strftime(
config_cli['time_format']),
job.workflow_id if details else '',
job.id if details else '',
','.join(['{}={}'.format(k, v) for k, v in job.arguments.items()]))
)
# running jobs
print_jobs(list_jobs(config=obj['config'],
status=JobStatus.Active,
filter_by_type=JobType.Workflow),
label='Running', color='green')
# scheduled jobs
print_jobs(list_jobs(config=obj['config'],
status=JobStatus.Scheduled,
filter_by_type=JobType.Workflow),
label='Scheduled', color='blue')
# registered jobs
print_jobs(list_jobs(config=obj['config'],
status=JobStatus.Registered,
filter_by_type=JobType.Workflow),
label='Registered', color='yellow')
# reserved jobs
print_jobs(list_jobs(config=obj['config'],
status=JobStatus.Reserved,
filter_by_type=JobType.Workflow),
label='Reserved', color='yellow') |
def projection_name(self, **kwargs: Dict[str, Any]) -> str:
""" Define the projection name for this projector.
Note:
This function is just a basic placeholder and likely should be overridden.
Args:
kwargs: Projection information dict combined with additional arguments passed to the
projection function.
Returns:
Projection name string formatted with the passed options. By default, it returns
``projection_name_format`` formatted with the arguments to this function.
"""
return self.projection_name_format.format(**kwargs) | Define the projection name for this projector.
Note:
This function is just a basic placeholder and likely should be overridden.
Args:
kwargs: Projection information dict combined with additional arguments passed to the
projection function.
Returns:
Projection name string formatted with the passed options. By default, it returns
``projection_name_format`` formatted with the arguments to this function. | Below is the the instruction that describes the task:
### Input:
Define the projection name for this projector.
Note:
This function is just a basic placeholder and likely should be overridden.
Args:
kwargs: Projection information dict combined with additional arguments passed to the
projection function.
Returns:
Projection name string formatted with the passed options. By default, it returns
``projection_name_format`` formatted with the arguments to this function.
### Response:
def projection_name(self, **kwargs: Dict[str, Any]) -> str:
""" Define the projection name for this projector.
Note:
This function is just a basic placeholder and likely should be overridden.
Args:
kwargs: Projection information dict combined with additional arguments passed to the
projection function.
Returns:
Projection name string formatted with the passed options. By default, it returns
``projection_name_format`` formatted with the arguments to this function.
"""
return self.projection_name_format.format(**kwargs) |
def _startProductionCrewNode(self, name, attrs):
"""Process the start of a node under xtvd/productionCrew"""
if name == 'crew':
self._programId = attrs.get('program')
elif name == 'member':
self._role = None
self._givenname = None
self._surname = None | Process the start of a node under xtvd/productionCrew | Below is the the instruction that describes the task:
### Input:
Process the start of a node under xtvd/productionCrew
### Response:
def _startProductionCrewNode(self, name, attrs):
"""Process the start of a node under xtvd/productionCrew"""
if name == 'crew':
self._programId = attrs.get('program')
elif name == 'member':
self._role = None
self._givenname = None
self._surname = None |
def invalidate_caches(cls):
"""Call the invalidate_caches() method on all path entry finders
stored in sys.path_importer_caches (where implemented)."""
for finder in sys.path_importer_cache.values():
if hasattr(finder, 'invalidate_caches'):
finder.invalidate_caches() | Call the invalidate_caches() method on all path entry finders
stored in sys.path_importer_caches (where implemented). | Below is the the instruction that describes the task:
### Input:
Call the invalidate_caches() method on all path entry finders
stored in sys.path_importer_caches (where implemented).
### Response:
def invalidate_caches(cls):
"""Call the invalidate_caches() method on all path entry finders
stored in sys.path_importer_caches (where implemented)."""
for finder in sys.path_importer_cache.values():
if hasattr(finder, 'invalidate_caches'):
finder.invalidate_caches() |
def get_canvas_square_dpi(pageinfo, options):
"""Get the DPI when we require xres == yres, in Postscript units"""
return float(
max(
(pageinfo.xres) or VECTOR_PAGE_DPI,
(pageinfo.yres) or VECTOR_PAGE_DPI,
VECTOR_PAGE_DPI if pageinfo.has_vector else 0,
options.oversample or 0,
)
) | Get the DPI when we require xres == yres, in Postscript units | Below is the the instruction that describes the task:
### Input:
Get the DPI when we require xres == yres, in Postscript units
### Response:
def get_canvas_square_dpi(pageinfo, options):
"""Get the DPI when we require xres == yres, in Postscript units"""
return float(
max(
(pageinfo.xres) or VECTOR_PAGE_DPI,
(pageinfo.yres) or VECTOR_PAGE_DPI,
VECTOR_PAGE_DPI if pageinfo.has_vector else 0,
options.oversample or 0,
)
) |
def mouseDown(self, button):
""" Send a mouse button down at the last set position
button: int: [1-n]
"""
log.debug('mouseDown %s', button)
self.buttons |= 1 << (button - 1)
self.pointerEvent(self.x, self.y, buttonmask=self.buttons)
return self | Send a mouse button down at the last set position
button: int: [1-n] | Below is the the instruction that describes the task:
### Input:
Send a mouse button down at the last set position
button: int: [1-n]
### Response:
def mouseDown(self, button):
""" Send a mouse button down at the last set position
button: int: [1-n]
"""
log.debug('mouseDown %s', button)
self.buttons |= 1 << (button - 1)
self.pointerEvent(self.x, self.y, buttonmask=self.buttons)
return self |
def _kl_blockwise_blockwise(b0, b1, name=None):
"""Calculate the batched KL divergence KL(b0 || b1) with b0 and b1 Blockwise distributions.
Args:
b0: instance of a Blockwise distribution object.
b1: instance of a Blockwise distribution object.
name: (optional) Name to use for created operations. Default is
"kl_blockwise_blockwise".
Returns:
kl_blockwise_blockwise: `Tensor`. The batchwise KL(b0 || b1).
"""
if len(b0.distributions) != len(b1.distributions):
raise ValueError(
'Can only compute KL divergence between Blockwise distributions with '
'the same number of component distributions.')
# We also need to check that the event shapes match for each one.
b0_event_sizes = [_event_size(d) for d in b0.distributions]
b1_event_sizes = [_event_size(d) for d in b1.distributions]
assertions = []
message = ('Can only compute KL divergence between Blockwise distributions '
'with the same pairwise event shapes.')
if (all(isinstance(event_size, int) for event_size in b0_event_sizes) and
all(isinstance(event_size, int) for event_size in b1_event_sizes)):
if b0_event_sizes != b1_event_sizes:
raise ValueError(message)
else:
if b0.validate_args or b1.validate_args:
assertions.extend(
assert_util.assert_equal( # pylint: disable=g-complex-comprehension
e1, e2, message=message)
for e1, e2 in zip(b0_event_sizes, b1_event_sizes))
with tf.name_scope(name or 'kl_blockwise_blockwise'):
with tf.control_dependencies(assertions):
return sum([
kullback_leibler.kl_divergence(d1, d2) for d1, d2 in zip(
b0.distributions, b1.distributions)]) | Calculate the batched KL divergence KL(b0 || b1) with b0 and b1 Blockwise distributions.
Args:
b0: instance of a Blockwise distribution object.
b1: instance of a Blockwise distribution object.
name: (optional) Name to use for created operations. Default is
"kl_blockwise_blockwise".
Returns:
kl_blockwise_blockwise: `Tensor`. The batchwise KL(b0 || b1). | Below is the the instruction that describes the task:
### Input:
Calculate the batched KL divergence KL(b0 || b1) with b0 and b1 Blockwise distributions.
Args:
b0: instance of a Blockwise distribution object.
b1: instance of a Blockwise distribution object.
name: (optional) Name to use for created operations. Default is
"kl_blockwise_blockwise".
Returns:
kl_blockwise_blockwise: `Tensor`. The batchwise KL(b0 || b1).
### Response:
def _kl_blockwise_blockwise(b0, b1, name=None):
"""Calculate the batched KL divergence KL(b0 || b1) with b0 and b1 Blockwise distributions.
Args:
b0: instance of a Blockwise distribution object.
b1: instance of a Blockwise distribution object.
name: (optional) Name to use for created operations. Default is
"kl_blockwise_blockwise".
Returns:
kl_blockwise_blockwise: `Tensor`. The batchwise KL(b0 || b1).
"""
if len(b0.distributions) != len(b1.distributions):
raise ValueError(
'Can only compute KL divergence between Blockwise distributions with '
'the same number of component distributions.')
# We also need to check that the event shapes match for each one.
b0_event_sizes = [_event_size(d) for d in b0.distributions]
b1_event_sizes = [_event_size(d) for d in b1.distributions]
assertions = []
message = ('Can only compute KL divergence between Blockwise distributions '
'with the same pairwise event shapes.')
if (all(isinstance(event_size, int) for event_size in b0_event_sizes) and
all(isinstance(event_size, int) for event_size in b1_event_sizes)):
if b0_event_sizes != b1_event_sizes:
raise ValueError(message)
else:
if b0.validate_args or b1.validate_args:
assertions.extend(
assert_util.assert_equal( # pylint: disable=g-complex-comprehension
e1, e2, message=message)
for e1, e2 in zip(b0_event_sizes, b1_event_sizes))
with tf.name_scope(name or 'kl_blockwise_blockwise'):
with tf.control_dependencies(assertions):
return sum([
kullback_leibler.kl_divergence(d1, d2) for d1, d2 in zip(
b0.distributions, b1.distributions)]) |
def default_is_local(hadoop_conf=None, hadoop_home=None):
    """\
    Is Hadoop configured to use the local file system?
    By default, it is. A DFS must be explicitly configured.
    """
    # Check both the new-style (fs.defaultFS) and the deprecated
    # (fs.default.name) key; either may point away from ``file:``.
    params = pydoop.hadoop_params(hadoop_conf, hadoop_home)
    for k in 'fs.defaultFS', 'fs.default.name':
        if not params.get(k, 'file:').startswith('file:'):
            return False
    return True | \
Is Hadoop configured to use the local file system?
    By default, it is. A DFS must be explicitly configured. | Below is the instruction that describes the task:
### Input:
\
Is Hadoop configured to use the local file system?
By default, it is. A DFS must be explicitly configured.
### Response:
def default_is_local(hadoop_conf=None, hadoop_home=None):
    """\
    Report whether Hadoop is configured for the local file system.
    This is the default; a DFS must be configured explicitly, via either
    the new-style ``fs.defaultFS`` key or the legacy ``fs.default.name``.
    """
    params = pydoop.hadoop_params(hadoop_conf, hadoop_home)
    fs_keys = ('fs.defaultFS', 'fs.default.name')
    return all(
        params.get(key, 'file:').startswith('file:') for key in fs_keys)
def setPhaseDuration(self, tlsID, phaseDuration):
    """setPhaseDuration(string, integer or float) -> None
    Set the phase duration of the current phase in seconds.
    """
    # The TraCI wire protocol expects milliseconds, hence the factor 1000.
    self._connection._sendIntCmd(
        tc.CMD_SET_TL_VARIABLE, tc.TL_PHASE_DURATION, tlsID, int(1000 * phaseDuration)) | setPhaseDuration(string, integer or float) -> None
    Set the phase duration of the current phase in seconds. | Below is the instruction that describes the task:
### Input:
setPhaseDuration(string, integer or float) -> None
Set the phase duration of the current phase in seconds.
### Response:
def setPhaseDuration(self, tlsID, phaseDuration):
    """setPhaseDuration(string, integer or float) -> None
    Set the phase duration of the current phase in seconds.
    """
    # TraCI transmits durations in milliseconds.
    duration_ms = int(1000 * phaseDuration)
    self._connection._sendIntCmd(
        tc.CMD_SET_TL_VARIABLE, tc.TL_PHASE_DURATION, tlsID, duration_ms)
def PhyDMSParser():
    """Returns *argparse.ArgumentParser* for ``phydms`` script."""
    parser = ArgumentParserNoArgHelp(description=('Phylogenetic analysis '
            'informed by deep mutational scanning data. {0} Version {1}. Full'
            ' documentation at {2}').format(phydmslib.__acknowledgments__,
            phydmslib.__version__, phydmslib.__url__),
            formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    # Required positional arguments: input data, model and output location.
    parser.add_argument('alignment', type=ExistingFile,
            help='Existing FASTA file with aligned codon sequences.')
    parser.add_argument('tree', type=ExistingFile,
            help="Existing Newick file giving input tree.")
    parser.add_argument('model', type=ModelOption,
            help=("Substitution model: ExpCM_<prefsfile> or YNGKP_<m> ("
            "where <m> is {0}). For ExpCM, <prefsfile> has first "
            "column labeled 'site' and others labeled by 1-letter "
            "amino-acid code.").format(', '.join(yngkp_modelvariants)))
    parser.add_argument('outprefix', help='Output file prefix.', type=str)
    # Optional model variants; boolean flags get explicit False defaults.
    parser.add_argument('--brlen', choices=['scale', 'optimize'],
            default='optimize', help=("How to handle branch lengths: "
            "scale by single parameter or optimize each one"))
    parser.set_defaults(gammaomega=False)
    parser.add_argument('--gammaomega', action='store_true',
            dest='gammaomega', help="Omega for ExpCM from gamma "
            "distribution rather than single value. To achieve "
            "same for YNGKP, use 'model' of YNGKP_M5.")
    parser.set_defaults(gammabeta=False)
    parser.add_argument('--gammabeta', action='store_true',
            dest='gammabeta', help="Beta for ExpCM from gamma "
            "distribution rather than single value.")
    parser.set_defaults(omegabysite=False)
    parser.add_argument('--omegabysite', dest='omegabysite',
            action='store_true', help="Fit omega (dN/dS) for each site.")
    parser.set_defaults(omegabysite_fixsyn=False)
    parser.add_argument('--omegabysite_fixsyn', dest='omegabysite_fixsyn',
            action='store_true', help="For '--omegabysite', assign all "
            "sites same dS rather than fit for each site.")
    parser.set_defaults(diffprefsbysite=False)
    parser.add_argument('--diffprefsbysite', dest='diffprefsbysite',
            action='store_true', help="Fit differential preferences "
            "for each site.")
    parser.add_argument('--diffprefsprior', default='invquadratic,150,0.5',
            type=diffPrefsPrior, help="Regularizing prior for "
            "'--diffprefsbysite': 'invquadratic,C1,C2' is prior in "
            "Bloom, Biology Direct, 12:1.")
    parser.set_defaults(fitphi=False)
    parser.add_argument('--fitphi', action='store_true', dest='fitphi',
            help='Fit ExpCM phi rather than setting so stationary '
            'state matches alignment frequencies.')
    parser.set_defaults(randprefs=False)
    parser.add_argument('--randprefs', dest='randprefs', action='store_true',
            help="Randomize preferences among sites for ExpCM.")
    parser.set_defaults(avgprefs=False)
    parser.add_argument('--avgprefs', dest='avgprefs', action='store_true',
            help="Average preferences across sites for ExpCM.")
    parser.add_argument('--divpressure', type=ExistingFileOrNone,
            help=("Known diversifying pressure at sites: file with column 1 "
            "= position, column 2 = diversification pressure; columns space-, "
            "tab-, or comma-delimited."))
    # Numerical / optimization knobs.
    parser.add_argument('--ncpus', default=1, type=int,
            help='Use this many CPUs; -1 means all available.')
    parser.add_argument('--fitprefsmethod', choices=[1, 2], default=2,
            help='Implementation to we use when fitting prefs.', type=int)
    parser.add_argument('--ncats', default=4, type=IntGreaterThanOne,
            help='Number of categories for gamma-distribution.')
    parser.add_argument('--minbrlen', type=FloatGreaterThanZero,
            default=phydmslib.constants.ALMOST_ZERO,
            help="Adjust all branch lengths in starting 'tree' to >= this.")
    parser.add_argument('--minpref', default=0.002, type=FloatGreaterThanZero,
            help="Adjust all preferences in ExpCM 'prefsfile' to >= this.")
    parser.add_argument('--seed', type=int, default=1, help="Random number seed.")
    parser.add_argument('--initparams', type=ExistingFile, help="Initialize "
            "model params from this file, which should be format of "
            "'*_modelparams.txt' file created by 'phydms' with this model.")
    # Developer / debugging flags.
    parser.set_defaults(profile=False)
    parser.add_argument('--profile', dest='profile', action='store_true',
            help="Profile likelihood maximization, write pstats files. "
            "For code-development purposes.")
    parser.set_defaults(opt_details=False)
    parser.add_argument('--opt_details', dest='opt_details',
            action='store_true', help='Print details about optimization')
    parser.set_defaults(nograd=False)
    parser.add_argument('--nograd', dest='nograd', action='store_true',
            help="Do not use gradients for likelihood maximization.")
    parser.add_argument('-v', '--version', action='version', version=(
            ('%(prog)s {version}'.format(version=phydmslib.__version__))))
    return parser | Returns *argparse.ArgumentParser* for ``phydms`` script. | Below is the the instruction that describes the task:
### Input:
Returns *argparse.ArgumentParser* for ``phydms`` script.
### Response:
def PhyDMSParser():
    """Returns *argparse.ArgumentParser* for ``phydms`` script."""
    parser = ArgumentParserNoArgHelp(description=('Phylogenetic analysis '
            'informed by deep mutational scanning data. {0} Version {1}. Full'
            ' documentation at {2}').format(phydmslib.__acknowledgments__,
            phydmslib.__version__, phydmslib.__url__),
            formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    # Required positional arguments: input data, model and output location.
    parser.add_argument('alignment', type=ExistingFile,
            help='Existing FASTA file with aligned codon sequences.')
    parser.add_argument('tree', type=ExistingFile,
            help="Existing Newick file giving input tree.")
    parser.add_argument('model', type=ModelOption,
            help=("Substitution model: ExpCM_<prefsfile> or YNGKP_<m> ("
            "where <m> is {0}). For ExpCM, <prefsfile> has first "
            "column labeled 'site' and others labeled by 1-letter "
            "amino-acid code.").format(', '.join(yngkp_modelvariants)))
    parser.add_argument('outprefix', help='Output file prefix.', type=str)
    # Optional model variants; boolean flags get explicit False defaults.
    parser.add_argument('--brlen', choices=['scale', 'optimize'],
            default='optimize', help=("How to handle branch lengths: "
            "scale by single parameter or optimize each one"))
    parser.set_defaults(gammaomega=False)
    parser.add_argument('--gammaomega', action='store_true',
            dest='gammaomega', help="Omega for ExpCM from gamma "
            "distribution rather than single value. To achieve "
            "same for YNGKP, use 'model' of YNGKP_M5.")
    parser.set_defaults(gammabeta=False)
    parser.add_argument('--gammabeta', action='store_true',
            dest='gammabeta', help="Beta for ExpCM from gamma "
            "distribution rather than single value.")
    parser.set_defaults(omegabysite=False)
    parser.add_argument('--omegabysite', dest='omegabysite',
            action='store_true', help="Fit omega (dN/dS) for each site.")
    parser.set_defaults(omegabysite_fixsyn=False)
    parser.add_argument('--omegabysite_fixsyn', dest='omegabysite_fixsyn',
            action='store_true', help="For '--omegabysite', assign all "
            "sites same dS rather than fit for each site.")
    parser.set_defaults(diffprefsbysite=False)
    parser.add_argument('--diffprefsbysite', dest='diffprefsbysite',
            action='store_true', help="Fit differential preferences "
            "for each site.")
    parser.add_argument('--diffprefsprior', default='invquadratic,150,0.5',
            type=diffPrefsPrior, help="Regularizing prior for "
            "'--diffprefsbysite': 'invquadratic,C1,C2' is prior in "
            "Bloom, Biology Direct, 12:1.")
    parser.set_defaults(fitphi=False)
    parser.add_argument('--fitphi', action='store_true', dest='fitphi',
            help='Fit ExpCM phi rather than setting so stationary '
            'state matches alignment frequencies.')
    parser.set_defaults(randprefs=False)
    parser.add_argument('--randprefs', dest='randprefs', action='store_true',
            help="Randomize preferences among sites for ExpCM.")
    parser.set_defaults(avgprefs=False)
    parser.add_argument('--avgprefs', dest='avgprefs', action='store_true',
            help="Average preferences across sites for ExpCM.")
    parser.add_argument('--divpressure', type=ExistingFileOrNone,
            help=("Known diversifying pressure at sites: file with column 1 "
            "= position, column 2 = diversification pressure; columns space-, "
            "tab-, or comma-delimited."))
    # Numerical / optimization knobs.
    parser.add_argument('--ncpus', default=1, type=int,
            help='Use this many CPUs; -1 means all available.')
    parser.add_argument('--fitprefsmethod', choices=[1, 2], default=2,
            help='Implementation to we use when fitting prefs.', type=int)
    parser.add_argument('--ncats', default=4, type=IntGreaterThanOne,
            help='Number of categories for gamma-distribution.')
    parser.add_argument('--minbrlen', type=FloatGreaterThanZero,
            default=phydmslib.constants.ALMOST_ZERO,
            help="Adjust all branch lengths in starting 'tree' to >= this.")
    parser.add_argument('--minpref', default=0.002, type=FloatGreaterThanZero,
            help="Adjust all preferences in ExpCM 'prefsfile' to >= this.")
    parser.add_argument('--seed', type=int, default=1, help="Random number seed.")
    parser.add_argument('--initparams', type=ExistingFile, help="Initialize "
            "model params from this file, which should be format of "
            "'*_modelparams.txt' file created by 'phydms' with this model.")
    # Developer / debugging flags.
    parser.set_defaults(profile=False)
    parser.add_argument('--profile', dest='profile', action='store_true',
            help="Profile likelihood maximization, write pstats files. "
            "For code-development purposes.")
    parser.set_defaults(opt_details=False)
    parser.add_argument('--opt_details', dest='opt_details',
            action='store_true', help='Print details about optimization')
    parser.set_defaults(nograd=False)
    parser.add_argument('--nograd', dest='nograd', action='store_true',
            help="Do not use gradients for likelihood maximization.")
    parser.add_argument('-v', '--version', action='version', version=(
            ('%(prog)s {version}'.format(version=phydmslib.__version__))))
    return parser |
def dragEnterEvent(self, event):
    """
    Listens for query's being dragged and dropped onto this tree.
    :param event | <QDragEnterEvent>
    """
    # Accept the drag only when it carries both an ORB table name and an
    # ORB query, and the table matches the type this tree displays;
    # anything else falls through to the default tree handling.
    data = event.mimeData()
    if data.hasFormat('application/x-orb-table') and \
       data.hasFormat('application/x-orb-query'):
        tableName = self.tableTypeName()
        if nativestring(data.data('application/x-orb-table')) == tableName:
            event.acceptProposedAction()
            return
    super(XOrbTreeWidget, self).dragEnterEvent(event) | Listens for query's being dragged and dropped onto this tree.
    :param event | <QDragEnterEvent> | Below is the instruction that describes the task:
### Input:
Listens for query's being dragged and dropped onto this tree.
:param event | <QDragEnterEvent>
### Response:
def dragEnterEvent(self, event):
    """
    Accepts drags carrying an ORB table/query payload whose table matches
    this tree's type; other drags get the default tree handling.
    :param event | <QDragEnterEvent>
    """
    data = event.mimeData()
    has_orb_payload = (data.hasFormat('application/x-orb-table') and
                       data.hasFormat('application/x-orb-query'))
    if has_orb_payload:
        expected = self.tableTypeName()
        if nativestring(data.data('application/x-orb-table')) == expected:
            event.acceptProposedAction()
            return
    super(XOrbTreeWidget, self).dragEnterEvent(event)
def items(self):
    '''access for filtered items'''
    # Lazily fetch and memoize the raw items on first access; the filter
    # is re-applied on every call so filter changes take effect.
    if hasattr(self, '_items'):
        return self.filter_items(self._items)
    self._items = self.get_items()
    return self.filter_items(self._items) | access for filtered items | Below is the the instruction that describes the task:
### Input:
access for filtered items
### Response:
def items(self):
    '''Return the filtered item list, fetching the raw items only once.'''
    # EAFP: cache miss on the first access populates ``_items``; the
    # filter is re-applied on every call.
    try:
        raw = self._items
    except AttributeError:
        raw = self._items = self.get_items()
    return self.filter_items(raw)
def predict(self, u=0):
    """
    Predict next position.
    Parameters
    ----------
    u : ndarray
        Optional control vector. If non-zero, it is multiplied by `B`
        to create the control input into the system.
    """
    # Project the state forward with the transition matrix F and add the
    # control contribution: x = Fx + Bu
    self.x = dot(self.F, self.x) + dot(self.B, u) | Predict next position.
Parameters
----------
u : ndarray
Optional control vector. If non-zero, it is multiplied by `B`
        to create the control input into the system. | Below is the instruction that describes the task:
### Input:
Predict next position.
Parameters
----------
u : ndarray
Optional control vector. If non-zero, it is multiplied by `B`
to create the control input into the system.
### Response:
def predict(self, u=0):
    """
    Advance the state estimate one step: x <- F x + B u.
    Parameters
    ----------
    u : ndarray
        Optional control vector; multiplied by `B` to form the control
        input into the system.
    """
    transition = dot(self.F, self.x)
    control = dot(self.B, u)
    self.x = transition + control
def update_variables(func):
    """
    Use this decorator on Step.action implementation.
    Your action method should always return variables, or
    both variables and output.
    This decorator will update variables with output.
    """
    @wraps(func)
    def wrapper(self, *args, **kwargs):
        result = func(self, *args, **kwargs)
        # A tuple means (variables, output); anything else is variables only.
        if isinstance(result, tuple):
            return self.process_register(result[0], result[1])
        else:
            return self.process_register(result)
    return wrapper | Use this decorator on Step.action implementation.
Your action method should always return variables, or
both variables and output.
    This decorator will update variables with output. | Below is the instruction that describes the task:
### Input:
Use this decorator on Step.action implementation.
Your action method should always return variables, or
both variables and output.
This decorator will update variables with output.
### Response:
def update_variables(func):
    """
    Decorator for ``Step.action`` implementations.
    The wrapped action may return either ``variables`` alone or a
    ``(variables, output)`` pair; both forms are funnelled through
    ``self.process_register`` so output is merged into variables.
    """
    @wraps(func)
    def wrapper(self, *args, **kwargs):
        outcome = func(self, *args, **kwargs)
        if not isinstance(outcome, tuple):
            return self.process_register(outcome)
        return self.process_register(outcome[0], outcome[1])
    return wrapper
def porosimetry(im, sizes=25, inlets=None, access_limited=True,
                mode='hybrid'):
    r"""
    Performs a porosimetry simulution on the image
    Parameters
    ----------
    im : ND-array
        An ND image of the porous material containing True values in the
        pore space.
    sizes : array_like or scalar
        The sizes to invade. If a list of values of provided they are used
        directly. If a scalar is provided then that number of points spanning
        the min and max of the distance transform are used.
    inlets : ND-array, boolean
        A boolean mask with True values indicating where the invasion
        enters the image. By default all faces are considered inlets,
        akin to a mercury porosimetry experiment. Users can also apply
        solid boundaries to their image externally before passing it in,
        allowing for complex inlets like circular openings, etc. This argument
        is only used if ``access_limited`` is ``True``.
    access_limited : Boolean
        This flag indicates if the intrusion should only occur from the
        surfaces (``access_limited`` is True, which is the default), or
        if the invading phase should be allowed to appear in the core of
        the image. The former simulates experimental tools like mercury
        intrusion porosimetry, while the latter is useful for comparison
        to gauge the extent of shielding effects in the sample.
    mode : string
        Controls with method is used to compute the result. Options are:
        'hybrid' - (default) Performs a distance tranform of the void space,
        thresholds to find voxels larger than ``sizes[i]``, trims the resulting
        mask if ``access_limitations`` is ``True``, then dilates it using the
        efficient fft-method to obtain the non-wetting fluid configuration.
        'dt' - Same as 'hybrid', except uses a second distance transform,
        relative to the thresholded mask, to find the invading fluid
        configuration. The choice of 'dt' or 'hybrid' depends on speed, which
        is system and installation specific.
        'mio' - Using a single morphological image opening step to obtain the
        invading fluid confirguration directly, *then* trims if
        ``access_limitations`` is ``True``. This method is not ideal and is
        included mostly for comparison purposes. The morphological operations
        are done using fft-based method implementations.
    Returns
    -------
    image : ND-array
        A copy of ``im`` with voxel values indicating the sphere radius at
        which it becomes accessible from the inlets. This image can be used
        to find invading fluid configurations as a function of applied
        capillary pressure by applying a boolean comparison:
        ``inv_phase = im > r`` where ``r`` is the radius (in voxels) of the
        invading sphere. Of course, ``r`` can be converted to capillary
        pressure using your favorite model.
    Notes
    -----
    There are many ways to perform this filter, and PoreSpy offer 3, which
    users can choose between via the ``mode`` argument. These methods all
    work in a similar way by finding which foreground voxels can accomodate
    a sphere of a given radius, then repeating for smaller radii.
    See Also
    --------
    fftmorphology
    local_thickness
    """
    if im.ndim != im.squeeze().ndim:
        warnings.warn('Input image conains a singleton axis:' + str(im.shape) +
                      ' Reduce dimensionality with np.squeeze(im) to avoid' +
                      ' unexpected behavior.')
    # Distance transform gives the largest sphere radius centered at each voxel.
    dt = spim.distance_transform_edt(im > 0)
    if inlets is None:
        inlets = get_border(im.shape, mode='faces')
    # Scalar ``sizes`` -> log-spaced radii from max(dt) down to 1;
    # explicit list -> deduplicated and sorted descending (invade large first).
    if isinstance(sizes, int):
        sizes = sp.logspace(start=sp.log10(sp.amax(dt)), stop=0, num=sizes)
    else:
        sizes = sp.unique(sizes)[-1::-1]
    if im.ndim == 2:
        strel = ps_disk
    else:
        strel = ps_ball
    if mode == 'mio':
        # Morphological opening: erosion, optional trimming, then dilation.
        # Padding avoids edge artifacts from the structuring element.
        pw = int(sp.floor(dt.max()))
        impad = sp.pad(im, mode='symmetric', pad_width=pw)
        inletspad = sp.pad(inlets, mode='symmetric', pad_width=pw)
        inlets = sp.where(inletspad)
        # sizes = sp.unique(sp.around(sizes, decimals=0).astype(int))[-1::-1]
        imresults = sp.zeros(sp.shape(impad))
        for r in tqdm(sizes):
            imtemp = fftmorphology(impad, strel(r), mode='erosion')
            if access_limited:
                imtemp = trim_disconnected_blobs(imtemp, inlets)
            imtemp = fftmorphology(imtemp, strel(r), mode='dilation')
            if sp.any(imtemp):
                imresults[(imresults == 0)*imtemp] = r
        imresults = extract_subsection(imresults, shape=im.shape)
    elif mode == 'dt':
        # Threshold dt, optionally trim, then dilate via a second distance
        # transform of the complement.
        inlets = sp.where(inlets)
        imresults = sp.zeros(sp.shape(im))
        for r in tqdm(sizes):
            imtemp = dt >= r
            if access_limited:
                imtemp = trim_disconnected_blobs(imtemp, inlets)
            if sp.any(imtemp):
                imtemp = spim.distance_transform_edt(~imtemp) < r
                imresults[(imresults == 0)*imtemp] = r
    elif mode == 'hybrid':
        # Threshold dt, optionally trim, then dilate via fft convolution.
        inlets = sp.where(inlets)
        imresults = sp.zeros(sp.shape(im))
        for r in tqdm(sizes):
            imtemp = dt >= r
            if access_limited:
                imtemp = trim_disconnected_blobs(imtemp, inlets)
            if sp.any(imtemp):
                imtemp = fftconvolve(imtemp, strel(r), mode='same') > 0.0001
                imresults[(imresults == 0)*imtemp] = r
    else:
        raise Exception('Unreckognized mode ' + mode)
    return imresults | r"""
Performs a porosimetry simulution on the image
Parameters
----------
im : ND-array
An ND image of the porous material containing True values in the
pore space.
sizes : array_like or scalar
The sizes to invade. If a list of values of provided they are used
directly. If a scalar is provided then that number of points spanning
the min and max of the distance transform are used.
inlets : ND-array, boolean
A boolean mask with True values indicating where the invasion
enters the image. By default all faces are considered inlets,
akin to a mercury porosimetry experiment. Users can also apply
solid boundaries to their image externally before passing it in,
allowing for complex inlets like circular openings, etc. This argument
is only used if ``access_limited`` is ``True``.
access_limited : Boolean
This flag indicates if the intrusion should only occur from the
surfaces (``access_limited`` is True, which is the default), or
if the invading phase should be allowed to appear in the core of
the image. The former simulates experimental tools like mercury
intrusion porosimetry, while the latter is useful for comparison
to gauge the extent of shielding effects in the sample.
mode : string
Controls with method is used to compute the result. Options are:
'hybrid' - (default) Performs a distance tranform of the void space,
thresholds to find voxels larger than ``sizes[i]``, trims the resulting
mask if ``access_limitations`` is ``True``, then dilates it using the
efficient fft-method to obtain the non-wetting fluid configuration.
'dt' - Same as 'hybrid', except uses a second distance transform,
relative to the thresholded mask, to find the invading fluid
configuration. The choice of 'dt' or 'hybrid' depends on speed, which
is system and installation specific.
'mio' - Using a single morphological image opening step to obtain the
invading fluid confirguration directly, *then* trims if
``access_limitations`` is ``True``. This method is not ideal and is
included mostly for comparison purposes. The morphological operations
are done using fft-based method implementations.
Returns
-------
image : ND-array
A copy of ``im`` with voxel values indicating the sphere radius at
which it becomes accessible from the inlets. This image can be used
to find invading fluid configurations as a function of applied
capillary pressure by applying a boolean comparison:
``inv_phase = im > r`` where ``r`` is the radius (in voxels) of the
invading sphere. Of course, ``r`` can be converted to capillary
pressure using your favorite model.
Notes
-----
There are many ways to perform this filter, and PoreSpy offer 3, which
users can choose between via the ``mode`` argument. These methods all
work in a similar way by finding which foreground voxels can accomodate
a sphere of a given radius, then repeating for smaller radii.
See Also
--------
fftmorphology
    local_thickness | Below is the instruction that describes the task:
### Input:
r"""
Performs a porosimetry simulution on the image
Parameters
----------
im : ND-array
An ND image of the porous material containing True values in the
pore space.
sizes : array_like or scalar
The sizes to invade. If a list of values of provided they are used
directly. If a scalar is provided then that number of points spanning
the min and max of the distance transform are used.
inlets : ND-array, boolean
A boolean mask with True values indicating where the invasion
enters the image. By default all faces are considered inlets,
akin to a mercury porosimetry experiment. Users can also apply
solid boundaries to their image externally before passing it in,
allowing for complex inlets like circular openings, etc. This argument
is only used if ``access_limited`` is ``True``.
access_limited : Boolean
This flag indicates if the intrusion should only occur from the
surfaces (``access_limited`` is True, which is the default), or
if the invading phase should be allowed to appear in the core of
the image. The former simulates experimental tools like mercury
intrusion porosimetry, while the latter is useful for comparison
to gauge the extent of shielding effects in the sample.
mode : string
Controls with method is used to compute the result. Options are:
'hybrid' - (default) Performs a distance tranform of the void space,
thresholds to find voxels larger than ``sizes[i]``, trims the resulting
mask if ``access_limitations`` is ``True``, then dilates it using the
efficient fft-method to obtain the non-wetting fluid configuration.
'dt' - Same as 'hybrid', except uses a second distance transform,
relative to the thresholded mask, to find the invading fluid
configuration. The choice of 'dt' or 'hybrid' depends on speed, which
is system and installation specific.
'mio' - Using a single morphological image opening step to obtain the
invading fluid confirguration directly, *then* trims if
``access_limitations`` is ``True``. This method is not ideal and is
included mostly for comparison purposes. The morphological operations
are done using fft-based method implementations.
Returns
-------
image : ND-array
A copy of ``im`` with voxel values indicating the sphere radius at
which it becomes accessible from the inlets. This image can be used
to find invading fluid configurations as a function of applied
capillary pressure by applying a boolean comparison:
``inv_phase = im > r`` where ``r`` is the radius (in voxels) of the
invading sphere. Of course, ``r`` can be converted to capillary
pressure using your favorite model.
Notes
-----
There are many ways to perform this filter, and PoreSpy offer 3, which
users can choose between via the ``mode`` argument. These methods all
work in a similar way by finding which foreground voxels can accomodate
a sphere of a given radius, then repeating for smaller radii.
See Also
--------
fftmorphology
local_thickness
### Response:
def porosimetry(im, sizes=25, inlets=None, access_limited=True,
                mode='hybrid'):
    r"""
    Performs a porosimetry simulution on the image
    Parameters
    ----------
    im : ND-array
        An ND image of the porous material containing True values in the
        pore space.
    sizes : array_like or scalar
        The sizes to invade. If a list of values of provided they are used
        directly. If a scalar is provided then that number of points spanning
        the min and max of the distance transform are used.
    inlets : ND-array, boolean
        A boolean mask with True values indicating where the invasion
        enters the image. By default all faces are considered inlets,
        akin to a mercury porosimetry experiment. Users can also apply
        solid boundaries to their image externally before passing it in,
        allowing for complex inlets like circular openings, etc. This argument
        is only used if ``access_limited`` is ``True``.
    access_limited : Boolean
        This flag indicates if the intrusion should only occur from the
        surfaces (``access_limited`` is True, which is the default), or
        if the invading phase should be allowed to appear in the core of
        the image. The former simulates experimental tools like mercury
        intrusion porosimetry, while the latter is useful for comparison
        to gauge the extent of shielding effects in the sample.
    mode : string
        Controls with method is used to compute the result. Options are:
        'hybrid' - (default) Performs a distance tranform of the void space,
        thresholds to find voxels larger than ``sizes[i]``, trims the resulting
        mask if ``access_limitations`` is ``True``, then dilates it using the
        efficient fft-method to obtain the non-wetting fluid configuration.
        'dt' - Same as 'hybrid', except uses a second distance transform,
        relative to the thresholded mask, to find the invading fluid
        configuration. The choice of 'dt' or 'hybrid' depends on speed, which
        is system and installation specific.
        'mio' - Using a single morphological image opening step to obtain the
        invading fluid confirguration directly, *then* trims if
        ``access_limitations`` is ``True``. This method is not ideal and is
        included mostly for comparison purposes. The morphological operations
        are done using fft-based method implementations.
    Returns
    -------
    image : ND-array
        A copy of ``im`` with voxel values indicating the sphere radius at
        which it becomes accessible from the inlets. This image can be used
        to find invading fluid configurations as a function of applied
        capillary pressure by applying a boolean comparison:
        ``inv_phase = im > r`` where ``r`` is the radius (in voxels) of the
        invading sphere. Of course, ``r`` can be converted to capillary
        pressure using your favorite model.
    Notes
    -----
    There are many ways to perform this filter, and PoreSpy offer 3, which
    users can choose between via the ``mode`` argument. These methods all
    work in a similar way by finding which foreground voxels can accomodate
    a sphere of a given radius, then repeating for smaller radii.
    See Also
    --------
    fftmorphology
    local_thickness
    """
    if im.ndim != im.squeeze().ndim:
        warnings.warn('Input image conains a singleton axis:' + str(im.shape) +
                      ' Reduce dimensionality with np.squeeze(im) to avoid' +
                      ' unexpected behavior.')
    # Distance transform gives the largest sphere radius centered at each voxel.
    dt = spim.distance_transform_edt(im > 0)
    if inlets is None:
        inlets = get_border(im.shape, mode='faces')
    # Scalar ``sizes`` -> log-spaced radii from max(dt) down to 1;
    # explicit list -> deduplicated and sorted descending (invade large first).
    if isinstance(sizes, int):
        sizes = sp.logspace(start=sp.log10(sp.amax(dt)), stop=0, num=sizes)
    else:
        sizes = sp.unique(sizes)[-1::-1]
    if im.ndim == 2:
        strel = ps_disk
    else:
        strel = ps_ball
    if mode == 'mio':
        # Morphological opening: erosion, optional trimming, then dilation.
        # Padding avoids edge artifacts from the structuring element.
        pw = int(sp.floor(dt.max()))
        impad = sp.pad(im, mode='symmetric', pad_width=pw)
        inletspad = sp.pad(inlets, mode='symmetric', pad_width=pw)
        inlets = sp.where(inletspad)
        # sizes = sp.unique(sp.around(sizes, decimals=0).astype(int))[-1::-1]
        imresults = sp.zeros(sp.shape(impad))
        for r in tqdm(sizes):
            imtemp = fftmorphology(impad, strel(r), mode='erosion')
            if access_limited:
                imtemp = trim_disconnected_blobs(imtemp, inlets)
            imtemp = fftmorphology(imtemp, strel(r), mode='dilation')
            if sp.any(imtemp):
                imresults[(imresults == 0)*imtemp] = r
        imresults = extract_subsection(imresults, shape=im.shape)
    elif mode == 'dt':
        # Threshold dt, optionally trim, then dilate via a second distance
        # transform of the complement.
        inlets = sp.where(inlets)
        imresults = sp.zeros(sp.shape(im))
        for r in tqdm(sizes):
            imtemp = dt >= r
            if access_limited:
                imtemp = trim_disconnected_blobs(imtemp, inlets)
            if sp.any(imtemp):
                imtemp = spim.distance_transform_edt(~imtemp) < r
                imresults[(imresults == 0)*imtemp] = r
    elif mode == 'hybrid':
        # Threshold dt, optionally trim, then dilate via fft convolution.
        inlets = sp.where(inlets)
        imresults = sp.zeros(sp.shape(im))
        for r in tqdm(sizes):
            imtemp = dt >= r
            if access_limited:
                imtemp = trim_disconnected_blobs(imtemp, inlets)
            if sp.any(imtemp):
                imtemp = fftconvolve(imtemp, strel(r), mode='same') > 0.0001
                imresults[(imresults == 0)*imtemp] = r
    else:
        raise Exception('Unreckognized mode ' + mode)
    return imresults |
def do_tree(self, path, max_depth, level, full_path, include_stat):
    """ tree's work horse """
    # Nodes that vanished or are unreadable are treated as leaves.
    try:
        children = self.get_children(path)
    except (NoNodeError, NoAuthError):
        children = []
    for child in children:
        cpath = os.path.join(path, child) if full_path else child
        if include_stat:
            yield cpath, level, self.stat(os.path.join(path, child))
        else:
            yield cpath, level
        # max_depth == 0 means unlimited recursion depth.
        if max_depth == 0 or level + 1 < max_depth:
            cpath = os.path.join(path, child)
            for rchild_rlevel_rstat in self.do_tree(cpath, max_depth, level + 1, full_path, include_stat):
                yield rchild_rlevel_rstat | tree's work horse | Below is the the instruction that describes the task:
### Input:
tree's work horse
### Response:
def do_tree(self, path, max_depth, level, full_path, include_stat):
    """Recursive generator behind ``tree``: walk ``path`` depth-first."""
    try:
        names = self.get_children(path)
    except (NoNodeError, NoAuthError):
        names = []  # unreadable/missing nodes act as leaves
    for name in names:
        abspath = os.path.join(path, name)
        label = abspath if full_path else name
        if include_stat:
            yield label, level, self.stat(abspath)
        else:
            yield label, level
        if max_depth == 0 or level + 1 < max_depth:  # 0 == unlimited
            for entry in self.do_tree(abspath, max_depth, level + 1, full_path, include_stat):
                yield entry
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.