code stringlengths 75 104k | docstring stringlengths 1 46.9k | text stringlengths 164 112k |
|---|---|---|
async def upload_file(self, bucket, file, uploadpath=None, key=None,
ContentType=None, **kw):
"""Upload a file to S3 possibly using the multi-part uploader
Return the key uploaded
"""
is_filename = False
if hasattr(file, 'read'):
if hasattr(file, 'seek'):
file.seek(0)
file = file.read()
size = len(file)
elif key:
size = len(file)
else:
is_filename = True
size = os.stat(file).st_size
key = os.path.basename(file)
assert key, 'key not available'
if not ContentType:
ContentType, _ = mimetypes.guess_type(key)
if uploadpath:
if not uploadpath.endswith('/'):
uploadpath = '%s/' % uploadpath
key = '%s%s' % (uploadpath, key)
params = dict(Bucket=bucket, Key=key)
if not ContentType:
ContentType = 'application/octet-stream'
params['ContentType'] = ContentType
if size > MULTI_PART_SIZE and is_filename:
resp = await _multipart(self, file, params)
elif is_filename:
with open(file, 'rb') as fp:
params['Body'] = fp.read()
resp = await self.put_object(**params)
else:
params['Body'] = file
resp = await self.put_object(**params)
if 'Key' not in resp:
resp['Key'] = key
if 'Bucket' not in resp:
resp['Bucket'] = bucket
return resp | Upload a file to S3 possibly using the multi-part uploader
Return the key uploaded | Below is the instruction that describes the task:
### Input:
Upload a file to S3 possibly using the multi-part uploader
Return the key uploaded
### Response:
async def upload_file(self, bucket, file, uploadpath=None, key=None,
ContentType=None, **kw):
"""Upload a file to S3 possibly using the multi-part uploader
Return the key uploaded
"""
is_filename = False
if hasattr(file, 'read'):
if hasattr(file, 'seek'):
file.seek(0)
file = file.read()
size = len(file)
elif key:
size = len(file)
else:
is_filename = True
size = os.stat(file).st_size
key = os.path.basename(file)
assert key, 'key not available'
if not ContentType:
ContentType, _ = mimetypes.guess_type(key)
if uploadpath:
if not uploadpath.endswith('/'):
uploadpath = '%s/' % uploadpath
key = '%s%s' % (uploadpath, key)
params = dict(Bucket=bucket, Key=key)
if not ContentType:
ContentType = 'application/octet-stream'
params['ContentType'] = ContentType
if size > MULTI_PART_SIZE and is_filename:
resp = await _multipart(self, file, params)
elif is_filename:
with open(file, 'rb') as fp:
params['Body'] = fp.read()
resp = await self.put_object(**params)
else:
params['Body'] = file
resp = await self.put_object(**params)
if 'Key' not in resp:
resp['Key'] = key
if 'Bucket' not in resp:
resp['Bucket'] = bucket
return resp |
def spi_write(self, data):
"""Write a stream of bytes to a SPI device."""
data_out = array.array('B', data)
data_in = array.array('B', (0,) * len(data_out))
ret = api.py_aa_spi_write(self.handle, len(data_out), data_out,
len(data_in), data_in)
_raise_error_if_negative(ret)
    return bytes(data_in) | Write a stream of bytes to a SPI device. | Below is the instruction that describes the task:
### Input:
Write a stream of bytes to a SPI device.
### Response:
def spi_write(self, data):
"""Write a stream of bytes to a SPI device."""
data_out = array.array('B', data)
data_in = array.array('B', (0,) * len(data_out))
ret = api.py_aa_spi_write(self.handle, len(data_out), data_out,
len(data_in), data_in)
_raise_error_if_negative(ret)
return bytes(data_in) |
def next_frame_ae():
"""Conv autoencoder."""
hparams = next_frame_basic_deterministic()
hparams.bottom["inputs"] = modalities.video_bitwise_bottom
hparams.top["inputs"] = modalities.video_top
hparams.hidden_size = 256
hparams.batch_size = 8
hparams.num_hidden_layers = 4
hparams.num_compress_steps = 4
hparams.dropout = 0.4
  return hparams | Conv autoencoder. | Below is the instruction that describes the task:
### Input:
Conv autoencoder.
### Response:
def next_frame_ae():
"""Conv autoencoder."""
hparams = next_frame_basic_deterministic()
hparams.bottom["inputs"] = modalities.video_bitwise_bottom
hparams.top["inputs"] = modalities.video_top
hparams.hidden_size = 256
hparams.batch_size = 8
hparams.num_hidden_layers = 4
hparams.num_compress_steps = 4
hparams.dropout = 0.4
return hparams |
def transpose(a, axes=None):
"""Returns a view of the array with axes transposed.
For a 1-D array, this has no effect.
For a 2-D array, this is the usual matrix transpose.
For an n-D array, if axes are given, their order indicates how the
axes are permuted
Args:
a (array_like): Input array.
axes (list of int, optional): By default, reverse the dimensions,
otherwise permute the axes according to the values given.
"""
if isinstance(a, np.ndarray):
return np.transpose(a, axes)
elif isinstance(a, RemoteArray):
return a.transpose(*axes)
elif isinstance(a, Remote):
return _remote_to_array(a).transpose(*axes)
elif isinstance(a, DistArray):
if axes is None:
axes = range(a.ndim - 1, -1, -1)
axes = list(axes)
if len(set(axes)) < len(axes):
raise ValueError("repeated axis in transpose")
if sorted(axes) != list(range(a.ndim)):
raise ValueError("axes don't match array")
distaxis = a._distaxis
new_distaxis = axes.index(distaxis)
new_subarrays = [ra.transpose(*axes) for ra in a._subarrays]
return DistArray(new_subarrays, new_distaxis)
else:
return np.transpose(a, axes) | Returns a view of the array with axes transposed.
For a 1-D array, this has no effect.
For a 2-D array, this is the usual matrix transpose.
For an n-D array, if axes are given, their order indicates how the
axes are permuted
Args:
a (array_like): Input array.
axes (list of int, optional): By default, reverse the dimensions,
otherwise permute the axes according to the values given. | Below is the instruction that describes the task:
### Input:
Returns a view of the array with axes transposed.
For a 1-D array, this has no effect.
For a 2-D array, this is the usual matrix transpose.
For an n-D array, if axes are given, their order indicates how the
axes are permuted
Args:
a (array_like): Input array.
axes (list of int, optional): By default, reverse the dimensions,
otherwise permute the axes according to the values given.
### Response:
def transpose(a, axes=None):
"""Returns a view of the array with axes transposed.
For a 1-D array, this has no effect.
For a 2-D array, this is the usual matrix transpose.
For an n-D array, if axes are given, their order indicates how the
axes are permuted
Args:
a (array_like): Input array.
axes (list of int, optional): By default, reverse the dimensions,
otherwise permute the axes according to the values given.
"""
if isinstance(a, np.ndarray):
return np.transpose(a, axes)
elif isinstance(a, RemoteArray):
return a.transpose(*axes)
elif isinstance(a, Remote):
return _remote_to_array(a).transpose(*axes)
elif isinstance(a, DistArray):
if axes is None:
axes = range(a.ndim - 1, -1, -1)
axes = list(axes)
if len(set(axes)) < len(axes):
raise ValueError("repeated axis in transpose")
if sorted(axes) != list(range(a.ndim)):
raise ValueError("axes don't match array")
distaxis = a._distaxis
new_distaxis = axes.index(distaxis)
new_subarrays = [ra.transpose(*axes) for ra in a._subarrays]
return DistArray(new_subarrays, new_distaxis)
else:
return np.transpose(a, axes) |
def add_filter(self, table, cols, condition):
"""
Add a filter. When reading *table*, rows in *table* will be
filtered by filter_rows().
Args:
table: The table the filter applies to.
cols: The columns in *table* to filter on.
condition: The filter function.
"""
if table is not None and table not in self.relations:
raise ItsdbError('Cannot add filter; table "{}" is not defined '
'by the relations file.'
.format(table))
# this is a hack, though perhaps well-motivated
if cols is None:
cols = [None]
self.filters[table].append((cols, condition)) | Add a filter. When reading *table*, rows in *table* will be
filtered by filter_rows().
Args:
table: The table the filter applies to.
cols: The columns in *table* to filter on.
condition: The filter function. | Below is the instruction that describes the task:
### Input:
Add a filter. When reading *table*, rows in *table* will be
filtered by filter_rows().
Args:
table: The table the filter applies to.
cols: The columns in *table* to filter on.
condition: The filter function.
### Response:
def add_filter(self, table, cols, condition):
"""
Add a filter. When reading *table*, rows in *table* will be
filtered by filter_rows().
Args:
table: The table the filter applies to.
cols: The columns in *table* to filter on.
condition: The filter function.
"""
if table is not None and table not in self.relations:
raise ItsdbError('Cannot add filter; table "{}" is not defined '
'by the relations file.'
.format(table))
# this is a hack, though perhaps well-motivated
if cols is None:
cols = [None]
self.filters[table].append((cols, condition)) |
def next(self):
"""A `next` that caches the returned results. Together with the
slightly different `__iter__`, these cursors can be iterated over
more than once."""
if self.__tailable:
return PymongoCursor.next(self)
try:
ret = PymongoCursor.next(self)
except StopIteration:
self.__fullcache = True
raise
self.__itercache.append(ret)
return ret | A `next` that caches the returned results. Together with the
slightly different `__iter__`, these cursors can be iterated over
more than once. | Below is the instruction that describes the task:
### Input:
A `next` that caches the returned results. Together with the
slightly different `__iter__`, these cursors can be iterated over
more than once.
### Response:
def next(self):
"""A `next` that caches the returned results. Together with the
slightly different `__iter__`, these cursors can be iterated over
more than once."""
if self.__tailable:
return PymongoCursor.next(self)
try:
ret = PymongoCursor.next(self)
except StopIteration:
self.__fullcache = True
raise
self.__itercache.append(ret)
return ret |
def fromTable(table, paramName):
"""
Converts a table to GPRecordSet object
Inputs:
table - path to the table
paramName - name of the parameter
"""
from ..common.spatial import recordset_to_json
g = GPRecordSet()
g.paramName = paramName
g.value = json.loads(recordset_to_json(table))
return g | Converts a table to GPRecordSet object
Inputs:
table - path to the table
paramName - name of the parameter | Below is the instruction that describes the task:
### Input:
Converts a table to GPRecordSet object
Inputs:
table - path to the table
paramName - name of the parameter
### Response:
def fromTable(table, paramName):
"""
Converts a table to GPRecordSet object
Inputs:
table - path to the table
paramName - name of the parameter
"""
from ..common.spatial import recordset_to_json
g = GPRecordSet()
g.paramName = paramName
g.value = json.loads(recordset_to_json(table))
return g |
def fuzz(p, _inplace=0):
"""Transform a layer into a fuzzy layer by replacing some default values by random objects""" # noqa: E501
if not _inplace:
p = p.copy()
q = p
while not isinstance(q, NoPayload):
for f in q.fields_desc:
if isinstance(f, PacketListField):
for r in getattr(q, f.name):
print("fuzzing", repr(r))
fuzz(r, _inplace=1)
elif f.default is not None:
if not isinstance(f, ConditionalField) or f._evalcond(q):
rnd = f.randval()
if rnd is not None:
q.default_fields[f.name] = rnd
q = q.payload
    return p | Transform a layer into a fuzzy layer by replacing some default values by random objects | Below is the instruction that describes the task:
### Input:
Transform a layer into a fuzzy layer by replacing some default values by random objects
### Response:
def fuzz(p, _inplace=0):
"""Transform a layer into a fuzzy layer by replacing some default values by random objects""" # noqa: E501
if not _inplace:
p = p.copy()
q = p
while not isinstance(q, NoPayload):
for f in q.fields_desc:
if isinstance(f, PacketListField):
for r in getattr(q, f.name):
print("fuzzing", repr(r))
fuzz(r, _inplace=1)
elif f.default is not None:
if not isinstance(f, ConditionalField) or f._evalcond(q):
rnd = f.randval()
if rnd is not None:
q.default_fields[f.name] = rnd
q = q.payload
return p |
def write_version(name=None, path=None):
"""Write the version info to ../version.json, for setup.py.
Args:
name (Optional[str]): this is for the ``write_version(name=__name__)``
below. That's one way to both follow the
``if __name__ == '__main__':`` convention but also allow for full
coverage without ignoring parts of the file.
path (Optional[str]): the path to write the version json to. Defaults
to ../version.json
"""
# Written like this for coverage purposes.
# http://stackoverflow.com/questions/5850268/how-to-test-or-mock-if-name-main-contents/27084447#27084447
if name in (None, '__main__'):
path = path or os.path.join(os.path.dirname(os.path.dirname(os.path.abspath(__file__))),
"version.json")
contents = {
'version': __version__,
'version_string': __version_string__,
}
with open(path, 'w') as filehandle:
filehandle.write(json.dumps(contents, sort_keys=True, indent=4)) | Write the version info to ../version.json, for setup.py.
Args:
name (Optional[str]): this is for the ``write_version(name=__name__)``
below. That's one way to both follow the
``if __name__ == '__main__':`` convention but also allow for full
coverage without ignoring parts of the file.
path (Optional[str]): the path to write the version json to. Defaults
to ../version.json | Below is the instruction that describes the task:
### Input:
Write the version info to ../version.json, for setup.py.
Args:
name (Optional[str]): this is for the ``write_version(name=__name__)``
below. That's one way to both follow the
``if __name__ == '__main__':`` convention but also allow for full
coverage without ignoring parts of the file.
path (Optional[str]): the path to write the version json to. Defaults
to ../version.json
### Response:
def write_version(name=None, path=None):
"""Write the version info to ../version.json, for setup.py.
Args:
name (Optional[str]): this is for the ``write_version(name=__name__)``
below. That's one way to both follow the
``if __name__ == '__main__':`` convention but also allow for full
coverage without ignoring parts of the file.
path (Optional[str]): the path to write the version json to. Defaults
to ../version.json
"""
# Written like this for coverage purposes.
# http://stackoverflow.com/questions/5850268/how-to-test-or-mock-if-name-main-contents/27084447#27084447
if name in (None, '__main__'):
path = path or os.path.join(os.path.dirname(os.path.dirname(os.path.abspath(__file__))),
"version.json")
contents = {
'version': __version__,
'version_string': __version_string__,
}
with open(path, 'w') as filehandle:
filehandle.write(json.dumps(contents, sort_keys=True, indent=4)) |
def ipv6_acl_ipv6_access_list_standard_seq_seq_id(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
ipv6_acl = ET.SubElement(config, "ipv6-acl", xmlns="urn:brocade.com:mgmt:brocade-ipv6-access-list")
ipv6 = ET.SubElement(ipv6_acl, "ipv6")
access_list = ET.SubElement(ipv6, "access-list")
standard = ET.SubElement(access_list, "standard")
name_key = ET.SubElement(standard, "name")
name_key.text = kwargs.pop('name')
seq = ET.SubElement(standard, "seq")
seq_id = ET.SubElement(seq, "seq-id")
seq_id.text = kwargs.pop('seq_id')
callback = kwargs.pop('callback', self._callback)
        return callback(config) | Auto Generated Code | Below is the instruction that describes the task:
### Input:
Auto Generated Code
### Response:
def ipv6_acl_ipv6_access_list_standard_seq_seq_id(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
ipv6_acl = ET.SubElement(config, "ipv6-acl", xmlns="urn:brocade.com:mgmt:brocade-ipv6-access-list")
ipv6 = ET.SubElement(ipv6_acl, "ipv6")
access_list = ET.SubElement(ipv6, "access-list")
standard = ET.SubElement(access_list, "standard")
name_key = ET.SubElement(standard, "name")
name_key.text = kwargs.pop('name')
seq = ET.SubElement(standard, "seq")
seq_id = ET.SubElement(seq, "seq-id")
seq_id.text = kwargs.pop('seq_id')
callback = kwargs.pop('callback', self._callback)
return callback(config) |
def _cimdatetime_constructor(loader, node):
"""
PyYAML constructor function for CIMDateTime objects.
This is needed for yaml.safe_load() to support CIMDateTime.
"""
cimdatetime_str = loader.construct_scalar(node)
cimdatetime = CIMDateTime(cimdatetime_str)
return cimdatetime | PyYAML constructor function for CIMDateTime objects.
This is needed for yaml.safe_load() to support CIMDateTime. | Below is the instruction that describes the task:
### Input:
PyYAML constructor function for CIMDateTime objects.
This is needed for yaml.safe_load() to support CIMDateTime.
### Response:
def _cimdatetime_constructor(loader, node):
"""
PyYAML constructor function for CIMDateTime objects.
This is needed for yaml.safe_load() to support CIMDateTime.
"""
cimdatetime_str = loader.construct_scalar(node)
cimdatetime = CIMDateTime(cimdatetime_str)
return cimdatetime |
def preview_view(self, context):
"""
Preview view - used by StudioContainerWithNestedXBlocksMixin to render nested xblocks in preview context.
Default implementation uses author_view if available, otherwise falls back to student_view
Child classes can override this method to control their presentation in preview context
"""
view_to_render = 'author_view' if hasattr(self, 'author_view') else 'student_view'
renderer = getattr(self, view_to_render)
return renderer(context) | Preview view - used by StudioContainerWithNestedXBlocksMixin to render nested xblocks in preview context.
Default implementation uses author_view if available, otherwise falls back to student_view
Child classes can override this method to control their presentation in preview context | Below is the instruction that describes the task:
### Input:
Preview view - used by StudioContainerWithNestedXBlocksMixin to render nested xblocks in preview context.
Default implementation uses author_view if available, otherwise falls back to student_view
Child classes can override this method to control their presentation in preview context
### Response:
def preview_view(self, context):
"""
Preview view - used by StudioContainerWithNestedXBlocksMixin to render nested xblocks in preview context.
Default implementation uses author_view if available, otherwise falls back to student_view
Child classes can override this method to control their presentation in preview context
"""
view_to_render = 'author_view' if hasattr(self, 'author_view') else 'student_view'
renderer = getattr(self, view_to_render)
return renderer(context) |
def place_objects(self):
"""Places objects randomly until no collisions or max iterations hit."""
placed_objects = []
index = 0
# place objects by rejection sampling
for _, obj_mjcf in self.mujoco_objects.items():
horizontal_radius = obj_mjcf.get_horizontal_radius()
bottom_offset = obj_mjcf.get_bottom_offset()
success = False
for _ in range(5000): # 5000 retries
bin_x_half = self.bin_size[0] / 2 - horizontal_radius - 0.05
bin_y_half = self.bin_size[1] / 2 - horizontal_radius - 0.05
object_x = np.random.uniform(high=bin_x_half, low=-bin_x_half)
object_y = np.random.uniform(high=bin_y_half, low=-bin_y_half)
# make sure objects do not overlap
object_xy = np.array([object_x, object_y, 0])
pos = self.bin_offset - bottom_offset + object_xy
location_valid = True
for pos2, r in placed_objects:
dist = np.linalg.norm(pos[:2] - pos2[:2], np.inf)
if dist <= r + horizontal_radius:
location_valid = False
break
# place the object
if location_valid:
# add object to the position
placed_objects.append((pos, horizontal_radius))
self.objects[index].set("pos", array_to_string(pos))
# random z-rotation
quat = self.sample_quat()
self.objects[index].set("quat", array_to_string(quat))
success = True
break
# raise error if all objects cannot be placed after maximum retries
if not success:
raise RandomizationError("Cannot place all objects in the bins")
        index += 1 | Places objects randomly until no collisions or max iterations hit. | Below is the instruction that describes the task:
### Input:
Places objects randomly until no collisions or max iterations hit.
### Response:
def place_objects(self):
"""Places objects randomly until no collisions or max iterations hit."""
placed_objects = []
index = 0
# place objects by rejection sampling
for _, obj_mjcf in self.mujoco_objects.items():
horizontal_radius = obj_mjcf.get_horizontal_radius()
bottom_offset = obj_mjcf.get_bottom_offset()
success = False
for _ in range(5000): # 5000 retries
bin_x_half = self.bin_size[0] / 2 - horizontal_radius - 0.05
bin_y_half = self.bin_size[1] / 2 - horizontal_radius - 0.05
object_x = np.random.uniform(high=bin_x_half, low=-bin_x_half)
object_y = np.random.uniform(high=bin_y_half, low=-bin_y_half)
# make sure objects do not overlap
object_xy = np.array([object_x, object_y, 0])
pos = self.bin_offset - bottom_offset + object_xy
location_valid = True
for pos2, r in placed_objects:
dist = np.linalg.norm(pos[:2] - pos2[:2], np.inf)
if dist <= r + horizontal_radius:
location_valid = False
break
# place the object
if location_valid:
# add object to the position
placed_objects.append((pos, horizontal_radius))
self.objects[index].set("pos", array_to_string(pos))
# random z-rotation
quat = self.sample_quat()
self.objects[index].set("quat", array_to_string(quat))
success = True
break
# raise error if all objects cannot be placed after maximum retries
if not success:
raise RandomizationError("Cannot place all objects in the bins")
index += 1 |
def visualize_qual_stats_dict_single(D, dest, title):
"""
same as visualize_qual_stats_dict, but puts all tiles together.
"""
# first find out how many cycles were there. it is going to be about 101 for
# hiseq runs, and 251 in miseq runs, but these values may change from run to
# run. although all lanes are expected to have the identical number of cycles
# the following code makes sure that the number_of_cycles variable holds the
# longest one if there is a variation between the number of cycles between
# lanes
number_of_cycles = 0
for pair in ['1', '2']:
if pair not in D:
continue
for tile in D[pair]:
if len(D[pair][tile]['mean']) > number_of_cycles:
number_of_cycles = len(D[pair][tile]['mean'])
fig = plt.figure(figsize = (12, 8))
plt.rcParams.update({'axes.linewidth' : 0.9})
plt.rc('grid', color='0.50', linestyle='-', linewidth=0.1)
all_tiles = {'1': {'mean': [0] * number_of_cycles, 'count': [0] * number_of_cycles},
'2': {'mean': [0] * number_of_cycles, 'count': [0] * number_of_cycles}
}
for i in range(0, number_of_cycles):
means_p1 = []
counts_p1 = []
means_p2 = []
counts_p2 = []
for tile_id in D['1']:
tile = D['1'][tile_id]
means_p1.append(tile['mean'][i])
counts_p1.append(tile['count'][i])
if '2' in D and D['2']:
tile = D['2'][tile_id]
means_p2.append(tile['mean'][i])
counts_p2.append(tile['count'][i])
all_tiles['1']['mean'][i] = numpy.mean(means_p1)
all_tiles['1']['count'][i] = sum(counts_p1)
if '2' in D and D['2']:
all_tiles['2']['mean'][i] = numpy.mean(means_p2)
all_tiles['2']['count'][i] = sum(counts_p2)
colors = cm.get_cmap('RdYlGn', lut=256)
plt.grid(True)
plt.subplots_adjust(left=0.02, bottom = 0.03, top = 0.95, right = 0.98)
plt.xticks(list(range(number_of_cycles / 10, number_of_cycles, number_of_cycles / 10)), rotation=90, size='xx-small')
plt.ylim(ymin = 0, ymax = 42)
plt.xlim(xmin = 0, xmax = number_of_cycles - 1)
plt.yticks(list(range(5, 41, 5)), size='xx-small')
plt.fill_between(list(range(0, number_of_cycles)), [42 for _ in range(0, number_of_cycles)], y2 = 0, color = colors(0), alpha = 0.2)
plt.plot(all_tiles['1']['mean'], color = 'orange', lw = 6)
read_number_percent_dropdown = [42 * (x / all_tiles['1']['count'][0]) for x in all_tiles['1']['count']]
if not len(set(read_number_percent_dropdown)) <= 1:
plt.fill_between(list(range(0, number_of_cycles)), read_number_percent_dropdown, y2 = 0, color = 'black', alpha = 0.08)
plt.text(5, 2.5, '%s' % big_number_pretty_print(all_tiles['1']['count'][0]), alpha=0.5)
else:
plt.text(5, 2.5, '%s' % big_number_pretty_print(all_tiles['1']['count'][0]), alpha=0.5)
if '2' in all_tiles and all_tiles['2']:
plt.plot(all_tiles['2']['mean'], color = 'purple', lw = 6)
plt.figtext(0.5, 0.97, '%s' % (title), weight = 'black', size = 'xx-large', ha = 'center')
try:
plt.savefig(dest + '.tiff')
except:
plt.savefig(dest + '.png')
    return (all_tiles['1']['mean'], all_tiles['2']['mean']) | same as visualize_qual_stats_dict, but puts all tiles together. | Below is the instruction that describes the task:
### Input:
same as visualize_qual_stats_dict, but puts all tiles together.
### Response:
def visualize_qual_stats_dict_single(D, dest, title):
"""
same as visualize_qual_stats_dict, but puts all tiles together.
"""
# first find out how many cycles were there. it is going to be about 101 for
# hiseq runs, and 251 in miseq runs, but these values may change from run to
# run. although all lanes are expected to have the identical number of cycles
# the following code makes sure that the number_of_cycles variable holds the
# longest one if there is a variation between the number of cycles between
# lanes
number_of_cycles = 0
for pair in ['1', '2']:
if pair not in D:
continue
for tile in D[pair]:
if len(D[pair][tile]['mean']) > number_of_cycles:
number_of_cycles = len(D[pair][tile]['mean'])
fig = plt.figure(figsize = (12, 8))
plt.rcParams.update({'axes.linewidth' : 0.9})
plt.rc('grid', color='0.50', linestyle='-', linewidth=0.1)
all_tiles = {'1': {'mean': [0] * number_of_cycles, 'count': [0] * number_of_cycles},
'2': {'mean': [0] * number_of_cycles, 'count': [0] * number_of_cycles}
}
for i in range(0, number_of_cycles):
means_p1 = []
counts_p1 = []
means_p2 = []
counts_p2 = []
for tile_id in D['1']:
tile = D['1'][tile_id]
means_p1.append(tile['mean'][i])
counts_p1.append(tile['count'][i])
if '2' in D and D['2']:
tile = D['2'][tile_id]
means_p2.append(tile['mean'][i])
counts_p2.append(tile['count'][i])
all_tiles['1']['mean'][i] = numpy.mean(means_p1)
all_tiles['1']['count'][i] = sum(counts_p1)
if '2' in D and D['2']:
all_tiles['2']['mean'][i] = numpy.mean(means_p2)
all_tiles['2']['count'][i] = sum(counts_p2)
colors = cm.get_cmap('RdYlGn', lut=256)
plt.grid(True)
plt.subplots_adjust(left=0.02, bottom = 0.03, top = 0.95, right = 0.98)
plt.xticks(list(range(number_of_cycles / 10, number_of_cycles, number_of_cycles / 10)), rotation=90, size='xx-small')
plt.ylim(ymin = 0, ymax = 42)
plt.xlim(xmin = 0, xmax = number_of_cycles - 1)
plt.yticks(list(range(5, 41, 5)), size='xx-small')
plt.fill_between(list(range(0, number_of_cycles)), [42 for _ in range(0, number_of_cycles)], y2 = 0, color = colors(0), alpha = 0.2)
plt.plot(all_tiles['1']['mean'], color = 'orange', lw = 6)
read_number_percent_dropdown = [42 * (x / all_tiles['1']['count'][0]) for x in all_tiles['1']['count']]
if not len(set(read_number_percent_dropdown)) <= 1:
plt.fill_between(list(range(0, number_of_cycles)), read_number_percent_dropdown, y2 = 0, color = 'black', alpha = 0.08)
plt.text(5, 2.5, '%s' % big_number_pretty_print(all_tiles['1']['count'][0]), alpha=0.5)
else:
plt.text(5, 2.5, '%s' % big_number_pretty_print(all_tiles['1']['count'][0]), alpha=0.5)
if '2' in all_tiles and all_tiles['2']:
plt.plot(all_tiles['2']['mean'], color = 'purple', lw = 6)
plt.figtext(0.5, 0.97, '%s' % (title), weight = 'black', size = 'xx-large', ha = 'center')
try:
plt.savefig(dest + '.tiff')
except:
plt.savefig(dest + '.png')
return (all_tiles['1']['mean'], all_tiles['2']['mean']) |
def enable_parallel(processnum=None):
"""
Change the module's `cut` and `cut_for_search` functions to the
parallel version.
Note that this only works using dt, custom Tokenizer
instances are not supported.
"""
global pool, dt, cut, cut_for_search
from multiprocessing import cpu_count
if os.name == 'nt':
raise NotImplementedError(
"jieba: parallel mode only supports posix system")
else:
from multiprocessing import Pool
dt.check_initialized()
if processnum is None:
processnum = cpu_count()
pool = Pool(processnum)
cut = _pcut
cut_for_search = _pcut_for_search | Change the module's `cut` and `cut_for_search` functions to the
parallel version.
Note that this only works using dt, custom Tokenizer
instances are not supported. | Below is the instruction that describes the task:
### Input:
Change the module's `cut` and `cut_for_search` functions to the
parallel version.
Note that this only works using dt, custom Tokenizer
instances are not supported.
### Response:
def enable_parallel(processnum=None):
"""
Change the module's `cut` and `cut_for_search` functions to the
parallel version.
Note that this only works using dt, custom Tokenizer
instances are not supported.
"""
global pool, dt, cut, cut_for_search
from multiprocessing import cpu_count
if os.name == 'nt':
raise NotImplementedError(
"jieba: parallel mode only supports posix system")
else:
from multiprocessing import Pool
dt.check_initialized()
if processnum is None:
processnum = cpu_count()
pool = Pool(processnum)
cut = _pcut
cut_for_search = _pcut_for_search |
def from_vizier_table(cls, table_id, nside=256):
"""
Creates a `~mocpy.moc.MOC` object from a VizieR table.
**Info**: This method is already implemented in `astroquery.cds <https://astroquery.readthedocs.io/en/latest/cds/cds.html>`__. You can ask to get a `mocpy.moc.MOC` object
from a vizier catalog ID.
Parameters
----------
table_id : str
table index
nside : int, optional
256 by default
Returns
-------
result : `~mocpy.moc.MOC`
The resulting MOC.
"""
nside_possible_values = (8, 16, 32, 64, 128, 256, 512)
if nside not in nside_possible_values:
raise ValueError('Bad value for nside. Must be in {0}'.format(nside_possible_values))
result = cls.from_ivorn('ivo://CDS/' + table_id, nside)
return result | Creates a `~mocpy.moc.MOC` object from a VizieR table.
**Info**: This method is already implemented in `astroquery.cds <https://astroquery.readthedocs.io/en/latest/cds/cds.html>`__. You can ask to get a `mocpy.moc.MOC` object
from a vizier catalog ID.
Parameters
----------
table_id : str
table index
nside : int, optional
256 by default
Returns
-------
result : `~mocpy.moc.MOC`
The resulting MOC. | Below is the instruction that describes the task:
### Input:
Creates a `~mocpy.moc.MOC` object from a VizieR table.
**Info**: This method is already implemented in `astroquery.cds <https://astroquery.readthedocs.io/en/latest/cds/cds.html>`__. You can ask to get a `mocpy.moc.MOC` object
from a vizier catalog ID.
Parameters
----------
table_id : str
table index
nside : int, optional
256 by default
Returns
-------
result : `~mocpy.moc.MOC`
The resulting MOC.
### Response:
def from_vizier_table(cls, table_id, nside=256):
"""
Creates a `~mocpy.moc.MOC` object from a VizieR table.
**Info**: This method is already implemented in `astroquery.cds <https://astroquery.readthedocs.io/en/latest/cds/cds.html>`__. You can ask to get a `mocpy.moc.MOC` object
from a vizier catalog ID.
Parameters
----------
table_id : str
table index
nside : int, optional
256 by default
Returns
-------
result : `~mocpy.moc.MOC`
The resulting MOC.
"""
nside_possible_values = (8, 16, 32, 64, 128, 256, 512)
if nside not in nside_possible_values:
raise ValueError('Bad value for nside. Must be in {0}'.format(nside_possible_values))
result = cls.from_ivorn('ivo://CDS/' + table_id, nside)
return result |
def html_to_fc(html=None, clean_html=None, clean_visible=None, encoding=None, url=None,
timestamp=None, other_features=None):
'''`html` is expected to be a raw string received over the wire from a
remote webserver, and `encoding`, if provided, is used to decode
it. Typically, encoding comes from the Content-Type header field.
The :func:`~streamcorpus_pipeline._clean_html.make_clean_html`
function handles character encodings.
'''
def add_feature(name, xs):
if name not in fc:
fc[name] = StringCounter()
fc[name] += StringCounter(xs)
timestamp = timestamp or int(time.time() * 1000)
other_features = other_features or {}
if clean_html is None:
if html is not None:
try:
clean_html_utf8 = make_clean_html(html, encoding=encoding)
except:
logger.warn('dropping doc because:', exc_info=True)
return
clean_html = clean_html_utf8.decode('utf-8')
else:
clean_html_utf8 = u''
clean_html = u''
else:
clean_html_utf8 = u''
if clean_visible is None or len(clean_visible) == 0:
clean_visible = make_clean_visible(clean_html_utf8).decode('utf-8')
elif isinstance(clean_visible, str):
clean_visible = clean_visible.decode('utf-8')
fc = FeatureCollection()
fc[u'meta_raw'] = html and uni(html, encoding) or u''
fc[u'meta_clean_html'] = clean_html
fc[u'meta_clean_visible'] = clean_visible
fc[u'meta_timestamp'] = unicode(timestamp)
url = url or u''
fc[u'meta_url'] = uni(url)
add_feature(u'icq', features.ICQs(clean_visible))
add_feature(u'skype', features.skypes(clean_visible))
add_feature(u'phone', features.phones(clean_visible))
add_feature(u'email', features.emails(clean_visible))
bowNP, normalizations = features.noun_phrases(
cleanse(clean_visible), included_unnormalized=True)
add_feature(u'bowNP', bowNP)
bowNP_unnorm = chain(*normalizations.values())
add_feature(u'bowNP_unnorm', bowNP_unnorm)
add_feature(u'image_url', features.image_urls(clean_html))
add_feature(u'a_url', features.a_urls(clean_html))
## get parsed versions, extract usernames
fc[u'img_url_path_dirs'] = features.path_dirs(fc[u'image_url'])
fc[u'img_url_hostnames'] = features.host_names(fc[u'image_url'])
fc[u'usernames'] = features.usernames(fc[u'image_url'])
fc[u'a_url_path_dirs'] = features.path_dirs(fc[u'a_url'])
fc[u'a_url_hostnames'] = features.host_names(fc[u'a_url'])
fc[u'usernames'] += features.usernames(fc[u'a_url'])
#fc[u'usernames'] += features.usernames2(
# fc[u'meta_clean_visible'])
# beginning of treating this as a pipeline...
xform = features.entity_names()
fc = xform.process(fc)
for feat_name, feat_val in other_features.iteritems():
fc[feat_name] += StringCounter(feat_val)
return fc | `html` is expected to be a raw string received over the wire from a
remote webserver, and `encoding`, if provided, is used to decode
it. Typically, encoding comes from the Content-Type header field.
The :func:`~streamcorpus_pipeline._clean_html.make_clean_html`
function handles character encodings. | Below is the instruction that describes the task:
### Input:
`html` is expected to be a raw string received over the wire from a
remote webserver, and `encoding`, if provided, is used to decode
it. Typically, encoding comes from the Content-Type header field.
The :func:`~streamcorpus_pipeline._clean_html.make_clean_html`
function handles character encodings.
### Response:
def html_to_fc(html=None, clean_html=None, clean_visible=None, encoding=None, url=None,
               timestamp=None, other_features=None):
    '''`html` is expected to be a raw string received over the wire from a
    remote webserver, and `encoding`, if provided, is used to decode
    it. Typically, encoding comes from the Content-Type header field.
    The :func:`~streamcorpus_pipeline._clean_html.make_clean_html`
    function handles character encodings.

    Returns a FeatureCollection populated with meta_* fields and
    regex-extracted features, or None if the HTML could not be cleaned.
    NOTE: this is Python-2 style code (unicode/iteritems/str.decode).
    '''
    def add_feature(name, xs):
        # Merge counts for `xs` into the named feature, creating the
        # StringCounter on first use.
        if name not in fc:
            fc[name] = StringCounter()
        fc[name] += StringCounter(xs)
    # Default timestamp is "now" in milliseconds since the epoch.
    timestamp = timestamp or int(time.time() * 1000)
    other_features = other_features or {}
    if clean_html is None:
        if html is not None:
            # Derive clean_html from the raw HTML; a document that cannot
            # be cleaned is dropped entirely (function returns None).
            try:
                clean_html_utf8 = make_clean_html(html, encoding=encoding)
            except:
                logger.warn('dropping doc because:', exc_info=True)
                return
            clean_html = clean_html_utf8.decode('utf-8')
        else:
            clean_html_utf8 = u''
            clean_html = u''
    else:
        # NOTE(review): when the caller supplies clean_html directly,
        # clean_html_utf8 stays empty, so a missing clean_visible below is
        # computed from '' rather than from clean_html -- confirm intended.
        clean_html_utf8 = u''
    if clean_visible is None or len(clean_visible) == 0:
        clean_visible = make_clean_visible(clean_html_utf8).decode('utf-8')
    elif isinstance(clean_visible, str):
        clean_visible = clean_visible.decode('utf-8')
    fc = FeatureCollection()
    fc[u'meta_raw'] = html and uni(html, encoding) or u''
    fc[u'meta_clean_html'] = clean_html
    fc[u'meta_clean_visible'] = clean_visible
    fc[u'meta_timestamp'] = unicode(timestamp)
    url = url or u''
    fc[u'meta_url'] = uni(url)
    # Regex-based extractors run over the visible text only.
    add_feature(u'icq', features.ICQs(clean_visible))
    add_feature(u'skype', features.skypes(clean_visible))
    add_feature(u'phone', features.phones(clean_visible))
    add_feature(u'email', features.emails(clean_visible))
    bowNP, normalizations = features.noun_phrases(
        cleanse(clean_visible), included_unnormalized=True)
    add_feature(u'bowNP', bowNP)
    bowNP_unnorm = chain(*normalizations.values())
    add_feature(u'bowNP_unnorm', bowNP_unnorm)
    # URL extraction works on the (markup-bearing) clean_html.
    add_feature(u'image_url', features.image_urls(clean_html))
    add_feature(u'a_url', features.a_urls(clean_html))
    ## get parsed versions, extract usernames
    fc[u'img_url_path_dirs'] = features.path_dirs(fc[u'image_url'])
    fc[u'img_url_hostnames'] = features.host_names(fc[u'image_url'])
    fc[u'usernames'] = features.usernames(fc[u'image_url'])
    fc[u'a_url_path_dirs'] = features.path_dirs(fc[u'a_url'])
    fc[u'a_url_hostnames'] = features.host_names(fc[u'a_url'])
    fc[u'usernames'] += features.usernames(fc[u'a_url'])
    #fc[u'usernames'] += features.usernames2(
    #    fc[u'meta_clean_visible'])
    # beginning of treating this as a pipeline...
    xform = features.entity_names()
    fc = xform.process(fc)
    # Caller-supplied extra features are merged in last and may add to
    # any of the counters built above.
    for feat_name, feat_val in other_features.iteritems():
        fc[feat_name] += StringCounter(feat_val)
    return fc
def save_list(key, *values):
"""Convert the given list of parameters to a JSON object.
JSON object is of the form:
{ key: [values[0], values[1], ... ] },
where values represent the given list of parameters.
"""
return json.dumps({key: [_get_json(value) for value in values]}) | Convert the given list of parameters to a JSON object.
JSON object is of the form:
{ key: [values[0], values[1], ... ] },
where values represent the given list of parameters. | Below is the the instruction that describes the task:
### Input:
Convert the given list of parameters to a JSON object.
JSON object is of the form:
{ key: [values[0], values[1], ... ] },
where values represent the given list of parameters.
### Response:
def save_list(key, *values):
    """Convert the given list of parameters to a JSON object.
    JSON object is of the form:
    { key: [values[0], values[1], ... ] },
    where values represent the given list of parameters.
    """
    converted = [_get_json(item) for item in values]
    return json.dumps({key: converted})
def store(self, transient_file, persistent_file):
'''Makes PersistentFile from TransientFile'''
#for i in range(5):
# persistent_file = PersistentFile(self.persistent_root,
# persistent_name, self)
# if not os.path.exists(persistent_file.path):
# break
#else:
# raise Exception('Unable to find free file name')
dirname = os.path.dirname(persistent_file.path)
if not os.path.isdir(dirname):
os.makedirs(dirname)
os.rename(transient_file.path, persistent_file.path)
return persistent_file | Makes PersistentFile from TransientFile | Below is the the instruction that describes the task:
### Input:
Makes PersistentFile from TransientFile
### Response:
def store(self, transient_file, persistent_file):
    '''Makes PersistentFile from TransientFile.

    Moves the transient file's bytes to the persistent location,
    creating intermediate directories as needed, and returns the
    persistent_file object.
    '''
    target_dir = os.path.dirname(persistent_file.path)
    if not os.path.isdir(target_dir):
        os.makedirs(target_dir)
    # rename() is atomic on the same filesystem; the transient copy is gone
    # afterwards.
    os.rename(transient_file.path, persistent_file.path)
    return persistent_file
def find_by_index(self, cls, index_name, value):
"""Find all rows matching index query - as per the gludb spec."""
cur = self._conn().cursor()
query = 'select id,value from %s where %s = ?' % (
cls.get_table_name(),
index_name
)
found = []
for row in cur.execute(query, (value,)):
id, data = row[0], row[1]
obj = cls.from_data(data)
assert id == obj.id
found.append(obj)
cur.close()
return found | Find all rows matching index query - as per the gludb spec. | Below is the the instruction that describes the task:
### Input:
Find all rows matching index query - as per the gludb spec.
### Response:
def find_by_index(self, cls, index_name, value):
    """Find all rows matching index query - as per the gludb spec.

    Parameters:
        cls: storable class whose table and index are queried.
        index_name: name of the index column to filter on.
        value: value the index column must equal (bound as a SQL parameter).

    Returns:
        list of deserialized instances of ``cls`` matching the query.
    """
    cur = self._conn().cursor()
    # Table and column names cannot be bound as SQL parameters, so they are
    # interpolated; both come from class metadata, not user input.
    query = 'select id,value from %s where %s = ?' % (
        cls.get_table_name(),
        index_name
    )
    found = []
    try:
        for row in cur.execute(query, (value,)):
            # `row_id` avoids shadowing the builtin `id`.
            row_id, data = row[0], row[1]
            obj = cls.from_data(data)
            # Sanity check: stored id must round-trip through deserialization.
            assert row_id == obj.id
            found.append(obj)
    finally:
        # Close the cursor even if deserialization raises part-way through.
        cur.close()
    return found
def _lock_fxn(direction, lock_mode, xact):
"""Builds a pg advisory lock function name based on various options.
:direction: one of "lock" or "unlock"
:lock_mode: a member of the LockMode enum
:xact: a boolean, if True the lock will be automatically released at the end
of the transaction and cannot be manually released.
"""
if direction == "unlock" or lock_mode == LockMode.wait:
try_mode = ""
else:
try_mode = "_try"
if direction == "lock" and xact:
xact_mode = "_xact"
else:
xact_mode = ""
return "pg{}_advisory{}_{}".format(try_mode, xact_mode, direction) | Builds a pg advisory lock function name based on various options.
:direction: one of "lock" or "unlock"
:lock_mode: a member of the LockMode enum
:xact: a boolean, if True the lock will be automatically released at the end
of the transaction and cannot be manually released. | Below is the the instruction that describes the task:
### Input:
Builds a pg advisory lock function name based on various options.
:direction: one of "lock" or "unlock"
:lock_mode: a member of the LockMode enum
:xact: a boolean, if True the lock will be automatically released at the end
of the transaction and cannot be manually released.
### Response:
def _lock_fxn(direction, lock_mode, xact):
"""Builds a pg advisory lock function name based on various options.
:direction: one of "lock" or "unlock"
:lock_mode: a member of the LockMode enum
:xact: a boolean, if True the lock will be automatically released at the end
of the transaction and cannot be manually released.
"""
if direction == "unlock" or lock_mode == LockMode.wait:
try_mode = ""
else:
try_mode = "_try"
if direction == "lock" and xact:
xact_mode = "_xact"
else:
xact_mode = ""
return "pg{}_advisory{}_{}".format(try_mode, xact_mode, direction) |
def compress(bytes, target):
"""
Compress a list of byte values to a fixed target length.
>>> bytes = [96, 173, 141, 13, 135, 27, 96, 149, 128, 130, 151]
>>> HumanHasher.compress(bytes, 4)
[205, 128, 156, 96]
Attempting to compress a smaller number of bytes to a larger number is
an error:
>>> HumanHasher.compress(bytes, 15) # doctest: +ELLIPSIS
Traceback (most recent call last):
...
ValueError: Fewer input bytes than requested output
"""
length = len(bytes)
if target > length:
raise ValueError("Fewer input bytes than requested output")
# Split `bytes` into `target` segments.
seg_size = length // target
segments = [bytes[i * seg_size:(i + 1) * seg_size]
for i in range(target)]
# Catch any left-over bytes in the last segment.
segments[-1].extend(bytes[target * seg_size:])
# Use a simple XOR checksum-like function for compression.
checksum = lambda bytes: reduce(operator.xor, bytes, 0)
checksums = list(map(checksum, segments))
return checksums | Compress a list of byte values to a fixed target length.
>>> bytes = [96, 173, 141, 13, 135, 27, 96, 149, 128, 130, 151]
>>> HumanHasher.compress(bytes, 4)
[205, 128, 156, 96]
Attempting to compress a smaller number of bytes to a larger number is
an error:
>>> HumanHasher.compress(bytes, 15) # doctest: +ELLIPSIS
Traceback (most recent call last):
...
ValueError: Fewer input bytes than requested output | Below is the the instruction that describes the task:
### Input:
Compress a list of byte values to a fixed target length.
>>> bytes = [96, 173, 141, 13, 135, 27, 96, 149, 128, 130, 151]
>>> HumanHasher.compress(bytes, 4)
[205, 128, 156, 96]
Attempting to compress a smaller number of bytes to a larger number is
an error:
>>> HumanHasher.compress(bytes, 15) # doctest: +ELLIPSIS
Traceback (most recent call last):
...
ValueError: Fewer input bytes than requested output
### Response:
def compress(bytes, target):
    """
    Compress a list of byte values to a fixed target length.
    >>> bytes = [96, 173, 141, 13, 135, 27, 96, 149, 128, 130, 151]
    >>> HumanHasher.compress(bytes, 4)
    [205, 128, 156, 96]
    Attempting to compress a smaller number of bytes to a larger number is
    an error:
    >>> HumanHasher.compress(bytes, 15) # doctest: +ELLIPSIS
    Traceback (most recent call last):
    ...
    ValueError: Fewer input bytes than requested output
    """
    length = len(bytes)
    if target > length:
        raise ValueError("Fewer input bytes than requested output")
    # Split `bytes` into `target` segments; integer division may leave a
    # remainder, which is folded into the last segment below.
    seg_size = length // target
    segments = [bytes[i * seg_size:(i + 1) * seg_size]
                for i in range(target)]
    # Catch any left-over bytes in the last segment.
    segments[-1].extend(bytes[target * seg_size:])
    # XOR-fold each segment down to a single byte (simple checksum).
    # Direct reduce() call replaces the former name-bound lambda (PEP 8 E731).
    return [reduce(operator.xor, segment, 0) for segment in segments]
def intersect(self, other, strategy=_STRATEGY.GEOMETRIC, _verify=True):
"""Find the common intersection with another surface.
Args:
other (Surface): Other surface to intersect with.
strategy (Optional[~bezier.curve.IntersectionStrategy]): The
intersection algorithm to use. Defaults to geometric.
_verify (Optional[bool]): Indicates if extra caution should be
used to verify assumptions about the algorithm as it
proceeds. Can be disabled to speed up execution time.
Defaults to :data:`True`.
Returns:
List[Union[~bezier.curved_polygon.CurvedPolygon, \
~bezier.surface.Surface]]: List of intersections (possibly empty).
Raises:
TypeError: If ``other`` is not a surface (and ``_verify=True``).
NotImplementedError: If at least one of the surfaces
isn't two-dimensional (and ``_verify=True``).
ValueError: If ``strategy`` is not a valid
:class:`.IntersectionStrategy`.
"""
if _verify:
if not isinstance(other, Surface):
raise TypeError(
"Can only intersect with another surface",
"Received",
other,
)
if self._dimension != 2 or other._dimension != 2:
raise NotImplementedError(
"Intersection only implemented in 2D"
)
if strategy == _STRATEGY.GEOMETRIC:
do_intersect = _surface_intersection.geometric_intersect
elif strategy == _STRATEGY.ALGEBRAIC:
do_intersect = _surface_intersection.algebraic_intersect
else:
raise ValueError("Unexpected strategy.", strategy)
edge_infos, contained, all_edge_nodes = do_intersect(
self._nodes, self._degree, other._nodes, other._degree, _verify
)
if edge_infos is None:
if contained:
return [self]
else:
return [other]
else:
return [
_make_intersection(edge_info, all_edge_nodes)
for edge_info in edge_infos
] | Find the common intersection with another surface.
Args:
other (Surface): Other surface to intersect with.
strategy (Optional[~bezier.curve.IntersectionStrategy]): The
intersection algorithm to use. Defaults to geometric.
_verify (Optional[bool]): Indicates if extra caution should be
used to verify assumptions about the algorithm as it
proceeds. Can be disabled to speed up execution time.
Defaults to :data:`True`.
Returns:
List[Union[~bezier.curved_polygon.CurvedPolygon, \
~bezier.surface.Surface]]: List of intersections (possibly empty).
Raises:
TypeError: If ``other`` is not a surface (and ``_verify=True``).
NotImplementedError: If at least one of the surfaces
isn't two-dimensional (and ``_verify=True``).
ValueError: If ``strategy`` is not a valid
:class:`.IntersectionStrategy`. | Below is the the instruction that describes the task:
### Input:
Find the common intersection with another surface.
Args:
other (Surface): Other surface to intersect with.
strategy (Optional[~bezier.curve.IntersectionStrategy]): The
intersection algorithm to use. Defaults to geometric.
_verify (Optional[bool]): Indicates if extra caution should be
used to verify assumptions about the algorithm as it
proceeds. Can be disabled to speed up execution time.
Defaults to :data:`True`.
Returns:
List[Union[~bezier.curved_polygon.CurvedPolygon, \
~bezier.surface.Surface]]: List of intersections (possibly empty).
Raises:
TypeError: If ``other`` is not a surface (and ``_verify=True``).
NotImplementedError: If at least one of the surfaces
isn't two-dimensional (and ``_verify=True``).
ValueError: If ``strategy`` is not a valid
:class:`.IntersectionStrategy`.
### Response:
def intersect(self, other, strategy=_STRATEGY.GEOMETRIC, _verify=True):
    """Find the common intersection with another surface.
    Args:
        other (Surface): Other surface to intersect with.
        strategy (Optional[~bezier.curve.IntersectionStrategy]): The
            intersection algorithm to use. Defaults to geometric.
        _verify (Optional[bool]): Indicates if extra caution should be
            used to verify assumptions about the algorithm as it
            proceeds. Can be disabled to speed up execution time.
            Defaults to :data:`True`.
    Returns:
        List[Union[~bezier.curved_polygon.CurvedPolygon, \
        ~bezier.surface.Surface]]: List of intersections (possibly empty).
    Raises:
        TypeError: If ``other`` is not a surface (and ``_verify=True``).
        NotImplementedError: If at least one of the surfaces
            isn't two-dimensional (and ``_verify=True``).
        ValueError: If ``strategy`` is not a valid
            :class:`.IntersectionStrategy`.
    """
    if _verify:
        # Input validation is optional so hot paths can skip it.
        if not isinstance(other, Surface):
            raise TypeError(
                "Can only intersect with another surface",
                "Received",
                other,
            )
        if self._dimension != 2 or other._dimension != 2:
            raise NotImplementedError(
                "Intersection only implemented in 2D"
            )
    # Select the low-level routine for the requested strategy; the
    # strategy check itself always runs, even with _verify=False.
    if strategy == _STRATEGY.GEOMETRIC:
        do_intersect = _surface_intersection.geometric_intersect
    elif strategy == _STRATEGY.ALGEBRAIC:
        do_intersect = _surface_intersection.algebraic_intersect
    else:
        raise ValueError("Unexpected strategy.", strategy)
    edge_infos, contained, all_edge_nodes = do_intersect(
        self._nodes, self._degree, other._nodes, other._degree, _verify
    )
    if edge_infos is None:
        # ``edge_infos is None`` signals that one surface wholly contains
        # the other; ``contained`` indicates which one is the intersection.
        if contained:
            return [self]
        else:
            return [other]
    else:
        # General case: one curved polygon per intersection region.
        return [
            _make_intersection(edge_info, all_edge_nodes)
            for edge_info in edge_infos
        ]
def dv(self, orb):
"""Computation of the velocity increment in the reference frame of the orbit
Args:
orb (Orbit):
Return:
numpy.array: Velocity increment, length 3
"""
orb = orb.copy(form="cartesian")
if self.frame == "QSW":
mat = to_qsw(orb).T
elif self.frame == "TNW":
mat = to_tnw(orb).T
else:
mat = np.identity(3)
# velocity increment in the same reference frame as the orbit
return mat @ self._dv | Computation of the velocity increment in the reference frame of the orbit
Args:
orb (Orbit):
Return:
numpy.array: Velocity increment, length 3 | Below is the the instruction that describes the task:
### Input:
Computation of the velocity increment in the reference frame of the orbit
Args:
orb (Orbit):
Return:
numpy.array: Velocity increment, length 3
### Response:
def dv(self, orb):
    """Computation of the velocity increment in the reference frame of the orbit
    Args:
        orb (Orbit):
    Return:
        numpy.array: Velocity increment, length 3
    """
    # Work on a cartesian copy so the frame rotation can be applied directly.
    orb = orb.copy(form="cartesian")
    if self.frame == "QSW":
        # .T inverts the rotation (assumes to_qsw returns an orthonormal
        # inertial->QSW matrix) -- TODO confirm against to_qsw's contract.
        mat = to_qsw(orb).T
    elif self.frame == "TNW":
        mat = to_tnw(orb).T
    else:
        # Any other frame value: no rotation is applied.
        mat = np.identity(3)
    # velocity increment in the same reference frame as the orbit
    return mat @ self._dv
def set_sequence_to_default(self):
"""Set the new sequence to the default value defined in the config."""
sequence = CONF.get_default(
'shortcuts', "{}/{}".format(self.context, self.name))
self._qsequences = sequence.split(', ')
self.update_warning() | Set the new sequence to the default value defined in the config. | Below is the the instruction that describes the task:
### Input:
Set the new sequence to the default value defined in the config.
### Response:
def set_sequence_to_default(self):
    """Set the new sequence to the default value defined in the config."""
    # Look up the factory-default shortcut for this context/name pair.
    sequence = CONF.get_default(
        'shortcuts', "{}/{}".format(self.context, self.name))
    # A config entry may hold several alternative key sequences,
    # comma-separated.
    self._qsequences = sequence.split(', ')
    self.update_warning()
def getSRTemplateInfo(self):
"""
Returns a dict with the SRTemplate infomration
{'uid':'xxxx','id':'xxxx','title':'xxx','url':'xxx'}
"""
pc = getToolByName(api.portal.get(), 'portal_catalog')
contentFilter = {'portal_type': 'SRTemplate',
'UID': self.sr_template}
srt = pc(contentFilter)
srtdict = {'uid': '', 'id': '', 'title': '', 'url': ''}
if len(srt) == 1:
template = srt[0].getObject()
srtdict = {
'uid': template.id,
'id': template.UID(),
'title': template.title,
'url': template.absolute_url(),
}
else:
from bika.lims import logger
error = "Error when looking for sr template with uid '%s'. "
logger.exception(error, self.sr_template)
return srtdict | Returns a dict with the SRTemplate infomration
{'uid':'xxxx','id':'xxxx','title':'xxx','url':'xxx'} | Below is the the instruction that describes the task:
### Input:
Returns a dict with the SRTemplate infomration
{'uid':'xxxx','id':'xxxx','title':'xxx','url':'xxx'}
### Response:
def getSRTemplateInfo(self):
    """
    Returns a dict with the SRTemplate information
    {'uid':'xxxx','id':'xxxx','title':'xxx','url':'xxx'}

    Looks the template up in portal_catalog by self.sr_template (a UID);
    on lookup failure (zero or multiple matches) an error is logged and
    a dict of empty strings is returned.
    """
    pc = getToolByName(api.portal.get(), 'portal_catalog')
    contentFilter = {'portal_type': 'SRTemplate',
                     'UID': self.sr_template}
    srt = pc(contentFilter)
    srtdict = {'uid': '', 'id': '', 'title': '', 'url': ''}
    if len(srt) == 1:
        template = srt[0].getObject()
        # BUGFIX: 'uid' and 'id' were swapped -- 'uid' now carries the
        # catalog UID and 'id' the short object id, as the docstring implies.
        srtdict = {
            'uid': template.UID(),
            'id': template.id,
            'title': template.title,
            'url': template.absolute_url(),
        }
    else:
        from bika.lims import logger
        error = "Error when looking for sr template with uid '%s'. "
        # logger.error (not .exception): there is no active traceback here,
        # so .exception would append a spurious "NoneType: None".
        logger.error(error, self.sr_template)
    return srtdict
def approximate_cholesky(self, epsilon=1e-6):
r""" Compute low-rank approximation to the Cholesky decomposition of target matrix.
The decomposition will be conducted while ensuring that the spectrum of `A_k^{-1}` is positive.
Parameters
----------
epsilon : float, optional, default 1e-6
Cutoff for eigenvalue norms. If negative eigenvalues occur, with norms larger than epsilon, the largest
negative eigenvalue norm will be used instead of epsilon, i.e. a band including all negative eigenvalues
will be cut off.
Returns
-------
L : ndarray((n,m), dtype=float)
Cholesky matrix such that `A \approx L L^{\top}`. Number of columns :math:`m` is most at the number of columns
used in the Nystroem approximation, but may be smaller depending on epsilon.
"""
# compute the Eigenvalues of C0 using Schur factorization
Wk = self._C_k[self._columns, :]
L0 = spd_inv_split(Wk, epsilon=epsilon)
L = np.dot(self._C_k, L0)
return L | r""" Compute low-rank approximation to the Cholesky decomposition of target matrix.
The decomposition will be conducted while ensuring that the spectrum of `A_k^{-1}` is positive.
Parameters
----------
epsilon : float, optional, default 1e-6
Cutoff for eigenvalue norms. If negative eigenvalues occur, with norms larger than epsilon, the largest
negative eigenvalue norm will be used instead of epsilon, i.e. a band including all negative eigenvalues
will be cut off.
Returns
-------
L : ndarray((n,m), dtype=float)
Cholesky matrix such that `A \approx L L^{\top}`. Number of columns :math:`m` is most at the number of columns
used in the Nystroem approximation, but may be smaller depending on epsilon. | Below is the the instruction that describes the task:
### Input:
r""" Compute low-rank approximation to the Cholesky decomposition of target matrix.
The decomposition will be conducted while ensuring that the spectrum of `A_k^{-1}` is positive.
Parameters
----------
epsilon : float, optional, default 1e-6
Cutoff for eigenvalue norms. If negative eigenvalues occur, with norms larger than epsilon, the largest
negative eigenvalue norm will be used instead of epsilon, i.e. a band including all negative eigenvalues
will be cut off.
Returns
-------
L : ndarray((n,m), dtype=float)
Cholesky matrix such that `A \approx L L^{\top}`. Number of columns :math:`m` is most at the number of columns
used in the Nystroem approximation, but may be smaller depending on epsilon.
### Response:
def approximate_cholesky(self, epsilon=1e-6):
    r""" Compute low-rank approximation to the Cholesky decomposition of target matrix.
    The decomposition will be conducted while ensuring that the spectrum of `A_k^{-1}` is positive.
    Parameters
    ----------
    epsilon : float, optional, default 1e-6
        Cutoff for eigenvalue norms. If negative eigenvalues occur, with norms larger than epsilon, the largest
        negative eigenvalue norm will be used instead of epsilon, i.e. a band including all negative eigenvalues
        will be cut off.
    Returns
    -------
    L : ndarray((n,m), dtype=float)
        Cholesky matrix such that `A \approx L L^{\top}`. Number of columns :math:`m` is most at the number of columns
        used in the Nystroem approximation, but may be smaller depending on epsilon.
    """
    # Restrict C_k to the sampled columns: Wk is the small intersection
    # matrix of the Nystroem approximation.
    Wk = self._C_k[self._columns, :]
    # spd_inv_split handles the eigenvalue cutoff at `epsilon`; presumably
    # L0 L0^T ~= Wk^{-1} -- confirm against spd_inv_split's contract.
    L0 = spd_inv_split(Wk, epsilon=epsilon)
    L = np.dot(self._C_k, L0)
    return L
def agent_heartbeat(self, agent_id, metrics, run_states):
"""Notify server about agent state, receive commands.
Args:
agent_id (str): agent_id
metrics (dict): system metrics
run_states (dict): run_id: state mapping
Returns:
List of commands to execute.
"""
mutation = gql('''
mutation Heartbeat(
$id: ID!,
$metrics: JSONString,
$runState: JSONString
) {
agentHeartbeat(input: {
id: $id,
metrics: $metrics,
runState: $runState
}) {
agent {
id
}
commands
}
}
''')
try:
response = self.gql(mutation, variable_values={
'id': agent_id,
'metrics': json.dumps(metrics),
'runState': json.dumps(run_states)})
except Exception as e:
# GQL raises exceptions with stringified python dictionaries :/
message = ast.literal_eval(e.args[0])["message"]
logger.error('Error communicating with W&B: %s', message)
return []
else:
return json.loads(response['agentHeartbeat']['commands']) | Notify server about agent state, receive commands.
Args:
agent_id (str): agent_id
metrics (dict): system metrics
run_states (dict): run_id: state mapping
Returns:
List of commands to execute. | Below is the the instruction that describes the task:
### Input:
Notify server about agent state, receive commands.
Args:
agent_id (str): agent_id
metrics (dict): system metrics
run_states (dict): run_id: state mapping
Returns:
List of commands to execute.
### Response:
def agent_heartbeat(self, agent_id, metrics, run_states):
    """Notify server about agent state, receive commands.
    Args:
        agent_id (str): agent_id
        metrics (dict): system metrics
        run_states (dict): run_id: state mapping
    Returns:
        List of commands to execute. Empty list on communication failure.
    """
    # GraphQL mutation: report metrics and per-run state, and receive any
    # pending commands for this agent in the same round trip.
    mutation = gql('''
    mutation Heartbeat(
        $id: ID!,
        $metrics: JSONString,
        $runState: JSONString
    ) {
        agentHeartbeat(input: {
            id: $id,
            metrics: $metrics,
            runState: $runState
        }) {
            agent {
                id
            }
            commands
        }
    }
    ''')
    try:
        # metrics/run_states are JSON-encoded because the schema declares
        # them as JSONString scalars.
        response = self.gql(mutation, variable_values={
            'id': agent_id,
            'metrics': json.dumps(metrics),
            'runState': json.dumps(run_states)})
    except Exception as e:
        # GQL raises exceptions with stringified python dictionaries :/
        # NOTE(review): literal_eval of e.args[0] assumes that exact error
        # shape -- a different exception type would raise here; confirm.
        message = ast.literal_eval(e.args[0])["message"]
        logger.error('Error communicating with W&B: %s', message)
        return []
    else:
        # `commands` is itself a JSON-encoded string in the response.
        return json.loads(response['agentHeartbeat']['commands'])
def get_status(self):
'''
Get the status of the report and its sub-reports.
:rtype: str
:return: report status ('passed', 'failed' or 'error')
'''
status = self.get('status')
if status == Report.PASSED:
for sr_name in self._sub_reports:
sr = self._sub_reports[sr_name]
sr_status = sr.get_status()
reason = sr.get('reason')
if sr_status == Report.ERROR:
self.error(reason)
break
if sr_status == Report.FAILED:
self.failed(reason)
break
status = self.get('status')
return status | Get the status of the report and its sub-reports.
:rtype: str
:return: report status ('passed', 'failed' or 'error') | Below is the the instruction that describes the task:
### Input:
Get the status of the report and its sub-reports.
:rtype: str
:return: report status ('passed', 'failed' or 'error')
### Response:
def get_status(self):
    '''
    Get the status of the report and its sub-reports.
    :rtype: str
    :return: report status ('passed', 'failed' or 'error')
    '''
    status = self.get('status')
    if status == Report.PASSED:
        # A report only stays "passed" while every sub-report passes; the
        # first error/failure found demotes this report too (error wins
        # over failure because it is checked first).
        for sr_name in self._sub_reports:
            sr = self._sub_reports[sr_name]
            sr_status = sr.get_status()
            reason = sr.get('reason')
            if sr_status == Report.ERROR:
                self.error(reason)
                break
            if sr_status == Report.FAILED:
                self.failed(reason)
                break
        # Re-read: self.error()/self.failed() above update the stored status.
        status = self.get('status')
    return status
def exit(self, status=EXIT_OK, message=None):
"""
Terminate the script.
"""
if not self.parser:
self.parser = argparse.ArgumentParser()
if self.msg_on_error_only:
# if msg_on_error_only is True
if status != EXIT_OK:
# if we have an error we'll exit with the message also.
self.parser.exit(status, message)
else:
# else we'll exit with the status ongly
self.parser.exit(status, None)
else:
# else if msg_on_error_only is not True
# we'll exit with the status and the message
self.parser.exit(status, message) | Terminate the script. | Below is the the instruction that describes the task:
### Input:
Terminate the script.
### Response:
def exit(self, status=EXIT_OK, message=None):
    """
    Terminate the script.

    When msg_on_error_only is set, the message is printed only for a
    non-OK exit status; otherwise it is always passed through.
    """
    if not self.parser:
        # Lazily create a parser just to reuse its exit() machinery.
        self.parser = argparse.ArgumentParser()
    if self.msg_on_error_only and status == EXIT_OK:
        # Successful exit with message suppression enabled: status only.
        self.parser.exit(status, None)
    else:
        self.parser.exit(status, message)
def iso_reference_valid_char(c, raise_error=True):
"""Helper to make sure the given character is valid for a reference number"""
if c in ISO_REFERENCE_VALID:
return True
if raise_error:
raise ValueError("'%s' is not in '%s'" % (c, ISO_REFERENCE_VALID))
return False | Helper to make sure the given character is valid for a reference number | Below is the the instruction that describes the task:
### Input:
Helper to make sure the given character is valid for a reference number
### Response:
def iso_reference_valid_char(c, raise_error=True):
    """Helper to make sure the given character is valid for a reference number"""
    if c not in ISO_REFERENCE_VALID:
        # Either raise or report the invalid character, per caller's choice.
        if raise_error:
            raise ValueError("'%s' is not in '%s'" % (c, ISO_REFERENCE_VALID))
        return False
    return True
def numpy_from_yaml(constructor: Constructor, data: ruamel.yaml.nodes.SequenceNode) -> np.ndarray:
""" Read an array from YAML to numpy.
It reads arrays registered under the tag ``!numpy_array``.
Use with:
.. code-block:: python
>>> yaml = ruamel.yaml.YAML()
>>> yaml.constructor.add_constructor("!numpy_array", yaml.numpy_from_yaml)
Note:
We cannot use ``yaml.register_class`` because it won't register the proper type.
(It would register the type of the class, rather than of `numpy.ndarray`). Instead,
we use the above approach to register this method explicitly with the representer.
"""
# Construct the contained values so that we properly construct int, float, etc.
# We just leave this to YAML because it already stores this information.
values = [constructor.construct_object(n) for n in data.value]
logger.debug(f"{data}, {values}")
return np.array(values) | Read an array from YAML to numpy.
It reads arrays registered under the tag ``!numpy_array``.
Use with:
.. code-block:: python
>>> yaml = ruamel.yaml.YAML()
>>> yaml.constructor.add_constructor("!numpy_array", yaml.numpy_from_yaml)
Note:
We cannot use ``yaml.register_class`` because it won't register the proper type.
(It would register the type of the class, rather than of `numpy.ndarray`). Instead,
we use the above approach to register this method explicitly with the representer. | Below is the the instruction that describes the task:
### Input:
Read an array from YAML to numpy.
It reads arrays registered under the tag ``!numpy_array``.
Use with:
.. code-block:: python
>>> yaml = ruamel.yaml.YAML()
>>> yaml.constructor.add_constructor("!numpy_array", yaml.numpy_from_yaml)
Note:
We cannot use ``yaml.register_class`` because it won't register the proper type.
(It would register the type of the class, rather than of `numpy.ndarray`). Instead,
we use the above approach to register this method explicitly with the representer.
### Response:
def numpy_from_yaml(constructor: Constructor, data: ruamel.yaml.nodes.SequenceNode) -> np.ndarray:
    """ Read an array from YAML to numpy.
    It reads arrays registered under the tag ``!numpy_array``.
    Use with:
    .. code-block:: python
        >>> yaml = ruamel.yaml.YAML()
        >>> yaml.constructor.add_constructor("!numpy_array", yaml.numpy_from_yaml)
    Note:
        We cannot use ``yaml.register_class`` because it won't register the proper type.
        (It would register the type of the class, rather than of `numpy.ndarray`). Instead,
        we use the above approach to register this method explicitly with the representer.
    """
    # Let YAML construct each child node so scalars keep their native
    # types (int, float, str, ...) instead of all becoming strings.
    elements = list(map(constructor.construct_object, data.value))
    logger.debug(f"{data}, {elements}")
    return np.array(elements)
def get_minions():
'''
Return a list of minions
'''
serv = _get_serv(ret=None)
sql = "select distinct(id) from returns"
data = serv.query(sql)
ret = []
if data:
for jid in data[0]['points']:
ret.append(jid[1])
return ret | Return a list of minions | Below is the instruction that describes the task:
### Input:
Return a list of minions
### Response:
def get_minions():
    '''
    Return a list of minions
    '''
    serv = _get_serv(ret=None)
    data = serv.query("select distinct(id) from returns")
    minions = []
    if data:
        # Each entry in the first series' points is a row tuple; the
        # distinct id value sits at index 1.
        minions.extend(point[1] for point in data[0]['points'])
    return minions
def logevents(self):
"""Iterator yielding all logevents from groups dictionary."""
for key in self.groups:
for logevent in self.groups[key]:
yield logevent | Iterator yielding all logevents from groups dictionary. | Below is the instruction that describes the task:
### Input:
Iterator yielding all logevents from groups dictionary.
### Response:
def logevents(self):
    """Iterator yielding all logevents from groups dictionary."""
    # Groups are visited in dict insertion order; events within a group
    # keep their stored order.
    for events in self.groups.values():
        yield from events
def values(self):
"""Gets the user enter max and min values of where the
raster points should appear on the y-axis
:returns: (float, float) -- (min, max) y-values to bound the raster plot by
"""
lower = float(self.lowerSpnbx.value())
upper = float(self.upperSpnbx.value())
return (lower, upper) | Gets the user enter max and min values of where the
raster points should appear on the y-axis
:returns: (float, float) -- (min, max) y-values to bound the raster plot by | Below is the instruction that describes the task:
### Input:
Gets the user enter max and min values of where the
raster points should appear on the y-axis
:returns: (float, float) -- (min, max) y-values to bound the raster plot by
### Response:
def values(self):
    """Return the user-entered bounds for where raster points appear on
    the y-axis.

    :returns: (float, float) -- (min, max) y-values to bound the raster plot by
    """
    # Spinbox values are coerced to float so callers always get a
    # uniform (min, max) pair regardless of the widget's numeric type.
    return (float(self.lowerSpnbx.value()), float(self.upperSpnbx.value()))
def _integrate(self, time_steps, capture_elements, return_timestamps):
"""
Performs euler integration
Parameters
----------
time_steps: iterable
the time steps that the integrator progresses over
capture_elements: list
which model elements to capture - uses pysafe names
return_timestamps:
which subset of 'timesteps' should be values be returned?
Returns
-------
outputs: list of dictionaries
"""
# Todo: consider adding the timestamp to the return elements, and using that as the index
outputs = []
for t2 in time_steps[1:]:
if self.time() in return_timestamps:
outputs.append({key: getattr(self.components, key)() for key in capture_elements})
self._euler_step(t2 - self.time())
self.time.update(t2) # this will clear the stepwise caches
# need to add one more time step, because we run only the state updates in the previous
# loop and thus may be one short.
if self.time() in return_timestamps:
outputs.append({key: getattr(self.components, key)() for key in capture_elements})
return outputs | Performs euler integration
Parameters
----------
time_steps: iterable
the time steps that the integrator progresses over
capture_elements: list
which model elements to capture - uses pysafe names
return_timestamps:
which subset of 'timesteps' should be values be returned?
Returns
-------
outputs: list of dictionaries | Below is the instruction that describes the task:
### Input:
Performs euler integration
Parameters
----------
time_steps: iterable
the time steps that the integrator progresses over
capture_elements: list
which model elements to capture - uses pysafe names
return_timestamps:
which subset of 'timesteps' should be values be returned?
Returns
-------
outputs: list of dictionaries
### Response:
def _integrate(self, time_steps, capture_elements, return_timestamps):
    """
    Performs euler integration
    Parameters
    ----------
    time_steps: iterable
        the time steps that the integrator progresses over
    capture_elements: list
        which model elements to capture - uses pysafe names
    return_timestamps:
        which subset of 'timesteps' should be values be returned?
    Returns
    -------
    outputs: list of dictionaries
    """
    # Todo: consider adding the timestamp to the return elements, and using that as the index
    outputs = []
    for t2 in time_steps[1:]:
        # Snapshot BEFORE stepping so each captured dict reflects the model
        # state at the current time, not the state after advancing to t2.
        if self.time() in return_timestamps:
            outputs.append({key: getattr(self.components, key)() for key in capture_elements})
        # Advance the state by one euler step of size (t2 - current time).
        self._euler_step(t2 - self.time())
        self.time.update(t2)  # this will clear the stepwise caches
    # need to add one more time step, because we run only the state updates in the previous
    # loop and thus may be one short.
    if self.time() in return_timestamps:
        outputs.append({key: getattr(self.components, key)() for key in capture_elements})
    return outputs
def mslike(pop, **kwargs):
"""
Function to establish default parameters
for a single-locus simulation for standard pop-gen
modeling scenarios.
:params pop: An instance of :class:`fwdpy11.DiploidPopulation`
:params kwargs: Keyword arguments.
"""
import fwdpy11
if isinstance(pop, fwdpy11.DiploidPopulation) is False:
raise ValueError("incorrect pop type: " + str(type(pop)))
defaults = {'simlen': 10*pop.N,
'beg': 0.0,
'end': 1.0,
'theta': 100.0,
'pneutral': 1.0,
'rho': 100.0,
'dfe': None
}
for key, value in kwargs.items():
if key in defaults:
defaults[key] = value
import numpy as np
params = {'demography': np.array([pop.N]*defaults['simlen'],
dtype=np.uint32),
'nregions': [fwdpy11.Region(defaults['beg'],
defaults['end'], 1.0)],
'recregions': [fwdpy11.Region(defaults['beg'],
defaults['end'], 1.0)],
'rates': ((defaults['pneutral']*defaults['theta'])/(4.0*pop.N),
((1.0-defaults['pneutral'])*defaults['theta']) /
(4.0*pop.N),
defaults['rho']/(4.0*float(pop.N))),
'gvalue': fwdpy11.Multiplicative(2.0)
}
if defaults['dfe'] is None:
params['sregions'] = []
else:
params['sregions'] = [defaults['dfe']]
return params | Function to establish default parameters
for a single-locus simulation for standard pop-gen
modeling scenarios.
:params pop: An instance of :class:`fwdpy11.DiploidPopulation`
:params kwargs: Keyword arguments. | Below is the instruction that describes the task:
### Input:
Function to establish default parameters
for a single-locus simulation for standard pop-gen
modeling scenarios.
:params pop: An instance of :class:`fwdpy11.DiploidPopulation`
:params kwargs: Keyword arguments.
### Response:
def mslike(pop, **kwargs):
    """
    Function to establish default parameters
    for a single-locus simulation for standard pop-gen
    modeling scenarios.
    :params pop: An instance of :class:`fwdpy11.DiploidPopulation`
    :params kwargs: Keyword arguments.
    """
    import fwdpy11
    if not isinstance(pop, fwdpy11.DiploidPopulation):
        raise ValueError("incorrect pop type: " + str(type(pop)))
    defaults = {'simlen': 10 * pop.N,
                'beg': 0.0,
                'end': 1.0,
                'theta': 100.0,
                'pneutral': 1.0,
                'rho': 100.0,
                'dfe': None}
    # Only recognized keywords override a default; anything else is
    # silently ignored.
    defaults.update((key, value) for key, value in kwargs.items()
                    if key in defaults)
    import numpy as np
    four_n = 4.0 * pop.N
    neutral_rate = (defaults['pneutral'] * defaults['theta']) / four_n
    selected_rate = ((1.0 - defaults['pneutral']) * defaults['theta']) / four_n
    rec_rate = defaults['rho'] / (4.0 * float(pop.N))
    params = {'demography': np.array([pop.N] * defaults['simlen'],
                                     dtype=np.uint32),
              'nregions': [fwdpy11.Region(defaults['beg'],
                                          defaults['end'], 1.0)],
              'recregions': [fwdpy11.Region(defaults['beg'],
                                            defaults['end'], 1.0)],
              'rates': (neutral_rate, selected_rate, rec_rate),
              'gvalue': fwdpy11.Multiplicative(2.0)}
    # With no DFE there are no selected regions.
    params['sregions'] = [] if defaults['dfe'] is None else [defaults['dfe']]
    return params
def var_set(session, **kwargs):
"""
Sets the given variables or prints the current ones. "set answer=42"
"""
if not kwargs:
for name, value in session.variables.items():
session.write_line("{0}={1}".format(name, value))
else:
for name, value in kwargs.items():
name = name.strip()
session.set(name, value)
session.write_line("{0}={1}", name, value) | Sets the given variables or prints the current ones. "set answer=42" | Below is the instruction that describes the task:
### Input:
Sets the given variables or prints the current ones. "set answer=42"
### Response:
def var_set(session, **kwargs):
    """
    Sets the given variables or prints the current ones. "set answer=42"

    With no keyword arguments, writes every current ``name=value`` pair
    to the session. Otherwise each keyword name is stripped, stored on
    the session via ``session.set``, and echoed back as ``name=value``.
    """
    if not kwargs:
        for name, value in session.variables.items():
            session.write_line("{0}={1}".format(name, value))
    else:
        for name, value in kwargs.items():
            name = name.strip()
            session.set(name, value)
            # Fix: the format string was previously passed through
            # unformatted (write_line("{0}={1}", name, value)); format it
            # explicitly, consistent with the read-only branch above.
            session.write_line("{0}={1}".format(name, value))
def result(self, state, action):
'''Return the resulting state after moving a piece to the empty space.
(the "action" parameter contains the piece to move)
'''
rows = string_to_list(state)
row_e, col_e = find_location(rows, 'e')
row_n, col_n = find_location(rows, action)
rows[row_e][col_e], rows[row_n][col_n] = rows[row_n][col_n], rows[row_e][col_e]
return list_to_string(rows) | Return the resulting state after moving a piece to the empty space.
(the "action" parameter contains the piece to move) | Below is the instruction that describes the task:
### Input:
Return the resulting state after moving a piece to the empty space.
(the "action" parameter contains the piece to move)
### Response:
def result(self, state, action):
    '''Return the resulting state after moving a piece to the empty space.
    (the "action" parameter contains the piece to move)
    '''
    board = string_to_list(state)
    empty_row, empty_col = find_location(board, 'e')
    piece_row, piece_col = find_location(board, action)
    # Move the piece into the empty cell and mark its old cell empty
    # (equivalent to swapping the two cells, since one holds 'e').
    board[empty_row][empty_col] = board[piece_row][piece_col]
    board[piece_row][piece_col] = 'e'
    return list_to_string(board)
def _createGaVariantAnnotation(self):
"""
Convenience method to set the common fields in a GA VariantAnnotation
object from this variant set.
"""
ret = protocol.VariantAnnotation()
ret.created = self._creationTime
ret.variant_annotation_set_id = self.getId()
return ret | Convenience method to set the common fields in a GA VariantAnnotation
object from this variant set. | Below is the instruction that describes the task:
### Input:
Convenience method to set the common fields in a GA VariantAnnotation
object from this variant set.
### Response:
def _createGaVariantAnnotation(self):
    """
    Convenience method to set the common fields in a GA VariantAnnotation
    object from this variant set.
    """
    annotation = protocol.VariantAnnotation()
    annotation.variant_annotation_set_id = self.getId()
    annotation.created = self._creationTime
    return annotation
def sigfig_str(number, sigfig):
"""
References:
http://stackoverflow.com/questions/2663612/nicely-repr-float-in-python
"""
assert(sigfig > 0)
try:
d = decimal.Decimal(number)
except TypeError:
d = float_to_decimal(float(number))
sign, digits, exponent = d.as_tuple()
if len(digits) < sigfig:
digits = list(digits)
digits.extend([0] * (sigfig - len(digits)))
shift = d.adjusted()
result = int(''.join(map(str, digits[:sigfig])))
# Round the result
if len(digits) > sigfig and digits[sigfig] >= 5:
result += 1
result = list(str(result))
# Rounding can change the length of result
# If so, adjust shift
shift += len(result) - sigfig
# reset len of result to sigfig
result = result[:sigfig]
if shift >= sigfig - 1:
# Tack more zeros on the end
result += ['0'] * (shift - sigfig + 1)
elif 0 <= shift:
# Place the decimal point in between digits
result.insert(shift + 1, '.')
else:
# Tack zeros on the front
assert(shift < 0)
result = ['0.'] + ['0'] * (-shift - 1) + result
if sign:
result.insert(0, '-')
return ''.join(result) | References:
http://stackoverflow.com/questions/2663612/nicely-repr-float-in-python | Below is the instruction that describes the task:
### Input:
References:
http://stackoverflow.com/questions/2663612/nicely-repr-float-in-python
### Response:
def sigfig_str(number, sigfig):
    """Format ``number`` to ``sigfig`` significant figures as a string.

    References:
        http://stackoverflow.com/questions/2663612/nicely-repr-float-in-python
    """
    assert(sigfig > 0)
    try:
        d = decimal.Decimal(number)
    except TypeError:
        # Decimal() rejects this type directly; fall back to an explicit
        # float -> Decimal conversion helper.
        d = float_to_decimal(float(number))
    # NOTE(review): `exponent` is unpacked but unused; d.adjusted() below
    # supplies the position of the most significant digit instead.
    sign, digits, exponent = d.as_tuple()
    if len(digits) < sigfig:
        # Pad with trailing zeros so there are always `sigfig` digits to keep.
        digits = list(digits)
        digits.extend([0] * (sigfig - len(digits)))
    shift = d.adjusted()
    # Keep the first `sigfig` digits as one integer.
    result = int(''.join(map(str, digits[:sigfig])))
    # Round the result
    if len(digits) > sigfig and digits[sigfig] >= 5:
        result += 1
    result = list(str(result))
    # Rounding can change the length of result
    # If so, adjust shift
    shift += len(result) - sigfig
    # reset len of result to sigfig
    result = result[:sigfig]
    if shift >= sigfig - 1:
        # Tack more zeros on the end
        result += ['0'] * (shift - sigfig + 1)
    elif 0 <= shift:
        # Place the decimal point in between digits
        result.insert(shift + 1, '.')
    else:
        # Tack zeros on the front
        assert(shift < 0)
        result = ['0.'] + ['0'] * (-shift - 1) + result
    if sign:
        result.insert(0, '-')
    return ''.join(result)
def cli(env, context_id, subnet_id, subnet_type, network_identifier):
"""Add a subnet to an IPSEC tunnel context.
A subnet id may be specified to link to the existing tunnel context.
Otherwise, a network identifier in CIDR notation should be specified,
indicating that a subnet resource should first be created before associating
it with the tunnel context. Note that this is only supported for remote
subnets, which are also deleted upon failure to attach to a context.
A separate configuration request should be made to realize changes on
network devices.
"""
create_remote = False
if subnet_id is None:
if network_identifier is None:
raise ArgumentError('Either a network identifier or subnet id '
'must be provided.')
if subnet_type != 'remote':
raise ArgumentError('Unable to create {} subnets'
.format(subnet_type))
create_remote = True
manager = SoftLayer.IPSECManager(env.client)
context = manager.get_tunnel_context(context_id)
if create_remote:
subnet = manager.create_remote_subnet(context['accountId'],
identifier=network_identifier[0],
cidr=network_identifier[1])
subnet_id = subnet['id']
env.out('Created subnet {}/{} #{}'
.format(network_identifier[0],
network_identifier[1],
subnet_id))
succeeded = False
if subnet_type == 'internal':
succeeded = manager.add_internal_subnet(context_id, subnet_id)
elif subnet_type == 'remote':
succeeded = manager.add_remote_subnet(context_id, subnet_id)
elif subnet_type == 'service':
succeeded = manager.add_service_subnet(context_id, subnet_id)
if succeeded:
env.out('Added {} subnet #{}'.format(subnet_type, subnet_id))
else:
raise CLIHalt('Failed to add {} subnet #{}'
.format(subnet_type, subnet_id)) | Add a subnet to an IPSEC tunnel context.
A subnet id may be specified to link to the existing tunnel context.
Otherwise, a network identifier in CIDR notation should be specified,
indicating that a subnet resource should first be created before associating
it with the tunnel context. Note that this is only supported for remote
subnets, which are also deleted upon failure to attach to a context.
A separate configuration request should be made to realize changes on
network devices. | Below is the instruction that describes the task:
### Input:
Add a subnet to an IPSEC tunnel context.
A subnet id may be specified to link to the existing tunnel context.
Otherwise, a network identifier in CIDR notation should be specified,
indicating that a subnet resource should first be created before associating
it with the tunnel context. Note that this is only supported for remote
subnets, which are also deleted upon failure to attach to a context.
A separate configuration request should be made to realize changes on
network devices.
### Response:
def cli(env, context_id, subnet_id, subnet_type, network_identifier):
    """Add a subnet to an IPSEC tunnel context.
    A subnet id may be specified to link to the existing tunnel context.
    Otherwise, a network identifier in CIDR notation should be specified,
    indicating that a subnet resource should first be created before associating
    it with the tunnel context. Note that this is only supported for remote
    subnets, which are also deleted upon failure to attach to a context.
    A separate configuration request should be made to realize changes on
    network devices.
    """
    create_remote = subnet_id is None
    if create_remote:
        if network_identifier is None:
            raise ArgumentError('Either a network identifier or subnet id '
                                'must be provided.')
        if subnet_type != 'remote':
            raise ArgumentError('Unable to create {} subnets'
                                .format(subnet_type))
    manager = SoftLayer.IPSECManager(env.client)
    # Fetching the context up front also validates that it exists.
    context = manager.get_tunnel_context(context_id)
    if create_remote:
        subnet = manager.create_remote_subnet(context['accountId'],
                                              identifier=network_identifier[0],
                                              cidr=network_identifier[1])
        subnet_id = subnet['id']
        env.out('Created subnet {}/{} #{}'
                .format(network_identifier[0],
                        network_identifier[1],
                        subnet_id))
    # Dispatch to the manager call matching the subnet type; an
    # unrecognized type yields no call and thus a failure below.
    add_subnet = {'internal': manager.add_internal_subnet,
                  'remote': manager.add_remote_subnet,
                  'service': manager.add_service_subnet}.get(subnet_type)
    succeeded = add_subnet(context_id, subnet_id) if add_subnet else False
    if not succeeded:
        raise CLIHalt('Failed to add {} subnet #{}'
                      .format(subnet_type, subnet_id))
    env.out('Added {} subnet #{}'.format(subnet_type, subnet_id))
def _compute_analysis_extent(self):
"""Compute the minimum extent between layers.
This function will set the self._analysis_extent geometry using
aggregation CRS or crs property.
:return: A tuple with the status of the IF and an error message if
needed.
The status is PREPARE_SUCCESS if everything was fine.
The status is PREPARE_FAILED_INSUFFICIENT_OVERLAP if the client
should fix the analysis extent.
The status is PREPARE_FAILED_BAD_CODE if something went wrong
from the code.
:rtype: (int, m.Message)
"""
exposure_extent = QgsGeometry.fromRect(self.exposure.extent())
hazard_extent = QgsGeometry.fromRect(self.hazard.extent())
if self.aggregation:
analysis_crs = self.aggregation.crs()
else:
analysis_crs = self._crs
if self.hazard.crs().authid() != analysis_crs.authid():
crs_transform = QgsCoordinateTransform(
self.hazard.crs(), analysis_crs, QgsProject.instance())
hazard_extent.transform(crs_transform)
if self.exposure.crs().authid() != analysis_crs.authid():
crs_transform = QgsCoordinateTransform(
self.exposure.crs(), analysis_crs, QgsProject.instance())
exposure_extent.transform(crs_transform)
# We check if the hazard and the exposure overlap.
if not exposure_extent.intersects(hazard_extent):
message = generate_input_error_message(
tr('Layers need to overlap.'),
m.Paragraph(tr(
'The exposure and the hazard layer need to overlap.'))
)
return PREPARE_FAILED_INSUFFICIENT_OVERLAP, message
else:
hazard_exposure = exposure_extent.intersection(hazard_extent)
if not self.aggregation:
if self.requested_extent:
user_bounding_box = QgsGeometry.fromRect(self.requested_extent)
if self._crs != self.exposure.crs():
crs_transform = QgsCoordinateTransform(
self._crs, self.exposure.crs(), QgsProject.instance())
user_bounding_box.transform(crs_transform)
if not hazard_exposure.intersects(user_bounding_box):
message = generate_input_error_message(
tr('The bounding box need to overlap layers.'),
m.Paragraph(tr(
'The requested analysis extent is not overlaping '
'the exposure and the hazard.'))
)
return (
PREPARE_FAILED_INSUFFICIENT_OVERLAP_REQUESTED_EXTENT,
message)
else:
self._analysis_extent = hazard_exposure.intersection(
user_bounding_box)
elif self.use_exposure_view_only:
self._analysis_extent = exposure_extent
else:
self._analysis_extent = hazard_exposure
else:
# We monkey patch if we use selected features only.
self.aggregation.use_selected_features_only = (
self.use_selected_features_only)
self.aggregation = create_valid_aggregation(self.aggregation)
list_geometry = []
for area in self.aggregation.getFeatures():
list_geometry.append(QgsGeometry(area.geometry()))
geometry = QgsGeometry.unaryUnion(list_geometry)
if geometry.isMultipart():
multi_polygon = geometry.asMultiPolygon()
for polygon in multi_polygon:
for ring in polygon[1:]:
polygon.remove(ring)
self._analysis_extent = QgsGeometry.fromMultiPolygonXY(
multi_polygon)
else:
polygon = geometry.asPolygon()
for ring in polygon[1:]:
polygon.remove(ring)
self._analysis_extent = QgsGeometry.fromPolygonXY(polygon)
is_empty = self._analysis_extent.isEmpty()
is_invalid = not self._analysis_extent.isGeosValid()
if is_empty or is_invalid:
message = generate_input_error_message(
tr('There is a problem with the aggregation layer.'),
m.Paragraph(tr(
'The aggregation layer seems to have a problem. '
'Some features might be invalid. You should check the '
'validity of this layer or use a selection within '
'this layer.'))
)
return PREPARE_FAILED_BAD_LAYER, message
return PREPARE_SUCCESS, None | Compute the minimum extent between layers.
This function will set the self._analysis_extent geometry using
aggregation CRS or crs property.
:return: A tuple with the status of the IF and an error message if
needed.
The status is PREPARE_SUCCESS if everything was fine.
The status is PREPARE_FAILED_INSUFFICIENT_OVERLAP if the client
should fix the analysis extent.
The status is PREPARE_FAILED_BAD_CODE if something went wrong
from the code.
:rtype: (int, m.Message) | Below is the instruction that describes the task:
### Input:
Compute the minimum extent between layers.
This function will set the self._analysis_extent geometry using
aggregation CRS or crs property.
:return: A tuple with the status of the IF and an error message if
needed.
The status is PREPARE_SUCCESS if everything was fine.
The status is PREPARE_FAILED_INSUFFICIENT_OVERLAP if the client
should fix the analysis extent.
The status is PREPARE_FAILED_BAD_CODE if something went wrong
from the code.
:rtype: (int, m.Message)
### Response:
def _compute_analysis_extent(self):
    """Compute the minimum extent between layers.
    This function will set the self._analysis_extent geometry using
    aggregation CRS or crs property.
    :return: A tuple with the status of the IF and an error message if
        needed.
        The status is PREPARE_SUCCESS if everything was fine.
        The status is PREPARE_FAILED_INSUFFICIENT_OVERLAP if the client
        should fix the analysis extent.
        The status is PREPARE_FAILED_BAD_CODE if something went wrong
        from the code.
    :rtype: (int, m.Message)
    """
    exposure_extent = QgsGeometry.fromRect(self.exposure.extent())
    hazard_extent = QgsGeometry.fromRect(self.hazard.extent())
    # The analysis CRS comes from the aggregation layer when one is set,
    # otherwise from the explicitly configured CRS.
    if self.aggregation:
        analysis_crs = self.aggregation.crs()
    else:
        analysis_crs = self._crs
    # Reproject both extents into the analysis CRS where needed.
    if self.hazard.crs().authid() != analysis_crs.authid():
        crs_transform = QgsCoordinateTransform(
            self.hazard.crs(), analysis_crs, QgsProject.instance())
        hazard_extent.transform(crs_transform)
    if self.exposure.crs().authid() != analysis_crs.authid():
        crs_transform = QgsCoordinateTransform(
            self.exposure.crs(), analysis_crs, QgsProject.instance())
        exposure_extent.transform(crs_transform)
    # We check if the hazard and the exposure overlap.
    if not exposure_extent.intersects(hazard_extent):
        message = generate_input_error_message(
            tr('Layers need to overlap.'),
            m.Paragraph(tr(
                'The exposure and the hazard layer need to overlap.'))
        )
        return PREPARE_FAILED_INSUFFICIENT_OVERLAP, message
    else:
        hazard_exposure = exposure_extent.intersection(hazard_extent)
    if not self.aggregation:
        # No aggregation layer: use the requested extent (if any),
        # the exposure view, or the hazard/exposure intersection.
        if self.requested_extent:
            user_bounding_box = QgsGeometry.fromRect(self.requested_extent)
            # NOTE(review): the requested extent is assumed to be in
            # self._crs and is transformed to the exposure CRS here —
            # confirm against callers.
            if self._crs != self.exposure.crs():
                crs_transform = QgsCoordinateTransform(
                    self._crs, self.exposure.crs(), QgsProject.instance())
                user_bounding_box.transform(crs_transform)
            if not hazard_exposure.intersects(user_bounding_box):
                message = generate_input_error_message(
                    tr('The bounding box need to overlap layers.'),
                    m.Paragraph(tr(
                        'The requested analysis extent is not overlaping '
                        'the exposure and the hazard.'))
                )
                return (
                    PREPARE_FAILED_INSUFFICIENT_OVERLAP_REQUESTED_EXTENT,
                    message)
            else:
                self._analysis_extent = hazard_exposure.intersection(
                    user_bounding_box)
        elif self.use_exposure_view_only:
            self._analysis_extent = exposure_extent
        else:
            self._analysis_extent = hazard_exposure
    else:
        # We monkey patch if we use selected features only.
        self.aggregation.use_selected_features_only = (
            self.use_selected_features_only)
        self.aggregation = create_valid_aggregation(self.aggregation)
        # Union all aggregation areas into a single geometry.
        list_geometry = []
        for area in self.aggregation.getFeatures():
            list_geometry.append(QgsGeometry(area.geometry()))
        geometry = QgsGeometry.unaryUnion(list_geometry)
        # Drop all rings after the first of each polygon (presumably the
        # interior rings, i.e. holes) so only outer boundaries remain.
        if geometry.isMultipart():
            multi_polygon = geometry.asMultiPolygon()
            for polygon in multi_polygon:
                for ring in polygon[1:]:
                    polygon.remove(ring)
            self._analysis_extent = QgsGeometry.fromMultiPolygonXY(
                multi_polygon)
        else:
            polygon = geometry.asPolygon()
            for ring in polygon[1:]:
                polygon.remove(ring)
            self._analysis_extent = QgsGeometry.fromPolygonXY(polygon)
        is_empty = self._analysis_extent.isEmpty()
        is_invalid = not self._analysis_extent.isGeosValid()
        if is_empty or is_invalid:
            message = generate_input_error_message(
                tr('There is a problem with the aggregation layer.'),
                m.Paragraph(tr(
                    'The aggregation layer seems to have a problem. '
                    'Some features might be invalid. You should check the '
                    'validity of this layer or use a selection within '
                    'this layer.'))
            )
            return PREPARE_FAILED_BAD_LAYER, message
    return PREPARE_SUCCESS, None
def cee_map_name(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
cee_map = ET.SubElement(config, "cee-map", xmlns="urn:brocade.com:mgmt:brocade-cee-map")
name = ET.SubElement(cee_map, "name")
name.text = kwargs.pop('name')
callback = kwargs.pop('callback', self._callback)
return callback(config) | Auto Generated Code | Below is the instruction that describes the task:
### Input:
Auto Generated Code
### Response:
def cee_map_name(self, **kwargs):
    """Build the NETCONF config payload naming a CEE map and pass it to
    the callback.

    ``kwargs`` must contain ``name``; an optional ``callback`` overrides
    ``self._callback``.
    """
    config = ET.Element("config")
    cee_map = ET.SubElement(
        config, "cee-map", xmlns="urn:brocade.com:mgmt:brocade-cee-map")
    ET.SubElement(cee_map, "name").text = kwargs.pop('name')
    callback = kwargs.pop('callback', self._callback)
    return callback(config)
def prune_non_existent_outputs(compound_match_query):
"""Remove non-existent outputs from each MatchQuery in the given CompoundMatchQuery.
Each of the 2^n MatchQuery objects (except one) has been pruned to exclude some Traverse blocks,
For each of these, remove the outputs (that have been implicitly pruned away) from each
corresponding ConstructResult block.
Args:
compound_match_query: CompoundMatchQuery object containing 2^n pruned MatchQuery objects
(see convert_optional_traversals_to_compound_match_query)
Returns:
CompoundMatchQuery with pruned ConstructResult blocks for each of the 2^n MatchQuery objects
"""
if len(compound_match_query.match_queries) == 1:
return compound_match_query
elif len(compound_match_query.match_queries) == 0:
raise AssertionError(u'Received CompoundMatchQuery with '
u'an empty list of MatchQuery objects.')
else:
match_queries = []
for match_query in compound_match_query.match_queries:
match_traversals = match_query.match_traversals
output_block = match_query.output_block
present_locations_tuple = _get_present_locations(match_traversals)
present_locations, present_non_optional_locations = present_locations_tuple
new_output_fields = {}
for output_name, expression in six.iteritems(output_block.fields):
if isinstance(expression, OutputContextField):
# An OutputContextField as an output Expression indicates that we are not
# within an @optional scope. Therefore, the location this output uses must
# be in present_locations, and the output is never pruned.
location_name, _ = expression.location.get_location_name()
if location_name not in present_locations:
raise AssertionError(u'Non-optional output location {} was not found in '
u'present_locations: {}'
.format(expression.location, present_locations))
new_output_fields[output_name] = expression
elif isinstance(expression, FoldedContextField):
# A FoldedContextField as an output Expression indicates that we are not
# within an @optional scope. Therefore, the location this output uses must
# be in present_locations, and the output is never pruned.
base_location = expression.fold_scope_location.base_location
location_name, _ = base_location.get_location_name()
if location_name not in present_locations:
raise AssertionError(u'Folded output location {} was found in '
u'present_locations: {}'
.format(base_location, present_locations))
new_output_fields[output_name] = expression
elif isinstance(expression, TernaryConditional):
# A TernaryConditional indicates that this output is within some optional scope.
# This may be pruned away based on the contents of present_locations.
location_name, _ = expression.if_true.location.get_location_name()
if location_name in present_locations:
if location_name in present_non_optional_locations:
new_output_fields[output_name] = expression.if_true
else:
new_output_fields[output_name] = expression
else:
raise AssertionError(u'Invalid expression of type {} in output block: '
u'{}'.format(type(expression).__name__, output_block))
match_queries.append(
MatchQuery(
match_traversals=match_traversals,
folds=match_query.folds,
output_block=ConstructResult(new_output_fields),
where_block=match_query.where_block,
)
)
return CompoundMatchQuery(match_queries=match_queries) | Remove non-existent outputs from each MatchQuery in the given CompoundMatchQuery.
Each of the 2^n MatchQuery objects (except one) has been pruned to exclude some Traverse blocks,
For each of these, remove the outputs (that have been implicitly pruned away) from each
corresponding ConstructResult block.
Args:
compound_match_query: CompoundMatchQuery object containing 2^n pruned MatchQuery objects
(see convert_optional_traversals_to_compound_match_query)
Returns:
CompoundMatchQuery with pruned ConstructResult blocks for each of the 2^n MatchQuery objects | Below is the instruction that describes the task:
### Input:
Remove non-existent outputs from each MatchQuery in the given CompoundMatchQuery.
Each of the 2^n MatchQuery objects (except one) has been pruned to exclude some Traverse blocks,
For each of these, remove the outputs (that have been implicitly pruned away) from each
corresponding ConstructResult block.
Args:
compound_match_query: CompoundMatchQuery object containing 2^n pruned MatchQuery objects
(see convert_optional_traversals_to_compound_match_query)
Returns:
CompoundMatchQuery with pruned ConstructResult blocks for each of the 2^n MatchQuery objects
### Response:
def prune_non_existent_outputs(compound_match_query):
"""Remove non-existent outputs from each MatchQuery in the given CompoundMatchQuery.
Each of the 2^n MatchQuery objects (except one) has been pruned to exclude some Traverse blocks,
For each of these, remove the outputs (that have been implicitly pruned away) from each
corresponding ConstructResult block.
Args:
compound_match_query: CompoundMatchQuery object containing 2^n pruned MatchQuery objects
(see convert_optional_traversals_to_compound_match_query)
Returns:
CompoundMatchQuery with pruned ConstructResult blocks for each of the 2^n MatchQuery objects
"""
if len(compound_match_query.match_queries) == 1:
return compound_match_query
elif len(compound_match_query.match_queries) == 0:
raise AssertionError(u'Received CompoundMatchQuery with '
u'an empty list of MatchQuery objects.')
else:
match_queries = []
for match_query in compound_match_query.match_queries:
match_traversals = match_query.match_traversals
output_block = match_query.output_block
present_locations_tuple = _get_present_locations(match_traversals)
present_locations, present_non_optional_locations = present_locations_tuple
new_output_fields = {}
for output_name, expression in six.iteritems(output_block.fields):
if isinstance(expression, OutputContextField):
# An OutputContextField as an output Expression indicates that we are not
# within an @optional scope. Therefore, the location this output uses must
# be in present_locations, and the output is never pruned.
location_name, _ = expression.location.get_location_name()
if location_name not in present_locations:
raise AssertionError(u'Non-optional output location {} was not found in '
u'present_locations: {}'
.format(expression.location, present_locations))
new_output_fields[output_name] = expression
elif isinstance(expression, FoldedContextField):
# A FoldedContextField as an output Expression indicates that we are not
# within an @optional scope. Therefore, the location this output uses must
# be in present_locations, and the output is never pruned.
base_location = expression.fold_scope_location.base_location
location_name, _ = base_location.get_location_name()
if location_name not in present_locations:
raise AssertionError(u'Folded output location {} was found in '
u'present_locations: {}'
.format(base_location, present_locations))
new_output_fields[output_name] = expression
elif isinstance(expression, TernaryConditional):
# A TernaryConditional indicates that this output is within some optional scope.
# This may be pruned away based on the contents of present_locations.
location_name, _ = expression.if_true.location.get_location_name()
if location_name in present_locations:
if location_name in present_non_optional_locations:
new_output_fields[output_name] = expression.if_true
else:
new_output_fields[output_name] = expression
else:
raise AssertionError(u'Invalid expression of type {} in output block: '
u'{}'.format(type(expression).__name__, output_block))
match_queries.append(
MatchQuery(
match_traversals=match_traversals,
folds=match_query.folds,
output_block=ConstructResult(new_output_fields),
where_block=match_query.where_block,
)
)
return CompoundMatchQuery(match_queries=match_queries) |
def _get_fields(self, table_name, **kwargs):
"""return all the fields for the given schema"""
ret = {}
query_str = []
query_args = ['f', table_name]
# I had to brush up on my join knowledge while writing this query
# https://en.wikipedia.org/wiki/Join_(SQL)
#
# other helpful links
# https://wiki.postgresql.org/wiki/Retrieve_primary_key_columns
# https://www.postgresql.org/docs/9.4/static/catalog-pg-attribute.html
# https://www.postgresql.org/docs/9.3/static/catalog-pg-type.html
#
# another approach
# http://dba.stackexchange.com/questions/22362/how-do-i-list-all-columns-for-a-specified-table
# http://gis.stackexchange.com/questions/94049/how-to-get-the-data-type-of-each-column-from-a-postgis-table
query_str.append('SELECT')
query_str.append(', '.join([
'a.attnum',
'a.attname',
'a.attnotnull',
't.typname',
'i.indisprimary',
#'s.conname',
#'pg_get_constraintdef(s.oid, true) as condef',
'c.relname AS confrelname',
]))
query_str.append('FROM')
query_str.append(' pg_attribute a')
query_str.append('JOIN pg_type t ON a.atttypid = t.oid')
query_str.append('LEFT JOIN pg_index i ON a.attrelid = i.indrelid')
query_str.append(' AND a.attnum = any(i.indkey)')
query_str.append('LEFT JOIN pg_constraint s ON a.attrelid = s.conrelid')
query_str.append(' AND s.contype = {} AND a.attnum = any(s.conkey)'.format(self.val_placeholder))
query_str.append('LEFT JOIN pg_class c ON s.confrelid = c.oid')
query_str.append('WHERE')
query_str.append(' a.attrelid = {}::regclass'.format(self.val_placeholder))
query_str.append(' AND a.attisdropped = False')
query_str.append(' AND a.attnum > 0')
query_str.append('ORDER BY a.attnum ASC')
query_str = os.linesep.join(query_str)
fields = self.query(query_str, *query_args, **kwargs)
pg_types = {
"float8": float,
"timestamp": datetime.datetime,
"int2": int,
"int4": int,
"int8": long,
"numeric": decimal.Decimal,
"text": str,
"bpchar": str,
"varchar": str,
"bool": bool,
"date": datetime.date,
"blob": bytearray,
}
# the rows we can set: field_type, name, field_required, min_size, max_size,
# size, unique, pk, <foreign key info>
# These keys will roughly correspond with schema.Field
for row in fields:
field = {
"name": row["attname"],
"field_type": pg_types[row["typname"]],
"field_required": row["attnotnull"],
"pk": bool(row["indisprimary"]),
}
if row["confrelname"]:
# TODO -- I can't decide which name I like
field["schema_table_name"] = row["confrelname"]
field["ref_table_name"] = row["confrelname"]
ret[field["name"]] = field
return ret | return all the fields for the given schema | Below is the the instruction that describes the task:
### Input:
return all the fields for the given schema
### Response:
def _get_fields(self, table_name, **kwargs):
"""return all the fields for the given schema"""
ret = {}
query_str = []
query_args = ['f', table_name]
# I had to brush up on my join knowledge while writing this query
# https://en.wikipedia.org/wiki/Join_(SQL)
#
# other helpful links
# https://wiki.postgresql.org/wiki/Retrieve_primary_key_columns
# https://www.postgresql.org/docs/9.4/static/catalog-pg-attribute.html
# https://www.postgresql.org/docs/9.3/static/catalog-pg-type.html
#
# another approach
# http://dba.stackexchange.com/questions/22362/how-do-i-list-all-columns-for-a-specified-table
# http://gis.stackexchange.com/questions/94049/how-to-get-the-data-type-of-each-column-from-a-postgis-table
query_str.append('SELECT')
query_str.append(', '.join([
'a.attnum',
'a.attname',
'a.attnotnull',
't.typname',
'i.indisprimary',
#'s.conname',
#'pg_get_constraintdef(s.oid, true) as condef',
'c.relname AS confrelname',
]))
query_str.append('FROM')
query_str.append(' pg_attribute a')
query_str.append('JOIN pg_type t ON a.atttypid = t.oid')
query_str.append('LEFT JOIN pg_index i ON a.attrelid = i.indrelid')
query_str.append(' AND a.attnum = any(i.indkey)')
query_str.append('LEFT JOIN pg_constraint s ON a.attrelid = s.conrelid')
query_str.append(' AND s.contype = {} AND a.attnum = any(s.conkey)'.format(self.val_placeholder))
query_str.append('LEFT JOIN pg_class c ON s.confrelid = c.oid')
query_str.append('WHERE')
query_str.append(' a.attrelid = {}::regclass'.format(self.val_placeholder))
query_str.append(' AND a.attisdropped = False')
query_str.append(' AND a.attnum > 0')
query_str.append('ORDER BY a.attnum ASC')
query_str = os.linesep.join(query_str)
fields = self.query(query_str, *query_args, **kwargs)
pg_types = {
"float8": float,
"timestamp": datetime.datetime,
"int2": int,
"int4": int,
"int8": long,
"numeric": decimal.Decimal,
"text": str,
"bpchar": str,
"varchar": str,
"bool": bool,
"date": datetime.date,
"blob": bytearray,
}
# the rows we can set: field_type, name, field_required, min_size, max_size,
# size, unique, pk, <foreign key info>
# These keys will roughly correspond with schema.Field
for row in fields:
field = {
"name": row["attname"],
"field_type": pg_types[row["typname"]],
"field_required": row["attnotnull"],
"pk": bool(row["indisprimary"]),
}
if row["confrelname"]:
# TODO -- I can't decide which name I like
field["schema_table_name"] = row["confrelname"]
field["ref_table_name"] = row["confrelname"]
ret[field["name"]] = field
return ret |
def compute_region_border(start, end):
"""
given the buffer start and end indices of a range, compute the border edges
that should be drawn to enclose the range.
this function currently assumes 0x10 length rows.
the result is a dictionary from buffer index to Cell instance.
the Cell instance has boolean properties "top", "bottom", "left", and "right"
that describe if a border should be drawn on that side of the cell view.
:rtype: Mapping[int, CellT]
"""
cells = defaultdict(Cell)
start_row = row_number(start)
end_row = row_number(end)
if end % 0x10 == 0:
end_row -= 1
## topmost cells
if start_row == end_row:
for i in range(start, end):
cells[i].top = True
else:
for i in range(start, row_end_index(start) + 1):
cells[i].top = True
# cells on second row, top left
if start_row != end_row:
next_row_start = row_start_index(start) + 0x10
for i in range(next_row_start, next_row_start + column_number(start)):
cells[i].top = True
## bottommost cells
if start_row == end_row:
for i in range(start, end):
cells[i].bottom = True
else:
for i in range(row_start_index(end), end):
cells[i].bottom = True
# cells on second-to-last row, bottom right
if start_row != end_row:
prev_row_end = row_end_index(end) - 0x10
for i in range(prev_row_end - (0x10 - column_number(end) - 1), prev_row_end + 1):
cells[i].bottom = True
## leftmost cells
if start_row == end_row:
cells[start].left = True
else:
second_row_start = row_start_index(start) + 0x10
for i in range(second_row_start, row_start_index(end) + 0x10, 0x10):
cells[i].left = True
# cells in first row, top left
if start_row != end_row:
cells[start].left = True
## rightmost cells
if start_row == end_row:
cells[end - 1].right = True
else:
penultimate_row_end = row_end_index(end) - 0x10
for i in range(row_end_index(start), penultimate_row_end + 0x10, 0x10):
cells[i].right = True
# cells in last row, bottom right
if start_row != end_row:
cells[end - 1].right = True
# convert back to standard dict
# trick from: http://stackoverflow.com/a/20428703/87207
cells.default_factory = None
return cells | given the buffer start and end indices of a range, compute the border edges
that should be drawn to enclose the range.
this function currently assumes 0x10 length rows.
the result is a dictionary from buffer index to Cell instance.
the Cell instance has boolean properties "top", "bottom", "left", and "right"
that describe if a border should be drawn on that side of the cell view.
:rtype: Mapping[int, CellT] | Below is the the instruction that describes the task:
### Input:
given the buffer start and end indices of a range, compute the border edges
that should be drawn to enclose the range.
this function currently assumes 0x10 length rows.
the result is a dictionary from buffer index to Cell instance.
the Cell instance has boolean properties "top", "bottom", "left", and "right"
that describe if a border should be drawn on that side of the cell view.
:rtype: Mapping[int, CellT]
### Response:
def compute_region_border(start, end):
"""
given the buffer start and end indices of a range, compute the border edges
that should be drawn to enclose the range.
this function currently assumes 0x10 length rows.
the result is a dictionary from buffer index to Cell instance.
the Cell instance has boolean properties "top", "bottom", "left", and "right"
that describe if a border should be drawn on that side of the cell view.
:rtype: Mapping[int, CellT]
"""
cells = defaultdict(Cell)
start_row = row_number(start)
end_row = row_number(end)
if end % 0x10 == 0:
end_row -= 1
## topmost cells
if start_row == end_row:
for i in range(start, end):
cells[i].top = True
else:
for i in range(start, row_end_index(start) + 1):
cells[i].top = True
# cells on second row, top left
if start_row != end_row:
next_row_start = row_start_index(start) + 0x10
for i in range(next_row_start, next_row_start + column_number(start)):
cells[i].top = True
## bottommost cells
if start_row == end_row:
for i in range(start, end):
cells[i].bottom = True
else:
for i in range(row_start_index(end), end):
cells[i].bottom = True
# cells on second-to-last row, bottom right
if start_row != end_row:
prev_row_end = row_end_index(end) - 0x10
for i in range(prev_row_end - (0x10 - column_number(end) - 1), prev_row_end + 1):
cells[i].bottom = True
## leftmost cells
if start_row == end_row:
cells[start].left = True
else:
second_row_start = row_start_index(start) + 0x10
for i in range(second_row_start, row_start_index(end) + 0x10, 0x10):
cells[i].left = True
# cells in first row, top left
if start_row != end_row:
cells[start].left = True
## rightmost cells
if start_row == end_row:
cells[end - 1].right = True
else:
penultimate_row_end = row_end_index(end) - 0x10
for i in range(row_end_index(start), penultimate_row_end + 0x10, 0x10):
cells[i].right = True
# cells in last row, bottom right
if start_row != end_row:
cells[end - 1].right = True
# convert back to standard dict
# trick from: http://stackoverflow.com/a/20428703/87207
cells.default_factory = None
return cells |
def size(self, units="MiB"):
"""
Returns the volume group size in the given units. Default units are MiB.
*Args:*
* units (str): Unit label ('MiB', 'GiB', etc...). Default is MiB.
"""
self.open()
size = lvm_vg_get_size(self.handle)
self.close()
return size_convert(size, units) | Returns the volume group size in the given units. Default units are MiB.
*Args:*
* units (str): Unit label ('MiB', 'GiB', etc...). Default is MiB. | Below is the the instruction that describes the task:
### Input:
Returns the volume group size in the given units. Default units are MiB.
*Args:*
* units (str): Unit label ('MiB', 'GiB', etc...). Default is MiB.
### Response:
def size(self, units="MiB"):
"""
Returns the volume group size in the given units. Default units are MiB.
*Args:*
* units (str): Unit label ('MiB', 'GiB', etc...). Default is MiB.
"""
self.open()
size = lvm_vg_get_size(self.handle)
self.close()
return size_convert(size, units) |
def dispatch_queue(loader):
# type: (DataLoader) -> None
"""
Given the current state of a Loader instance, perform a batch load
from its current queue.
"""
# Take the current loader queue, replacing it with an empty queue.
queue = loader._queue
loader._queue = []
# If a maxBatchSize was provided and the queue is longer, then segment the
# queue into multiple batches, otherwise treat the queue as a single batch.
max_batch_size = loader.max_batch_size
if max_batch_size and max_batch_size < len(queue):
chunks = get_chunks(queue, max_batch_size)
for chunk in chunks:
dispatch_queue_batch(loader, chunk)
else:
dispatch_queue_batch(loader, queue) | Given the current state of a Loader instance, perform a batch load
from its current queue. | Below is the the instruction that describes the task:
### Input:
Given the current state of a Loader instance, perform a batch load
from its current queue.
### Response:
def dispatch_queue(loader):
# type: (DataLoader) -> None
"""
Given the current state of a Loader instance, perform a batch load
from its current queue.
"""
# Take the current loader queue, replacing it with an empty queue.
queue = loader._queue
loader._queue = []
# If a maxBatchSize was provided and the queue is longer, then segment the
# queue into multiple batches, otherwise treat the queue as a single batch.
max_batch_size = loader.max_batch_size
if max_batch_size and max_batch_size < len(queue):
chunks = get_chunks(queue, max_batch_size)
for chunk in chunks:
dispatch_queue_batch(loader, chunk)
else:
dispatch_queue_batch(loader, queue) |
def _generate_base_lsid(self):
"""
Generates and returns a base LSID
:return:
"""
domain = self._generate_domain()
namespace = self._generate_namespace()
# Return the base LSID
return "urn:lsid:" + domain + ":" + namespace | Generates and returns a base LSID
:return: | Below is the the instruction that describes the task:
### Input:
Generates and returns a base LSID
:return:
### Response:
def _generate_base_lsid(self):
"""
Generates and returns a base LSID
:return:
"""
domain = self._generate_domain()
namespace = self._generate_namespace()
# Return the base LSID
return "urn:lsid:" + domain + ":" + namespace |
def compute_exit_code(config, exception=None):
"""Compute an exit code for mutmut mutation testing
The following exit codes are available for mutmut:
* 0 if all mutants were killed (OK_KILLED)
* 1 if a fatal error occurred
* 2 if one or more mutants survived (BAD_SURVIVED)
* 4 if one or more mutants timed out (BAD_TIMEOUT)
* 8 if one or more mutants caused tests to take twice as long (OK_SUSPICIOUS)
Exit codes 1 to 8 will be bit-ORed so that it is possible to know what
different mutant statuses occurred during mutation testing.
:param exception:
:type exception: Exception
:param config:
:type config: Config
:return: integer noting the exit code of the mutation tests.
:rtype: int
"""
code = 0
if exception is not None:
code = code | 1
if config.surviving_mutants > 0:
code = code | 2
if config.surviving_mutants_timeout > 0:
code = code | 4
if config.suspicious_mutants > 0:
code = code | 8
return code | Compute an exit code for mutmut mutation testing
The following exit codes are available for mutmut:
* 0 if all mutants were killed (OK_KILLED)
* 1 if a fatal error occurred
* 2 if one or more mutants survived (BAD_SURVIVED)
* 4 if one or more mutants timed out (BAD_TIMEOUT)
* 8 if one or more mutants caused tests to take twice as long (OK_SUSPICIOUS)
Exit codes 1 to 8 will be bit-ORed so that it is possible to know what
different mutant statuses occurred during mutation testing.
:param exception:
:type exception: Exception
:param config:
:type config: Config
:return: integer noting the exit code of the mutation tests.
:rtype: int | Below is the the instruction that describes the task:
### Input:
Compute an exit code for mutmut mutation testing
The following exit codes are available for mutmut:
* 0 if all mutants were killed (OK_KILLED)
* 1 if a fatal error occurred
* 2 if one or more mutants survived (BAD_SURVIVED)
* 4 if one or more mutants timed out (BAD_TIMEOUT)
* 8 if one or more mutants caused tests to take twice as long (OK_SUSPICIOUS)
Exit codes 1 to 8 will be bit-ORed so that it is possible to know what
different mutant statuses occurred during mutation testing.
:param exception:
:type exception: Exception
:param config:
:type config: Config
:return: integer noting the exit code of the mutation tests.
:rtype: int
### Response:
def compute_exit_code(config, exception=None):
"""Compute an exit code for mutmut mutation testing
The following exit codes are available for mutmut:
* 0 if all mutants were killed (OK_KILLED)
* 1 if a fatal error occurred
* 2 if one or more mutants survived (BAD_SURVIVED)
* 4 if one or more mutants timed out (BAD_TIMEOUT)
* 8 if one or more mutants caused tests to take twice as long (OK_SUSPICIOUS)
Exit codes 1 to 8 will be bit-ORed so that it is possible to know what
different mutant statuses occurred during mutation testing.
:param exception:
:type exception: Exception
:param config:
:type config: Config
:return: integer noting the exit code of the mutation tests.
:rtype: int
"""
code = 0
if exception is not None:
code = code | 1
if config.surviving_mutants > 0:
code = code | 2
if config.surviving_mutants_timeout > 0:
code = code | 4
if config.suspicious_mutants > 0:
code = code | 8
return code |
def delete(self):
"""
::
DELETE /:login/machines/:id/snapshots/:name
Deletes the snapshot from the machine.
"""
_, r = self.machine.datacenter.request('DELETE', self.path)
r.raise_for_status() | ::
DELETE /:login/machines/:id/snapshots/:name
Deletes the snapshot from the machine. | Below is the the instruction that describes the task:
### Input:
::
DELETE /:login/machines/:id/snapshots/:name
Deletes the snapshot from the machine.
### Response:
def delete(self):
"""
::
DELETE /:login/machines/:id/snapshots/:name
Deletes the snapshot from the machine.
"""
_, r = self.machine.datacenter.request('DELETE', self.path)
r.raise_for_status() |
def simxClearIntegerSignal(clientID, signalName, operationMode):
'''
Please have a look at the function description/documentation in the V-REP user manual
'''
if (sys.version_info[0] == 3) and (type(signalName) is str):
signalName=signalName.encode('utf-8')
return c_ClearIntegerSignal(clientID, signalName, operationMode) | Please have a look at the function description/documentation in the V-REP user manual | Below is the the instruction that describes the task:
### Input:
Please have a look at the function description/documentation in the V-REP user manual
### Response:
def simxClearIntegerSignal(clientID, signalName, operationMode):
'''
Please have a look at the function description/documentation in the V-REP user manual
'''
if (sys.version_info[0] == 3) and (type(signalName) is str):
signalName=signalName.encode('utf-8')
return c_ClearIntegerSignal(clientID, signalName, operationMode) |
def policy(self, observations):
""" Calculate only action head for given state """
input_data = self.input_block(observations)
policy_base_output = self.policy_backbone(input_data)
policy_params = self.action_head(policy_base_output)
return policy_params | Calculate only action head for given state | Below is the the instruction that describes the task:
### Input:
Calculate only action head for given state
### Response:
def policy(self, observations):
""" Calculate only action head for given state """
input_data = self.input_block(observations)
policy_base_output = self.policy_backbone(input_data)
policy_params = self.action_head(policy_base_output)
return policy_params |
def create_key_file(service, key):
"""Create a file containing key."""
keyfile = _keyfile_path(service)
if os.path.exists(keyfile):
log('Keyfile exists at %s.' % keyfile, level=WARNING)
return
with open(keyfile, 'w') as fd:
fd.write(key)
log('Created new keyfile at %s.' % keyfile, level=INFO) | Create a file containing key. | Below is the the instruction that describes the task:
### Input:
Create a file containing key.
### Response:
def create_key_file(service, key):
"""Create a file containing key."""
keyfile = _keyfile_path(service)
if os.path.exists(keyfile):
log('Keyfile exists at %s.' % keyfile, level=WARNING)
return
with open(keyfile, 'w') as fd:
fd.write(key)
log('Created new keyfile at %s.' % keyfile, level=INFO) |
def load(self, model):
"""
Load all information from an experimental configuration file.
Parameters
----------
model : cobra.Model
The metabolic model under investigation.
"""
self.load_medium(model)
self.load_essentiality(model)
self.load_growth(model)
# self.load_experiment(config.config.get("growth"), model)
return self | Load all information from an experimental configuration file.
Parameters
----------
model : cobra.Model
The metabolic model under investigation. | Below is the the instruction that describes the task:
### Input:
Load all information from an experimental configuration file.
Parameters
----------
model : cobra.Model
The metabolic model under investigation.
### Response:
def load(self, model):
"""
Load all information from an experimental configuration file.
Parameters
----------
model : cobra.Model
The metabolic model under investigation.
"""
self.load_medium(model)
self.load_essentiality(model)
self.load_growth(model)
# self.load_experiment(config.config.get("growth"), model)
return self |
def xray_driver_removed_handler(self, unused_channel, data):
"""Handle a notification that a driver has been removed.
Args:
unused_channel: The message channel.
data: The message data.
"""
gcs_entries = ray.gcs_utils.GcsTableEntry.GetRootAsGcsTableEntry(
data, 0)
driver_data = gcs_entries.Entries(0)
message = ray.gcs_utils.DriverTableData.GetRootAsDriverTableData(
driver_data, 0)
driver_id = message.DriverId()
logger.info("Monitor: "
"XRay Driver {} has been removed.".format(
binary_to_hex(driver_id)))
self._xray_clean_up_entries_for_driver(driver_id) | Handle a notification that a driver has been removed.
Args:
unused_channel: The message channel.
data: The message data. | Below is the the instruction that describes the task:
### Input:
Handle a notification that a driver has been removed.
Args:
unused_channel: The message channel.
data: The message data.
### Response:
def xray_driver_removed_handler(self, unused_channel, data):
"""Handle a notification that a driver has been removed.
Args:
unused_channel: The message channel.
data: The message data.
"""
gcs_entries = ray.gcs_utils.GcsTableEntry.GetRootAsGcsTableEntry(
data, 0)
driver_data = gcs_entries.Entries(0)
message = ray.gcs_utils.DriverTableData.GetRootAsDriverTableData(
driver_data, 0)
driver_id = message.DriverId()
logger.info("Monitor: "
"XRay Driver {} has been removed.".format(
binary_to_hex(driver_id)))
self._xray_clean_up_entries_for_driver(driver_id) |
def svg_to_clipboard(string):
""" Copy a SVG document to the clipboard.
Parameters:
-----------
string : basestring
A Python string containing a SVG document.
"""
if isinstance(string, unicode):
string = string.encode('utf-8')
mime_data = QtCore.QMimeData()
mime_data.setData('image/svg+xml', string)
QtGui.QApplication.clipboard().setMimeData(mime_data) | Copy a SVG document to the clipboard.
Parameters:
-----------
string : basestring
A Python string containing a SVG document. | Below is the the instruction that describes the task:
### Input:
Copy a SVG document to the clipboard.
Parameters:
-----------
string : basestring
A Python string containing a SVG document.
### Response:
def svg_to_clipboard(string):
""" Copy a SVG document to the clipboard.
Parameters:
-----------
string : basestring
A Python string containing a SVG document.
"""
if isinstance(string, unicode):
string = string.encode('utf-8')
mime_data = QtCore.QMimeData()
mime_data.setData('image/svg+xml', string)
QtGui.QApplication.clipboard().setMimeData(mime_data) |
def search_for_subject(self, subject, timeout=None, content_type=None):
"""
Get content of emails, sent to a specific email address.
@Params
email - the recipient email address to search for
timeout - seconds to try beore timing out
content_type - type of email string to return
@Returns
Content of the matched email in the given content type
"""
return self.search(timeout=timeout,
content_type=content_type, SUBJECT=subject) | Get content of emails, sent to a specific email address.
@Params
email - the recipient email address to search for
timeout - seconds to try beore timing out
content_type - type of email string to return
@Returns
Content of the matched email in the given content type | Below is the the instruction that describes the task:
### Input:
Get content of emails, sent to a specific email address.
@Params
email - the recipient email address to search for
timeout - seconds to try beore timing out
content_type - type of email string to return
@Returns
Content of the matched email in the given content type
### Response:
def search_for_subject(self, subject, timeout=None, content_type=None):
"""
Get content of emails, sent to a specific email address.
@Params
email - the recipient email address to search for
timeout - seconds to try beore timing out
content_type - type of email string to return
@Returns
Content of the matched email in the given content type
"""
return self.search(timeout=timeout,
content_type=content_type, SUBJECT=subject) |
def _set_default_refdata():
"""Default refdata set on import."""
global GRAPHTABLE, COMPTABLE, THERMTABLE, PRIMARY_AREA
# Component tables are defined here.
try:
GRAPHTABLE = _refTable(os.path.join('mtab','*_tmg.fits'))
COMPTABLE = _refTable(os.path.join('mtab','*_tmc.fits'))
except IOError as e:
GRAPHTABLE = None
COMPTABLE = None
warnings.warn('No graph or component tables found; '
'functionality will be SEVERELY crippled. ' + str(e))
try:
THERMTABLE = _refTable(os.path.join('mtab','*_tmt.fits'))
except IOError as e:
THERMTABLE = None
warnings.warn('No thermal tables found, '
'no thermal calculations can be performed. ' + str(e))
PRIMARY_AREA = 45238.93416 # cm^2 - default to HST mirror
set_default_waveset() | Default refdata set on import. | Below is the the instruction that describes the task:
### Input:
Default refdata set on import.
### Response:
def _set_default_refdata():
"""Default refdata set on import."""
global GRAPHTABLE, COMPTABLE, THERMTABLE, PRIMARY_AREA
# Component tables are defined here.
try:
GRAPHTABLE = _refTable(os.path.join('mtab','*_tmg.fits'))
COMPTABLE = _refTable(os.path.join('mtab','*_tmc.fits'))
except IOError as e:
GRAPHTABLE = None
COMPTABLE = None
warnings.warn('No graph or component tables found; '
'functionality will be SEVERELY crippled. ' + str(e))
try:
THERMTABLE = _refTable(os.path.join('mtab','*_tmt.fits'))
except IOError as e:
THERMTABLE = None
warnings.warn('No thermal tables found, '
'no thermal calculations can be performed. ' + str(e))
PRIMARY_AREA = 45238.93416 # cm^2 - default to HST mirror
set_default_waveset() |
def calc_effective_diffusivity(self, inlets=None, outlets=None,
domain_area=None, domain_length=None):
r"""
This calculates the effective diffusivity in this linear transport
algorithm.
Parameters
----------
inlets : array_like
The pores where the inlet composition boundary conditions were
applied. If not given an attempt is made to infer them from the
algorithm.
outlets : array_like
The pores where the outlet composition boundary conditions were
applied. If not given an attempt is made to infer them from the
algorithm.
domain_area : scalar, optional
The area of the inlet (and outlet) boundary faces. If not given
then an attempt is made to estimate it, but it is usually
underestimated.
domain_length : scalar, optional
The length of the domain between the inlet and outlet boundary
faces. If not given then an attempt is made to estimate it, but it
is usually underestimated.
Notes
-----
The area and length of the domain are found using the bounding box
around the inlet and outlet pores which do not necessarily lie on the
edge of the domain, resulting in underestimation of sizes.
"""
return self._calc_eff_prop(inlets=inlets, outlets=outlets,
domain_area=domain_area,
domain_length=domain_length) | r"""
This calculates the effective diffusivity in this linear transport
algorithm.
Parameters
----------
inlets : array_like
The pores where the inlet composition boundary conditions were
applied. If not given an attempt is made to infer them from the
algorithm.
outlets : array_like
The pores where the outlet composition boundary conditions were
applied. If not given an attempt is made to infer them from the
algorithm.
domain_area : scalar, optional
The area of the inlet (and outlet) boundary faces. If not given
then an attempt is made to estimate it, but it is usually
underestimated.
domain_length : scalar, optional
The length of the domain between the inlet and outlet boundary
faces. If not given then an attempt is made to estimate it, but it
is usually underestimated.
Notes
-----
The area and length of the domain are found using the bounding box
around the inlet and outlet pores which do not necessarily lie on the
edge of the domain, resulting in underestimation of sizes. | Below is the the instruction that describes the task:
### Input:
r"""
This calculates the effective diffusivity in this linear transport
algorithm.
Parameters
----------
inlets : array_like
The pores where the inlet composition boundary conditions were
applied. If not given an attempt is made to infer them from the
algorithm.
outlets : array_like
The pores where the outlet composition boundary conditions were
applied. If not given an attempt is made to infer them from the
algorithm.
domain_area : scalar, optional
The area of the inlet (and outlet) boundary faces. If not given
then an attempt is made to estimate it, but it is usually
underestimated.
domain_length : scalar, optional
The length of the domain between the inlet and outlet boundary
faces. If not given then an attempt is made to estimate it, but it
is usually underestimated.
Notes
-----
The area and length of the domain are found using the bounding box
around the inlet and outlet pores which do not necessarily lie on the
edge of the domain, resulting in underestimation of sizes.
### Response:
def calc_effective_diffusivity(self, inlets=None, outlets=None,
                               domain_area=None, domain_length=None):
    r"""
    This calculates the effective diffusivity in this linear transport
    algorithm.
    Parameters
    ----------
    inlets : array_like
        The pores where the inlet composition boundary conditions were
        applied. If not given an attempt is made to infer them from the
        algorithm.
    outlets : array_like
        The pores where the outlet composition boundary conditions were
        applied. If not given an attempt is made to infer them from the
        algorithm.
    domain_area : scalar, optional
        The area of the inlet (and outlet) boundary faces. If not given
        then an attempt is made to estimate it, but it is usually
        underestimated.
    domain_length : scalar, optional
        The length of the domain between the inlet and outlet boundary
        faces. If not given then an attempt is made to estimate it, but it
        is usually underestimated.
    Notes
    -----
    The area and length of the domain are found using the bounding box
    around the inlet and outlet pores which do not necessarily lie on the
    edge of the domain, resulting in underestimation of sizes.
    """
    # Thin wrapper: the shared effective-property helper does the actual
    # geometry estimation and transport calculation.
    return self._calc_eff_prop(inlets=inlets, outlets=outlets,
                               domain_area=domain_area,
                               domain_length=domain_length) |
def showMessage(self, level, message):
    """
    Logs the inputed message for the given level. This will update
    both the feedback label and the details widget.
    :param level | <int>
    message | <str>
    """
    # Show the text in the transient feedback label...
    self.uiFeedbackLBL.setText(message)
    # ...and record it, with its severity level, in the persistent log widget.
    self.uiLoggerEDIT.log(level, message) | Logs the inputed message for the given level. This will update
both the feedback label and the details widget.
:param level | <int>
message | <str> | Below is the instruction that describes the task:
### Input:
Logs the inputed message for the given level. This will update
both the feedback label and the details widget.
:param level | <int>
message | <str>
### Response:
def showMessage(self, level, message):
"""
Logs the inputed message for the given level. This will update
both the feedback label and the details widget.
:param level | <int>
message | <str>
"""
self.uiFeedbackLBL.setText(message)
self.uiLoggerEDIT.log(level, message) |
def cxx_loop(visit):
    """
    Decorator for loop node (For and While) to handle "else" branching.
    Decorated node will save flags for a goto statement used instead of usual
    break and add this flag at the end of the else statements.
    Examples
    --------
    >> for i in xrange(12):
    >> if i == 5:
    >> break
    >> else:
    >> ... some code ...
    Becomes
    >> for(type i : xrange(12))
    >> if(i==5)
    >> goto __no_breaking0;
    >> ... some code ...
    >> __no_breaking0;
    """
    def loop_visitor(self, node):
        """
        New decorate function.
        It push the breaking flag, run the visitor and add "else" statements.
        """
        # No "else" clause: a plain break suffices, so push a null handler
        # and translate the loop unchanged.
        if not node.orelse:
            with pushpop(self.break_handlers, None):
                res = visit(self, node)
            return res
        # id(node) keeps the goto label unique per loop, so nested loops
        # with "else" clauses cannot collide.
        break_handler = "__no_breaking{0}".format(id(node))
        with pushpop(self.break_handlers, break_handler):
            res = visit(self, node)
        # handle the body of the for loop
        orelse = [self.visit(stmt) for stmt in node.orelse]
        # The label is emitted after the orelse code: a translated "break"
        # jumps here, skipping the orelse statements.
        orelse_label = Label(break_handler)
        return Block([res] + orelse + [orelse_label])
    return loop_visitor | Decorator for loop node (For and While) to handle "else" branching.
Decorated node will save flags for a goto statement used instead of usual
break and add this flag at the end of the else statements.
Examples
--------
>> for i in xrange(12):
>> if i == 5:
>> break
>> else:
>> ... some code ...
Becomes
>> for(type i : xrange(12))
>> if(i==5)
>> goto __no_breaking0;
>> ... some code ...
>> __no_breaking0; | Below is the the instruction that describes the task:
### Input:
Decorator for loop node (For and While) to handle "else" branching.
Decorated node will save flags for a goto statement used instead of usual
break and add this flag at the end of the else statements.
Examples
--------
>> for i in xrange(12):
>> if i == 5:
>> break
>> else:
>> ... some code ...
Becomes
>> for(type i : xrange(12))
>> if(i==5)
>> goto __no_breaking0;
>> ... some code ...
>> __no_breaking0;
### Response:
def cxx_loop(visit):
"""
Decorator for loop node (For and While) to handle "else" branching.
Decorated node will save flags for a goto statement used instead of usual
break and add this flag at the end of the else statements.
Examples
--------
>> for i in xrange(12):
>> if i == 5:
>> break
>> else:
>> ... some code ...
Becomes
>> for(type i : xrange(12))
>> if(i==5)
>> goto __no_breaking0;
>> ... some code ...
>> __no_breaking0;
"""
def loop_visitor(self, node):
"""
New decorate function.
It push the breaking flag, run the visitor and add "else" statements.
"""
if not node.orelse:
with pushpop(self.break_handlers, None):
res = visit(self, node)
return res
break_handler = "__no_breaking{0}".format(id(node))
with pushpop(self.break_handlers, break_handler):
res = visit(self, node)
# handle the body of the for loop
orelse = [self.visit(stmt) for stmt in node.orelse]
orelse_label = Label(break_handler)
return Block([res] + orelse + [orelse_label])
return loop_visitor |
def _on_response_message(self, sequence, topic, message):
    """Process a response message received
    Args:
        sequence (int): The sequence number of the packet received
        topic (string): The topic this message was received on
        message (dict): The message itself
    """
    try:
        conn_key = self._find_connection(topic)
        context = self.conns.get_context(conn_key)
    except ArgumentError:
        self._logger.warn("Dropping message that does not correspond with a known connection, message=%s", message)
        return
    if 'client' in message and message['client'] != self.name:
        self._logger.debug("Dropping message that is for another client %s, we are %s", message['client'], self.name)
        # Bug fix: the log states this message is dropped, but control
        # previously fell through and processed another client's message.
        return
    # Dispatch on the message schema; each branch completes or advances the
    # operation pending on this connection.
    if messages.DisconnectionResponse.matches(message):
        self.conns.finish_disconnection(conn_key, message['success'], message.get('failure_reason', None))
    elif messages.OpenInterfaceResponse.matches(message):
        self.conns.finish_operation(conn_key, message['success'], message.get('failure_reason', None))
    elif messages.RPCResponse.matches(message):
        rpc_message = messages.RPCResponse.verify(message)
        self.conns.finish_operation(conn_key, rpc_message['success'], rpc_message.get('failure_reason', None), rpc_message.get('status', None), rpc_message.get('payload', None))
    elif messages.ProgressNotification.matches(message):
        progress_callback = context.get('progress_callback', None)
        if progress_callback is not None:
            progress_callback(message['done_count'], message['total_count'])
    elif messages.ScriptResponse.matches(message):
        # The script is finished; its progress callback is no longer needed.
        if 'progress_callback' in context:
            del context['progress_callback']
        self.conns.finish_operation(conn_key, message['success'], message.get('failure_reason', None))
    elif messages.DisconnectionNotification.matches(message):
        try:
            conn_key = self._find_connection(topic)
            conn_id = self.conns.get_connection_id(conn_key)
        except ArgumentError:
            self._logger.warn("Dropping disconnect notification that does not correspond with a known connection, topic=%s", topic)
            return
        # The device dropped the link on its own; notify listeners.
        self.conns.unexpected_disconnect(conn_key)
        self._trigger_callback('on_disconnect', self.id, conn_id)
    else:
        self._logger.warn("Invalid response message received, message=%s", message)
Args:
sequence (int): The sequence number of the packet received
topic (string): The topic this message was received on
message (dict): The message itself | Below is the instruction that describes the task:
### Input:
Process a response message received
Args:
sequence (int): The sequence number of the packet received
topic (string): The topic this message was received on
message (dict): The message itself
### Response:
def _on_response_message(self, sequence, topic, message):
"""Process a response message received
Args:
sequence (int): The sequence number of the packet received
topic (string): The topic this message was received on
message (dict): The message itself
"""
try:
conn_key = self._find_connection(topic)
context = self.conns.get_context(conn_key)
except ArgumentError:
self._logger.warn("Dropping message that does not correspond with a known connection, message=%s", message)
return
if 'client' in message and message['client'] != self.name:
self._logger.debug("Dropping message that is for another client %s, we are %s", message['client'], self.name)
if messages.DisconnectionResponse.matches(message):
self.conns.finish_disconnection(conn_key, message['success'], message.get('failure_reason', None))
elif messages.OpenInterfaceResponse.matches(message):
self.conns.finish_operation(conn_key, message['success'], message.get('failure_reason', None))
elif messages.RPCResponse.matches(message):
rpc_message = messages.RPCResponse.verify(message)
self.conns.finish_operation(conn_key, rpc_message['success'], rpc_message.get('failure_reason', None), rpc_message.get('status', None), rpc_message.get('payload', None))
elif messages.ProgressNotification.matches(message):
progress_callback = context.get('progress_callback', None)
if progress_callback is not None:
progress_callback(message['done_count'], message['total_count'])
elif messages.ScriptResponse.matches(message):
if 'progress_callback' in context:
del context['progress_callback']
self.conns.finish_operation(conn_key, message['success'], message.get('failure_reason', None))
elif messages.DisconnectionNotification.matches(message):
try:
conn_key = self._find_connection(topic)
conn_id = self.conns.get_connection_id(conn_key)
except ArgumentError:
self._logger.warn("Dropping disconnect notification that does not correspond with a known connection, topic=%s", topic)
return
self.conns.unexpected_disconnect(conn_key)
self._trigger_callback('on_disconnect', self.id, conn_id)
else:
self._logger.warn("Invalid response message received, message=%s", message) |
def _mmc_loop(self, rounds, max_angle, max_distance,
              temp=298.15, stop_when=None, verbose=True):
    """The main Metropolis Monte Carlo loop."""
    current_round = 0
    while current_round < rounds:
        # Propose from a deep copy so a rejected move leaves no trace.
        working_model = copy.deepcopy(self.polypeptide)
        random_vector = unit_vector(numpy.random.uniform(-1, 1, size=3))
        # Rotations are proposed three times as often as translations.
        mode = random.choice(['rotate', 'rotate', 'rotate', 'translate'])
        if mode == 'rotate':
            random_angle = numpy.random.rand() * max_angle
            working_model.rotate(random_angle, random_vector,
                                 working_model.centre_of_mass)
        else:
            random_translation = random_vector * (numpy.random.rand() *
                                                  max_distance)
            working_model.translate(random_translation)
        proposed_energy = self.eval_fn(working_model, *self.eval_args)
        # Metropolis acceptance criterion at temperature `temp`.
        move_accepted = self.check_move(proposed_energy,
                                        self.current_energy, t=temp)
        if move_accepted:
            self.current_energy = proposed_energy
            # NOTE(review): self.polypeptide is only replaced when a new
            # best is found, so an accepted-but-not-best move updates the
            # energy without moving the model -- confirm this is intended.
            if self.current_energy < self.best_energy:
                self.polypeptide = working_model
                self.best_energy = copy.deepcopy(self.current_energy)
                self.best_model = copy.deepcopy(working_model)
        if verbose:
            sys.stdout.write(
                '\rRound: {}, Current RMSD: {}, Proposed RMSD: {} '
                '(best {}), {}. '
                .format(current_round, self.float_f(self.current_energy),
                        self.float_f(proposed_energy), self.float_f(
                            self.best_energy),
                        "ACCEPTED" if move_accepted else "DECLINED")
            )
            sys.stdout.flush()
        current_round += 1
        # Optional early exit once the target energy is reached.
        if stop_when:
            if self.best_energy <= stop_when:
                break
    return | The main Metropolis Monte Carlo loop. | Below is the the instruction that describes the task:
### Input:
The main Metropolis Monte Carlo loop.
### Response:
def _mmc_loop(self, rounds, max_angle, max_distance,
temp=298.15, stop_when=None, verbose=True):
"""The main Metropolis Monte Carlo loop."""
current_round = 0
while current_round < rounds:
working_model = copy.deepcopy(self.polypeptide)
random_vector = unit_vector(numpy.random.uniform(-1, 1, size=3))
mode = random.choice(['rotate', 'rotate', 'rotate', 'translate'])
if mode == 'rotate':
random_angle = numpy.random.rand() * max_angle
working_model.rotate(random_angle, random_vector,
working_model.centre_of_mass)
else:
random_translation = random_vector * (numpy.random.rand() *
max_distance)
working_model.translate(random_translation)
proposed_energy = self.eval_fn(working_model, *self.eval_args)
move_accepted = self.check_move(proposed_energy,
self.current_energy, t=temp)
if move_accepted:
self.current_energy = proposed_energy
if self.current_energy < self.best_energy:
self.polypeptide = working_model
self.best_energy = copy.deepcopy(self.current_energy)
self.best_model = copy.deepcopy(working_model)
if verbose:
sys.stdout.write(
'\rRound: {}, Current RMSD: {}, Proposed RMSD: {} '
'(best {}), {}. '
.format(current_round, self.float_f(self.current_energy),
self.float_f(proposed_energy), self.float_f(
self.best_energy),
"ACCEPTED" if move_accepted else "DECLINED")
)
sys.stdout.flush()
current_round += 1
if stop_when:
if self.best_energy <= stop_when:
break
return |
def make_tophat_ee (lower, upper):
    """Return a ufunc-like tophat function on the defined range, left-exclusive
    and right-exclusive. Returns 1 if lower < x < upper, 0 otherwise.
    """
    if not np.isfinite (lower):
        raise ValueError ('"lower" argument must be finite number; got %r' % lower)
    if not np.isfinite (upper):
        raise ValueError ('"upper" argument must be finite number; got %r' % upper)
    def range_tophat_ee (x):
        x = np.asarray (x)
        x1 = np.atleast_1d (x)
        r = ((lower < x1) & (x1 < upper)).astype (x.dtype)
        if x.ndim == 0:
            # Scalar input: unwrap the size-1 result array back to a scalar.
            # np.asscalar() was removed in NumPy 1.23; ndarray.item() is the
            # documented equivalent.
            return r.item ()
        return r
    range_tophat_ee.__doc__ = ('Ranged tophat function, left-exclusive and '
                               'right-exclusive. Returns 1 if %g < x < %g, '
                               '0 otherwise.') % (lower, upper)
    return range_tophat_ee
and right-exclusive. Returns 1 if lower < x < upper, 0 otherwise. | Below is the the instruction that describes the task:
### Input:
Return a ufunc-like tophat function on the defined range, left-exclusive
and right-exclusive. Returns 1 if lower < x < upper, 0 otherwise.
### Response:
def make_tophat_ee (lower, upper):
"""Return a ufunc-like tophat function on the defined range, left-exclusive
and right-exclusive. Returns 1 if lower < x < upper, 0 otherwise.
"""
if not np.isfinite (lower):
raise ValueError ('"lower" argument must be finite number; got %r' % lower)
if not np.isfinite (upper):
raise ValueError ('"upper" argument must be finite number; got %r' % upper)
def range_tophat_ee (x):
x = np.asarray (x)
x1 = np.atleast_1d (x)
r = ((lower < x1) & (x1 < upper)).astype (x.dtype)
if x.ndim == 0:
return np.asscalar (r)
return r
range_tophat_ee.__doc__ = ('Ranged tophat function, left-exclusive and '
'right-exclusive. Returns 1 if %g < x < %g, '
'0 otherwise.') % (lower, upper)
return range_tophat_ee |
def __scale_axes(axes, ax_type, which):
    '''Set the axis scaling'''
    kwargs = dict()
    # Select the x- or y-axis variants of the scale kwargs and setters.
    # NOTE(review): linthreshx/basex-style kwargs were renamed (to
    # linthresh/base) in newer matplotlib -- confirm supported versions.
    if which == 'x':
        thresh = 'linthreshx'
        base = 'basex'
        scale = 'linscalex'
        scaler = axes.set_xscale
        limit = axes.set_xlim
    else:
        thresh = 'linthreshy'
        base = 'basey'
        scale = 'linscaley'
        scaler = axes.set_yscale
        limit = axes.set_ylim
    # Map ticker scales
    if ax_type == 'mel':
        mode = 'symlog'
        kwargs[thresh] = 1000.0
        kwargs[base] = 2
    elif ax_type == 'log':
        mode = 'symlog'
        kwargs[base] = 2
        kwargs[thresh] = core.note_to_hz('C2')
        kwargs[scale] = 0.5
    elif ax_type in ['cqt', 'cqt_hz', 'cqt_note']:
        mode = 'log'
        kwargs[base] = 2
    elif ax_type == 'tempo':
        mode = 'log'
        kwargs[base] = 2
        limit(16, 480)
    else:
        # Unrecognized axis types keep the default linear scaling.
        return
    scaler(mode, **kwargs) | Set the axis scaling | Below is the the instruction that describes the task:
### Input:
Set the axis scaling
### Response:
def __scale_axes(axes, ax_type, which):
'''Set the axis scaling'''
kwargs = dict()
if which == 'x':
thresh = 'linthreshx'
base = 'basex'
scale = 'linscalex'
scaler = axes.set_xscale
limit = axes.set_xlim
else:
thresh = 'linthreshy'
base = 'basey'
scale = 'linscaley'
scaler = axes.set_yscale
limit = axes.set_ylim
# Map ticker scales
if ax_type == 'mel':
mode = 'symlog'
kwargs[thresh] = 1000.0
kwargs[base] = 2
elif ax_type == 'log':
mode = 'symlog'
kwargs[base] = 2
kwargs[thresh] = core.note_to_hz('C2')
kwargs[scale] = 0.5
elif ax_type in ['cqt', 'cqt_hz', 'cqt_note']:
mode = 'log'
kwargs[base] = 2
elif ax_type == 'tempo':
mode = 'log'
kwargs[base] = 2
limit(16, 480)
else:
return
scaler(mode, **kwargs) |
def encode_header(cls, fin, opcode, mask, length, flags):
    """
    Encodes a WebSocket header.
    :param fin: Whether this is the final frame for this opcode.
    :param opcode: The opcode of the payload, see `OPCODE_*`
    :param mask: Whether the payload is masked.
    :param length: The length of the frame.
    :param flags: The RSV* flags.
    :return: A bytestring encoded header.
    """
    first_byte = opcode
    second_byte = 0
    # `extra` collects the optional extended-length and masking-key bytes;
    # it must match the bytes/str type of the final return value.
    if six.PY2:
        extra = ''
    else:
        extra = b''
    # First byte packs FIN + RSV0-2 flag bits on top of the 4-bit opcode.
    if fin:
        first_byte |= cls.FIN_MASK
    if flags & cls.RSV0_MASK:
        first_byte |= cls.RSV0_MASK
    if flags & cls.RSV1_MASK:
        first_byte |= cls.RSV1_MASK
    if flags & cls.RSV2_MASK:
        first_byte |= cls.RSV2_MASK
    # now deal with length complexities
    # Second byte holds a 7-bit length, with the escape values 126 and 127
    # signalling a 16-bit or 64-bit extended length field respectively.
    if length < 126:
        second_byte += length
    elif length <= 0xffff:
        second_byte += 126
        extra = struct.pack('!H', length)
    elif length <= 0xffffffffffffffff:
        second_byte += 127
        extra = struct.pack('!Q', length)
    else:
        raise FrameTooLargeException
    if mask:
        # The masking key is appended after any extended length field.
        second_byte |= cls.MASK_MASK
        extra += mask
    if six.PY3:
        return bytes([first_byte, second_byte]) + extra
    return chr(first_byte) + chr(second_byte) + extra | Encodes a WebSocket header.
:param fin: Whether this is the final frame for this opcode.
:param opcode: The opcode of the payload, see `OPCODE_*`
:param mask: Whether the payload is masked.
:param length: The length of the frame.
:param flags: The RSV* flags.
:return: A bytestring encoded header. | Below is the the instruction that describes the task:
### Input:
Encodes a WebSocket header.
:param fin: Whether this is the final frame for this opcode.
:param opcode: The opcode of the payload, see `OPCODE_*`
:param mask: Whether the payload is masked.
:param length: The length of the frame.
:param flags: The RSV* flags.
:return: A bytestring encoded header.
### Response:
def encode_header(cls, fin, opcode, mask, length, flags):
"""
Encodes a WebSocket header.
:param fin: Whether this is the final frame for this opcode.
:param opcode: The opcode of the payload, see `OPCODE_*`
:param mask: Whether the payload is masked.
:param length: The length of the frame.
:param flags: The RSV* flags.
:return: A bytestring encoded header.
"""
first_byte = opcode
second_byte = 0
if six.PY2:
extra = ''
else:
extra = b''
if fin:
first_byte |= cls.FIN_MASK
if flags & cls.RSV0_MASK:
first_byte |= cls.RSV0_MASK
if flags & cls.RSV1_MASK:
first_byte |= cls.RSV1_MASK
if flags & cls.RSV2_MASK:
first_byte |= cls.RSV2_MASK
# now deal with length complexities
if length < 126:
second_byte += length
elif length <= 0xffff:
second_byte += 126
extra = struct.pack('!H', length)
elif length <= 0xffffffffffffffff:
second_byte += 127
extra = struct.pack('!Q', length)
else:
raise FrameTooLargeException
if mask:
second_byte |= cls.MASK_MASK
extra += mask
if six.PY3:
return bytes([first_byte, second_byte]) + extra
return chr(first_byte) + chr(second_byte) + extra |
def dict_sequence(self, node, keep_var_ambigs=False):
    """
    For VCF-based TreeAnc objects, we do not want to store the entire
    sequence on every node, as they could be large. Instead, this returns the dict
    of variants & their positions for this sequence. This is used in place of
    :py:meth:`treetime.TreeAnc.expanded_sequence` for VCF-based objects throughout TreeAnc. However, users can still
    call :py:meth:`expanded_sequence` if they require the full sequence.
    Parameters
    ----------
    node : PhyloTree.Clade
        Tree node
    keep_var_ambigs : bool
        If True and the node is a tip with an original sequence recorded,
        report the original (possibly ambiguous) characters instead of the
        inferred ones.
    Returns
    -------
    seq : dict
        dict where keys are the basepair position (numbering from 0) and value is the variant call
    """
    seq = {}
    node_seq = node.cseq
    # Tips may carry their pre-inference sequence; use it when requested.
    if keep_var_ambigs and hasattr(node, "original_cseq") and node.is_terminal():
        node_seq = node.original_cseq
    for pos in self.nonref_positions:
        # Translate the full-genome position into the reduced (compressed)
        # sequence coordinates before reading the state.
        cseqLoc = self.full_to_reduced_sequence_map[pos]
        base = node_seq[cseqLoc]
        # Only record sites that differ from the reference.
        if self.ref[pos] != base:
            seq[pos] = base
    return seq | For VCF-based TreeAnc objects, we do not want to store the entire
sequence on every node, as they could be large. Instead, this returns the dict
of variants & their positions for this sequence. This is used in place of
:py:meth:`treetime.TreeAnc.expanded_sequence` for VCF-based objects throughout TreeAnc. However, users can still
call :py:meth:`expanded_sequence` if they require the full sequence.
Parameters
----------
node : PhyloTree.Clade
Tree node
Returns
-------
seq : dict
dict where keys are the basepair position (numbering from 0) and value is the variant call | Below is the the instruction that describes the task:
### Input:
For VCF-based TreeAnc objects, we do not want to store the entire
sequence on every node, as they could be large. Instead, this returns the dict
of variants & their positions for this sequence. This is used in place of
:py:meth:`treetime.TreeAnc.expanded_sequence` for VCF-based objects throughout TreeAnc. However, users can still
call :py:meth:`expanded_sequence` if they require the full sequence.
Parameters
----------
node : PhyloTree.Clade
Tree node
Returns
-------
seq : dict
dict where keys are the basepair position (numbering from 0) and value is the variant call
### Response:
def dict_sequence(self, node, keep_var_ambigs=False):
"""
For VCF-based TreeAnc objects, we do not want to store the entire
sequence on every node, as they could be large. Instead, this returns the dict
of variants & their positions for this sequence. This is used in place of
:py:meth:`treetime.TreeAnc.expanded_sequence` for VCF-based objects throughout TreeAnc. However, users can still
call :py:meth:`expanded_sequence` if they require the full sequence.
Parameters
----------
node : PhyloTree.Clade
Tree node
Returns
-------
seq : dict
dict where keys are the basepair position (numbering from 0) and value is the variant call
"""
seq = {}
node_seq = node.cseq
if keep_var_ambigs and hasattr(node, "original_cseq") and node.is_terminal():
node_seq = node.original_cseq
for pos in self.nonref_positions:
cseqLoc = self.full_to_reduced_sequence_map[pos]
base = node_seq[cseqLoc]
if self.ref[pos] != base:
seq[pos] = base
return seq |
def estimate_threshold(in1, edge_excl=0, int_excl=0):
    """
    This function estimates the noise using the MAD estimator.
    INPUTS:
    in1 (no default): The array from which the noise is estimated
    edge_excl (default=0): Width of the outer border excluded from the estimate
    int_excl (default=0): Half-width of the central square excluded from the estimate
    OUTPUTS:
    out1 An array of per-scale noise estimates.
    """
    out1 = np.empty([in1.shape[0]])
    # Bug fix: "/" yields a float on Python 3 and floats are not valid
    # slice indices; use integer division for the image-plane midpoint.
    mid = in1.shape[1] // 2
    if (edge_excl != 0) | (int_excl != 0):
        if edge_excl != 0:
            mask = np.zeros([in1.shape[1], in1.shape[2]])
            mask[edge_excl:-edge_excl, edge_excl:-edge_excl] = 1
        else:
            mask = np.ones([in1.shape[1], in1.shape[2]])
        if int_excl != 0:
            # Exclude the central window so bright central emission does
            # not bias the noise estimate.
            mask[mid-int_excl:mid+int_excl, mid-int_excl:mid+int_excl] = 0
    else:
        mask = np.ones([in1.shape[1], in1.shape[2]])
    for i in range(in1.shape[0]):
        # MAD-based sigma: median(|x|) / 0.6745 for zero-centred noise.
        out1[i] = np.median(np.abs(in1[i, mask == 1])) / 0.6745
    return out1
INPUTS:
in1 (no default): The array from which the noise is estimated
OUTPUTS:
out1 An array of per-scale noise estimates. | Below is the the instruction that describes the task:
### Input:
This function estimates the noise using the MAD estimator.
INPUTS:
in1 (no default): The array from which the noise is estimated
OUTPUTS:
out1 An array of per-scale noise estimates.
### Response:
def estimate_threshold(in1, edge_excl=0, int_excl=0):
"""
This function estimates the noise using the MAD estimator.
INPUTS:
in1 (no default): The array from which the noise is estimated
OUTPUTS:
out1 An array of per-scale noise estimates.
"""
out1 = np.empty([in1.shape[0]])
mid = in1.shape[1]/2
if (edge_excl!=0) | (int_excl!=0):
if edge_excl!=0:
mask = np.zeros([in1.shape[1], in1.shape[2]])
mask[edge_excl:-edge_excl, edge_excl:-edge_excl] = 1
else:
mask = np.ones([in1.shape[1], in1.shape[2]])
if int_excl!=0:
mask[mid-int_excl:mid+int_excl, mid-int_excl:mid+int_excl] = 0
else:
mask = np.ones([in1.shape[1], in1.shape[2]])
for i in range(in1.shape[0]):
out1[i] = np.median(np.abs(in1[i,mask==1]))/0.6745
return out1 |
def _to_dict(self):
    """Return a json dictionary representing this model."""
    _dict = {}
    # Include only attributes that are present and set, so the payload
    # omits unset fields entirely instead of emitting nulls.
    if hasattr(self, 'enabled') and self.enabled is not None:
        _dict['enabled'] = self.enabled
    if hasattr(self, 'time_zone') and self.time_zone is not None:
        _dict['time_zone'] = self.time_zone
    if hasattr(self, 'frequency') and self.frequency is not None:
        _dict['frequency'] = self.frequency
    return _dict | Return a json dictionary representing this model. | Below is the the instruction that describes the task:
### Input:
Return a json dictionary representing this model.
### Response:
def _to_dict(self):
"""Return a json dictionary representing this model."""
_dict = {}
if hasattr(self, 'enabled') and self.enabled is not None:
_dict['enabled'] = self.enabled
if hasattr(self, 'time_zone') and self.time_zone is not None:
_dict['time_zone'] = self.time_zone
if hasattr(self, 'frequency') and self.frequency is not None:
_dict['frequency'] = self.frequency
return _dict |
def initialize_repo(self):
    """
    Clones repository & sets up usernames.
    """
    logging.info('Repo {} doesn\'t exist. Cloning...'.format(self.repo_dir))
    clone_args = ['git', 'clone']
    # A positive depth makes a shallow clone, saving time and disk space.
    if self.depth and self.depth > 0:
        clone_args.extend(['--depth', str(self.depth)])
    clone_args.extend(['--branch', self.branch_name])
    clone_args.extend([self.git_url, self.repo_dir])
    yield from execute_cmd(clone_args)
    # Configure a repo-local identity so later automated commits do not
    # depend on the user's global git configuration.
    yield from execute_cmd(['git', 'config', 'user.email', 'nbgitpuller@example.com'], cwd=self.repo_dir)
    yield from execute_cmd(['git', 'config', 'user.name', 'nbgitpuller'], cwd=self.repo_dir)
    logging.info('Repo {} initialized'.format(self.repo_dir)) | Clones repository & sets up usernames. | Below is the the instruction that describes the task:
### Input:
Clones repository & sets up usernames.
### Response:
def initialize_repo(self):
"""
Clones repository & sets up usernames.
"""
logging.info('Repo {} doesn\'t exist. Cloning...'.format(self.repo_dir))
clone_args = ['git', 'clone']
if self.depth and self.depth > 0:
clone_args.extend(['--depth', str(self.depth)])
clone_args.extend(['--branch', self.branch_name])
clone_args.extend([self.git_url, self.repo_dir])
yield from execute_cmd(clone_args)
yield from execute_cmd(['git', 'config', 'user.email', 'nbgitpuller@example.com'], cwd=self.repo_dir)
yield from execute_cmd(['git', 'config', 'user.name', 'nbgitpuller'], cwd=self.repo_dir)
logging.info('Repo {} initialized'.format(self.repo_dir)) |
def getrawblob(self, project_id, sha1):
    """
    Get the raw file contents for a blob by blob SHA.
    :param project_id: The ID of a project
    :param sha1: the commit sha
    :return: raw blob (bytes) on HTTP 200, False otherwise
    """
    request = requests.get(
        '{0}/{1}/repository/raw_blobs/{2}'.format(self.projects_url, project_id, sha1),
        verify=self.verify_ssl, auth=self.auth, headers=self.headers, timeout=self.timeout)
    if request.status_code == 200:
        # Raw response body, exactly as stored.
        return request.content
    else:
        # NOTE(review): all failures (404, auth errors, ...) collapse to
        # False, so callers cannot distinguish the cause.
        return False | Get the raw file contents for a blob by blob SHA.
:param project_id: The ID of a project
:param sha1: the commit sha
:return: raw blob | Below is the the instruction that describes the task:
### Input:
Get the raw file contents for a blob by blob SHA.
:param project_id: The ID of a project
:param sha1: the commit sha
:return: raw blob
### Response:
def getrawblob(self, project_id, sha1):
"""
Get the raw file contents for a blob by blob SHA.
:param project_id: The ID of a project
:param sha1: the commit sha
:return: raw blob
"""
request = requests.get(
'{0}/{1}/repository/raw_blobs/{2}'.format(self.projects_url, project_id, sha1),
verify=self.verify_ssl, auth=self.auth, headers=self.headers, timeout=self.timeout)
if request.status_code == 200:
return request.content
else:
return False |
def get_abbreviations(self):
    """
    Get abbreviations of the names of the author.
    :return: a list of strings (empty list if no abbreviations available).
    """
    abbreviations = []
    try:
        # Resolve the "abbreviation" E55_Type resource, used below to
        # filter the alternative name forms.
        type_abbreviation = self.session.get_resource(BASE_URI_TYPES % "abbreviation"
            , self.session.get_class(surf.ns.ECRM['E55_Type']))
        # `unicode` implies this module targets Python 2.
        abbreviations = [unicode(label)
                 for name in self.ecrm_P1_is_identified_by
                 for abbreviation in name.ecrm_P139_has_alternative_form
                 for label in abbreviation.rdfs_label
                 if name.uri == surf.ns.EFRBROO['F12_Name']
                 and abbreviation.ecrm_P2_has_type.first == type_abbreviation]
    except Exception as e:
        # NOTE(review): the broad except plus the return in finally reduce
        # any failure to a debug log and an empty result -- errors are
        # effectively silenced; confirm this is intended.
        logger.debug("Exception raised when getting abbreviations for %a"%self)
    finally:
        return abbreviations | Get abbreviations of the names of the author.
:return: a list of strings (empty list if no abbreviations available). | Below is the the instruction that describes the task:
### Input:
Get abbreviations of the names of the author.
:return: a list of strings (empty list if no abbreviations available).
### Response:
def get_abbreviations(self):
"""
Get abbreviations of the names of the author.
:return: a list of strings (empty list if no abbreviations available).
"""
abbreviations = []
try:
type_abbreviation = self.session.get_resource(BASE_URI_TYPES % "abbreviation"
, self.session.get_class(surf.ns.ECRM['E55_Type']))
abbreviations = [unicode(label)
for name in self.ecrm_P1_is_identified_by
for abbreviation in name.ecrm_P139_has_alternative_form
for label in abbreviation.rdfs_label
if name.uri == surf.ns.EFRBROO['F12_Name']
and abbreviation.ecrm_P2_has_type.first == type_abbreviation]
except Exception as e:
logger.debug("Exception raised when getting abbreviations for %a"%self)
finally:
return abbreviations |
def apply_function(self, func):
    """Apply a function to all grid_stack in the grid-stack.
    This is used by the *ray-tracing* module to easily apply tracing operations to all grid_stack."""
    # regular and sub always exist; blurring and pix are optional, so all
    # four presence combinations are handled explicitly.
    if self.blurring is not None and self.pix is not None:
        return GridStack(func(self.regular), func(self.sub), func(self.blurring), func(self.pix))
    elif self.blurring is None and self.pix is not None:
        return GridStack(func(self.regular), func(self.sub), self.blurring, func(self.pix))
    elif self.blurring is not None and self.pix is None:
        return GridStack(func(self.regular), func(self.sub), func(self.blurring), self.pix)
    else:
        return GridStack(func(self.regular), func(self.sub), self.blurring, self.pix) | Apply a function to all grid_stack in the grid-stack.
This is used by the *ray-tracing* module to easily apply tracing operations to all grid_stack. | Below is the the instruction that describes the task:
### Input:
Apply a function to all grid_stack in the grid-stack.
This is used by the *ray-tracing* module to easily apply tracing operations to all grid_stack.
### Response:
def apply_function(self, func):
"""Apply a function to all grid_stack in the grid-stack.
This is used by the *ray-tracing* module to easily apply tracing operations to all grid_stack."""
if self.blurring is not None and self.pix is not None:
return GridStack(func(self.regular), func(self.sub), func(self.blurring), func(self.pix))
elif self.blurring is None and self.pix is not None:
return GridStack(func(self.regular), func(self.sub), self.blurring, func(self.pix))
elif self.blurring is not None and self.pix is None:
return GridStack(func(self.regular), func(self.sub), func(self.blurring), self.pix)
else:
return GridStack(func(self.regular), func(self.sub), self.blurring, self.pix) |
def get_method_descriptor(self, class_name, method_name, descriptor):
    """
    Return the specific method
    :param class_name: the class name of the method
    :type class_name: string
    :param method_name: the name of the method
    :type method_name: string
    :param descriptor: the descriptor of the method
    :type descriptor: string
    :rtype: None or a :class:`EncodedMethod` object
    """
    key = class_name + method_name + descriptor
    # Build the lookup table lazily on first use: scan every method of
    # every class once, keyed by class name + method name + descriptor.
    if self.__cache_methods is None:
        self.__cache_methods = {}
        for i in self.get_classes():
            for j in i.get_methods():
                self.__cache_methods[j.get_class_name() + j.get_name() +
                                     j.get_descriptor()] = j
    # dict.get() yields None when no such method exists.
    return self.__cache_methods.get(key) | Return the specific method
:param class_name: the class name of the method
:type class_name: string
:param method_name: the name of the method
:type method_name: string
:param descriptor: the descriptor of the method
:type descriptor: string
:rtype: None or a :class:`EncodedMethod` object | Below is the the instruction that describes the task:
### Input:
Return the specific method
:param class_name: the class name of the method
:type class_name: string
:param method_name: the name of the method
:type method_name: string
:param descriptor: the descriptor of the method
:type descriptor: string
:rtype: None or a :class:`EncodedMethod` object
### Response:
def get_method_descriptor(self, class_name, method_name, descriptor):
"""
Return the specific method
:param class_name: the class name of the method
:type class_name: string
:param method_name: the name of the method
:type method_name: string
:param descriptor: the descriptor of the method
:type descriptor: string
:rtype: None or a :class:`EncodedMethod` object
"""
key = class_name + method_name + descriptor
if self.__cache_methods is None:
self.__cache_methods = {}
for i in self.get_classes():
for j in i.get_methods():
self.__cache_methods[j.get_class_name() + j.get_name() +
j.get_descriptor()] = j
return self.__cache_methods.get(key) |
def _generate_examples(self, file_id, extracted_dirs):
"""Yields examples."""
filedir = os.path.join(extracted_dirs["img_align_celeba"],
"img_align_celeba")
img_list_path = extracted_dirs["list_eval_partition"]
landmarks_path = extracted_dirs["landmarks_celeba"]
attr_path = extracted_dirs["list_attr_celeba"]
with tf.io.gfile.GFile(img_list_path) as f:
files = [
line.split()[0]
for line in f.readlines()
if int(line.split()[1]) == file_id
]
attributes = self._process_celeba_config_file(attr_path)
landmarks = self._process_celeba_config_file(landmarks_path)
for file_name in sorted(files):
path = os.path.join(filedir, file_name)
yield {
"image": path,
"landmarks": {
k: v for k, v in zip(landmarks[0], landmarks[1][file_name])
},
"attributes": {
# atributes value are either 1 or -1, so convert to bool
k: v > 0 for k, v in zip(attributes[0], attributes[1][file_name])
},
} | Yields examples. | Below is the the instruction that describes the task:
### Input:
Yields examples.
### Response:
def _generate_examples(self, file_id, extracted_dirs):
"""Yields examples."""
filedir = os.path.join(extracted_dirs["img_align_celeba"],
"img_align_celeba")
img_list_path = extracted_dirs["list_eval_partition"]
landmarks_path = extracted_dirs["landmarks_celeba"]
attr_path = extracted_dirs["list_attr_celeba"]
with tf.io.gfile.GFile(img_list_path) as f:
files = [
line.split()[0]
for line in f.readlines()
if int(line.split()[1]) == file_id
]
attributes = self._process_celeba_config_file(attr_path)
landmarks = self._process_celeba_config_file(landmarks_path)
for file_name in sorted(files):
path = os.path.join(filedir, file_name)
yield {
"image": path,
"landmarks": {
k: v for k, v in zip(landmarks[0], landmarks[1][file_name])
},
"attributes": {
# atributes value are either 1 or -1, so convert to bool
k: v > 0 for k, v in zip(attributes[0], attributes[1][file_name])
},
} |
def _is_ndb(self):
"""Determine whether the model of the instance is an NDB model.
Returns:
Boolean indicating whether or not the model is an NDB or DB model.
"""
# issubclass will fail if one of the arguments is not a class, only
# need worry about new-style classes since ndb and db models are
# new-style
if isinstance(self._model, type):
if _NDB_MODEL is not None and issubclass(self._model, _NDB_MODEL):
return True
elif issubclass(self._model, db.Model):
return False
raise TypeError(
'Model class not an NDB or DB model: {0}.'.format(self._model)) | Determine whether the model of the instance is an NDB model.
Returns:
Boolean indicating whether or not the model is an NDB or DB model. | Below is the the instruction that describes the task:
### Input:
Determine whether the model of the instance is an NDB model.
Returns:
Boolean indicating whether or not the model is an NDB or DB model.
### Response:
def _is_ndb(self):
"""Determine whether the model of the instance is an NDB model.
Returns:
Boolean indicating whether or not the model is an NDB or DB model.
"""
# issubclass will fail if one of the arguments is not a class, only
# need worry about new-style classes since ndb and db models are
# new-style
if isinstance(self._model, type):
if _NDB_MODEL is not None and issubclass(self._model, _NDB_MODEL):
return True
elif issubclass(self._model, db.Model):
return False
raise TypeError(
'Model class not an NDB or DB model: {0}.'.format(self._model)) |
def settings(self, key=None, section=None):
"""The settings overridden from the wandb/settings file.
Args:
key (str, optional): If provided only this setting is returned
section (str, optional): If provided this section of the setting file is
used, defaults to "default"
Returns:
A dict with the current settings
{
"entity": "models",
"base_url": "https://api.wandb.ai",
"project": None
}
"""
if not self._settings:
self._settings = self.default_settings.copy()
section = section or self._settings['section']
try:
if section in self.settings_parser.sections():
for option in self.settings_parser.options(section):
self._settings[option] = self.settings_parser.get(
section, option)
except configparser.InterpolationSyntaxError:
print("WARNING: Unable to parse settings file")
self._settings["project"] = env.get_project(
self._settings.get("project"))
self._settings["entity"] = env.get_entity(
self._settings.get("entity"))
self._settings["base_url"] = env.get_base_url(
self._settings.get("base_url"))
self._settings["ignore_globs"] = env.get_ignore(
self._settings.get("ignore_globs")
)
return self._settings if key is None else self._settings[key] | The settings overridden from the wandb/settings file.
Args:
key (str, optional): If provided only this setting is returned
section (str, optional): If provided this section of the setting file is
used, defaults to "default"
Returns:
A dict with the current settings
{
"entity": "models",
"base_url": "https://api.wandb.ai",
"project": None
} | Below is the the instruction that describes the task:
### Input:
The settings overridden from the wandb/settings file.
Args:
key (str, optional): If provided only this setting is returned
section (str, optional): If provided this section of the setting file is
used, defaults to "default"
Returns:
A dict with the current settings
{
"entity": "models",
"base_url": "https://api.wandb.ai",
"project": None
}
### Response:
def settings(self, key=None, section=None):
"""The settings overridden from the wandb/settings file.
Args:
key (str, optional): If provided only this setting is returned
section (str, optional): If provided this section of the setting file is
used, defaults to "default"
Returns:
A dict with the current settings
{
"entity": "models",
"base_url": "https://api.wandb.ai",
"project": None
}
"""
if not self._settings:
self._settings = self.default_settings.copy()
section = section or self._settings['section']
try:
if section in self.settings_parser.sections():
for option in self.settings_parser.options(section):
self._settings[option] = self.settings_parser.get(
section, option)
except configparser.InterpolationSyntaxError:
print("WARNING: Unable to parse settings file")
self._settings["project"] = env.get_project(
self._settings.get("project"))
self._settings["entity"] = env.get_entity(
self._settings.get("entity"))
self._settings["base_url"] = env.get_base_url(
self._settings.get("base_url"))
self._settings["ignore_globs"] = env.get_ignore(
self._settings.get("ignore_globs")
)
return self._settings if key is None else self._settings[key] |
def _run_lint_on_file_stamped_args(file_path, # suppress(too-many-arguments)
stamp_file_path,
log_technical_terms_to,
linter_functions,
tool_options,
fix_what_you_can):
"""Return tuple of args and kwargs that function would be called with."""
dictionary_path = os.path.abspath("DICTIONARY")
dependencies = [file_path]
if os.path.exists(dictionary_path):
dependencies.append(dictionary_path)
kwargs = OrderedDict()
kwargs["jobstamps_dependencies"] = dependencies
kwargs["jobstamps_cache_output_directory"] = stamp_file_path
if log_technical_terms_to:
kwargs["jobstamps_output_files"] = [log_technical_terms_to]
return ((file_path,
linter_functions,
tool_options,
fix_what_you_can),
kwargs) | Return tuple of args and kwargs that function would be called with. | Below is the the instruction that describes the task:
### Input:
Return tuple of args and kwargs that function would be called with.
### Response:
def _run_lint_on_file_stamped_args(file_path, # suppress(too-many-arguments)
stamp_file_path,
log_technical_terms_to,
linter_functions,
tool_options,
fix_what_you_can):
"""Return tuple of args and kwargs that function would be called with."""
dictionary_path = os.path.abspath("DICTIONARY")
dependencies = [file_path]
if os.path.exists(dictionary_path):
dependencies.append(dictionary_path)
kwargs = OrderedDict()
kwargs["jobstamps_dependencies"] = dependencies
kwargs["jobstamps_cache_output_directory"] = stamp_file_path
if log_technical_terms_to:
kwargs["jobstamps_output_files"] = [log_technical_terms_to]
return ((file_path,
linter_functions,
tool_options,
fix_what_you_can),
kwargs) |
def make_energy_bounds_hdu(self, extname="EBOUNDS"):
""" Builds and returns a FITs HDU with the energy bin boundries
extname : The HDU extension name
"""
if self._ebins is None:
return None
cols = [fits.Column("CHANNEL", "I", array=np.arange(1, len(self._ebins + 1))),
fits.Column("E_MIN", "1E", unit='keV',
array=1000 * self._ebins[0:-1]),
fits.Column("E_MAX", "1E", unit='keV', array=1000 * self._ebins[1:])]
hdu = fits.BinTableHDU.from_columns(
cols, self.make_header(), name=extname)
return hdu | Builds and returns a FITs HDU with the energy bin boundries
extname : The HDU extension name | Below is the the instruction that describes the task:
### Input:
Builds and returns a FITs HDU with the energy bin boundries
extname : The HDU extension name
### Response:
def make_energy_bounds_hdu(self, extname="EBOUNDS"):
""" Builds and returns a FITs HDU with the energy bin boundries
extname : The HDU extension name
"""
if self._ebins is None:
return None
cols = [fits.Column("CHANNEL", "I", array=np.arange(1, len(self._ebins + 1))),
fits.Column("E_MIN", "1E", unit='keV',
array=1000 * self._ebins[0:-1]),
fits.Column("E_MAX", "1E", unit='keV', array=1000 * self._ebins[1:])]
hdu = fits.BinTableHDU.from_columns(
cols, self.make_header(), name=extname)
return hdu |
def to_bytes(self):
"""Convert the entire image to bytes.
:rtype: bytes
"""
# grab the chunks we needs
out = [PNG_SIGN]
# FIXME: it's tricky to define "other_chunks". HoneyView stop the
# animation if it sees chunks other than fctl or idat, so we put other
# chunks to the end of the file
other_chunks = []
seq = 0
# for first frame
png, control = self.frames[0]
# header
out.append(png.hdr)
# acTL
out.append(make_chunk("acTL", struct.pack("!II", len(self.frames), self.num_plays)))
# fcTL
if control:
out.append(make_chunk("fcTL", struct.pack("!I", seq) + control.to_bytes()))
seq += 1
# and others...
idat_chunks = []
for type_, data in png.chunks:
if type_ in ("IHDR", "IEND"):
continue
if type_ == "IDAT":
# put at last
idat_chunks.append(data)
continue
out.append(data)
out.extend(idat_chunks)
# FIXME: we should do some optimization to frames...
# for other frames
for png, control in self.frames[1:]:
# fcTL
out.append(
make_chunk("fcTL", struct.pack("!I", seq) + control.to_bytes())
)
seq += 1
# and others...
for type_, data in png.chunks:
if type_ in ("IHDR", "IEND") or type_ in CHUNK_BEFORE_IDAT:
continue
elif type_ == "IDAT":
# convert IDAT to fdAT
out.append(
make_chunk("fdAT", struct.pack("!I", seq) + data[8:-4])
)
seq += 1
else:
other_chunks.append(data)
# end
out.extend(other_chunks)
out.append(png.end)
return b"".join(out) | Convert the entire image to bytes.
:rtype: bytes | Below is the the instruction that describes the task:
### Input:
Convert the entire image to bytes.
:rtype: bytes
### Response:
def to_bytes(self):
"""Convert the entire image to bytes.
:rtype: bytes
"""
# grab the chunks we needs
out = [PNG_SIGN]
# FIXME: it's tricky to define "other_chunks". HoneyView stop the
# animation if it sees chunks other than fctl or idat, so we put other
# chunks to the end of the file
other_chunks = []
seq = 0
# for first frame
png, control = self.frames[0]
# header
out.append(png.hdr)
# acTL
out.append(make_chunk("acTL", struct.pack("!II", len(self.frames), self.num_plays)))
# fcTL
if control:
out.append(make_chunk("fcTL", struct.pack("!I", seq) + control.to_bytes()))
seq += 1
# and others...
idat_chunks = []
for type_, data in png.chunks:
if type_ in ("IHDR", "IEND"):
continue
if type_ == "IDAT":
# put at last
idat_chunks.append(data)
continue
out.append(data)
out.extend(idat_chunks)
# FIXME: we should do some optimization to frames...
# for other frames
for png, control in self.frames[1:]:
# fcTL
out.append(
make_chunk("fcTL", struct.pack("!I", seq) + control.to_bytes())
)
seq += 1
# and others...
for type_, data in png.chunks:
if type_ in ("IHDR", "IEND") or type_ in CHUNK_BEFORE_IDAT:
continue
elif type_ == "IDAT":
# convert IDAT to fdAT
out.append(
make_chunk("fdAT", struct.pack("!I", seq) + data[8:-4])
)
seq += 1
else:
other_chunks.append(data)
# end
out.extend(other_chunks)
out.append(png.end)
return b"".join(out) |
def close(self):
"""
Close internal connection to AMQP if connected.
"""
if self.connection:
logging.info("Closing connection to {}.".format(self.host))
self.connection.close()
self.connection = None | Close internal connection to AMQP if connected. | Below is the the instruction that describes the task:
### Input:
Close internal connection to AMQP if connected.
### Response:
def close(self):
"""
Close internal connection to AMQP if connected.
"""
if self.connection:
logging.info("Closing connection to {}.".format(self.host))
self.connection.close()
self.connection = None |
def _convert_html_to_csv(
self):
"""
*contert html to csv*
**Key Arguments:**
# -
**Return:**
- None
.. todo::
- @review: when complete, clean _convert_html_to_csv method
- @review: when complete add logging
"""
self.log.info('starting the ``_convert_html_to_csv`` method')
import codecs
allData = ""
regex1 = re.compile(
r'.*<PRE><strong> (.*?)</strong>(.*?)</PRE></TABLE>.*', re.I | re.S)
regex2 = re.compile(r'\|(\w)\|', re.I | re.S)
for thisFile in self.nedResults:
pathToReadFile = thisFile
try:
self.log.debug("attempting to open the file %s" %
(pathToReadFile,))
readFile = codecs.open(
pathToReadFile, encoding='utf-8', mode='r')
thisData = readFile.read()
readFile.close()
except IOError, e:
message = 'could not open the file %s' % (pathToReadFile,)
self.log.critical(message)
raise IOError(message)
except:
if pathToReadFile == None:
message = 'we have no file to open'
self.log.error(message)
continue
readFile.close()
self.log.debug("regex 1 - sub")
thisData = regex1.sub("\g<1>\g<2>", thisData)
self.log.debug("regex 2 - sub")
thisData = regex2.sub("abs(\g<1>)", thisData)
self.log.debug("replace text")
thisData = thisData.replace("|b|", "abs(b)")
writeFile = codecs.open(pathToReadFile, encoding='utf-8', mode='w')
writeFile.write(thisData)
writeFile.close()
self.log.info('completed the ``_convert_html_to_csv`` method')
return None | *contert html to csv*
**Key Arguments:**
# -
**Return:**
- None
.. todo::
- @review: when complete, clean _convert_html_to_csv method
- @review: when complete add logging | Below is the the instruction that describes the task:
### Input:
*contert html to csv*
**Key Arguments:**
# -
**Return:**
- None
.. todo::
- @review: when complete, clean _convert_html_to_csv method
- @review: when complete add logging
### Response:
def _convert_html_to_csv(
self):
"""
*contert html to csv*
**Key Arguments:**
# -
**Return:**
- None
.. todo::
- @review: when complete, clean _convert_html_to_csv method
- @review: when complete add logging
"""
self.log.info('starting the ``_convert_html_to_csv`` method')
import codecs
allData = ""
regex1 = re.compile(
r'.*<PRE><strong> (.*?)</strong>(.*?)</PRE></TABLE>.*', re.I | re.S)
regex2 = re.compile(r'\|(\w)\|', re.I | re.S)
for thisFile in self.nedResults:
pathToReadFile = thisFile
try:
self.log.debug("attempting to open the file %s" %
(pathToReadFile,))
readFile = codecs.open(
pathToReadFile, encoding='utf-8', mode='r')
thisData = readFile.read()
readFile.close()
except IOError, e:
message = 'could not open the file %s' % (pathToReadFile,)
self.log.critical(message)
raise IOError(message)
except:
if pathToReadFile == None:
message = 'we have no file to open'
self.log.error(message)
continue
readFile.close()
self.log.debug("regex 1 - sub")
thisData = regex1.sub("\g<1>\g<2>", thisData)
self.log.debug("regex 2 - sub")
thisData = regex2.sub("abs(\g<1>)", thisData)
self.log.debug("replace text")
thisData = thisData.replace("|b|", "abs(b)")
writeFile = codecs.open(pathToReadFile, encoding='utf-8', mode='w')
writeFile.write(thisData)
writeFile.close()
self.log.info('completed the ``_convert_html_to_csv`` method')
return None |
def get_gz_cn(offset: int) -> str:
"""Get n-th(0-based) GanZhi
"""
return TextUtils.STEMS[offset % 10] + TextUtils.BRANCHES[offset % 12] | Get n-th(0-based) GanZhi | Below is the the instruction that describes the task:
### Input:
Get n-th(0-based) GanZhi
### Response:
def get_gz_cn(offset: int) -> str:
"""Get n-th(0-based) GanZhi
"""
return TextUtils.STEMS[offset % 10] + TextUtils.BRANCHES[offset % 12] |
def unpack_ext(ext_path):
'''
Unpack the external modules.
'''
modcache = os.path.join(
OPTIONS.saltdir,
'running_data',
'var',
'cache',
'salt',
'minion',
'extmods')
tfile = tarfile.TarFile.gzopen(ext_path)
old_umask = os.umask(0o077) # pylint: disable=blacklisted-function
tfile.extractall(path=modcache)
tfile.close()
os.umask(old_umask) # pylint: disable=blacklisted-function
os.unlink(ext_path)
ver_path = os.path.join(modcache, 'ext_version')
ver_dst = os.path.join(OPTIONS.saltdir, 'ext_version')
shutil.move(ver_path, ver_dst) | Unpack the external modules. | Below is the the instruction that describes the task:
### Input:
Unpack the external modules.
### Response:
def unpack_ext(ext_path):
'''
Unpack the external modules.
'''
modcache = os.path.join(
OPTIONS.saltdir,
'running_data',
'var',
'cache',
'salt',
'minion',
'extmods')
tfile = tarfile.TarFile.gzopen(ext_path)
old_umask = os.umask(0o077) # pylint: disable=blacklisted-function
tfile.extractall(path=modcache)
tfile.close()
os.umask(old_umask) # pylint: disable=blacklisted-function
os.unlink(ext_path)
ver_path = os.path.join(modcache, 'ext_version')
ver_dst = os.path.join(OPTIONS.saltdir, 'ext_version')
shutil.move(ver_path, ver_dst) |
def damping_maintain_sign(x, step, damping=1.0, factor=0.5):
'''Famping function which will maintain the sign of the variable being
manipulated. If the step puts it at the other sign, the distance between
`x` and `step` will be shortened by the multiple of `factor`; i.e. if
factor is `x`, the new value of `x` will be 0 exactly.
The provided `damping` is applied as well.
Parameters
----------
x : float
Previous value in iteration, [-]
step : float
Change in `x`, [-]
damping : float, optional
The damping factor to be applied always, [-]
factor : float, optional
If the calculated step changes sign, this factor will be used instead
of the step, [-]
Returns
-------
x_new : float
The new value in the iteration, [-]
Notes
-----
Examples
--------
>>> damping_maintain_sign(100, -200, factor=.5)
50.0
'''
positive = x > 0.0
step_x = x + step
if (positive and step_x < 0) or (not positive and step_x > 0.0):
# print('damping')
step = -factor*x
return x + step*damping | Famping function which will maintain the sign of the variable being
manipulated. If the step puts it at the other sign, the distance between
`x` and `step` will be shortened by the multiple of `factor`; i.e. if
factor is `x`, the new value of `x` will be 0 exactly.
The provided `damping` is applied as well.
Parameters
----------
x : float
Previous value in iteration, [-]
step : float
Change in `x`, [-]
damping : float, optional
The damping factor to be applied always, [-]
factor : float, optional
If the calculated step changes sign, this factor will be used instead
of the step, [-]
Returns
-------
x_new : float
The new value in the iteration, [-]
Notes
-----
Examples
--------
>>> damping_maintain_sign(100, -200, factor=.5)
50.0 | Below is the the instruction that describes the task:
### Input:
Famping function which will maintain the sign of the variable being
manipulated. If the step puts it at the other sign, the distance between
`x` and `step` will be shortened by the multiple of `factor`; i.e. if
factor is `x`, the new value of `x` will be 0 exactly.
The provided `damping` is applied as well.
Parameters
----------
x : float
Previous value in iteration, [-]
step : float
Change in `x`, [-]
damping : float, optional
The damping factor to be applied always, [-]
factor : float, optional
If the calculated step changes sign, this factor will be used instead
of the step, [-]
Returns
-------
x_new : float
The new value in the iteration, [-]
Notes
-----
Examples
--------
>>> damping_maintain_sign(100, -200, factor=.5)
50.0
### Response:
def damping_maintain_sign(x, step, damping=1.0, factor=0.5):
'''Famping function which will maintain the sign of the variable being
manipulated. If the step puts it at the other sign, the distance between
`x` and `step` will be shortened by the multiple of `factor`; i.e. if
factor is `x`, the new value of `x` will be 0 exactly.
The provided `damping` is applied as well.
Parameters
----------
x : float
Previous value in iteration, [-]
step : float
Change in `x`, [-]
damping : float, optional
The damping factor to be applied always, [-]
factor : float, optional
If the calculated step changes sign, this factor will be used instead
of the step, [-]
Returns
-------
x_new : float
The new value in the iteration, [-]
Notes
-----
Examples
--------
>>> damping_maintain_sign(100, -200, factor=.5)
50.0
'''
positive = x > 0.0
step_x = x + step
if (positive and step_x < 0) or (not positive and step_x > 0.0):
# print('damping')
step = -factor*x
return x + step*damping |
def _decode_temp(byte_1, byte_2):
"""Decode a signed short temperature as two bytes to a single number."""
temp = (byte_1 << 8) + byte_2
if (temp > 32767):
temp = temp - 65536
temp = temp / 10
return temp | Decode a signed short temperature as two bytes to a single number. | Below is the the instruction that describes the task:
### Input:
Decode a signed short temperature as two bytes to a single number.
### Response:
def _decode_temp(byte_1, byte_2):
"""Decode a signed short temperature as two bytes to a single number."""
temp = (byte_1 << 8) + byte_2
if (temp > 32767):
temp = temp - 65536
temp = temp / 10
return temp |
def encode(self):
'''
Encode and store an UNSUBCRIBE control packet
@raise e: C{ValueError} if any encoded topic string exceeds 65535 bytes
'''
header = bytearray(1)
payload = bytearray()
varHeader = encode16Int(self.msgId)
header[0] = 0xA2 # packet with QoS=1
for topic in self.topics:
payload.extend(encodeString(topic)) # topic name
header.extend(encodeLength(len(varHeader) + len(payload)))
header.extend(varHeader)
header.extend(payload)
self.encoded = header
return str(header) if PY2 else bytes(header) | Encode and store an UNSUBCRIBE control packet
@raise e: C{ValueError} if any encoded topic string exceeds 65535 bytes | Below is the the instruction that describes the task:
### Input:
Encode and store an UNSUBCRIBE control packet
@raise e: C{ValueError} if any encoded topic string exceeds 65535 bytes
### Response:
def encode(self):
'''
Encode and store an UNSUBCRIBE control packet
@raise e: C{ValueError} if any encoded topic string exceeds 65535 bytes
'''
header = bytearray(1)
payload = bytearray()
varHeader = encode16Int(self.msgId)
header[0] = 0xA2 # packet with QoS=1
for topic in self.topics:
payload.extend(encodeString(topic)) # topic name
header.extend(encodeLength(len(varHeader) + len(payload)))
header.extend(varHeader)
header.extend(payload)
self.encoded = header
return str(header) if PY2 else bytes(header) |
def replies(self, tweet, recursive=False, prune=()):
"""
replies returns a generator of tweets that are replies for a given
tweet. It includes the original tweet. If you would like to fetch the
replies to the replies use recursive=True which will do a depth-first
recursive walk of the replies. It also walk up the reply chain if you
supply a tweet that is itself a reply to another tweet. You can
optionally supply a tuple of tweet ids to ignore during this traversal
using the prune parameter.
"""
yield tweet
# get replies to the tweet
screen_name = tweet['user']['screen_name']
tweet_id = tweet['id_str']
log.info("looking for replies to: %s", tweet_id)
for reply in self.search("to:%s" % screen_name, since_id=tweet_id):
if reply['in_reply_to_status_id_str'] != tweet_id:
continue
if reply['id_str'] in prune:
log.info("ignoring pruned tweet id %s", reply['id_str'])
continue
log.info("found reply: %s", reply["id_str"])
if recursive:
if reply['id_str'] not in prune:
prune = prune + (tweet_id,)
for r in self.replies(reply, recursive, prune):
yield r
else:
yield reply
# if this tweet is itself a reply to another tweet get it and
# get other potential replies to it
reply_to_id = tweet.get('in_reply_to_status_id_str')
log.info("prune=%s", prune)
if recursive and reply_to_id and reply_to_id not in prune:
t = self.tweet(reply_to_id)
if t:
log.info("found reply-to: %s", t['id_str'])
prune = prune + (tweet['id_str'],)
for r in self.replies(t, recursive=True, prune=prune):
yield r
# if this tweet is a quote go get that too whatever tweets it
# may be in reply to
quote_id = tweet.get('quotes_status_id_str')
if recursive and quote_id and quote_id not in prune:
t = self.tweet(quote_id)
if t:
log.info("found quote: %s", t['id_str'])
prune = prune + (tweet['id_str'],)
for r in self.replies(t, recursive=True, prune=prune):
yield r | replies returns a generator of tweets that are replies for a given
tweet. It includes the original tweet. If you would like to fetch the
replies to the replies use recursive=True which will do a depth-first
recursive walk of the replies. It also walk up the reply chain if you
supply a tweet that is itself a reply to another tweet. You can
optionally supply a tuple of tweet ids to ignore during this traversal
using the prune parameter. | Below is the the instruction that describes the task:
### Input:
replies returns a generator of tweets that are replies for a given
tweet. It includes the original tweet. If you would like to fetch the
replies to the replies use recursive=True which will do a depth-first
recursive walk of the replies. It also walk up the reply chain if you
supply a tweet that is itself a reply to another tweet. You can
optionally supply a tuple of tweet ids to ignore during this traversal
using the prune parameter.
### Response:
def replies(self, tweet, recursive=False, prune=()):
"""
replies returns a generator of tweets that are replies for a given
tweet. It includes the original tweet. If you would like to fetch the
replies to the replies use recursive=True which will do a depth-first
recursive walk of the replies. It also walk up the reply chain if you
supply a tweet that is itself a reply to another tweet. You can
optionally supply a tuple of tweet ids to ignore during this traversal
using the prune parameter.
"""
yield tweet
# get replies to the tweet
screen_name = tweet['user']['screen_name']
tweet_id = tweet['id_str']
log.info("looking for replies to: %s", tweet_id)
for reply in self.search("to:%s" % screen_name, since_id=tweet_id):
if reply['in_reply_to_status_id_str'] != tweet_id:
continue
if reply['id_str'] in prune:
log.info("ignoring pruned tweet id %s", reply['id_str'])
continue
log.info("found reply: %s", reply["id_str"])
if recursive:
if reply['id_str'] not in prune:
prune = prune + (tweet_id,)
for r in self.replies(reply, recursive, prune):
yield r
else:
yield reply
# if this tweet is itself a reply to another tweet get it and
# get other potential replies to it
reply_to_id = tweet.get('in_reply_to_status_id_str')
log.info("prune=%s", prune)
if recursive and reply_to_id and reply_to_id not in prune:
t = self.tweet(reply_to_id)
if t:
log.info("found reply-to: %s", t['id_str'])
prune = prune + (tweet['id_str'],)
for r in self.replies(t, recursive=True, prune=prune):
yield r
# if this tweet is a quote go get that too whatever tweets it
# may be in reply to
quote_id = tweet.get('quotes_status_id_str')
if recursive and quote_id and quote_id not in prune:
t = self.tweet(quote_id)
if t:
log.info("found quote: %s", t['id_str'])
prune = prune + (tweet['id_str'],)
for r in self.replies(t, recursive=True, prune=prune):
yield r |
def dictlist_convert_to_string(dict_list: Iterable[Dict], key: str) -> None:
"""
Process an iterable of dictionaries. For each dictionary ``d``, convert
(in place) ``d[key]`` to a string form, ``str(d[key])``. If the result is a
blank string, convert it to ``None``.
"""
for d in dict_list:
d[key] = str(d[key])
if d[key] == "":
d[key] = None | Process an iterable of dictionaries. For each dictionary ``d``, convert
(in place) ``d[key]`` to a string form, ``str(d[key])``. If the result is a
blank string, convert it to ``None``. | Below is the the instruction that describes the task:
### Input:
Process an iterable of dictionaries. For each dictionary ``d``, convert
(in place) ``d[key]`` to a string form, ``str(d[key])``. If the result is a
blank string, convert it to ``None``.
### Response:
def dictlist_convert_to_string(dict_list: Iterable[Dict], key: str) -> None:
"""
Process an iterable of dictionaries. For each dictionary ``d``, convert
(in place) ``d[key]`` to a string form, ``str(d[key])``. If the result is a
blank string, convert it to ``None``.
"""
for d in dict_list:
d[key] = str(d[key])
if d[key] == "":
d[key] = None |
def pyfs_storage_factory(fileinstance=None, default_location=None,
default_storage_class=None,
filestorage_class=PyFSFileStorage, fileurl=None,
size=None, modified=None, clean_dir=True):
"""Get factory function for creating a PyFS file storage instance."""
# Either the FileInstance needs to be specified or all filestorage
# class parameters need to be specified
assert fileinstance or (fileurl and size)
if fileinstance:
# FIXME: Code here should be refactored since it assumes a lot on the
# directory structure where the file instances are written
fileurl = None
size = fileinstance.size
modified = fileinstance.updated
if fileinstance.uri:
# Use already existing URL.
fileurl = fileinstance.uri
else:
assert default_location
# Generate a new URL.
fileurl = make_path(
default_location,
str(fileinstance.id),
'data',
current_app.config['FILES_REST_STORAGE_PATH_DIMENSIONS'],
current_app.config['FILES_REST_STORAGE_PATH_SPLIT_LENGTH'],
)
return filestorage_class(
fileurl, size=size, modified=modified, clean_dir=clean_dir) | Get factory function for creating a PyFS file storage instance. | Below is the the instruction that describes the task:
### Input:
Get factory function for creating a PyFS file storage instance.
### Response:
def pyfs_storage_factory(fileinstance=None, default_location=None,
default_storage_class=None,
filestorage_class=PyFSFileStorage, fileurl=None,
size=None, modified=None, clean_dir=True):
"""Get factory function for creating a PyFS file storage instance."""
# Either the FileInstance needs to be specified or all filestorage
# class parameters need to be specified
assert fileinstance or (fileurl and size)
if fileinstance:
# FIXME: Code here should be refactored since it assumes a lot on the
# directory structure where the file instances are written
fileurl = None
size = fileinstance.size
modified = fileinstance.updated
if fileinstance.uri:
# Use already existing URL.
fileurl = fileinstance.uri
else:
assert default_location
# Generate a new URL.
fileurl = make_path(
default_location,
str(fileinstance.id),
'data',
current_app.config['FILES_REST_STORAGE_PATH_DIMENSIONS'],
current_app.config['FILES_REST_STORAGE_PATH_SPLIT_LENGTH'],
)
return filestorage_class(
fileurl, size=size, modified=modified, clean_dir=clean_dir) |
def compare_to_rm(data):
"""Compare final variant calls against reference materials of known calls.
"""
if isinstance(data, (list, tuple)) and cwlutils.is_cwl_run(utils.to_single_data(data[0])):
data = _normalize_cwl_inputs(data)
toval_data = _get_validate(data)
toval_data = cwlutils.unpack_tarballs(toval_data, toval_data)
if toval_data:
caller = _get_caller(toval_data)
sample = dd.get_sample_name(toval_data)
base_dir = utils.safe_makedir(os.path.join(toval_data["dirs"]["work"], "validate", sample, caller))
if isinstance(toval_data["vrn_file"], (list, tuple)):
raise NotImplementedError("Multiple input files for validation: %s" % toval_data["vrn_file"])
else:
vrn_file = os.path.abspath(toval_data["vrn_file"])
rm_file = normalize_input_path(toval_data["config"]["algorithm"]["validate"], toval_data)
rm_interval_file = _gunzip(normalize_input_path(toval_data["config"]["algorithm"].get("validate_regions"),
toval_data),
toval_data)
rm_interval_file = bedutils.clean_file(rm_interval_file, toval_data, prefix="validateregions-",
bedprep_dir=utils.safe_makedir(os.path.join(base_dir, "bedprep")))
rm_file = naming.handle_synonyms(rm_file, dd.get_ref_file(toval_data), data.get("genome_build"),
base_dir, data)
rm_interval_file = (naming.handle_synonyms(rm_interval_file, dd.get_ref_file(toval_data),
data.get("genome_build"), base_dir, data)
if rm_interval_file else None)
vmethod = tz.get_in(["config", "algorithm", "validate_method"], data, "rtg")
# RTG can fail on totally empty files. Call everything in truth set as false negatives
if not vcfutils.vcf_has_variants(vrn_file):
eval_files = _setup_call_false(rm_file, rm_interval_file, base_dir, toval_data, "fn")
data["validate"] = _rtg_add_summary_file(eval_files, base_dir, toval_data)
# empty validation file, every call is a false positive
elif not vcfutils.vcf_has_variants(rm_file):
eval_files = _setup_call_fps(vrn_file, rm_interval_file, base_dir, toval_data, "fp")
data["validate"] = _rtg_add_summary_file(eval_files, base_dir, toval_data)
elif vmethod in ["rtg", "rtg-squash-ploidy"]:
eval_files = _run_rtg_eval(vrn_file, rm_file, rm_interval_file, base_dir, toval_data, vmethod)
eval_files = _annotate_validations(eval_files, toval_data)
data["validate"] = _rtg_add_summary_file(eval_files, base_dir, toval_data)
elif vmethod == "hap.py":
data["validate"] = _run_happy_eval(vrn_file, rm_file, rm_interval_file, base_dir, toval_data)
elif vmethod == "bcbio.variation":
data["validate"] = _run_bcbio_variation(vrn_file, rm_file, rm_interval_file, base_dir,
sample, caller, toval_data)
    return [[data]] | Compare final variant calls against reference materials of known calls. | Below is the instruction that describes the task:
### Input:
Compare final variant calls against reference materials of known calls.
### Response:
def compare_to_rm(data):
"""Compare final variant calls against reference materials of known calls.
"""
if isinstance(data, (list, tuple)) and cwlutils.is_cwl_run(utils.to_single_data(data[0])):
data = _normalize_cwl_inputs(data)
toval_data = _get_validate(data)
toval_data = cwlutils.unpack_tarballs(toval_data, toval_data)
if toval_data:
caller = _get_caller(toval_data)
sample = dd.get_sample_name(toval_data)
base_dir = utils.safe_makedir(os.path.join(toval_data["dirs"]["work"], "validate", sample, caller))
if isinstance(toval_data["vrn_file"], (list, tuple)):
raise NotImplementedError("Multiple input files for validation: %s" % toval_data["vrn_file"])
else:
vrn_file = os.path.abspath(toval_data["vrn_file"])
rm_file = normalize_input_path(toval_data["config"]["algorithm"]["validate"], toval_data)
rm_interval_file = _gunzip(normalize_input_path(toval_data["config"]["algorithm"].get("validate_regions"),
toval_data),
toval_data)
rm_interval_file = bedutils.clean_file(rm_interval_file, toval_data, prefix="validateregions-",
bedprep_dir=utils.safe_makedir(os.path.join(base_dir, "bedprep")))
rm_file = naming.handle_synonyms(rm_file, dd.get_ref_file(toval_data), data.get("genome_build"),
base_dir, data)
rm_interval_file = (naming.handle_synonyms(rm_interval_file, dd.get_ref_file(toval_data),
data.get("genome_build"), base_dir, data)
if rm_interval_file else None)
vmethod = tz.get_in(["config", "algorithm", "validate_method"], data, "rtg")
# RTG can fail on totally empty files. Call everything in truth set as false negatives
if not vcfutils.vcf_has_variants(vrn_file):
eval_files = _setup_call_false(rm_file, rm_interval_file, base_dir, toval_data, "fn")
data["validate"] = _rtg_add_summary_file(eval_files, base_dir, toval_data)
# empty validation file, every call is a false positive
elif not vcfutils.vcf_has_variants(rm_file):
eval_files = _setup_call_fps(vrn_file, rm_interval_file, base_dir, toval_data, "fp")
data["validate"] = _rtg_add_summary_file(eval_files, base_dir, toval_data)
elif vmethod in ["rtg", "rtg-squash-ploidy"]:
eval_files = _run_rtg_eval(vrn_file, rm_file, rm_interval_file, base_dir, toval_data, vmethod)
eval_files = _annotate_validations(eval_files, toval_data)
data["validate"] = _rtg_add_summary_file(eval_files, base_dir, toval_data)
elif vmethod == "hap.py":
data["validate"] = _run_happy_eval(vrn_file, rm_file, rm_interval_file, base_dir, toval_data)
elif vmethod == "bcbio.variation":
data["validate"] = _run_bcbio_variation(vrn_file, rm_file, rm_interval_file, base_dir,
sample, caller, toval_data)
return [[data]] |
def transform_txn_for_ledger(txn):
"""
Some transactions need to be transformed before they can be stored in the
ledger, eg. storing certain payload in another data store and only its
hash in the ledger
"""
if get_type(txn) == ATTRIB:
txn = DomainReqHandler.transform_attrib_for_ledger(txn)
return txn | Some transactions need to be transformed before they can be stored in the
ledger, eg. storing certain payload in another data store and only its
    hash in the ledger | Below is the instruction that describes the task:
### Input:
Some transactions need to be transformed before they can be stored in the
ledger, eg. storing certain payload in another data store and only its
hash in the ledger
### Response:
def transform_txn_for_ledger(txn):
"""
Some transactions need to be transformed before they can be stored in the
ledger, eg. storing certain payload in another data store and only its
hash in the ledger
"""
if get_type(txn) == ATTRIB:
txn = DomainReqHandler.transform_attrib_for_ledger(txn)
return txn |
def run_mhc_gene_assessment(job, rsem_files, rna_haplotype, univ_options, reports_options):
"""
A wrapper for assess_mhc_genes.
:param dict rsem_files: Results from running rsem
:param str rna_haplotype: The job store id for the rna haplotype file
:param dict univ_options: Dict of universal options used by almost all tools
:param dict reports_options: Options specific to reporting modules
:return: The results of running assess_mhc_genes
:rtype: toil.fileStore.FileID
"""
return job.addChildJobFn(assess_mhc_genes, rsem_files['rsem.genes.results'], rna_haplotype,
univ_options, reports_options).rv() | A wrapper for assess_mhc_genes.
:param dict rsem_files: Results from running rsem
:param str rna_haplotype: The job store id for the rna haplotype file
:param dict univ_options: Dict of universal options used by almost all tools
:param dict reports_options: Options specific to reporting modules
:return: The results of running assess_mhc_genes
    :rtype: toil.fileStore.FileID | Below is the instruction that describes the task:
### Input:
A wrapper for assess_mhc_genes.
:param dict rsem_files: Results from running rsem
:param str rna_haplotype: The job store id for the rna haplotype file
:param dict univ_options: Dict of universal options used by almost all tools
:param dict reports_options: Options specific to reporting modules
:return: The results of running assess_mhc_genes
:rtype: toil.fileStore.FileID
### Response:
def run_mhc_gene_assessment(job, rsem_files, rna_haplotype, univ_options, reports_options):
"""
A wrapper for assess_mhc_genes.
:param dict rsem_files: Results from running rsem
:param str rna_haplotype: The job store id for the rna haplotype file
:param dict univ_options: Dict of universal options used by almost all tools
:param dict reports_options: Options specific to reporting modules
:return: The results of running assess_mhc_genes
:rtype: toil.fileStore.FileID
"""
return job.addChildJobFn(assess_mhc_genes, rsem_files['rsem.genes.results'], rna_haplotype,
univ_options, reports_options).rv() |
def __compress_attributes(self, dic):
"""
        This will convert all attributes that are list with only one item string into simple string. It seems that LDAP always returns lists, even when it doesn't make sense.
:param dic:
:return:
"""
result = {}
for k, v in dic.iteritems():
if isinstance(v, types.ListType) and len(v) == 1:
if k not in ('msExchMailboxSecurityDescriptor', 'msExchSafeSendersHash', 'msExchBlockedSendersHash',
'replicationSignature', 'msExchSafeRecipientsHash', 'sIDHistory',
'msRTCSIP-UserRoutingGroupId', 'mSMQDigests', 'mSMQSignCertificates',
'msExchMasterAccountSid', 'msExchPreviousAccountSid', 'msExchUMPinChecksum',
'userSMIMECertificate', 'userCertificate', 'userCert',
'msExchDisabledArchiveGUID', 'msExchUMPinChecksum', 'msExchUMSpokenName',
'objectSid', 'objectGUID', 'msExchArchiveGUID', 'thumbnailPhoto', 'msExchMailboxGuid'):
try:
result[k] = v[0].decode('utf-8')
except Exception as e:
logging. error("Failed to decode attribute: %s -- %s" % (k, e))
result[k] = v[0]
        return result | This will convert all attributes that are list with only one item string into simple string. It seems that LDAP always returns lists, even when it doesn't make sense.
:param dic:
:return: | Below is the the instruction that describes the task:
### Input:
This will convert all attributes that are list with only one item string into simple string. It seems that LDAP always returns lists, even when it doesn't make sense.
:param dic:
:return:
### Response:
def __compress_attributes(self, dic):
"""
        This will convert all attributes that are list with only one item string into simple string. It seems that LDAP always returns lists, even when it doesn't make sense.
:param dic:
:return:
"""
result = {}
for k, v in dic.iteritems():
if isinstance(v, types.ListType) and len(v) == 1:
if k not in ('msExchMailboxSecurityDescriptor', 'msExchSafeSendersHash', 'msExchBlockedSendersHash',
'replicationSignature', 'msExchSafeRecipientsHash', 'sIDHistory',
'msRTCSIP-UserRoutingGroupId', 'mSMQDigests', 'mSMQSignCertificates',
'msExchMasterAccountSid', 'msExchPreviousAccountSid', 'msExchUMPinChecksum',
'userSMIMECertificate', 'userCertificate', 'userCert',
'msExchDisabledArchiveGUID', 'msExchUMPinChecksum', 'msExchUMSpokenName',
'objectSid', 'objectGUID', 'msExchArchiveGUID', 'thumbnailPhoto', 'msExchMailboxGuid'):
try:
result[k] = v[0].decode('utf-8')
except Exception as e:
logging. error("Failed to decode attribute: %s -- %s" % (k, e))
result[k] = v[0]
return result |
def initStats(self, extras=None):
"""Query and parse Web Server Status Page.
@param extras: Include extra metrics, which can be computationally more
expensive.
"""
url = "%s://%s:%d/%s" % (self._proto, self._host, self._port, self._monpath)
response = util.get_url(url, self._user, self._password)
#with open('/tmp/opcinfo.json') as f:
# response = f.read()
self._statusDict = json.loads(response) | Query and parse Web Server Status Page.
@param extras: Include extra metrics, which can be computationally more
expensive. | Below is the the instruction that describes the task:
### Input:
Query and parse Web Server Status Page.
@param extras: Include extra metrics, which can be computationally more
expensive.
### Response:
def initStats(self, extras=None):
"""Query and parse Web Server Status Page.
@param extras: Include extra metrics, which can be computationally more
expensive.
"""
url = "%s://%s:%d/%s" % (self._proto, self._host, self._port, self._monpath)
response = util.get_url(url, self._user, self._password)
#with open('/tmp/opcinfo.json') as f:
# response = f.read()
self._statusDict = json.loads(response) |
def avgwave(self):
"""Calculate :ref:`pysynphot-formula-avgwv`.
Returns
-------
ans : float
Average wavelength.
"""
mywaveunits = self.waveunits.name
self.convert('angstroms')
wave = self.wave
thru = self.throughput
self.convert(mywaveunits)
num = self.trapezoidIntegration(wave, thru*wave)
den = self.trapezoidIntegration(wave, thru)
if 0.0 in (num, den):
return 0.0
else:
return num/den | Calculate :ref:`pysynphot-formula-avgwv`.
Returns
-------
ans : float
Average wavelength. | Below is the the instruction that describes the task:
### Input:
Calculate :ref:`pysynphot-formula-avgwv`.
Returns
-------
ans : float
Average wavelength.
### Response:
def avgwave(self):
"""Calculate :ref:`pysynphot-formula-avgwv`.
Returns
-------
ans : float
Average wavelength.
"""
mywaveunits = self.waveunits.name
self.convert('angstroms')
wave = self.wave
thru = self.throughput
self.convert(mywaveunits)
num = self.trapezoidIntegration(wave, thru*wave)
den = self.trapezoidIntegration(wave, thru)
if 0.0 in (num, den):
return 0.0
else:
return num/den |
def createSomeItems(store, itemType, values, counter):
"""
Create some instances of a particular type in a store.
"""
for i in counter:
        itemType(store=store, **values) | Create some instances of a particular type in a store. | Below is the instruction that describes the task:
### Input:
Create some instances of a particular type in a store.
### Response:
def createSomeItems(store, itemType, values, counter):
"""
Create some instances of a particular type in a store.
"""
for i in counter:
itemType(store=store, **values) |
def mkApplications(location, *atoms):
"""Make a sequence of applications from a list of tokens.
atoms is a list of atoms, which will be handled left-associatively. E.g:
['foo', [], []] == foo()() ==> Application(Application('foo', []), [])
"""
atoms = list(atoms)
while len(atoms) > 1:
atoms[0:2] = [Application(location, atoms[0], atoms[1])]
# Nothing left to apply
return atoms[0] | Make a sequence of applications from a list of tokens.
atoms is a list of atoms, which will be handled left-associatively. E.g:
    ['foo', [], []] == foo()() ==> Application(Application('foo', []), []) | Below is the instruction that describes the task:
### Input:
Make a sequence of applications from a list of tokens.
atoms is a list of atoms, which will be handled left-associatively. E.g:
['foo', [], []] == foo()() ==> Application(Application('foo', []), [])
### Response:
def mkApplications(location, *atoms):
"""Make a sequence of applications from a list of tokens.
atoms is a list of atoms, which will be handled left-associatively. E.g:
['foo', [], []] == foo()() ==> Application(Application('foo', []), [])
"""
atoms = list(atoms)
while len(atoms) > 1:
atoms[0:2] = [Application(location, atoms[0], atoms[1])]
# Nothing left to apply
return atoms[0] |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.