text_prompt
stringlengths 157
13.1k
| code_prompt
stringlengths 7
19.8k
⌀ |
|---|---|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def as_ihex(self, number_of_data_bytes=32, address_length_bits=32):
"""Format the binary file as Intel HEX records and return them as a string. `number_of_data_bytes` is the number of data bytes in each record. `address_length_bits` is the number of address bits in each record. :20010000214601360121470136007EFE09D219012146017E17C20001FF5F16002148011979 :20012000194E79234623965778239EDA3F01B2CA3F0156702B5E712B722B7321460134219F :00000001FF """
|
def i32hex(address, extended_linear_address, data_address):
    # I32HEX: 32-bit addressing. Emit an extended linear address
    # record whenever the upper 16 address bits advance, and return
    # the 16-bit in-page address plus the (possibly updated) upper bits.
    if address > 0xffffffff:
        raise Error(
            'cannot address more than 4 GB in I32HEX files (32 '
            'bits addresses)')
    address_upper_16_bits = (address >> 16)
    address &= 0xffff
    # All segments are sorted by address. Update the
    # extended linear address when required.
    if address_upper_16_bits > extended_linear_address:
        extended_linear_address = address_upper_16_bits
        packed = pack_ihex(IHEX_EXTENDED_LINEAR_ADDRESS,
                           0,
                           2,
                           binascii.unhexlify('{:04X}'.format(
                               extended_linear_address)))
        data_address.append(packed)
    return address, extended_linear_address

def i16hex(address, extended_segment_address, data_address):
    # I16HEX: 20-bit segmented addressing (segment * 16 + offset).
    if address > 16 * 0xffff + 0xffff:
        raise Error(
            'cannot address more than 1 MB in I16HEX files (20 '
            'bits addresses)')
    address_lower = (address - 16 * extended_segment_address)
    # All segments are sorted by address. Update the
    # extended segment address when required.
    if address_lower > 0xffff:
        # Pick a new segment base so the offset fits in 16 bits; the
        # segment value itself is clamped to 16 bits.
        extended_segment_address = (4096 * (address >> 16))
        if extended_segment_address > 0xffff:
            extended_segment_address = 0xffff
        address_lower = (address - 16 * extended_segment_address)
        packed = pack_ihex(IHEX_EXTENDED_SEGMENT_ADDRESS,
                           0,
                           2,
                           binascii.unhexlify('{:04X}'.format(
                               extended_segment_address)))
        data_address.append(packed)
    return address_lower, extended_segment_address

def i8hex(address):
    # I8HEX: plain 16-bit addressing; validation only, records carry
    # the full address themselves.
    if address > 0xffff:
        raise Error(
            'cannot address more than 64 kB in I8HEX files (16 '
            'bits addresses)')

# Accumulates extended-address records interleaved with data records.
data_address = []
extended_segment_address = 0
extended_linear_address = 0

for address, data in self._segments.chunks(number_of_data_bytes):
    # Dispatch on the requested address width, possibly rewriting
    # `address` into the record-local form.
    if address_length_bits == 32:
        address, extended_linear_address = i32hex(address,
                                                  extended_linear_address,
                                                  data_address)
    elif address_length_bits == 24:
        address, extended_segment_address = i16hex(address,
                                                   extended_segment_address,
                                                   data_address)
    elif address_length_bits == 16:
        i8hex(address)
    else:
        raise Error(
            'expected address length 16, 24 or 32, but got {}'.format(
                address_length_bits))
    data_address.append(pack_ihex(IHEX_DATA,
                                  address,
                                  len(data),
                                  data))

# Optional start-address record, then the mandatory end-of-file record.
footer = []

if self.execution_start_address is not None:
    if address_length_bits == 24:
        address = binascii.unhexlify(
            '{:08X}'.format(self.execution_start_address))
        footer.append(pack_ihex(IHEX_START_SEGMENT_ADDRESS,
                                0,
                                4,
                                address))
    elif address_length_bits == 32:
        address = binascii.unhexlify(
            '{:08X}'.format(self.execution_start_address))
        footer.append(pack_ihex(IHEX_START_LINEAR_ADDRESS,
                                0,
                                4,
                                address))

footer.append(pack_ihex(IHEX_END_OF_FILE, 0, 0, None))

return '\n'.join(data_address + footer) + '\n'
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def as_ti_txt(self):
"""Format the binary file as a TI-TXT file and return it as a string. @0100 21 46 01 36 01 21 47 01 36 00 7E FE 09 D2 19 01 21 46 01 7E 17 C2 00 01 FF 5F 16 00 21 48 01 19 19 4E 79 23 46 23 96 57 78 23 9E DA 3F 01 B2 CA 3F 01 56 70 2B 5E 71 2B 72 2B 73 21 46 01 34 21 q """
|
lines = []
for segment in self._segments:
lines.append('@{:04X}'.format(segment.address))
for _, data in segment.chunks(TI_TXT_BYTES_PER_LINE):
lines.append(' '.join('{:02X}'.format(byte) for byte in data))
lines.append('q')
return '\n'.join(lines) + '\n'
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def as_binary(self, minimum_address=None, maximum_address=None, padding=None):
"""Return a byte string of all data within given address range. `minimum_address` is the absolute minimum address of the resulting binary data. `maximum_address` is the absolute maximum address of the resulting binary data (non-inclusive). `padding` is the word value of the padding between non-adjacent segments. Give as a bytes object of length 1 when the word size is 8 bits, length 2 when the word size is 16 bits, and so on. bytearray(b'!F\\x016\\x01!G\\x016\\x00~\\xfe\\t\\xd2\\x19\\x01!F\\x01~\\x17\\xc2\\x00\\x01 \\xff_\\x16\\x00!H\\x01\\x19\\x19Ny#F#\\x96Wx#\\x9e\\xda?\\x01\\xb2\\xca?\\x01Vp+^q+r+s! F\\x014!') """
|
# Nothing loaded: nothing to emit.
if len(self._segments) == 0:
    return b''

# `current_maximum_address` tracks the end of the data emitted so far
# (in word addresses); it starts at the requested minimum.
if minimum_address is None:
    current_maximum_address = self.minimum_address
else:
    current_maximum_address = minimum_address

if maximum_address is None:
    maximum_address = self.maximum_address

# Empty or inverted range: nothing to emit.
if current_maximum_address >= maximum_address:
    return b''

if padding is None:
    # Default padding is all-ones words (erased-flash convention).
    padding = b'\xff' * self.word_size_bytes

binary = bytearray()

for address, data in self._segments:
    # `length` is in words; `data` is in bytes.
    length = len(data) // self.word_size_bytes

    # Discard data below the minimum address.
    if address < current_maximum_address:
        if address + length <= current_maximum_address:
            continue
        # Segment straddles the minimum: keep only the tail.
        offset = (current_maximum_address - address) * self.word_size_bytes
        data = data[offset:]
        length = len(data) // self.word_size_bytes
        address = current_maximum_address

    # Discard data above the maximum address.
    if address + length > maximum_address:
        if address < maximum_address:
            # Segment straddles the maximum: keep only the head.
            size = (maximum_address - address) * self.word_size_bytes
            data = data[:size]
            length = len(data) // self.word_size_bytes
        elif maximum_address >= current_maximum_address:
            # Segment entirely above: pad out to the maximum and stop.
            binary += padding * (maximum_address - current_maximum_address)
            break

    # Pad the gap since the previous segment, then append the data.
    binary += padding * (address - current_maximum_address)
    binary += data
    current_maximum_address = address + length

return binary
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def as_array(self, minimum_address=None, padding=None, separator=', '):
"""Format the binary file as a string values separated by given separator `separator`. This function can be used to generate array initialization code for C and other languages. `minimum_address` is the start address of the resulting binary data. `padding` is the value of the padding between not adjacent segments. '0x21, 0x46, 0x01, 0x36, 0x01, 0x21, 0x47, 0x01, 0x36, 0x00, 0x7e, 0xfe, 0x09, 0xd2, 0x19, 0x01, 0x21, 0x46, 0x01, 0x7e, 0x17, 0xc2, 0x00, 0x01, 0xff, 0x5f, 0x16, 0x00, 0x21, 0x48, 0x01, 0x19, 0x19, 0x4e, 0x79, 0x23, 0x46, 0x23, 0x96, 0x57, 0x78, 0x23, 0x9e, 0xda, 0x3f, 0x01, 0xb2, 0xca, 0x3f, 0x01, 0x56, 0x70, 0x2b, 0x5e, 0x71, 0x2b, 0x72, 0x2b, 0x73, 0x21, 0x46, 0x01, 0x34, 0x21' """
|
binary_data = self.as_binary(minimum_address,
padding=padding)
words = []
for offset in range(0, len(binary_data), self.word_size_bytes):
word = 0
for byte in binary_data[offset:offset + self.word_size_bytes]:
word <<= 8
word += byte
words.append('0x{:02x}'.format(word))
return separator.join(words)
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def as_hexdump(self):
"""Format the binary file as a hexdump and return it as a string. 00000130 3f 01 56 70 2b 5e 71 2b 72 2b 73 21 46 01 34 21 |?.Vp+^q+r+s!F.4!| """
|
# Empty file?
if len(self) == 0:
    return '\n'

# Characters shown literally in the ASCII column; other printable
# whitespace (tab, newline, ...) would break the layout, so only the
# plain space is re-admitted. Everything else renders as '.'.
non_dot_characters = set(string.printable)
non_dot_characters -= set(string.whitespace)
non_dot_characters |= set(' ')

def align16(address):
    # Round down to the nearest multiple of 16.
    return address - (address % 16)

def padding(length):
    # None marks bytes absent from a 16-byte line.
    return [None] * length

def format_line(address, data):
    """`data` is a list of integers and None for unused elements.
    """
    data += padding(16 - len(data))
    hexdata = []

    for byte in data:
        if byte is not None:
            elem = '{:02x}'.format(byte)
        else:
            elem = ' '
        hexdata.append(elem)

    first_half = ' '.join(hexdata[0:8])
    second_half = ' '.join(hexdata[8:16])
    text = ''

    for byte in data:
        if byte is None:
            text += ' '
        elif chr(byte) in non_dot_characters:
            text += chr(byte)
        else:
            text += '.'

    return '{:08x} {:23s} {:23s} |{:16s}|'.format(
        address, first_half, second_half, text)

# Format one line at a time.
lines = []
line_address = align16(self.minimum_address)
line_data = []

for chunk in self._segments.chunks(size=16, alignment=16):
    aligned_chunk_address = align16(chunk.address)

    if aligned_chunk_address > line_address:
        # The chunk starts a new line: flush the pending one, and
        # print '...' to stand in for any fully-empty lines skipped.
        lines.append(format_line(line_address, line_data))
        if aligned_chunk_address > line_address + 16:
            lines.append('...')
        line_address = aligned_chunk_address
        line_data = []

    # Pad up to the chunk's offset within the current line.
    line_data += padding(chunk.address - line_address - len(line_data))
    line_data += [byte for byte in chunk.data]

lines.append(format_line(line_address, line_data))

return '\n'.join(lines) + '\n'
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def fill(self, value=b'\xff'):
"""Fill all empty space between segments with given value `value`. """
|
# First pass: compute the gap between each pair of adjacent segments
# and build a filler segment for it. Segments are added afterwards so
# the iteration is not disturbed.
previous_end = None
gap_segments = []

for address, data in self._segments:
    segment_end = address + len(data)

    if previous_end is not None:
        gap_size = address - previous_end
        gap_words = gap_size // self.word_size_bytes
        gap_segments.append(_Segment(
            previous_end,
            previous_end + gap_size,
            value * gap_words,
            self.word_size_bytes))

    previous_end = segment_end

# Second pass: merge the filler segments into the collection.
for gap in gap_segments:
    self._segments.add(gap)
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def exclude(self, minimum_address, maximum_address):
"""Exclude given range and keep the rest. `minimum_address` is the first word address to exclude (including). `maximum_address` is the last word address to exclude (excluding). """
|
# Validate, convert word addresses to byte addresses, then remove.
if maximum_address < minimum_address:
    raise Error('bad address range')

begin = minimum_address * self.word_size_bytes
end = maximum_address * self.word_size_bytes
self._segments.remove(begin, end)
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def crop(self, minimum_address, maximum_address):
"""Keep given range and discard the rest. `minimum_address` is the first word address to keep (including). `maximum_address` is the last word address to keep (excluding). """
|
# Reject inverted ranges up front, mirroring exclude()'s validation;
# previously a bad range silently produced nonsense remove() calls.
if maximum_address < minimum_address:
    raise Error('bad address range')

# Convert word addresses to byte addresses.
minimum_address *= self.word_size_bytes
maximum_address *= self.word_size_bytes

# Capture the current end before mutating the segments, then drop
# everything outside [minimum_address, maximum_address).
current_maximum = self._segments.maximum_address
self._segments.remove(0, minimum_address)
self._segments.remove(maximum_address, current_maximum)
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def info(self):
"""Return a string of human readable information about the binary file. .. code-block:: python Data ranges: 0x00000100 - 0x00000140 (64 bytes) """
|
info = ''

if self._header is not None:
    if self._header_encoding is None:
        # Raw header bytes: show printable characters literally and
        # escape the rest as \xNN.
        header = ''

        for b in bytearray(self.header):
            if chr(b) in string.printable:
                header += chr(b)
            else:
                header += '\\x{:02x}'.format(b)
    else:
        header = self.header

    info += 'Header: "{}"\n'.format(header)

if self.execution_start_address is not None:
    info += 'Execution start address: 0x{:08x}\n'.format(
        self.execution_start_address)

info += 'Data ranges:\n\n'

for address, data in self._segments:
    minimum_address = address
    # `size` is in bytes; the range end is expressed in word addresses.
    size = len(data)
    maximum_address = (minimum_address + size // self.word_size_bytes)
    info += 4 * ' '
    info += '0x{:08x} - 0x{:08x} ({})\n'.format(
        minimum_address,
        maximum_address,
        format_size(size, binary=True))

return info
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _precompute(self, tree):
""" Collect metric info in a single preorder traversal. """
|
d = {}
for n in tree.preorder_internal_node_iter():
d[n] = namedtuple('NodeDist', ['dist_from_root', 'edges_from_root'])
if n.parent_node:
d[n].dist_from_root = d[n.parent_node].dist_from_root + n.edge_length
d[n].edges_from_root = d[n.parent_node].edges_from_root + 1
else:
d[n].dist_from_root = 0.0
d[n].edges_from_root = 0
return d
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _get_vectors(self, tree, precomputed_info):
""" Populate the vectors m and M. """
|
little_m = []
big_m = []
leaf_nodes = sorted(tree.leaf_nodes(), key=lambda x: x.taxon.label)
# inner nodes, sorted order
for leaf_a, leaf_b in combinations(leaf_nodes, 2):
mrca = tree.mrca(taxa=[leaf_a.taxon, leaf_b.taxon])
little_m.append(precomputed_info[mrca].edges_from_root)
big_m.append(precomputed_info[mrca].dist_from_root)
# leaf nodes, sorted order
for leaf in leaf_nodes:
little_m.append(1)
big_m.append(leaf.edge_length)
return np.array(little_m), np.array(big_m)
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def remove_empty(rec):
""" Deletes sequences that were marked for deletion by convert_to_IUPAC """
|
for header, sequence in rec.mapping.items():
    # convert_to_IUPAC marks a sequence for deletion by turning every
    # character into 'X'.
    if all(char == 'X' for char in sequence):
        rec.headers.remove(header)
        rec.sequences.remove(sequence)
# Rebuild the record's internal state after the removals.
# NOTE(review): assumes rec.update() re-derives `mapping` from
# headers/sequences — confirm against the record class.
rec.update()
return rec
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def transliterate(text):
""" Utility to properly transliterate text. """
|
text = unidecode(six.text_type(text))
text = text.replace('@', 'a')
return text
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def slugify(mapping, bind, values):
""" Transform all values into URL-capable slugs. """
|
for value in values:
if isinstance(value, six.string_types):
value = transliterate(value)
value = normality.slugify(value)
yield value
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def latinize(mapping, bind, values):
""" Transliterate a given string into the latin alphabet. """
|
for v in values:
if isinstance(v, six.string_types):
v = transliterate(v)
yield v
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def join(mapping, bind, values):
""" Merge all the strings. Put space between them. """
|
return [' '.join([six.text_type(v) for v in values if v is not None])]
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def hash(mapping, bind, values):
""" Generate a sha1 for each of the given values. """
|
for v in values:
if v is None:
continue
if not isinstance(v, six.string_types):
v = six.text_type(v)
yield sha1(v.encode('utf-8')).hexdigest()
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def clean(mapping, bind, values):
""" Perform several types of string cleaning for titles etc.. """
|
categories = {'C': ' '}
for value in values:
if isinstance(value, six.string_types):
value = normality.normalize(value, lowercase=False, collapse=True,
decompose=False,
replace_categories=categories)
yield value
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def isconnected(mask):
""" Checks that all nodes are reachable from the first node - i.e. that the graph is fully connected. """
|
nodes_to_check = list((np.where(mask[0, :])[0])[1:])
seen = [True] + [False] * (len(mask) - 1)
while nodes_to_check and not all(seen):
node = nodes_to_check.pop()
reachable = np.where(mask[node, :])[0]
for i in reachable:
if not seen[i]:
nodes_to_check.append(i)
seen[i] = True
return all(seen)
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def normalise_rows(matrix):
""" Scales all rows to length 1. Fails when row is 0-length, so it leaves these unchanged """
|
lengths = np.apply_along_axis(np.linalg.norm, 1, matrix)
if not (lengths > 0).all():
# raise ValueError('Cannot normalise 0 length vector to length 1')
# print(matrix)
lengths[lengths == 0] = 1
return matrix / lengths[:, np.newaxis]
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def kdists(matrix, k=7, ix=None):
""" Returns the k-th nearest distances, row-wise, as a column vector """
|
ix = ix or kindex(matrix, k)
return matrix[ix][np.newaxis].T
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def kindex(matrix, k):
""" Returns indices to select the kth nearest neighbour"""
|
ix = (np.arange(len(matrix)), matrix.argsort(axis=0)[k])
return ix
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def kmask(matrix, k=7, dists=None, logic='or'):
""" Creates a boolean mask to include points within k nearest neighbours, and exclude the rest. Logic can be OR or AND. OR gives the k-nearest-neighbour mask, AND gives the mutual k-nearest-neighbour mask."""
|
dists = (kdists(matrix, k=k) if dists is None else dists)
mask = (matrix <= dists)
if logic == 'or' or logic == '|':
return mask | mask.T
elif logic == 'and' or logic == '&':
return mask & mask.T
return mask
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def kscale(matrix, k=7, dists=None):
""" Returns the local scale based on the k-th nearest neighbour """
|
dists = (kdists(matrix, k=k) if dists is None else dists)
scale = dists.dot(dists.T)
return scale
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def shift_and_scale(matrix, shift, scale):
""" Shift and scale matrix so its minimum value is placed at `shift` and its maximum value is scaled to `scale` """
|
zeroed = matrix - matrix.min()
scaled = (scale - shift) * (zeroed / zeroed.max())
return scaled + shift
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def coords_by_dimension(self, dimensions=3):
""" Returns fitted coordinates in specified number of dimensions, and the amount of variance explained) """
|
coords_matrix = self.vecs[:, :dimensions]
varexp = self.cve[dimensions - 1]
return coords_matrix, varexp
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def extract_value(mapping, bind, data):
""" Given a mapping and JSON schema spec, extract a value from ``data`` and apply certain transformations to normalize the value. """
|
# A mapping may pull several columns; fall back to the single-column
# form, which may itself be absent (yielding [None]).
columns = mapping.get('columns', [mapping.get('column')])
values = [data.get(c) for c in columns]

for transform in mapping.get('transforms', []):
    # any added transforms must also be added to the schema.
    values = list(TRANSFORMS[transform](mapping, bind, values))

format_str = mapping.get('format')
value = values[0] if len(values) else None
if not is_empty(format_str):
    # Interpolate all column values; None renders as ''.
    value = format_str % tuple('' if v is None else v for v in values)

empty = is_empty(value)
if empty:
    # Fall back to the mapping-level, then schema-level, default.
    value = mapping.get('default') or bind.schema.get('default')
return empty, convert_value(bind, value)
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def convert_value(bind, value):
""" Type casting. """
|
type_name = get_type(bind)
try:
return typecast.cast(type_name, value)
except typecast.ConverterError:
return value
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def peaks(x, y, lookahead=20, delta=0.00003):
""" A wrapper around peakdetect to pack the return values in a nicer format """
|
_max, _min = peakdetect(y, x, lookahead, delta)
x_peaks = [p[0] for p in _max]
y_peaks = [p[1] for p in _max]
x_valleys = [p[0] for p in _min]
y_valleys = [p[1] for p in _min]
_peaks = [x_peaks, y_peaks]
_valleys = [x_valleys, y_valleys]
return {"peaks": _peaks, "valleys": _valleys}
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _restricted_growth_notation(l):
""" The clustering returned by the hcluster module gives group membership without regard for numerical order This function preserves the group membership, but sorts the labelling into numerical order """
|
list_length = len(l)
d = defaultdict(list)
for (i, element) in enumerate(l):
d[element].append(i)
l2 = [None] * list_length
for (name, index_list) in enumerate(sorted(d.values(), key=min)):
for index in index_list:
l2[index] = name
return tuple(l2)
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_membership(self):
""" Alternative representation of group membership - creates a list with one tuple per group; each tuple contains the indices of its members Example: partition = (0,0,0,1,0,1,2,2) membership = [(0,1,2,4), (3,5), (6,7)] :return: list of tuples giving group memberships by index """
|
result = defaultdict(list)
for (position, value) in enumerate(self.partition_vector):
result[value].append(position)
return sorted([tuple(x) for x in result.values()])
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def extend_peaks(self, prop_thresh=50):
"""Each peak in the peaks of the object is checked for its presence in other octaves. If it does not exist, it is created. prop_thresh is the cent range within which the peak in the other octave is expected to be present, i.e., only if there is a peak within this cent range in other octaves, then the peak is considered to be present in that octave. Note that this does not change the peaks of the object. It just returns the extended peaks. """
|
# octave propagation of the reference peaks
temp_peaks = [i + 1200 for i in self.peaks["peaks"][0]]
temp_peaks.extend([i - 1200 for i in self.peaks["peaks"][0]])
extended_peaks = []
extended_peaks.extend(self.peaks["peaks"][0])
for i in temp_peaks:
# if a peak exists around, don't add this new one.
nearest_ind = slope.find_nearest_index(self.peaks["peaks"][0], i)
diff = abs(self.peaks["peaks"][0][nearest_ind] - i)
diff = np.mod(diff, 1200)
if diff > prop_thresh:
extended_peaks.append(i)
return extended_peaks
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def plot(self, intervals=None, new_fig=True):
"""This function plots histogram together with its smoothed version and peak information if provided. Just intonation intervals are plotted for a reference."""
|
import pylab as p
if new_fig:
    p.figure()

#step 1: plot histogram
p.plot(self.x, self.y, ls='-', c='b', lw='1.5')

#step 2: plot peaks
first_peak = None
last_peak = None
if self.peaks:
    first_peak = min(self.peaks["peaks"][0])
    last_peak = max(self.peaks["peaks"][0])
    # Peaks as large red diamonds, valleys as small yellow ones.
    p.plot(self.peaks["peaks"][0], self.peaks["peaks"][1], 'rD', ms=10)
    p.plot(self.peaks["valleys"][0], self.peaks["valleys"][1], 'yD', ms=5)

#Intervals
if intervals is not None:
    #spacing = 0.02*max(self.y)
    for interval in intervals:
        # Only draw reference intervals that fall inside the detected
        # peak range (when peak information is available).
        if first_peak is not None:
            if interval <= first_peak or interval >= last_peak:
                continue
        p.axvline(x=interval, ls='-.', c='g', lw='1.5')
        # Echo the interval one octave down/up (and two up) when that
        # position lies within the plotted x-range.
        if interval-1200 >= min(self.x):
            p.axvline(x=interval-1200, ls=':', c='b', lw='0.5')
        if interval+1200 <= max(self.x):
            p.axvline(x=interval+1200, ls=':', c='b', lw='0.5')
        if interval+2400 <= max(self.x):
            p.axvline(x=interval+2400, ls='-.', c='r', lw='0.5')
        #spacing *= -1

#p.title("Tonic-aligned complete-range pitch histogram")
#p.xlabel("Pitch value (Cents)")
#p.ylabel("Normalized frequency of occurence")
p.show()
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def threadpool_map(task, args, message, concurrency, batchsize=1, nargs=None):
""" Helper to map a function over a range of inputs, using a threadpool, with a progress meter """
|
import concurrent.futures
# Total number of jobs drives the progress meter.
njobs = get_njobs(nargs, args)
show_progress = bool(message)
# Jobs are submitted in batches to amortise per-future overhead.
batches = grouper(batchsize, tupleise(args))
batched_task = lambda batch: [task(*job) for job in batch]
if show_progress:
    message += ' (TP:{}w:{}b)'.format(concurrency, batchsize)
    pbar = setup_progressbar(message, njobs, simple_progress=True)
    pbar.start()
with concurrent.futures.ThreadPoolExecutor(max_workers=concurrency) as executor:
    futures = []
    completed_count = 0
    for batch in batches:
        futures.append(executor.submit(batched_task, batch))
    if show_progress:
        # Advance the meter as batches finish (completion order).
        for i, fut in enumerate(concurrent.futures.as_completed(futures), start=1):
            completed_count += len(fut.result())
            pbar.update(completed_count)
    else:
        concurrent.futures.wait(futures)
if show_progress:
    pbar.finish()
# Flatten the per-batch result lists, preserving submission order.
return flatten_list([fut.result() for fut in futures])
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def insort_no_dup(lst, item):
""" If item is not in lst, add item to list at its sorted position """
|
import bisect
# Locate the leftmost position where `item` could be inserted while
# keeping `lst` sorted.
ix = bisect.bisect_left(lst, item)
# bisect_left returns len(lst) when `item` sorts after every element
# (including the empty-list case); the original indexed lst[ix]
# unconditionally and raised IndexError there.
if ix == len(lst) or lst[ix] != item:
    lst[ix:ix] = [item]
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def create_gamma_model(alignment, missing_data=None, ncat=4):
""" Create a phylo_utils.likelihood.GammaMixture for calculating likelihood on a tree, from a treeCl.Alignment and its matching treeCl.Parameters """
|
# Pull the fitted model name, equilibrium frequencies and gamma shape
# parameter out of the alignment's attached parameters.
model = alignment.parameters.partitions.model
freqs = alignment.parameters.partitions.frequencies
alpha = alignment.parameters.partitions.alpha
# Instantiate the matching substitution model.
if model == 'LG':
    subs_model = LG(freqs)
elif model == 'WAG':
    subs_model = WAG(freqs)
elif model == 'GTR':
    # GTR additionally needs the fitted exchangeability rates.
    rates = alignment.parameters.partitions.rates
    subs_model = GTR(rates, freqs, True)
else:
    raise ValueError("Can't handle this model: {}".format(model))
tm = TransitionMatrix(subs_model)
# Discrete-gamma rate heterogeneity with `ncat` categories.
gamma = GammaMixture(alpha, ncat)
gamma.init_models(tm, alignment_to_partials(alignment, missing_data))
return gamma
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def sample_wr(lst):
""" Sample from lst, with replacement """
|
arr = np.array(lst)
indices = np.random.randint(len(lst), size=len(lst))
sample = np.empty(arr.shape, dtype=arr.dtype)
for i, ix in enumerate(indices):
sample[i] = arr[ix]
return list(sample)
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _preprocess_inputs(x, weights):
""" Coerce inputs into compatible format """
|
# Default to uniform (all-ones) weights when none are supplied.
if weights is None:
    w_arr = np.ones(len(x))
else:
    w_arr = np.array(weights)
x_arr = np.array(x)
if x_arr.ndim == 2:
    if w_arr.ndim == 1:
        # Column-ize 1-D weights so they broadcast across 2-D data.
        w_arr = w_arr[:, np.newaxis]
# NOTE: returns (x_arr, w_arr) — data first, weights second. Callers
# must unpack in this order.
return x_arr, w_arr
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def amean(x, weights=None):
""" Return the weighted arithmetic mean of x """
|
w_arr, x_arr = _preprocess_inputs(x, weights)
return (w_arr*x_arr).sum(axis=0) / w_arr.sum(axis=0)
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def gmean(x, weights=None):
""" Return the weighted geometric mean of x """
|
w_arr, x_arr = _preprocess_inputs(x, weights)
return np.exp((w_arr*np.log(x_arr)).sum(axis=0) / w_arr.sum(axis=0))
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def hmean(x, weights=None):
""" Return the weighted harmonic mean of x """
|
w_arr, x_arr = _preprocess_inputs(x, weights)
return w_arr.sum(axis=0) / (w_arr/x_arr).sum(axis=0)
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def records(self):
""" Returns a list of records in SORT_KEY order """
|
return [self._records[i] for i in range(len(self._records))]
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def read_trees(self, input_dir):
""" Read a directory full of tree files, matching them up to the already loaded alignments """
|
if self.show_progress:
    pbar = setup_progressbar("Loading trees", len(self.records))
    pbar.start()
for i, rec in enumerate(self.records):
    # Glob for a newick file named after the record (the '*' admits
    # compressed variants such as .nwk.gz).
    hook = os.path.join(input_dir, '{}.nwk*'.format(rec.name))
    filename = glob.glob(hook)
    try:
        with fileIO.freader(filename[0]) as infile:
            tree = infile.read().decode('utf-8')
        d = dict(ml_tree=tree)
        rec.parameters.construct_from_dict(d)
    except (IOError, IndexError):
        # No matching file (IndexError from filename[0]) or unreadable
        # file: skip this record, best-effort.
        continue
    finally:
        if self.show_progress:
            pbar.update(i)
if self.show_progress:
    pbar.finish()
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def read_parameters(self, input_dir):
""" Read a directory full of json parameter files, matching them up to the already loaded alignments """
|
if self.show_progress:
    pbar = setup_progressbar("Loading parameters", len(self.records))
    pbar.start()
for i, rec in enumerate(self.records):
    # Glob for a json file named after the record (the '*' admits
    # compressed variants such as .json.gz).
    hook = os.path.join(input_dir, '{}.json*'.format(rec.name))
    filename = glob.glob(hook)
    try:
        with fileIO.freader(filename[0]) as infile:
            # BUG FIX: the original passed parse_int=True, but
            # json.loads requires parse_int to be a callable — any
            # integer in the file raised an uncaught TypeError.
            d = json.loads(infile.read().decode('utf-8'))
        rec.parameters.construct_from_dict(d)
    except (IOError, IndexError):
        # No matching file (IndexError from filename[0]) or unreadable
        # file: skip this record, best-effort.
        continue
    finally:
        if self.show_progress:
            pbar.update(i)
if self.show_progress:
    pbar.finish()
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def calc_trees(self, indices=None, task_interface=None, jobhandler=default_jobhandler, batchsize=1, show_progress=True, **kwargs):
    """ Infer phylogenetic trees for the loaded Alignments.

    :param indices: Only run inference on the alignments at these given indices
    :param task_interface: Inference tool specified via TaskInterface (default RaxmlTaskInterface)
    :param jobhandler: Launch jobs via this JobHandler (default SequentialJobHandler)
    :param batchsize: Batch size for Thread- or ProcesspoolJobHandlers
    :param kwargs: Remaining arguments to pass to the TaskInterface
    :return: None
    """
    selected = list(range(len(self))) if indices is None else indices
    interface = task_interface if task_interface is not None else tasks.RaxmlTaskInterface()
    records = [self[ix] for ix in selected]
    # Scrape per-record arguments (and any scratch files to clean up later).
    args, scratch_files = interface.scrape_args(records, **kwargs)
    # Dispatch the work through the job handler.
    message = '{} Tree estimation'.format(interface.name) if show_progress else ''
    results = jobhandler(interface.get_task(), args, message, batchsize)
    # Store each result on its record, then delete the scratch files.
    with fileIO.TempFileList(scratch_files):
        for record, result in zip(records, results):
            record.parameters.construct_from_dict(result)
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def num_species(self):
    """ Returns the number of species found over all records """
    # Collect the union of all sequence names across records. The previous
    # reduce()-based union raised TypeError when the collection was empty;
    # an incremental union returns 0 in that case.
    all_headers = set()
    for rec in self.records:
        all_headers.update(rec.get_names())
    return len(all_headers)
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def permuted_copy(self, partition=None):
""" Return a copy of the collection with all alignment columns permuted """
|
def take(n, iterable):
    # Pull the next n items from an iterator into a list.
    return [next(iterable) for _ in range(n)]
if partition is None:
    # Default: a single partition containing every alignment.
    partition = Partition([1] * len(self))
index_tuples = partition.get_membership()
alignments = []
for ix in index_tuples:
    concat = Concatenation(self, ix)
    sites = concat.alignment.get_sites()
    # Shuffle the concatenated alignment columns in place.
    random.shuffle(sites)
    # Map each sequence name to an iterator over its shuffled characters
    # (zip(*sites) transposes columns back into per-sequence rows).
    d = dict(zip(concat.alignment.get_names(), [iter(x) for x in zip(*sites)]))
    # Rebuild per-locus sequences by consuming each iterator in chunks of
    # the original locus lengths.
    new_seqs = [[(k, ''.join(take(l, d[k]))) for k in d] for l in concat.lengths]
    for seqs, datatype, name in zip(new_seqs, concat.datatypes, concat.names):
        alignment = Alignment(seqs, datatype)
        alignment.name = name
        alignments.append(alignment)
# Return a new collection of the same type, sorted by the canonical name key.
return self.__class__(records=sorted(alignments, key=lambda x: SORT_KEY(x.name)))
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_id(self, grp):
    """ Return a hash of the tuple of indices that specify the group """
    digest_input = hex(hash(grp))
    if ISPY3:
        # hashlib needs bytes under Python 3.
        digest_input = digest_input.encode()
    default = hashlib.sha1(digest_input).hexdigest()
    # Prefer a previously cached id for this group, if any.
    return self.cache.get(grp, default)
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def check_work_done(self, grp):
    """ Check for the existence of alignment and result files. """
    group_id = self.get_id(grp)
    alignment_path = os.path.join(self.cache_dir, '{}.phy'.format(group_id))
    result_path = os.path.join(
        self.cache_dir, '{}.{}.json'.format(group_id, self.task_interface.name))
    return os.path.exists(alignment_path), os.path.exists(result_path)
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def write_group(self, grp, overwrite=False, **kwargs):
""" Write the concatenated alignment to disk in the location specified by self.cache_dir """
|
id_ = self.get_id(grp)
alignment_done, result_done = self.check_work_done(grp)
# Remember the id for this group so later lookups are cheap.
self.cache[grp] = id_
al_filename = os.path.join(self.cache_dir, '{}.phy'.format(id_))
qfile_filename = os.path.join(self.cache_dir, '{}.partitions.txt'.format(id_))
# Skip the (expensive) concatenation if output already exists, unless forced.
if overwrite or not (alignment_done or result_done):
    conc = self.collection.concatenate(grp)
    al = conc.alignment
    al.write_alignment(al_filename, 'phylip', True)
    # Also write the partition ("q") file alongside the alignment.
    q = conc.qfile(**kwargs)
    with open(qfile_filename, 'w') as fl:
        fl.write(q + '\n')
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_group_result(self, grp, **kwargs):
""" Retrieve the results for a group. Needs this to already be calculated - errors out if result not available. """
|
id_ = self.get_id(grp)
self.cache[grp] = id_
# Check if this file is already processed
alignment_written, results_written = self.check_work_done(grp)
if not results_written:
    if not alignment_written:
        # Write the alignment so a later analyse_cache_dir run can use it.
        self.write_group(grp, **kwargs)
    logger.error('Alignment {} has not been analysed - run analyse_cache_dir'.format(id_))
    raise ValueError('Missing result')
else:
    # Result exists on disk: load and return the parsed JSON.
    with open(self.get_result_file(id_)) as fl:
        return json.load(fl)
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_partition_score(self, p):
    """ Assumes analysis is done and written to id.json!

    Sums the per-group likelihoods over the partition's groups. Groups with
    no available result are skipped (they contribute nothing to the total).
    """
    scores = []
    for grp in p.get_membership():
        try:
            result = self.get_group_result(grp)
            scores.append(result['likelihood'])
        except ValueError:
            # Missing result for this group - recorded as None.
            scores.append(None)
    # BUG FIX: sum() over a list containing None raised TypeError whenever
    # any group result was missing; filter the placeholders out.
    return sum(s for s in scores if s is not None)
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_partition_trees(self, p):
    """ Return the trees associated with a partition, p """
    trees = []
    for grp in p.get_membership():
        try:
            trees.append(self.get_group_result(grp)['ml_tree'])
        except ValueError:
            # No result available for this group: record a placeholder.
            trees.append(None)
            logger.error('No tree found for group {}'.format(grp))
    return trees
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def expect(self, use_proportions=True):
    """ The Expectation step of the CEM algorithm """
    # Only loci whose group membership changed need likelihood recomputation.
    changed = self.get_changed(self.partition, self.prev_partition)
    likelihoods = self.generate_lktable(self.partition, changed, use_proportions)
    self.table = self.likelihood_table_to_probs(likelihoods)
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def classify(self, table, weighted_choice=False, transform=None):
""" The Classification step of the CEM algorithm """
|
assert table.shape[1] == self.numgrp
if weighted_choice:
    if transform is not None:
        probs = transform_fn(table.copy(), transform) # transform_fn presumably a module-level helper - TODO confirm
    else:
        probs = table.copy()
    # Sample one group per row from the cumulative probability distribution.
    cmprobs = probs.cumsum(1)
    logger.info('Probabilities\n{}'.format(probs))
    r = np.random.random(cmprobs.shape[0])
    search = np.apply_along_axis(np.searchsorted, 1, cmprobs, r) # Not very efficient
    # Row i's sample is searchsorted against its own random number r[i].
    assignment = np.diag(search)
else:
    # Deterministic: assign each row to its maximum-probability group.
    # NOTE(review): np.where on ties yields multiple indices per row - confirm
    # tied maxima cannot occur here.
    probs = table
    assignment = np.where(probs==probs.max(1)[:, np.newaxis])[1]
logger.info('Assignment\n{}'.format(assignment))
assignment = self._fill_empty_groups(probs, assignment) # don't want empty groups
new_partition = Partition(tuple(assignment))
self.set_partition(new_partition)
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def maximise(self, **kwargs):
    """ The Maximisation step of the CEM algorithm """
    self.scorer.write_partition(self.partition)
    self.scorer.analyse_cache_dir(**kwargs)
    self.likelihood = self.scorer.get_partition_score(self.partition)
    self.scorer.clean_cache()
    # Refresh per-locus likelihood models for loci whose group changed.
    changed = self.get_changed(self.partition, self.prev_partition)
    self.update_perlocus_likelihood_objects(self.partition, changed)
    per_locus_total = sum(model.get_likelihood() for model in self.insts)
    return self.partition, self.likelihood, per_locus_total
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def set_partition(self, partition):
    """ Store the partition in self.partition, and move the old
    self.partition into self.prev_partition """
    assert len(partition) == self.numgrp
    previous = self.partition
    self.partition = partition
    self.prev_partition = previous
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_changed(self, p1, p2):
    """ Return the loci that are in clusters that have changed between
    partitions p1 and p2 """
    if p1 is None or p2 is None:
        # No previous partition to compare against: everything is "changed".
        # NOTE(review): this branch returns a list while the other returns a
        # set; callers appear to only iterate, so both work - confirm.
        return list(range(len(self.insts)))
    novel_groups = set(p1) - set(p2)
    return set(flatten_list(novel_groups))
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _update_likelihood_model(self, inst, partition_parameters, tree):
""" Set parameters of likelihood model - inst - using values in dictionary - partition_parameters -, and - tree - """
|
# Build transition matrix from dict
model = partition_parameters['model']
freqs = partition_parameters.get('frequencies')
if model == 'LG':
    # Empirical amino-acid model (optionally with custom frequencies).
    subs_model = phylo_utils.models.LG(freqs)
elif model == 'WAG':
    subs_model = phylo_utils.models.WAG(freqs)
elif model == 'GTR':
    # General time-reversible nucleotide model needs exchange rates too.
    rates = partition_parameters.get('rates')
    subs_model = phylo_utils.models.GTR(rates, freqs, True)
else:
    raise ValueError("Can't handle this model: {}".format(model))
tm = phylo_utils.markov.TransitionMatrix(subs_model)
# Read alpha value
alpha = partition_parameters['alpha']
# Push tree, gamma shape and transition matrix into the likelihood instance.
inst.set_tree(tree)
inst.update_alpha(alpha)
inst.update_transition_matrix(tm)
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _fill_empty_groups_old(self, probs, assignment):
""" Does the simple thing - if any group is empty, but needs to have at least one member, assign the data point with highest probability of membership """
|
new_assignment = np.array(assignment.tolist())
for k in range(self.numgrp):
if np.count_nonzero(assignment==k) == 0:
logger.info('Group {} became empty'.format(k))
best = np.where(probs[:,k]==probs[:,k].max())[0][0]
new_assignment[best] = k
new_assignment = self._fill_empty_groups(probs, new_assignment)
return new_assignment
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def jac(x, a):
    """ Jacobian matrix given Christophe's suggestion of f """
    diff = x - a
    # Row-wise Euclidean norms, broadcast back over each row.
    row_norms = np.sqrt((diff ** 2).sum(1))
    return diff / row_norms[:, np.newaxis]
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def gradient(x, a, c):
    """ J'.G

    Gradient of the objective: transpose(Jacobian) dot residual vector g.
    """
    jacobian = jac(x, a)
    residuals = g(x, a, c)
    return jacobian.T.dot(residuals)
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def hessian(x, a):
    """ J'.J

    Gauss-Newton approximation to the Hessian.
    """
    jacobian = jac(x, a)
    return jacobian.T.dot(jacobian)
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def grad_desc_update(x, a, c, step=0.01):
    """ Given a value of x, return a better x using gradient descent """
    # Move against the gradient by a fixed step size.
    descent_direction = gradient(x, a, c)
    return x - step * descent_direction
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def optimise_levenberg_marquardt(x, a, c, damping=0.001, tolerance=0.001):
    """ Optimise value of x using levenberg-marquardt

    Iterates LM updates, relaxing the damping term after a successful step
    and inflating it after a failed one, until x converges within tolerance.
    """
    x_new = x
    x_old = x - 1  # dummy value, forces at least one iteration
    f_old = f(x_new, a, c)
    while np.abs(x_new - x_old).sum() > tolerance:
        x_old = x_new
        x_tmp = levenberg_marquardt_update(x_old, a, c, damping)
        f_new = f(x_tmp, a, c)
        if f_new < f_old:
            # Successful step: accept it and relax the damping, with a small
            # floor so it never reaches zero.
            # BUG FIX: was np.max(damping/10., 1e-20), which passes 1e-20 as
            # the *axis* argument of np.max and raises; the builtin max of
            # the two scalars is what was intended.
            damping = max(damping / 10., 1e-20)
            x_new = x_tmp
            f_old = f_new
        else:
            # Failed step: increase damping (behaves more like grad descent).
            damping *= 10.
    return x_new
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def run_out_of_sample_mds(boot_collection, ref_collection, ref_distance_matrix, index, dimensions, task=_fast_geo, rooted=False, **kwargs):
""" index = index of the locus the bootstrap sample corresponds to - only important if using recalc=True in kwargs """
|
fit = np.empty((len(boot_collection), dimensions))
# PhyloTree wants bytes on Python 3, str on Python 2.
if ISPY3:
    query_trees = [PhyloTree(tree.encode(), rooted) for tree in boot_collection.trees]
    ref_trees = [PhyloTree(tree.encode(), rooted) for tree in ref_collection.trees]
else:
    query_trees = [PhyloTree(tree, rooted) for tree in boot_collection.trees]
    ref_trees = [PhyloTree(tree, rooted) for tree in ref_collection.trees]
for i, tree in enumerate(query_trees):
    # Distance from this bootstrap tree to every reference tree.
    distvec = np.array([task(tree, ref_tree, False) for ref_tree in ref_trees])
    # Project the new point into the reference MDS space.
    oos = OutOfSampleMDS(ref_distance_matrix)
    fit[i] = oos.fit(index, distvec, dimensions=dimensions, **kwargs)
return fit
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def stress(ref_cds, est_cds):
    """ Kruskal's stress """
    # Compare condensed pairwise-distance vectors of the two embeddings.
    d_ref = pdist(ref_cds)
    d_est = pdist(est_cds)
    residual = ((d_ref - d_est) ** 2).sum()
    return np.sqrt(residual / (d_ref ** 2).sum())
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def rmsd(ref_cds, est_cds):
    """ Root-mean-squared-difference """
    # Operates on the condensed pairwise-distance vectors, not coordinates.
    d_ref = pdist(ref_cds)
    d_est = pdist(est_cds)
    return np.sqrt(((d_ref - d_est) ** 2).mean())
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def levenberg_marquardt(self, start_x=None, damping=1.0e-3, tolerance=1.0e-6):
    """ Optimise value of x using levenberg marquardt """
    if start_x is None:
        # Seed the search from the analytical (linearised) solution.
        start_x = self._analytical_fitter.fit(self._c)
    # BUG FIX: tolerance was previously passed positionally into the
    # `damping` slot of optimise_levenberg_marquardt(x, a, c, damping,
    # tolerance), so the supplied damping was ignored and the tolerance
    # misused as the damping factor. Pass both by keyword.
    return optimise_levenberg_marquardt(start_x, self._a, self._c,
                                        damping=damping, tolerance=tolerance)
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _make_A_and_part_of_b_adjacent(self, ref_crds):
""" Make A and part of b. See docstring of this class for answer to "What are A and b?" """
|
rot = self._rotate_rows(ref_crds)
A = 2*(rot - ref_crds)
partial_b = (rot**2 - ref_crds**2).sum(1)
return A, partial_b
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def generate_schema_mapping(resolver, schema_uri, depth=1):
    """ Try and recursively iterate a JSON schema and to generate an ES
    mapping that encasulates it. """
    # Start the traversal from a synthetic $ref node pointing at the schema.
    root_visitor = SchemaVisitor({'$ref': schema_uri}, resolver)
    return _generate_schema_mapping(root_visitor, set(), depth)
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def phyml_task(alignment_file, model, **kwargs):
    """ Kwargs are passed to the Phyml process command line """
    import re
    # NOTE: removed an unused local (fl = os.path.abspath(alignment_file));
    # the command line is built from the path exactly as supplied.
    ph = Phyml(verbose=False)
    # Choose the datatype from the model name: named DNA models or a
    # six-digit rate-class code mean nucleotide data, anything else protein.
    if model in ['JC69', 'K80', 'F81', 'F84', 'HKY85', 'TN93', 'GTR']:
        datatype = 'nt'
    elif re.search('[01]{6}', model) is not None:
        datatype = 'nt'
    else:
        datatype = 'aa'
    cmd = '-i {} -m {} -d {} -f m --quiet'.format(alignment_file, model, datatype)
    logger.debug("Phyml command = {}".format(cmd))
    ph(cmd, wait=True, **kwargs)
    logger.debug("Phyml stdout = {}".format(ph.get_stdout()))
    logger.debug("Phyml stderr = {}".format(ph.get_stderr()))
    parser = PhymlParser()
    # Phyml sometimes appends '.txt' to its output file names, so probe both.
    expected_outfiles = ['{}_phyml_stats'.format(alignment_file), '{}_phyml_tree'.format(alignment_file)]
    for i in range(2):
        if not os.path.exists(expected_outfiles[i]):
            expected_outfiles[i] += '.txt'
    logger.debug('Stats file {} {}'.format(expected_outfiles[0], 'exists' if os.path.exists(expected_outfiles[0]) else 'doesn\'t exist'))
    logger.debug('Tree file {} {}'.format(expected_outfiles[1], 'exists' if os.path.exists(expected_outfiles[1]) else 'doesn\'t exist'))
    # Parse the two output files, deleting them afterwards.
    with fileIO.TempFileList(expected_outfiles):
        try:
            result = parser.to_dict(*expected_outfiles)
        except IOError as ioerr:
            logger.error('File IO error: {}'.format(ioerr))
            result = None
        except ParseException as parseerr:
            logger.error('Other parse error: {}'.format(parseerr))
            result = None
    return result
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def validate_mapping(mapping):
    """ Validate a mapping configuration file against the relevant schema. """
    # The schema ships alongside this module.
    schema_path = os.path.join(os.path.dirname(__file__),
                               'schemas', 'mapping.json')
    with open(schema_path, 'r') as fh:
        schema = json.load(fh)
    Draft4Validator(schema).validate(mapping)
    return mapping
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def heatmap(self, partition=None, cmap=CM.Blues):
""" Plots a visual representation of a distance matrix """
|
if isinstance(self.dm, DistanceMatrix):
    # DistanceMatrix wraps its data; plain arrays expose shape directly.
    length = self.dm.values.shape[0]
else:
    length = self.dm.shape[0]
datamax = float(np.abs(self.dm).max())
fig = plt.figure()
ax = fig.add_subplot(111)
ticks_at = [0, 0.5 * datamax, datamax]
if partition:
    # Group rows/columns by cluster membership.
    # NOTE(review): this rebinds self.dm, i.e. plotting with a partition
    # permanently reorders the stored matrix - confirm this is intended.
    sorting = flatten_list(partition.get_membership())
    self.dm = self.dm.reorder(sorting)
cax = ax.imshow(
    self.dm.values,
    interpolation='nearest',
    origin='lower',
    extent=[0., length, 0., length],
    vmin=0,
    vmax=datamax,
    cmap=cmap,
)
cbar = fig.colorbar(cax, ticks=ticks_at, format='%1.2g')
cbar.set_label('Distance')
return fig
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_tree_collection_strings(self, scale=1, guide_tree=None):
    """ Function to get input strings for tree_collection
    tree_collection needs distvar, genome_map and labels - these are
    returned in the order above """
    # NOTE(review): `scale` and `guide_tree` are currently unused here; kept
    # for interface compatibility - confirm against callers.
    selected_records = [self.collection[i] for i in self.indices]
    return TreeCollectionTaskInterface().scrape_args(selected_records)
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def from_json(buffer, auto_flatten=True, raise_for_index=True):
    """Parses a JSON string into either a view or an index.

    If auto flatten is enabled a sourcemap index that does not contain
    external references is automatically flattened into a view. By default
    if an index would be returned an `IndexedSourceMap` error is raised
    instead which holds the index.
    """
    # FIX: the buffer was previously converted with to_bytes() twice;
    # a single conversion is sufficient.
    buffer = to_bytes(buffer)
    view_out = _ffi.new('lsm_view_t **')
    index_out = _ffi.new('lsm_index_t **')
    rv = rustcall(
        _lib.lsm_view_or_index_from_json,
        buffer, len(buffer), view_out, index_out)
    if rv == 1:
        return View._from_ptr(view_out[0])
    elif rv == 2:
        index = Index._from_ptr(index_out[0])
        if auto_flatten and index.can_flatten:
            return index.into_view()
        if raise_for_index:
            raise IndexedSourceMap('Unexpected source map index',
                                   index=index)
        return index
    else:
        raise AssertionError('Unknown response from C ABI (%r)' % rv)
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def from_memdb(buffer):
    """Creates a sourcemap view from MemDB bytes."""
    raw = to_bytes(buffer)
    ptr = rustcall(_lib.lsm_view_from_memdb, raw, len(raw))
    return View._from_ptr(ptr)
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def from_memdb_file(path):
    """Creates a sourcemap view from MemDB at a given file."""
    encoded_path = to_bytes(path)
    ptr = rustcall(_lib.lsm_view_from_memdb_file, encoded_path)
    return View._from_ptr(ptr)
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def dump_memdb(self, with_source_contents=True, with_names=True):
    """Dumps a sourcemap in MemDB format into bytes."""
    length = _ffi.new('unsigned int *')
    raw = rustcall(
        _lib.lsm_view_dump_memdb,
        self._get_ptr(), length,
        with_source_contents, with_names)
    try:
        # Copy the C buffer into Python-owned bytes before freeing it.
        return _ffi.unpack(raw, length[0])
    finally:
        _lib.lsm_buffer_free(raw)
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def lookup_token(self, line, col):
    """Given a minified location, this tries to locate the closest
    token that is a match.  Returns `None` if no match can be found."""
    if line < 0 or col < 0:
        # Silently ignore underflows
        return None
    token_out = _ffi.new('lsm_token_t *')
    found = rustcall(_lib.lsm_view_lookup_token, self._get_ptr(),
                     line, col, token_out)
    if found:
        return convert_token(token_out[0])
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_original_function_name(self, line, col, minified_name, minified_source):
"""Given a token location and a minified function name and the minified source file this returns the original function name if it can be found of the minified function in scope. """
|
# Silently ignore underflows
if line < 0 or col < 0:
    return None
# The C side expects a UTF-8 encoded byte string.
minified_name = minified_name.encode('utf-8')
sout = _ffi.new('const char **')
try:
    slen = rustcall(_lib.lsm_view_get_original_function_name,
                    self._get_ptr(), line, col, minified_name,
                    minified_source, sout)
    if slen > 0:
        # slen == 0 means no name found; fall through and return None.
        return _ffi.unpack(sout[0], slen).decode('utf-8', 'replace')
except SourceMapError:
    # In some rare cases the library is/was known to panic. We do
    # not want to report this upwards (this happens on slicing
    # out of range on older rust versions in the rust-sourcemap
    # library)
    pass
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_source_contents(self, src_id):
    """Given a source ID this returns the embedded sourcecode if there is.
    The sourcecode is returned as UTF-8 bytes for more efficient
    processing."""
    length = _ffi.new('unsigned int *')
    needs_free = _ffi.new('int *')
    raw = rustcall(_lib.lsm_view_get_source_contents,
                   self._get_ptr(), src_id, length, needs_free)
    if not raw:
        return None
    try:
        return _ffi.unpack(raw, length[0])
    finally:
        # Only free when the library handed us ownership of the buffer.
        if needs_free[0]:
            _lib.lsm_buffer_free(raw)
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def has_source_contents(self, src_id):
    """Checks if some sources exist."""
    rv = rustcall(_lib.lsm_view_has_source_contents,
                  self._get_ptr(), src_id)
    return bool(rv)
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_source_name(self, src_id):
    """Returns the name of the given source."""
    length = _ffi.new('unsigned int *')
    raw = rustcall(_lib.lsm_view_get_source_name,
                   self._get_ptr(), src_id, length)
    if raw:
        # Implicitly returns None when the source id is unknown.
        return decode_rust_str(raw, length[0])
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def iter_sources(self):
    """Iterates over all source names and IDs."""
    # FIX: use range() instead of xrange() so this also runs on Python 3
    # (iteration behaviour is identical on Python 2).
    for src_id in range(self.get_source_count()):
        yield src_id, self.get_source_name(src_id)
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def from_json(buffer):
    """Creates an index from a JSON string."""
    raw = to_bytes(buffer)
    ptr = rustcall(_lib.lsm_index_from_json, raw, len(raw))
    return Index._from_ptr(ptr)
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def into_view(self):
    """Converts the index into a view"""
    try:
        ptr = rustcall(_lib.lsm_index_into_view, self._get_ptr())
        return View._from_ptr(ptr)
    finally:
        # The index is consumed by the conversion; drop our pointer so it
        # cannot be used (or freed) again.
        self._ptr = None
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def from_path(filename):
    """Creates a sourcemap view from a file path."""
    encoded = to_bytes(filename)
    if NULL_BYTE in encoded:
        raise ValueError('null byte in path')
    # The C side expects a NUL-terminated string.
    return ProguardView._from_ptr(rustcall(
        _lib.lsm_proguard_mapping_from_path,
        encoded + b'\x00'))
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def apply(self, data):
""" Apply the given mapping to ``data``, recursively. The return type is a tuple of a boolean and the resulting data element. The boolean indicates whether any values were mapped in the child nodes of the mapping. It is used to skip optional branches of the object graph. """
|
if self.visitor.is_object:
    obj = {}
    if self.visitor.parent is None:
        # Root object: record which schema produced it.
        obj['$schema'] = self.visitor.path
    obj_empty = True
    for child in self.children:
        empty, value = child.apply(data)
        # Optional branches that mapped nothing are skipped entirely.
        if empty and child.optional:
            continue
        obj_empty = False if not empty else obj_empty
        if child.visitor.name in obj and child.visitor.is_array:
            # Merge repeated array-valued children instead of overwriting.
            obj[child.visitor.name].extend(value)
        else:
            obj[child.visitor.name] = value
    return obj_empty, obj
elif self.visitor.is_array:
    empty, value = self.children.apply(data)
    return empty, [value]
elif self.visitor.is_value:
    return extract_value(self.mapping, self.visitor, data)
# NOTE(review): a visitor that is neither object, array nor value falls
# through and returns None (not a tuple) - confirm this cannot happen.
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def translate(self, text):
    """ Translate text, returns the modified text. """
    # Reset substitution counter before processing.
    self.count = 0
    pattern = self._make_regex()
    # self acts as the replacement callable for re.sub.
    return pattern.sub(self, text)
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def cluster(self, n, embed_dim=None, algo=spectral.SPECTRAL, method=methods.KMEANS):
""" Cluster the embedded coordinates using spectral clustering Parameters n: int The number of clusters to return embed_dim: int The dimensionality of the underlying coordinates Defaults to same value as n algo: enum value (spectral.SPECTRAL | spectral.KPCA | spectral.ZELNIKMANOR) Type of embedding to use method: enum value (methods.KMEANS | methods.GMM) The clustering method to use Returns ------- Partition: Partition object describing the data partition """
|
if n == 1:
    # Trivial case: everything in one cluster.
    return Partition([1] * len(self.get_dm(False)))
if embed_dim is None:
    embed_dim = n
# Embed the affinity/distance data into embed_dim coordinates.
if algo == spectral.SPECTRAL:
    self._coords = self.spectral_embedding(embed_dim)
elif algo == spectral.KPCA:
    self._coords = self.kpca_embedding(embed_dim)
elif algo == spectral.ZELNIKMANOR:
    self._coords = self.spectral_embedding_(embed_dim)
else:
    raise OptionError(algo, list(spectral.reverse.values()))
# Cluster the embedded coordinates.
# NOTE(review): KMEANS/GMM use self._coords.df.values while WARD uses
# self._coords.values - confirm both attributes exist and agree.
if method == methods.KMEANS:
    p = self.kmeans(n, self._coords.df.values)
elif method == methods.GMM:
    p = self.gmm(n, self._coords.df.values)
elif method == methods.WARD:
    linkmat = fastcluster.linkage(self._coords.values, 'ward')
    p = _hclust(linkmat, n)
else:
    raise OptionError(method, list(methods.reverse.values()))
if self._verbosity > 0:
    print('Using clustering method: {}'.format(methods.reverse[method]))
return p
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def spectral_embedding(self, n):
    """ Embed the points using spectral decomposition of the laplacian of
    the affinity matrix

    Parameters
    ----------
    n : int
        The number of dimensions
    """
    # Delegates to the module-level spectral_embedding function, then
    # row-normalises the coordinates.
    embedded = spectral_embedding(self._affinity, n)
    return CoordinateMatrix(normalise_rows(embedded))
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def kpca_embedding(self, n):
    """ Embed the points using kernel PCA of the affinity matrix

    Parameters
    ----------
    n : int
        The number of dimensions
    """
    # Delegate to the distance matrix's embedding routine, reusing the
    # precomputed affinity matrix as the kernel.
    return self.dm.embedding(n, 'kpca', affinity_matrix=self._affinity)
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def cluster(self, n, embed_dim=None, algo=mds.CLASSICAL, method=methods.KMEANS):
""" Cluster the embedded coordinates using multidimensional scaling Parameters n: int The number of clusters to return embed_dim int The dimensionality of the underlying coordinates Defaults to same value as n method: enum value (methods.KMEANS | methods.GMM) The clustering method to use Returns ------- Partition: Partition object describing the data partition """
|
if n == 1:
    # Trivial case: everything in one cluster.
    return Partition([1] * len(self.get_dm(False)))
if embed_dim is None:
    embed_dim = n
# Embed the distance matrix into embed_dim coordinates via MDS.
if algo == mds.CLASSICAL:
    self._coords = self.dm.embedding(embed_dim, 'cmds')
elif algo == mds.METRIC:
    self._coords = self.dm.embedding(embed_dim, 'mmds')
else:
    raise OptionError(algo, list(mds.reverse.values()))
# Cluster the embedded coordinates.
if method == methods.KMEANS:
    p = self.kmeans(n, self._coords.values)
elif method == methods.GMM:
    p = self.gmm(n, self._coords.values)
elif method == methods.WARD:
    linkmat = fastcluster.linkage(self._coords.values, 'ward')
    p = _hclust(linkmat, n)
else:
    raise OptionError(method, list(methods.reverse.values()))
#if self._verbosity > 0:
#    print('Using clustering method: {}'.format(methods.reverse[method]))
return p
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _log_thread(self, pipe, queue):
""" Start a thread logging output from pipe """
|
# thread function to log subprocess output (LOG is a queue)
def enqueue_output(out, q):
for line in iter(out.readline, b''):
q.put(line.rstrip())
out.close()
# start thread
t = threading.Thread(target=enqueue_output,
args=(pipe, queue))
t.daemon = True # thread dies with the program
t.start()
self.threads.append(t)
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _search_for_executable(self, executable):
""" Search for file give in "executable". If it is not found, we try the environment PATH. Returns either the absolute path to the found executable, or None if the executable couldn't be found. """
|
if os.path.isfile(executable):
return os.path.abspath(executable)
else:
envpath = os.getenv('PATH')
if envpath is None:
return
for path in envpath.split(os.pathsep):
exe = os.path.join(path, executable)
if os.path.isfile(exe):
return os.path.abspath(exe)
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _command_template(self, switches, objectInput=None):
"""Template for Tika app commands Args: switches (list):
list of switches to Tika app Jar objectInput (object):
file object/standard input to analyze Return: Standard output data (unicode Python 2, str Python 3) """
|
command = ["java", "-jar", self.file_jar, "-eUTF-8"]
if self.memory_allocation:
command.append("-Xmx{}".format(self.memory_allocation))
command.extend(switches)
if not objectInput:
objectInput = subprocess.PIPE
log.debug("Subprocess command: {}".format(", ".join(command)))
if six.PY2:
with open(os.devnull, "w") as devnull:
out = subprocess.Popen(
command,
stdin=objectInput,
stdout=subprocess.PIPE,
stderr=devnull)
elif six.PY3:
out = subprocess.Popen(
command,
stdin=objectInput,
stdout=subprocess.PIPE,
stderr=subprocess.DEVNULL)
stdoutdata, _ = out.communicate()
return stdoutdata.decode("utf-8").strip()
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def detect_content_type(self, path=None, payload=None, objectInput=None):
""" Return the content type of passed file or payload. Args: path (string):
Path of file to analyze payload (string):
Payload base64 to analyze objectInput (object):
file object/standard input to analyze Returns: content type of file (string) """
|
# From Python detection content type from stdin doesn't work TO FIX
if objectInput:
message = "Detection content type with file object is not stable."
log.exception(message)
raise TikaAppError(message)
f = file_path(path, payload, objectInput)
switches = ["-d", f]
result = self._command_template(switches).lower()
return result, path, f
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def extract_only_content(self, path=None, payload=None, objectInput=None):
""" Return only the text content of the passed file. These parameters are mutually exclusive: only one of them may be provided. Args: path (string):
Path of file to analyze payload (string):
Payload base64 to analyze objectInput (object):
file object/standard input to analyze Returns: text of file passed (string) """
|
if objectInput:
switches = ["-t"]
result = self._command_template(switches, objectInput)
return result, True, None
else:
f = file_path(path, payload)
switches = ["-t", f]
result = self._command_template(switches)
return result, path, f
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def extract_all_content( self, path=None, payload=None, objectInput=None, pretty_print=False, convert_to_obj=False, ):
""" This function returns a JSON of all contents and metadata of passed file Args: path (string):
Path of file to analyze payload (string):
Payload base64 to analyze objectInput (object):
file object/standard input to analyze pretty_print (boolean):
If True adds newlines and whitespace, for better readability convert_to_obj (boolean):
If True convert JSON in object """
|
f = file_path(path, payload, objectInput)
switches = ["-J", "-t", "-r", f]
if not pretty_print:
switches.remove("-r")
result = self._command_template(switches)
if result and convert_to_obj:
result = json.loads(result, encoding="utf-8")
return result, path, f
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.