text_prompt
stringlengths
157
13.1k
code_prompt
stringlengths
7
19.8k
def loadDHCPOptions(self, address_family, options):
    """
    Create a high level DHCPOptions object.

    :param str address_family: Address family of the options. Can be either
        dhcpv4 or dhcpv6.
    :param dict options: Dictionary containing the option set to apply for
        this address family. Note: only those specified will be applied.
        Allowed options can be found in :attr:`ns1.ipam.DHCPOptions.OPTIONS`
    """
    # Imported locally (as in the rest of this module) to avoid a circular
    # import between this module and ns1.ipam.
    from ns1.ipam import DHCPOptions

    return DHCPOptions(address_family, options)
def fill_tree(comments):
    """
    Insert extra comments in the comments list, so that the root path of
    the first comment is always visible. Use this in comments' pagination
    to fill in the tree information. The inserted comments have an
    ``added_path`` attribute.
    """
    if not comments:
        return
    it = iter(comments)
    first = next(it)
    # Builtin `map` replaces the Python-2-only `itertools.imap`; it is the
    # same lazy iteration over the first comment's root-path ancestors.
    extra_path_items = map(_mark_as_root_path, first.root_path)
    return chain(extra_path_items, [first], it)
def annotate_tree_properties(comments):
    """
    Iterate through nodes and add some magic properties to each of them,
    representing opening a list of children (``open``) and closing it
    (``close``).  Also marks each node that is the last child of its parent
    (``last``).  Yields the input comments in order.
    """
    if not comments:
        return
    it = iter(comments)

    # get the first item, this will fail if no items !
    old = next(it)

    # first item starts a new thread
    old.open = True
    last = set()
    for c in it:
        # if this comment has a parent, store its last child for future reference
        if old.last_child_id:
            last.add(old.last_child_id)

        # this is the last child, mark it
        if c.pk in last:
            c.last = True

        # increase the depth
        if c.depth > old.depth:
            c.open = True
        else:  # c.depth <= old.depth
            # close some depths
            old.close = list(range(old.depth - c.depth))

            # new thread
            if old.root_id != c.root_id:
                # close even the top depth
                old.close.append(len(old.close))
                # and start a new thread
                c.open = True
                # empty the last set
                last = set()
        # iterate
        yield old
        old = c

    # consistency fix: elsewhere `close` is always a list; the original
    # assigned a bare `range` object here.
    old.close = list(range(old.depth))
    yield old
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def run_spades(self, stop_at_first_success=False): '''Runs spades on all kmers. Each a separate run because SPAdes dies if any kmer does not work. Chooses the 'best' assembly to be the one with the biggest N50''' n50 = {} kmer_to_dir = {} for k in self.spades_kmers: tmpdir = tempfile.mkdtemp(prefix=self.outdir + '.tmp.spades.' + str(k) + '.', dir=os.getcwd()) kmer_to_dir[k] = tmpdir ok, errs = self.run_spades_once(k, tmpdir) if ok: contigs_fasta = os.path.join(tmpdir, 'contigs.fasta') contigs_fai = contigs_fasta + '.fai' common.syscall(self.samtools.exe() + ' faidx ' + contigs_fasta, verbose=self.verbose) stats = pyfastaq.tasks.stats_from_fai(contigs_fai) if stats['N50'] != 0: n50[k] = stats['N50'] if stop_at_first_success: break if len(n50) > 0: if self.verbose: print('[assemble]\tkmer\tN50') for k in sorted(n50): print('[assemble]', k, n50[k], sep='\t') best_k = None for k in sorted(n50): if best_k is None or n50[k] >= n50[best_k]: best_k = k assert best_k is not None for k, directory in kmer_to_dir.items(): if k == best_k: if self.verbose: print('[assemble] using assembly with kmer', k) os.rename(directory, self.outdir) else: shutil.rmtree(directory) else: raise Error('Error running SPAdes. Output directories are:\n ' + '\n '.join(kmer_to_dir.values()) + '\nThe reason why should be in the spades.log file in each directory.')
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def run_canu(self): '''Runs canu instead of spades''' cmd = self._make_canu_command(self.outdir,'canu') ok, errs = common.syscall(cmd, verbose=self.verbose, allow_fail=False) if not ok: raise Error('Error running Canu.') original_contigs = os.path.join(self.outdir, 'canu.contigs.fasta') renamed_contigs = os.path.join(self.outdir, 'contigs.fasta') Assembler._rename_canu_contigs(original_contigs, renamed_contigs) original_gfa = os.path.join(self.outdir, 'canu.contigs.gfa') renamed_gfa = os.path.join(self.outdir, 'contigs.gfa') os.rename(original_gfa, renamed_gfa)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def aligned_read_to_read(read, revcomp=True, qual=None, ignore_quality=False): '''Returns Fasta or Fastq sequence from pysam aligned read''' if read.qual is None or ignore_quality: if qual is None or ignore_quality: seq = pyfastaq.sequences.Fasta(read.qname, common.decode(read.seq)) else: seq = pyfastaq.sequences.Fastq(read.qname, common.decode(read.seq), qual * read.query_length) else: if qual is None: seq = pyfastaq.sequences.Fastq(read.qname, common.decode(read.seq), common.decode(read.qual)) else: seq = pyfastaq.sequences.Fastq(read.qname, common.decode(read.seq), qual * read.query_length) if read.is_reverse and revcomp: seq.revcomp() return seq
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def _get_ref_lengths(self): '''Gets the length of each reference sequence from the header of the bam. Returns dict name => length''' sam_reader = pysam.Samfile(self.bam, "rb") return dict(zip(sam_reader.references, sam_reader.lengths))
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def _get_contigs_to_use(self, contigs_to_use): '''If contigs_to_use is a set, returns that set. If it's None, returns an empty set. Otherwise, assumes it's a file name, and gets names from the file''' if type(contigs_to_use) == set: return contigs_to_use elif contigs_to_use is None: return set() else: f = pyfastaq.utils.open_file_read(contigs_to_use) contigs_to_use = set([line.rstrip() for line in f]) pyfastaq.utils.close(f) return contigs_to_use
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def _all_reads_from_contig(self, contig, fout): '''Gets all reads from contig called "contig" and writes to fout''' sam_reader = pysam.Samfile(self.bam, "rb") for read in sam_reader.fetch(contig): print(mapping.aligned_read_to_read(read, ignore_quality=not self.fastq_out), file=fout)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def _get_all_unmapped_reads(self, fout): '''Writes all unmapped reads to fout''' sam_reader = pysam.Samfile(self.bam, "rb") for read in sam_reader.fetch(until_eof=True): if read.is_unmapped: print(mapping.aligned_read_to_read(read, ignore_quality=not self.fastq_out), file=fout)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def _exclude_region(self, contig, start, end, fout): '''Writes reads not mapping to the given region of contig, start and end as per python convention''' sam_reader = pysam.Samfile(self.bam, "rb") exclude_interval = pyfastaq.intervals.Interval(start, end - 1) for read in sam_reader.fetch(contig): read_interval = pyfastaq.intervals.Interval(read.pos, read.reference_end - 1) if not read_interval.intersects(exclude_interval): print(mapping.aligned_read_to_read(read, ignore_quality=not self.fastq_out), file=fout)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def _get_region(self, contig, start, end, fout, min_length=250): '''Writes reads mapping to given region of contig, trimming part of read not in the region''' sam_reader = pysam.Samfile(self.bam, "rb") trimming_end = (start == 0) for read in sam_reader.fetch(contig, start, end): read_interval = pyfastaq.intervals.Interval(read.pos, read.reference_end - 1) seq = mapping.aligned_read_to_read(read, ignore_quality=not self.fastq_out, revcomp=False) if trimming_end: bases_off_start = 0 bases_off_end = max(0, read.reference_end - 1 - end) #seq.seq = seq.seq[:read.query_alignment_end - bases_off_end] seq = seq.subseq(0, read.query_alignment_end - bases_off_end) else: bases_off_start = max(0, start - read.pos + 1) #seq.seq = seq.seq[bases_off_start + read.query_alignment_start:] seq = seq.subseq(bases_off_start + read.query_alignment_start, len(seq)) if read.is_reverse: seq.revcomp() if len(seq) >= min_length: print(seq, file=fout)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def _get_contigs_to_keep(self, filename): '''Returns a set of names from file called filename. If filename is None, returns an empty set''' if filename is None: return set() with open(filename) as f: return {line.rstrip() for line in f}
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def _remove_small_contigs(self, infile, outfile, keep=None): '''Writes a new file with small contigs removed. Returns lists of all names and names of removed contigs''' removed = set() all_names = set() if keep is None: keep = set() file_reader = pyfastaq.sequences.file_reader(infile) fout = pyfastaq.utils.open_file_write(outfile) for seq in file_reader: all_names.add(seq.id) if len(seq) >= self.min_contig_length or seq.id in keep: print(seq, file=fout) else: removed.add(seq.id) pyfastaq.utils.close(fout) return all_names, removed
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def _containing_contigs(self, hits): '''Given a list of hits, all with same query, returns a set of the contigs containing that query''' return {hit.ref_name for hit in hits if self._contains(hit)}
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def _expand_containing_using_transitivity(self, containing_contigs): '''This uses a contined in b, and b contained in c to force a contained in c. Just in case a contained in c wasn't already found by nucmer''' for name in containing_contigs: containing_contigs[name] = self._get_all_containing(containing_contigs, name) return containing_contigs
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def _collapse_list_of_sets(self, sets): '''Input is a list of sets. Merges any intersecting sets in the list''' found = True while found: found = False to_intersect = None for i in range(len(sets)): for j in range(len(sets)): if i == j: continue elif sets[i].intersection(sets[j]): to_intersect = i, j break if to_intersect is not None: break if to_intersect is not None: found = True sets[i].update(sets[j]) sets.pop(j) return sets
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def _longest_contig(self, contig_set, contig_lengths): '''Returns the name of the longest contig, from the set of names contig_set. contig_lengths is expected to be a dictionary of contig name => length.''' longest_name = None max_length = -1 for name in contig_set: if contig_lengths[name] > max_length: longest_name = name max_length = contig_lengths[name] assert max_length != -1 assert longest_name is not None return longest_name
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def check_files_exist(filenames): '''Dies if any files in the list of filenames does not exist''' files_not_found = [x for x in filenames if not os.path.exists(x)] if len(files_not_found): for filename in files_not_found: print('File not found: "', filename, '"', sep='', file=sys.stderr) raise Error('File(s) not found. Cannot continue')
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def get_contigs(self): '''Returns a dictionary of contig_name -> pyfastaq.Sequences.Fasta object''' contigs = {} pyfastaq.tasks.file_to_dict(self.contigs_fasta, contigs) return contigs
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def circular_contigs(self): '''Returns a set of the contig names that are circular''' if self.assembler == 'spades': if self.contigs_fastg is not None: return self._circular_contigs_from_spades_before_3_6_1(self.contigs_fastg) elif None not in [self.contigs_paths, self.assembly_graph_fastg]: return self._circular_contigs_from_spades_after_3_6_1(self.assembly_graph_fastg, self.contigs_paths) else: return set() elif self.assembler == 'canu': return self._circular_contigs_from_canu_gfa(self.contigs_gfa) else: return set()
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def _run_nucmer(self, ref, qry, outfile): '''Run nucmer of new assembly vs original assembly''' n = pymummer.nucmer.Runner( ref, qry, outfile, min_id=self.nucmer_min_id, min_length=self.nucmer_min_length, diagdiff=self.nucmer_diagdiff, maxmatch=True, breaklen=self.nucmer_breaklen, simplify=True, verbose=self.verbose ) n.run()
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def _load_nucmer_hits(self, infile): '''Returns dict ref name => list of nucmer hits from infile''' hits = {} file_reader = pymummer.coords_file.reader(infile) for al in file_reader: if al.ref_name not in hits: hits[al.ref_name] = [] hits[al.ref_name].append(al) return hits
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def _is_at_ref_start(self, nucmer_hit): '''Returns True iff the hit is "close enough" to the start of the reference sequence''' hit_coords = nucmer_hit.ref_coords() return hit_coords.start < self.ref_end_tolerance
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def _is_at_ref_end(self, nucmer_hit): '''Returns True iff the hit is "close enough" to the end of the reference sequence''' hit_coords = nucmer_hit.ref_coords() return hit_coords.end >= nucmer_hit.ref_length - self.ref_end_tolerance
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def _is_at_qry_start(self, nucmer_hit): '''Returns True iff the hit is "close enough" to the start of the query sequence''' hit_coords = nucmer_hit.qry_coords() return hit_coords.start < self.qry_end_tolerance
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def _is_at_qry_end(self, nucmer_hit): '''Returns True iff the hit is "close enough" to the end of the query sequence''' hit_coords = nucmer_hit.qry_coords() return hit_coords.end >= nucmer_hit.qry_length - self.qry_end_tolerance
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def _get_hit_nearest_ref_start(self, hits): '''Returns the hit nearest to the start of the ref sequence from the input list of hits''' nearest_to_start = hits[0] for hit in hits[1:]: if hit.ref_coords().start < nearest_to_start.ref_coords().start: nearest_to_start = hit return nearest_to_start
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def _get_hit_nearest_ref_end(self, hits): '''Returns the hit nearest to the end of the ref sequence from the input list of hits''' nearest_to_end = hits[0] for hit in hits[1:]: if hit.ref_coords().end > nearest_to_end.ref_coords().end: nearest_to_end = hit return nearest_to_end
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def _has_qry_hit_longer_than(self, nucmer_hits, min_length, hits_to_exclude=None): '''Returns True iff list of nucmer_hits has a hit longer than min_length, not counting the hits in hits_to_exclude''' if hits_to_exclude is None: to_exclude = set() else: to_exclude = hits_to_exclude long_hits = [hit.hit_length_qry for hit in nucmer_hits if hit not in to_exclude and hit.hit_length_qry > min_length] return len(long_hits) > 0
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def _can_circularise(self, start_hit, end_hit): '''Returns true iff the two hits can be used to circularise the reference sequence of the hits''' if not(self._is_at_ref_start(start_hit) or self._is_at_ref_end(end_hit)): return False if self._is_at_qry_end(start_hit) \ and self._is_at_qry_start(end_hit) \ and start_hit.on_same_strand() \ and end_hit.on_same_strand(): return True if self._is_at_qry_start(start_hit) \ and self._is_at_qry_end(end_hit) \ and (not start_hit.on_same_strand()) \ and (not end_hit.on_same_strand()): return True return False
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def _remove_keys_from_dict_with_nonunique_values(self, d, log_fh=None, log_outprefix=None): '''Returns a new dictionary, with keys from input dict removed if their value was not unique''' value_counts = collections.Counter(d.values()) new_d = {} writing_log_file = None not in [log_fh, log_outprefix] for key in d: if value_counts[d[key]] == 1: new_d[key] = d[key] elif writing_log_file: print(log_outprefix, 'Reject because non-unique:', d[key], sep='\t', file=log_fh) return new_d
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def _make_circularised_contig(self, ref_start_hit, ref_end_hit): '''Given a nucmer ref_start_hit and ref_end_hit, returns a new contig. Assumes that these hits can be used to circularise the reference contig of the hits using the query contig''' assert ref_start_hit.ref_name == ref_end_hit.ref_name assert ref_start_hit.qry_name == ref_end_hit.qry_name qry_name = ref_start_hit.qry_name ref_name = ref_start_hit.ref_name ref_start_coords = ref_start_hit.ref_coords() ref_end_coords = ref_end_hit.ref_coords() if ref_start_coords.intersects(ref_end_coords): new_ctg = copy.copy(self.reassembly_contigs[qry_name]) new_ctg.id = ref_name return new_ctg if ref_start_hit.on_same_strand(): qry_start_coords = ref_end_hit.qry_coords() qry_end_coords = ref_start_hit.qry_coords() bases = self.original_contigs[ref_name][ref_start_coords.end+1:ref_end_coords.start] + \ self.reassembly_contigs[qry_name][qry_start_coords.start:qry_end_coords.end+1] return pyfastaq.sequences.Fasta(ref_name, bases) else: qry_start_coords = ref_start_hit.qry_coords() qry_end_coords = ref_end_hit.qry_coords() tmp_seq = pyfastaq.sequences.Fasta('x', self.reassembly_contigs[qry_name][qry_start_coords.start:qry_end_coords.end+1]) tmp_seq.revcomp() return pyfastaq.sequences.Fasta(ref_name, self.original_contigs[ref_name][ref_start_coords.end+1:ref_end_coords.start] + tmp_seq.seq)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def _orientation_ok_to_bridge_contigs(self, start_hit, end_hit): '''Returns True iff the orientation of the hits means that the query contig of both hits can bridge the reference contigs of the hits''' assert start_hit.qry_name == end_hit.qry_name if start_hit.ref_name == end_hit.ref_name: return False if ( (self._is_at_ref_end(start_hit) and start_hit.on_same_strand()) or (self._is_at_ref_start(start_hit) and not start_hit.on_same_strand()) ): start_hit_ok = True else: start_hit_ok = False if ( (self._is_at_ref_start(end_hit) and end_hit.on_same_strand()) or (self._is_at_ref_end(end_hit) and not end_hit.on_same_strand()) ): end_hit_ok = True else: end_hit_ok = False return start_hit_ok and end_hit_ok
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def _merge_all_bridged_contigs(self, nucmer_hits, ref_contigs, qry_contigs, log_fh=None, log_outprefix=None): '''Input is dict of nucmer_hits. Makes any possible contig merges. Returns True iff any merges were made''' writing_log_file = None not in [log_fh, log_outprefix] if len(nucmer_hits) == 0: if writing_log_file: print(log_outprefix, 'No nucmer hits, so will not make any merges', sep='\t', file=log_fh) return all_nucmer_hits = [] for l in nucmer_hits.values(): all_nucmer_hits.extend(l) nucmer_hits_by_qry = self._hits_hashed_by_query(all_nucmer_hits) bridges = self._get_possible_query_bridging_contigs(nucmer_hits_by_qry, log_fh=log_fh, log_outprefix=log_outprefix) if writing_log_file: print(log_outprefix, '\tPotential contigs to use for merging: ', ' '.join(sorted(bridges.keys())), sep='', file=log_fh) bridges = self._filter_bridging_contigs(bridges) if writing_log_file: print(log_outprefix, '\tContigs to use for merging after uniqueness filtering: ', ' '.join(sorted(bridges.keys())), sep='', file=log_fh) merged = set() made_a_join = False for qry_name, (start_hit, end_hit) in bridges.items(): if start_hit.ref_name in merged or end_hit.ref_name in merged: continue self._merge_bridged_contig_pair(start_hit, end_hit, ref_contigs, qry_contigs, log_fh=log_fh, log_outprefix=log_outprefix) merged.add(start_hit.ref_name) merged.add(end_hit.ref_name) made_a_join = True if writing_log_file: print(log_outprefix, '\tMade at least one contig join: ', made_a_join, sep='', file=log_fh) return made_a_join
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def _write_act_files(self, ref_fasta, qry_fasta, coords_file, outprefix): '''Writes crunch file and shell script to start up ACT, showing comparison of ref and qry''' if self.verbose: print('Making ACT files from', ref_fasta, qry_fasta, coords_file) ref_fasta = os.path.relpath(ref_fasta) qry_fasta = os.path.relpath(qry_fasta) coords_file = os.path.relpath(coords_file) outprefix = os.path.relpath(outprefix) self._index_fasta(ref_fasta) self._index_fasta(qry_fasta) crunch_file = outprefix + '.crunch' pymummer.coords_file.convert_to_msp_crunch( coords_file, crunch_file, ref_fai=ref_fasta + '.fai', qry_fai=qry_fasta + '.fai' ) bash_script = outprefix + '.start_act.sh' with open(bash_script, 'w') as f: print('#!/usr/bin/env bash', file=f) print('act', ref_fasta, crunch_file, qry_fasta, file=f) pyfastaq.utils.syscall('chmod +x ' + bash_script)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def _contigs_dict_to_file(self, contigs, fname): '''Writes dictionary of contigs to file''' f = pyfastaq.utils.open_file_write(fname) for contig in sorted(contigs, key=lambda x:len(contigs[x]), reverse=True): print(contigs[contig], file=f) pyfastaq.utils.close(f)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def _get_spades_circular_nodes(self, fastg): '''Returns set of names of nodes in SPAdes fastg file that are circular. Names will match those in spades fasta file''' seq_reader = pyfastaq.sequences.file_reader(fastg) names = set([x.id.rstrip(';') for x in seq_reader if ':' in x.id]) found_fwd = set() found_rev = set() for name in names: l = name.split(':') if len(l) != 2: continue if l[0] == l[1]: if l[0][-1] == "'": found_rev.add(l[0][:-1]) else: found_fwd.add(l[0]) return found_fwd.intersection(found_rev)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def _make_new_contig_from_nucmer_and_spades(self, original_contig, hits, circular_spades, log_fh=None, log_outprefix=None):
    '''Tries to make new circularised contig from contig called original_contig.
    hits = list of nucmer hits, all with ref=original contg.
    circular_spades=set of query contig names that spades says are circular.
    Returns (new Fasta, query contig name) on success, else (None, None).'''
    writing_log_file = None not in [log_fh, log_outprefix]
    hits_to_circular_contigs = [x for x in hits if x.qry_name in circular_spades]
    if len(hits_to_circular_contigs) == 0:
        if writing_log_file:
            print(log_outprefix, original_contig, 'No matches to SPAdes circular contigs', sep='\t', file=log_fh)
        return None, None

    for hit in hits_to_circular_contigs:
        # BUG FIX: several of these log prints were unconditional; when
        # log_fh is None, print(file=None) writes to stdout. All log output
        # is now guarded by writing_log_file.
        if writing_log_file:
            print(log_outprefix, original_contig, 'Checking hit:', hit, sep='\t', file=log_fh)

        percent_query_covered = 100 * (hit.hit_length_qry / hit.qry_length)
        if self.min_spades_circular_percent <= percent_query_covered:
            if writing_log_file:
                print(log_outprefix, '\t', original_contig, '\t\tHit is long enough. Percent of contig covered by hit is ', percent_query_covered, sep='', file=log_fh)
            # the spades contig hit is long enough, but now check that
            # the input contig is covered by hits from this spades contig
            hit_intervals = [x.ref_coords() for x in hits_to_circular_contigs if x.qry_name == hit.qry_name]
            if len(hit_intervals) > 0:
                pyfastaq.intervals.merge_overlapping_in_list(hit_intervals)
                percent_covered = 100 * pyfastaq.intervals.length_sum_from_list(hit_intervals) / hit.ref_length
                if writing_log_file:
                    print(log_outprefix, '\t', original_contig, '\t\treference bases covered by spades contig:', ', '.join([str(x) for x in hit_intervals]), sep='', file=log_fh)
                    print(log_outprefix, '\t', original_contig, '\t\t ... which is ', percent_covered, ' percent of ', hit.ref_length, ' bases', sep='', file=log_fh)

                if self.min_spades_circular_percent <= percent_covered:
                    if writing_log_file:
                        print(log_outprefix, original_contig, '\tUsing hit to call as circular (enough bases covered)', sep='\t', file=log_fh)
                    return pyfastaq.sequences.Fasta(original_contig, self.reassembly_contigs[hit.qry_name].seq), hit.qry_name
                elif writing_log_file:
                    print(log_outprefix, original_contig, '\tNot using hit to call as circular (not enough bases covered)', sep='\t', file=log_fh)
        elif writing_log_file:
            print(log_outprefix, original_contig, '\tNot using hit to call as circular (hit too short)', sep='\t', file=log_fh)

    if writing_log_file:
        print(log_outprefix, original_contig, 'No suitable matches to SPAdes circular contigs', sep='\t', file=log_fh)
    return None, None
def _parse_hunk(self, header, diff, encoding):
    """Parse hunk details.

    header is the @@ ... @@ line; diff yields (line_number, text) pairs for
    the hunk body; encoding, if given, is used to decode each body line.
    Appends the parsed Hunk to self, raising UnidiffParseError on malformed
    or truncated hunks.
    """
    header_info = RE_HUNK_HEADER.match(header)
    hunk_info = header_info.groups()
    hunk = Hunk(*hunk_info)

    # running line counters in the source and target files
    source_line_no = hunk.source_start
    target_line_no = hunk.target_start
    expected_source_end = source_line_no + hunk.source_length
    expected_target_end = target_line_no + hunk.target_length

    for diff_line_no, line in diff:
        if encoding is not None:
            line = line.decode(encoding)

        valid_line = RE_HUNK_EMPTY_BODY_LINE.match(line)
        if not valid_line:
            valid_line = RE_HUNK_BODY_LINE.match(line)
        if not valid_line:
            raise UnidiffParseError('Hunk diff line expected: %s' % line)

        line_type = valid_line.group('line_type')
        # an empty body line counts as unchanged context
        if line_type == LINE_TYPE_EMPTY:
            line_type = LINE_TYPE_CONTEXT
        value = valid_line.group('value')
        original_line = Line(value, line_type=line_type)
        if line_type == LINE_TYPE_ADDED:
            original_line.target_line_no = target_line_no
            target_line_no += 1
        elif line_type == LINE_TYPE_REMOVED:
            original_line.source_line_no = source_line_no
            source_line_no += 1
        elif line_type == LINE_TYPE_CONTEXT:
            original_line.target_line_no = target_line_no
            target_line_no += 1
            original_line.source_line_no = source_line_no
            source_line_no += 1
        elif line_type == LINE_TYPE_NO_NEWLINE:
            pass
        else:
            original_line = None

        # stop parsing if we got past expected number of lines
        if (source_line_no > expected_source_end or
                target_line_no > expected_target_end):
            raise UnidiffParseError('Hunk is longer than expected')

        if original_line:
            original_line.diff_line_no = diff_line_no
            hunk.append(original_line)

        # if hunk source/target lengths are ok, hunk is complete
        if (source_line_no == expected_source_end and
                target_line_no == expected_target_end):
            break

    # report an error if we haven't got expected number of lines
    if (source_line_no < expected_source_end or
            target_line_no < expected_target_end):
        raise UnidiffParseError('Hunk is shorter than expected')

    self.append(hunk)
def path(self):
    """Return the file path abstracted from VCS."""
    a_prefixed = self.source_file.startswith('a/')
    b_prefixed = self.target_file.startswith('b/')
    # strip the git-style a/ and b/ prefixes; deletions keep the source
    # path, additions keep the target path
    if a_prefixed and (b_prefixed or self.target_file == '/dev/null'):
        return self.source_file[2:]
    if b_prefixed and self.source_file == '/dev/null':
        return self.target_file[2:]
    return self.source_file
def get_tag_value(string, pre, post, tagtype=float, greedy=True):
    """
    Extracts the value of a tag from a string.

    Parameters
    pre : str
        regular expression to match before the tag value
    post : str | list | tuple
        regular expression to match after the tag value.
        If list/tuple, the alternatives are combined into (?=post[0]|post[1]|..)
    tagtype : str | float | int
        the type to which the tag value should be converted to
    greedy : bool
        Whether the regular expression is greedy or not.

    Returns
    Tag value if found, None otherwise

    Example
    get_tag_value('PID_23.5.txt',  pre=r'PID_', post='(?=_|\.txt)') returns 23.5
    get_tag_value('PID_23.5_.txt', pre=r'PID_', post='(?=_|\.txt)') returns 23.5
    get_tag_value('PID_23_5_.txt', pre=r'PID_', post='(?=_|\.txt)') returns 23
    get_tag_value('PID_23.txt',    pre=r'PID_', post='.txt') returns 23
    get_tag_value('PID.txt',       pre=r'PID_', post='.txt') returns None

    Raises ValueError if more than one match is found.
    """
    # NOTE(review): greedy=True appends '?', i.e. it builds the NON-greedy
    # pattern '.+?'. That inversion is what the docstring examples require
    # (e.g. 'PID_23_5_.txt' -> 23 needs a lazy match), so it appears to be
    # intentional despite the parameter name -- confirm before "fixing".
    greedy = '?' if greedy else ''
    if isinstance(post, (list, tuple)):
        # combine the alternatives into a single lookahead
        post = '(?=' + '|'.join(post) + ')'
    tag_list = re.findall(r'{pre}(.+{greedy}){post}'.format(pre=pre, post=post, greedy=greedy), string)
    if len(tag_list) > 1:
        raise ValueError('More than one matching pattern found... check filename')
    elif len(tag_list) == 0:
        return None
    else:
        return tagtype(tag_list[0])
def get_files(dirname=None, pattern='*.*', recursive=True):
    """
    Get all file names within a given directory those names match a
    given pattern.

    Parameters
    dirname : str | None
        Directory containing the datafiles.
        If None is given, open a dialog box.
    pattern : str
        Return only files whose names match the specified pattern.
    recursive : bool
        True : Search recursively within all sub-directories.
        False : Search only in given directory.

    Returns
    -------
    matches: list
        List of file names (including full path).
    """
    # ask the user for a directory if none was given
    if dirname is None:
        from FlowCytometryTools.gui import dialogs
        dirname = dialogs.select_directory_dialog('Select a directory')

    if not recursive:
        return glob.glob(os.path.join(dirname, pattern))

    matches = []
    for root, _subdirs, filenames in os.walk(dirname):
        matches.extend(os.path.join(root, name)
                       for name in fnmatch.filter(filenames, pattern))
    return matches
def load(path):
    """
    Load pickled object from the specified file path.

    Parameters
    path : string
        File path

    Returns
    -------
    unpickled : type of object stored in file
    """
    # 'with' guarantees the handle is closed even if unpickling raises,
    # replacing the manual try/finally.
    with open(path, 'rb') as f:
        return pickle.load(f)
def to_iter(obj):
    """Convert an object to a list if it is not already an iterable.

    Nones are returned unaltered.

    This is an awful function that proliferates an explosion of types,
    please do not use anymore.
    """
    # collections.Iterable was removed in Python 3.10; use collections.abc.
    from collections.abc import Iterable

    if obj is None:
        return None
    # Strings are iterables too, so this check must come before the
    # Iterable check (six.string_types is just (str,) on Python 3).
    if isinstance(obj, str):
        return [obj]
    if isinstance(obj, Iterable):
        return obj
    return [obj]
def to_list(obj):
    """
    Converts an object into a list if it not an iterable, forcing tuples into lists.
    Nones are returned unchanged.
    """
    as_iter = to_iter(obj)
    return None if as_iter is None else list(as_iter)
def copy(self, deep=True):
    """
    Make a copy of this object

    Parameters
    deep : boolean, default True
        Make a deep copy, i.e. also copy data

    Returns
    -------
    copy : type of caller
    """
    # local import keeps the stdlib module name from clashing with this method
    import copy as copy_module

    return copy_module.deepcopy(self) if deep else copy_module.copy(self)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def tlog(x, th=1, r=_display_max, d=_l_mmax): """ Truncated log10 transform. Parameters x : num | num iterable values to be transformed. th : num values below th are transformed to 0. Must be positive. r : num (default = 10**4) maximal transformed value. d : num (default = log10(2**18)) log10 of maximal possible measured value. tlog(10**d) = r Returns ------- Array of transformed values. """
if th <= 0: raise ValueError('Threshold value must be positive. %s given.' % th) return where(x <= th, log10(th) * 1. * r / d, log10(x) * 1. * r / d)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def tlog_inv(y, th=1, r=_display_max, d=_l_mmax): """ Inverse truncated log10 transform. Values Parameters y : num | num iterable values to be transformed. th : num Inverse values below th are transformed to th. Must be positive. r : num (default = 10**4) maximal transformed value. d : num (default = log10(2**18)) log10 of maximal possible measured value. tlog_inv(r) = 10**d Returns ------- Array of transformed values. """
if th <= 0: raise ValueError('Threshold value must be positive. %s given.' % th) x = 10 ** (y * 1. * d / r) try: x[x < th] = th except TypeError: if x < th: x = th return x
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def hlog_inv(y, b=500, r=_display_max, d=_l_mmax): """ Inverse of base 10 hyperlog transform. """
# Rescale y into log10 units of the measurable range.
aux = 1. * d / r * y
# sign() yields 0 at y == 0, which would zero the expression below;
# remap those entries to +1.
s = sign(y)
if s.shape:  # to catch case where input is a single number
    s[s == 0] = 1
elif s == 0:
    s = 1
# Hyperlog inverse: exponential part plus a linear term scaled by b.
return s * 10 ** (s * aux) + b * aux - s
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _x_for_spln(x, nx, log_spacing): """ Create vector of values to be used in constructing a spline. Parameters x : num | num iterable Resulted values will span the range [min(x), max(x)] nx : int Length of returned vector. log_spacing: bool False - Create linearly spaced values. True - Create logarithmically spaced values. To extend to negative values, the spacing is done separately on the negative and positive range, and these are later combined. The number of points in the negative/positive range is proportional to their relative range in log space. i.e., for data in the range [-100, 1000] 2/5 of the resulting points will be in the negative range. Returns ------- x_spln : array """
x = asarray(x) xmin = min(x) xmax = max(x) if xmin == xmax: return asarray([xmin] * nx) if xmax <= 0: # all values<=0 return -_x_for_spln(-x, nx, log_spacing)[::-1] if not log_spacing: return linspace(xmin, xmax, nx) # All code below is to handle-log-spacing when x has potentially both negative # and positive values. if xmin > 0: return logspace(log10(xmin), log10(xmax), nx) else: lxmax = max([log10(xmax), 0]) lxmin = max([log10(abs(xmin)), 0]) # All the code below is for log-spacing, when xmin < 0 and xmax > 0 if lxmax == 0 and lxmin == 0: return linspace(xmin, xmax, nx) # Use linear spacing as fallback if xmin > 0: x_spln = logspace(lxmin, lxmax, nx) elif xmin == 0: x_spln = r_[0, logspace(-1, lxmax, nx - 1)] else: # (xmin < 0) f = lxmin / (lxmin + lxmax) nx_neg = int(f * nx) nx_pos = nx - nx_neg if nx <= 1: # If triggered fix edge case behavior raise AssertionError(u'nx should never bebe 0 or 1') # Work-around various edge cases if nx_neg == 0: nx_neg = 1 nx_pos = nx_pos - 1 if nx_pos == 0: nx_pos = 1 nx_neg = nx_neg - 1 x_spln_pos = logspace(-1, lxmax, nx_pos) x_spln_neg = -logspace(lxmin, -1, nx_neg) x_spln = r_[x_spln_neg, x_spln_pos] return x_spln
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _make_hlog_numeric(b, r, d): """ Return a function that numerically computes the hlog transformation for given parameter values. """
hlog_obj = lambda y, x, b, r, d: hlog_inv(y, b, r, d) - x find_inv = vectorize(lambda x: brentq(hlog_obj, -2 * r, 2 * r, args=(x, b, r, d))) return find_inv
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def hlog(x, b=500, r=_display_max, d=_l_mmax): """ Base 10 hyperlog transform. Parameters x : num | num iterable values to be transformed. b : num Parameter controlling the location of the shift from linear to log transformation. r : num (default = 10**4) maximal transformed value. d : num (default = log10(2**18)) log10 of maximal possible measured value. hlog_inv(r) = 10**d Returns ------- Array of transformed values. """
hlog_fun = _make_hlog_numeric(b, r, d) if not hasattr(x, '__len__'): # if transforming a single number y = hlog_fun(x) else: n = len(x) if not n: # if transforming empty container return x else: y = hlog_fun(x) return y
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def transform_frame(frame, transform, columns=None, direction='forward', return_all=True, args=(), **kwargs): """ Apply transform to specified columns. direction: 'forward' | 'inverse' return_all: bool True - return all columns, with specified ones transformed. False - return only specified columns. .. warning:: deprecated """
tfun, tname = parse_transform(transform, direction) columns = to_list(columns) if columns is None: columns = frame.columns if return_all: transformed = frame.copy() for c in columns: transformed[c] = tfun(frame[c], *args, **kwargs) else: transformed = frame.filter(columns).apply(tfun, *args, **kwargs) return transformed
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def transform(self, x, use_spln=False, **kwargs): """ Apply transform to x Parameters x : float-array-convertible Data to be transformed. Should support conversion to an array of floats. use_spln: bool True - transform using the spline specified in self.slpn. If self.spln is None, set the spline. False - transform using self.tfun kwargs: Keyword arguments to be passed to self.set_spline. Only used if use_spln=True & self.spln=None. Returns ------- Array of transformed values. """
x = asarray(x, dtype=float) if use_spln: if self.spln is None: self.set_spline(x.min(), x.max(), **kwargs) return apply_along_axis(self.spln, 0, x) else: return self.tfun(x, *self.args, **self.kwargs)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def pypi_register(server='pypitest'): """Register and prep user for PyPi upload. .. note:: May need to weak ~/.pypirc file per issue: http://stackoverflow.com/questions/1569315 """
command = 'python setup.py register'
if server == 'pypitest':
    # The test index requires an explicit repository URL.
    command += ' -r https://testpypi.python.org/pypi'
_execute_setup_command(command)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def apply_format(var, format_str): """Format all non-iterables inside of the iterable var using the format_str Example: will return ['2.0', ['1.0', '4.0'], '4.0', '1.0'] """
if isinstance(var, (list, tuple)): new_var = map(lambda x: apply_format(x, format_str), var) if isinstance(var, tuple): new_var = '(' + ', '.join(new_var) + ')' elif isinstance(var, list): new_var = '[' + ', '.join(new_var) + ']' return '{}'.format(new_var) else: return format_str.format(var)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _check_spawnable(source_channels, target_channels): """Check whether gate is spawnable on the target channels."""
if len(target_channels) != len(set(target_channels)): raise Exception('Spawn channels must be unique') return source_channels.issubset( set(target_channels))
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def key_press_handler(event, canvas, toolbar=None): """ Handles keyboard shortcuts for the FCToolbar. """
if event.key is None: return key = event.key.encode('ascii', 'ignore') if key in ['1']: toolbar.create_gate_widget(kind='poly') elif key in ['2', '3', '4']: kind = {'2': 'quad', '3': 'horizontal threshold', '4': 'vertical threshold'}[key] toolbar.create_gate_widget(kind=kind) elif key in ['9']: toolbar.remove_active_gate() elif key in ['0']: toolbar.load_fcs() elif key in ['a']: toolbar.set_axes(('d1', 'd2'), pl.gca()) elif key in ['b']: toolbar.set_axes(('d2', 'd1'), pl.gca()) elif key in ['c']: toolbar.set_axes(('d1', 'd3'), pl.gca()) elif key in ['8']: print(toolbar.get_generation_code())
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def add_callback(self, func): """ Registers a call back function """
if func is None: return func_list = to_list(func) if not hasattr(self, 'callback_list'): self.callback_list = func_list else: self.callback_list.extend(func_list)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def create_artist(self): """ decides whether the artist should be visible or not in the current axis current_axis : names of x, y axis """
verts = self.coordinates
# Choose a transform so the untracked axis spans the full axes (grid
# transform) while the tracked coordinate stays in data units.
if not self.tracky:
    trans = self.ax.get_xaxis_transform(which='grid')
elif not self.trackx:
    trans = self.ax.get_yaxis_transform(which='grid')
else:
    trans = self.ax.transData
# picker=15 gives a generous hit radius (in points) for mouse picking.
self.artist = pl.Line2D([verts[0]], [verts[1]], transform=trans, picker=15)
self.update_looks('inactive')
self.ax.add_artist(self.artist)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def ignore(self, event): """ Ignores events. """
if hasattr(event, 'inaxes'): if event.inaxes != self.ax: return True else: return False
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def spawn(self, channels, ax): """ Spawns a graphical gate that can be used to update the coordinates of the current gate. """
if _check_spawnable(self.source_channels, channels): sgate = self.gate_type(self.verts, ax, channels) self.spawn_list.append(sgate) return sgate else: return None
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def remove_spawned_gates(self, spawn_gate=None): """ Removes all spawned gates. """
if spawn_gate is None: for sg in list(self.spawn_list): self.spawn_list.remove(sg) sg.remove() else: spawn_gate.remove() self.spawn_list.remove(spawn_gate)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def get_generation_code(self, **gencode): """ Generates python code that can create the gate. """
channels, verts = self.coordinates
# Render channel names as quoted, comma-separated identifiers.
channels = ', '.join(["'{}'".format(ch) for ch in channels])
verts = list(verts)
## Formatting the vertexes
# List level (must be first), used for gates that may have multiple vertexes like a polygon
if len(verts) == 1:
    verts = verts[0]
# Tuple level (must be second), used for catching the number of dimensions
# on which a vertex is defined
if len(verts) == 1:
    verts = verts[0]
# Format vertices to include less sigfigs
verts = apply_format(verts, '{:.3e}')
# Callers may pre-seed any of these fields via **gencode overrides.
gencode.setdefault('name', self.name)
gencode.setdefault('region', self.region)
gencode.setdefault('gate_type', self._gencode_gate_class)
gencode.setdefault('verts', verts)
gencode.setdefault('channels', channels)
format_string = "{name} = {gate_type}({verts}, ({channels}), region='{region}', name='{name}')"
return format_string.format(**gencode)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _gencode_gate_class(self): """ Returns the class name that generates this gate. """
channels, verts = self.coordinates num_channels = len(channels) gate_type_name = self.gate_type.__name__ if gate_type_name == 'ThresholdGate' and num_channels == 2: gate_type_name = 'QuadGate' return gate_type_name
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def source_channels(self): """ Returns a set describing the source channels on which the gate is defined. """
source_channels = [v.coordinates.keys() for v in self.verts] return set(itertools.chain(*source_channels))
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def pick_event_handler(self, event): """ Handles pick events """
# Shared payload: the available channel choices plus the originating
# GUI event, forwarded to whichever label was picked.
info = {'options': self.get_available_channels(),
        'guiEvent': event.mouseevent.guiEvent,
        }
# Clicking the x-axis label fires an 'axis_click' for axis 0.
if hasattr(self, 'xlabel_artist') and (event.artist == self.xlabel_artist):
    info['axis_num'] = 0
    self.callback(Event('axis_click', info))
# Clicking the y-axis label fires an 'axis_click' for axis 1.
if hasattr(self, 'ylabel_artist') and (event.artist == self.ylabel_artist):
    info['axis_num'] = 1
    self.callback(Event('axis_click', info))
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def plot_data(self): """Plots the loaded data"""
# Clear the plot before plotting onto it self.ax.cla() if self.sample is None: return if self.current_channels is None: self.current_channels = self.sample.channel_names[:2] channels = self.current_channels channels_to_plot = channels[0] if len(channels) == 1 else channels self.sample.plot(channels_to_plot, ax=self.ax) xaxis = self.ax.get_xaxis() yaxis = self.ax.get_yaxis() self.xlabel_artist = xaxis.get_label() self.ylabel_artist = yaxis.get_label() self.xlabel_artist.set_picker(5) self.ylabel_artist.set_picker(5) self.fig.canvas.draw()
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def get_generation_code(self): """Return python code that generates all drawn gates."""
if len(self.gates) < 1: code = '' else: import_list = set([gate._gencode_gate_class for gate in self.gates]) import_list = 'from FlowCytometryTools import ' + ', '.join(import_list) code_list = [gate.get_generation_code() for gate in self.gates] code_list.sort() code_list = '\n'.join(code_list) code = import_list + 2 * '\n' + code_list self.callback(Event('generated_code', {'code': code})) return code
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def replace(self): """Reformat values inside the self.doc_dict using self.doc_dict TODO: Make support for partial_formatting """
doc_dict = self.doc_dict.copy()
for key, value in doc_dict.items():
    # BUG FIX: the original test `'{' and '}' in v` only checked for
    # '}' (the literal '{' is always truthy), so a value containing a
    # lone '}' crashed str.format.  Require both braces.
    if '{' in value and '}' in value:
        self.doc_dict[key] = value.format(**doc_dict)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _format(self, doc): """ Formats the docstring using self.doc_dict """
if self.allow_partial_formatting: mapping = FormatDict(self.doc_dict) else: mapping = self.doc_dict formatter = string.Formatter() return formatter.vformat(doc, (), mapping)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def get_package_version(path):
    '''Extract the version string from the given file.

    Parameters
    ----------
    path : str
        Path to a file containing a line like ``version = '1.2.3'``.

    Returns
    -------
    str
        The extracted version string.

    Raises
    ------
    RuntimeError
        If no version assignment is found in the file.
    '''
    # BUG FIX: the original opened the global VERSION_FILE and ignored
    # the `path` argument (which the error message already referenced).
    with open(path, "rt") as f:
        verstrline = f.read()
    # Match e.g. version = '1.2.3' or version = "1.2.3" at line start.
    VERSION = r"^version = ['\"]([^'\"]*)['\"]"
    results = re.search(VERSION, verstrline, re.M)
    if results:
        return results.group(1)
    raise RuntimeError("Unable to find version string in {}.".format(path))
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _assign_IDS_to_datafiles(datafiles, parser, measurement_class=None, **kwargs): """ Assign measurement IDS to datafiles using specified parser. Parameters datafiles : iterable of str Path to datafiles. An ID will be assigned to each. Note that this function does not check for uniqueness of IDs! {_bases_filename_parser} measurement_class: object Used to create a temporary object when reading the ID from the datafile. The measurement class needs to have an `ID_from_data` method. Only used when parser='read'. kwargs: dict Additional parameters to be passed to parser is it is a callable, or 'read'. If parser is 'read', kwargs are passed to the measurement class's `ID_from_data` method. Returns ------- Dict of ID:datafile """
if isinstance(parser, collections.Mapping): fparse = lambda x: parser[x] elif hasattr(parser, '__call__'): fparse = lambda x: parser(x, **kwargs) elif parser == 'name': kwargs.setdefault('pre', 'Well_') kwargs.setdefault('post', ['_', '\.', '$']) kwargs.setdefault('tagtype', str) fparse = lambda x: get_tag_value(os.path.basename(x), **kwargs) elif parser == 'number': fparse = lambda x: int(x.split('.')[-2]) elif parser == 'read': fparse = lambda x: measurement_class(ID='temporary', datafile=x).ID_from_data(**kwargs) else: raise ValueError('Encountered unsupported value "%s" for parser parameter.' % parser) d = dict((fparse(dfile), dfile) for dfile in datafiles) return d
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def set_data(self, data=None, **kwargs):
    '''Read data into memory, applying all queued actions.

    The queued actions are then moved into the history and the queue is
    cleared.
    '''
    if data is None:
        data = self.get_data(**kwargs)
    self._data = data
    # The queued actions have now been applied: archive them.
    self.history += self.queue
    self.queue = []
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def set_meta(self, meta=None, **kwargs):
    '''Assign values to ``self.meta``; nothing is returned.

    When ``meta`` is None it is fetched via ``self.get_meta``.
    '''
    if meta is None:
        meta = self.get_meta(**kwargs)
    self._meta = meta
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def get_data(self, **kwargs):
    '''Return the measurement data.

    If actions are queued, apply them on a new instance first; otherwise
    read the data from 'self.datafile' via 'self.read_data' as needed.
    '''
    if not self.queue:
        return self._get_attr_from_file('data', **kwargs)
    new = self.apply_queued()
    return new.get_data()
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def apply(self, func, applyto='measurement', noneval=nan, setdata=False): """ Apply func either to self or to associated data. If data is not already parsed, try and read it. Parameters func : callable The function either accepts a measurement object or an FCS object. Does some calculation and returns the result. applyto : ['data' | 'measurement'] * 'data' : apply to associated data * 'measurement' : apply to measurement object itself. noneval : obj Value to return if `applyto` is 'data', but no data is available. setdata : bool Used only if data is not already set. If true parsed data will be assigned to self.data Otherwise data will be discarded at end of apply. """
applyto = applyto.lower() if applyto == 'data': if self.data is not None: data = self.data elif self.datafile is None: return noneval else: data = self.read_data() if setdata: self.data = data return func(data) elif applyto == 'measurement': return func(self) else: raise ValueError('Encountered unsupported value "%s" for applyto parameter.' % applyto)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def from_files(cls, ID, datafiles, parser, readdata_kwargs={}, readmeta_kwargs={}, **ID_kwargs): """ Create a Collection of measurements from a set of data files. Parameters {_bases_ID} {_bases_data_files} {_bases_filename_parser} {_bases_ID_kwargs} """
d = _assign_IDS_to_datafiles(datafiles, parser, cls._measurement_class, **ID_kwargs) measurements = [] for sID, dfile in d.items(): try: measurements.append(cls._measurement_class(sID, datafile=dfile, readdata_kwargs=readdata_kwargs, readmeta_kwargs=readmeta_kwargs)) except: msg = 'Error occurred while trying to parse file: %s' % dfile raise IOError(msg) return cls(ID, measurements)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def filter(self, criteria, applyto='measurement', ID=None): """ Filter measurements according to given criteria. Retain only Measurements for which criteria returns True. TODO: add support for multiple criteria Parameters criteria : callable Returns bool. applyto : 'measurement' | 'keys' | 'data' | mapping 'measurement' : criteria is applied to Measurement objects 'keys' : criteria is applied to the keys. 'data' : criteria is applied to the Measurement objects' data. mapping : for each key criteria is applied to mapping value with same key. ID : str ID of the filtered collection. If None is given, append '.filterd' to the current sample ID. Returns ------- Filtered Collection. """
fil = criteria new = self.copy() if isinstance(applyto, collections.Mapping): remove = (k for k, v in self.items() if not fil(applyto[k])) elif applyto == 'measurement': remove = (k for k, v in self.items() if not fil(v)) elif applyto == 'keys': remove = (k for k, v in self.items() if not fil(k)) elif applyto == 'data': remove = (k for k, v in self.items() if not fil(v.get_data())) else: raise ValueError('Unsupported value "%s" for applyto parameter.' % applyto) for r in remove: del new[r] if ID is None: ID = self.ID new.ID = ID return new
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def filter_by_key(self, keys, ID=None): """ Keep only Measurements with given keys. """
keys = to_list(keys) fil = lambda x: x in keys if ID is None: ID = self.ID return self.filter(fil, applyto='keys', ID=ID)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def filter_by_IDs(self, ids, ID=None): """ Keep only Measurements with given IDs. """
fil = lambda x: x in ids return self.filter_by_attr('ID', fil, ID)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def filter_by_rows(self, rows, ID=None): """ Keep only Measurements in corresponding rows. """
rows = to_list(rows) fil = lambda x: x in rows applyto = {k: self._positions[k][0] for k in self.keys()} if ID is None: ID = self.ID return self.filter(fil, applyto=applyto, ID=ID)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def filter_by_cols(self, cols, ID=None): """ Keep only Measurements in corresponding columns. """
rows = to_list(cols) fil = lambda x: x in rows applyto = {k: self._positions[k][1] for k in self.keys()} if ID is None: ID = self.ID + '.filtered_by_cols' return self.filter(fil, applyto=applyto, ID=ID)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def from_files(cls, ID, datafiles, parser='name', position_mapper=None, readdata_kwargs={}, readmeta_kwargs={}, ID_kwargs={}, **kwargs): """ Create an OrderedCollection of measurements from a set of data files. Parameters {_bases_ID} {_bases_data_files} {_bases_filename_parser} {_bases_position_mapper} {_bases_ID_kwargs} kwargs : dict Additional key word arguments to be passed to constructor. """
if position_mapper is None: if isinstance(parser, six.string_types): position_mapper = parser else: msg = "When using a custom parser, you must specify the position_mapper keyword." raise ValueError(msg) d = _assign_IDS_to_datafiles(datafiles, parser, cls._measurement_class, **ID_kwargs) measurements = [] for sID, dfile in d.items(): try: measurements.append(cls._measurement_class(sID, datafile=dfile, readdata_kwargs=readdata_kwargs, readmeta_kwargs=readmeta_kwargs)) except: msg = 'Error occured while trying to parse file: %s' % dfile raise IOError(msg) return cls(ID, measurements, position_mapper, **kwargs)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def _is_valid_position(self, position): ''' check if given position is valid for this collection ''' row, col = position valid_r = row in self.row_labels valid_c = col in self.col_labels return valid_r and valid_c
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def _get_ID2position_mapper(self, position_mapper): ''' Defines a position parser that is used to map between sample IDs and positions. Parameters -------------- {_bases_position_mapper} TODO: Fix the name to work with more than 26 letters of the alphabet. ''' def num_parser(x, order): i, j = unravel_index(int(x - 1), self.shape, order=order) return (self.row_labels[i], self.col_labels[j]) if hasattr(position_mapper, '__call__'): mapper = position_mapper elif isinstance(position_mapper, collections.Mapping): mapper = lambda x: position_mapper[x] elif position_mapper == 'name': mapper = lambda x: (x[0], int(x[1:])) elif position_mapper in ('row_first_enumerator', 'number'): mapper = lambda x: num_parser(x, 'F') elif position_mapper == 'col_first_enumerator': mapper = lambda x: num_parser(x, 'C') else: msg = '"{}" is not a known key_to_position_parser.'.format(position_mapper) raise ValueError(msg) return mapper
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def set_positions(self, positions=None, position_mapper='name', ids=None):
    '''Assign grid positions to measurements.

    Checks position validity & collisions, but not that all measurements
    are assigned.

    Parameters
    -----------
    positions : dict-like of measurement_key:(row, col), optional
        Explicit assignment. If None, positions are derived from the IDs
        using ``position_mapper``.
    position_mapper : callable | mapping | str
        callable - gets key and returns position
        mapping - key:pos
        'name' - parses things like 'A1', 'G12'
        'number' - converts number to positions, going over rows first.
    ids : IDs the mapper will be applied to. If None, all measurements
        are used.

    TODO: output a more informative message for position collisions
    '''
    if positions is None:
        ids = self.keys() if ids is None else to_list(ids)
        mapper = self._get_ID2position_mapper(position_mapper)
        positions = dict((ID, mapper(ID)) for ID in ids)

    # Check that the resulting assignment is unique
    # (at most one measurement per position).
    temp = self._positions.copy()
    temp.update(positions)
    if len(temp.values()) != len(set(temp.values())):
        msg = 'A position can only be occupied by a single measurement'
        raise Exception(msg)

    for k, pos in positions.items():
        if not self._is_valid_position(pos):
            msg = 'Position {} is not supported for this collection'.format(pos)
            raise ValueError(msg)
        self._positions[k] = pos
        self[k]._set_position(self.ID, pos)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def get_positions(self, copy=True):
    '''Return the measurement-position mapping.

    Parameters
    ----------
    copy : bool
        When True (default) return a copy so callers cannot mutate the
        internal mapping; otherwise return the internal dict itself.
    '''
    positions = self._positions
    return positions.copy() if copy else positions
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def dropna(self):
    '''Return a copy with unassigned rows and columns removed.'''
    new = self.copy()
    # Lay the collection out as a frame; empty rows/cols drop out of the
    # resulting index/columns.
    layout = self._dict2DF(self, nan, True)
    new.row_labels = list(layout.index)
    new.col_labels = list(layout.columns)
    return new
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def validate_input(self): """Raise appropriate exception if gate was defined incorrectly."""
low, high = self.vert[0], self.vert[1]
# The interval must be strictly increasing.
if high <= low:
    raise ValueError(u'{} must be larger than {}'.format(high, low))
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _identify(self, dataframe): """Return bool series which is True for indexes that 'pass' the gate"""
idx = ((dataframe[self.channels[0]] <= self.vert[1]) & (dataframe[self.channels[0]] >= self.vert[0])) if self.region == 'out': idx = ~idx return idx
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _get_paths(): """Generate paths to test data. Done in a function to protect namespace a bit."""
import os base_path = os.path.dirname(os.path.abspath(__file__)) test_data_dir = os.path.join(base_path, 'tests', 'data', 'Plate01') test_data_file = os.path.join(test_data_dir, 'RFP_Well_A3.fcs') return test_data_dir, test_data_file
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def upload_prev(ver, doc_root='./'):
    """Push a copy of an older release's docs (HTML + PDF) to the
    appropriate version directory on the pandas doc server.

    Parameters
    ----------
    ver : str
        Release version; used to build the remote directory name.
    doc_root : str
        Local documentation root containing build/html and build/latex.

    Raises
    ------
    SystemExit
        If the rsync (HTML) or scp (PDF) command exits non-zero.
    """
    local_dir = doc_root + 'build/html'
    remote_dir = '/usr/share/nginx/pandas/pandas-docs/version/%s/' % ver
    cmd = 'cd %s; rsync -avz . pandas@pandas.pydata.org:%s -essh'
    cmd = cmd % (local_dir, remote_dir)
    # print() works under both Python 2 and 3; the original used the
    # Python-2-only `print cmd` statement (a SyntaxError on Python 3).
    print(cmd)
    if os.system(cmd):
        raise SystemExit(
            'Upload to %s from %s failed' % (remote_dir, local_dir))

    local_dir = doc_root + 'build/latex'
    pdf_cmd = 'cd %s; scp pandas.pdf pandas@pandas.pydata.org:%s'
    pdf_cmd = pdf_cmd % (local_dir, remote_dir)
    if os.system(pdf_cmd):
        raise SystemExit('Upload PDF to %s from %s failed' % (ver, doc_root))
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def plotFCM(data, channel_names, kind='histogram', ax=None, autolabel=True, xlabel_kwargs={}, ylabel_kwargs={}, colorbar=False, grid=False, **kwargs): """ Plots the sample on the current axis. Follow with a call to matplotlib's show() in order to see the plot. Parameters data : DataFrame {graph_plotFCM_pars} {common_plot_ax} Returns ------- The output of the plot command used """
if ax == None: ax = pl.gca() xlabel_kwargs.setdefault('size', 16) ylabel_kwargs.setdefault('size', 16) channel_names = to_list(channel_names) if len(channel_names) == 1: # 1D so histogram plot kwargs.setdefault('color', 'gray') kwargs.setdefault('histtype', 'stepfilled') kwargs.setdefault('bins', 200) # Do not move above x = data[channel_names[0]].values if len(x) >= 1: if (len(x) == 1) and isinstance(kwargs['bins'], int): # Only needed for hist (not hist2d) due to hist function doing # excessive input checking warnings.warn("One of the data sets only has a single event. " "This event won't be plotted unless the bin locations" " are explicitly provided to the plotting function. ") return None plot_output = ax.hist(x, **kwargs) else: return None elif len(channel_names) == 2: x = data[channel_names[0]].values # value of first channel y = data[channel_names[1]].values # value of second channel if len(x) == 0: # Don't draw a plot if there's no data return None if kind == 'scatter': kwargs.setdefault('edgecolor', 'none') plot_output = ax.scatter(x, y, **kwargs) elif kind == 'histogram': kwargs.setdefault('bins', 200) # Do not move above kwargs.setdefault('cmin', 1) kwargs.setdefault('cmap', pl.cm.copper) kwargs.setdefault('norm', matplotlib.colors.LogNorm()) plot_output = ax.hist2d(x, y, **kwargs) mappable = plot_output[-1] if colorbar: pl.colorbar(mappable, ax=ax) else: raise ValueError("Not a valid plot type. Must be 'scatter', 'histogram'") else: raise ValueError('Received an unexpected number of channels: "{}"'.format(channel_names)) pl.grid(grid) if autolabel: y_label_text = 'Counts' if len(channel_names) == 1 else channel_names[1] ax.set_xlabel(channel_names[0], **xlabel_kwargs) ax.set_ylabel(y_label_text, **ylabel_kwargs) return plot_output
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def autoscale_subplots(subplots=None, axis='both'): """ Sets the x and y axis limits for each subplot to match the x and y axis limits of the most extreme data points encountered. The limits are set to the same values for all subplots. Parameters subplots : ndarray or list of matplotlib.axes.Axes axis : ['x' | 'y' | 'both' / 'xy' / 'yx' | 'none' / ''] 'x' : autoscales the x axis 'y' : autoscales the y axis 'both', 'xy', 'yx' : autoscales both axis 'none', '' : autoscales nothing """
axis_options = ('x', 'y', 'both', 'none', '', 'xy', 'yx')
# Normalize once: the original validated axis.lower() but compared the
# raw string in the loop below, so e.g. axis='X' passed validation yet
# silently scaled nothing. Lower-casing up front makes validation and
# the dispatch below agree.
axis = axis.lower()
if axis not in axis_options:
    raise ValueError('axis must be in {0}'.format(axis_options))

if subplots is None:
    subplots = plt.gcf().axes

# Collect the (x, y) data intervals of every subplot.
data_limits = [(ax.xaxis.get_data_interval(), ax.yaxis.get_data_interval())
               for loc, ax in numpy.ndenumerate(subplots)]  # TODO: Make a proper iterator

xlims, ylims = zip(*data_limits)
xmins_list, xmaxs_list = zip(*xlims)
ymins_list, ymaxs_list = zip(*ylims)

# Most extreme limits observed across all subplots.
xmin = numpy.min(xmins_list)
xmax = numpy.max(xmaxs_list)
ymin = numpy.min(ymins_list)
ymax = numpy.max(ymaxs_list)

for loc, ax in numpy.ndenumerate(subplots):
    if axis in ('x', 'both', 'xy', 'yx'):
        ax.set_xlim((xmin, xmax))
    if axis in ('y', 'both', 'xy', 'yx'):
        ax.set_ylim((ymin, ymax))
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def scale_subplots(subplots=None, xlim='auto', ylim='auto'): """Set the x and y axis limits for a collection of subplots. Parameters subplots : ndarray or list of matplotlib.axes.Axes xlim : None | 'auto' | (xmin, xmax) 'auto' : sets the limits according to the most extreme values of data encountered. ylim : None | 'auto' | (ymin, ymax) """
# Decide per axis whether to autoscale (the 'auto' sentinel) or to pin
# to the caller-supplied explicit limits.
manual_x = xlim != 'auto'
manual_y = ylim != 'auto'
auto_axis = ('' if manual_x else 'x') + ('' if manual_y else 'y')
autoscale_subplots(subplots, auto_axis)
for _, axes in numpy.ndenumerate(subplots):
    if manual_x:
        axes.set_xlim(xlim)
    if manual_y:
        axes.set_ylim(ylim)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _plot_table(matrix, text_format='{:.2f}', cmap=None, **kwargs): """ Plot a numpy matrix as a table. Uses the current axis bounding box to decide on limits. text_format specifies the formatting to apply to the values. Parameters matrix : ndarray text_format : str Indicates how to format the the values text_format = {:.2} -> keeps all digits until the first 2 significant digits past the decimal text_format = {:.2f} -> keeps only 2 digits past the decimal cmap : None | colormap if a colormap is provided, this colormap will be used to choose the color of the text. **kwargs : all other arguments passed to plt.text function Examples plot_table(numpy.random.random((3,3)) plt.show() """
# Color each cell's text by its value when a colormap is given and the
# caller did not force an explicit color.
vmin = numpy.nanmin(matrix)
vmax = numpy.nanmax(matrix)
use_cmap = (cmap is not None) and not kwargs.get('color', None)
if use_cmap:
    norm = matplotlib.colors.Normalize(vmin=vmin, vmax=vmax, clip=False)

for (row, col), value in numpy.ndenumerate(matrix):
    if use_cmap:
        kwargs['color'] = cmap(norm(value))
    # Cell (row, col) is drawn at x=col, y=row in data coordinates —
    # the same positions the original derived via meshgrid.
    plt.text(col, row, text_format.format(value),
             horizontalalignment='center',
             verticalalignment='center',
             transform=plt.gca().transData,
             **kwargs)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _set_tick_lines_visibility(ax, visible=True): """Set the visibility of the tick lines of the requested axis."""
# `visible` may be a single flag for both axes or a [x_flag, y_flag]
# pair applied per axis.
per_axis = isinstance(visible, list)
for axis_index, axis_obj in enumerate((ax.get_xaxis(), ax.get_yaxis())):
    flag = visible[axis_index] if per_axis else visible
    for tick_line in axis_obj.get_ticklines():
        tick_line.set_visible(flag)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _set_tick_labels_visibility(ax, visible=True): """Set the visibility of the tick labels of the requested axis."""
# `visible` may be a single flag for both axes or a [x_flag, y_flag]
# pair applied per axis.
per_axis = isinstance(visible, list)
for axis_index, axis_obj in enumerate((ax.get_xaxis(), ax.get_yaxis())):
    flag = visible[axis_index] if per_axis else visible
    for tick_label in axis_obj.get_ticklabels():
        tick_label.set_visible(flag)