func_code_string / func_documentation_string: paired function source code and docstrings.
def is_unique(self):
    if self._is_unique is None:
        t = self.values[:-1] == self.values[1:]  # type: np.ndarray
        self._is_unique = ~np.any(t)
    return self._is_unique
True if no duplicate entries.
def locate_key(self, key):
    left = bisect.bisect_left(self, key)
    right = bisect.bisect_right(self, key)
    diff = right - left
    if diff == 0:
        raise KeyError(key)
    elif diff == 1:
        return left
    else:
        return slice(left, right)
Get index location for the requested key.

Parameters
----------
key : object
    Value to locate.

Returns
-------
loc : int or slice
    Location of `key` (will be slice if there are duplicate entries).

Examples
--------
>>> imp...
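The truncated Examples section above likely continued along these lines; a minimal sketch of how locate_key behaves on a small SortedIndex (values made up for illustration):

import allel

idx = allel.SortedIndex([3, 6, 6, 11])
idx.locate_key(3)   # 0 (unique entry -> int)
idx.locate_key(6)   # slice(1, 3, None) (duplicate entries -> slice)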
def locate_intersection(self, other):
    # check inputs
    other = SortedIndex(other, copy=False)
    # find intersection
    assume_unique = self.is_unique and other.is_unique
    loc = np.in1d(self, other, assume_unique=assume_unique)
    loc_other = np.in1d(other, self, assume_uniq...
Locate the intersection with another array.

Parameters
----------
other : array_like, int
    Array of values to intersect.

Returns
-------
loc : ndarray, bool
    Boolean array with location of intersection.
loc_other : ndarray, bool
    ...
def locate_keys(self, keys, strict=True):
    # check inputs
    keys = SortedIndex(keys, copy=False)
    # find intersection
    loc, found = self.locate_intersection(keys)
    if strict and np.any(~found):
        raise KeyError(keys[~found])
    return loc
Get index locations for the requested keys.

Parameters
----------
keys : array_like
    Array of keys to locate.
strict : bool, optional
    If True, raise KeyError if any keys are not found in the index.

Returns
-------
loc : ndarray, bool
    ...
def intersect(self, other):
    loc = self.locate_keys(other, strict=False)
    return self.compress(loc, axis=0)
Intersect with `other` sorted index.

Parameters
----------
other : array_like, int
    Array of values to intersect with.

Returns
-------
out : SortedIndex
    Values in common.

Examples
--------
>>> import allel
>>> idx...
def locate_range(self, start=None, stop=None):
    # locate start and stop indices
    if start is None:
        start_index = 0
    else:
        start_index = bisect.bisect_left(self, start)
    if stop is None:
        stop_index = len(self)
    else:
        stop_index =...
Locate slice of index containing all entries within `start` and `stop`
values **inclusive**.

Parameters
----------
start : int, optional
    Start value.
stop : int, optional
    Stop value.

Returns
-------
loc : slice
    Slice o...
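A minimal usage sketch of locate_range (index values made up): entries with 4 <= value <= 32 are selected.

import allel

idx = allel.SortedIndex([3, 6, 11, 20, 35])
loc = idx.locate_range(4, 32)   # slice(1, 4, None)
idx[loc]                        # SortedIndex([6, 11, 20])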
def intersect_range(self, start=None, stop=None):
    try:
        loc = self.locate_range(start=start, stop=stop)
    except KeyError:
        return self.values[0:0]
    else:
        return self[loc]
Intersect with range defined by `start` and `stop` values **inclusive**.

Parameters
----------
start : int, optional
    Start value.
stop : int, optional
    Stop value.

Returns
-------
idx : SortedIndex

Examples
-------...
def locate_intersection_ranges(self, starts, stops):
    # check inputs
    starts = asarray_ndim(starts, 1)
    stops = asarray_ndim(stops, 1)
    check_dim0_aligned(starts, stops)
    # find indices of start and stop values in idx
    start_indices = np.searchsorted(self, starts)
    ...
Locate the intersection with a set of ranges.

Parameters
----------
starts : array_like, int
    Range start values.
stops : array_like, int
    Range stop values.

Returns
-------
loc : ndarray, bool
    Boolean array with location of ent...
def locate_ranges(self, starts, stops, strict=True):
    loc, found = self.locate_intersection_ranges(starts, stops)
    if strict and np.any(~found):
        raise KeyError(starts[~found], stops[~found])
    return loc
Locate items within the given ranges.

Parameters
----------
starts : array_like, int
    Range start values.
stops : array_like, int
    Range stop values.
strict : bool, optional
    If True, raise KeyError if any ranges contain no entries.

Retu...
def intersect_ranges(self, starts, stops):
    loc = self.locate_ranges(starts, stops, strict=False)
    return self.compress(loc, axis=0)
Intersect with a set of ranges.

Parameters
----------
starts : array_like, int
    Range start values.
stops : array_like, int
    Range stop values.

Returns
-------
idx : SortedIndex

Examples
--------
>>> import allel
...
def locate_intersection(self, other):
    # check inputs
    other = UniqueIndex(other)
    # find intersection
    assume_unique = True
    loc = np.in1d(self, other, assume_unique=assume_unique)
    loc_other = np.in1d(other, self, assume_unique=assume_unique)
    return loc, loc_...
Locate the intersection with another array.

Parameters
----------
other : array_like
    Array to intersect.

Returns
-------
loc : ndarray, bool
    Boolean array with location of intersection.
loc_other : ndarray, bool
    Boolean array ...
def locate_keys(self, keys, strict=True):
    # check inputs
    keys = UniqueIndex(keys)
    # find intersection
    loc, found = self.locate_intersection(keys)
    if strict and np.any(~found):
        raise KeyError(keys[~found])
    return loc
Get index locations for the requested keys.

Parameters
----------
keys : array_like
    Array of keys to locate.
strict : bool, optional
    If True, raise KeyError if any keys are not found in the index.

Returns
-------
loc : ndarray, bool
    ...
def locate_key(self, k1, k2=None):
    loc1 = self.l1.locate_key(k1)
    if k2 is None:
        return loc1
    if isinstance(loc1, slice):
        offset = loc1.start
        try:
            loc2 = SortedIndex(self.l2[loc1], copy=False).locate_key(k2)
        except KeyError:
            ...
Get index location for the requested key.

Parameters
----------
k1 : object
    Level 1 key.
k2 : object, optional
    Level 2 key.

Returns
-------
loc : int or slice
    Location of requested key (will be slice if there are duplicate ...
def locate_range(self, key, start=None, stop=None):
    loc1 = self.l1.locate_key(key)
    if start is None and stop is None:
        loc = loc1
    elif isinstance(loc1, slice):
        offset = loc1.start
        idx = SortedIndex(self.l2[loc1], copy=False)
        try:
            ...
Locate slice of index containing all entries within the range
`key`:`start`-`stop` **inclusive**.

Parameters
----------
key : object
    Level 1 key value.
start : object, optional
    Level 2 start value.
stop : object, optional
    Level 2 stop ...
def locate_key(self, chrom, pos=None):
    if pos is None:
        # we just want the region for a chromosome
        if chrom in self.chrom_ranges:
            # return previously cached result
            return self.chrom_ranges[chrom]
        else:
            loc_chrom = np.non...
Get index location for the requested key.

Parameters
----------
chrom : object
    Chromosome or contig.
pos : int, optional
    Position within chromosome or contig.

Returns
-------
loc : int or slice
    Location of requested key (will ...
def locate_range(self, chrom, start=None, stop=None):
    slice_chrom = self.locate_key(chrom)
    if start is None and stop is None:
        return slice_chrom
    else:
        pos_chrom = SortedIndex(self.pos[slice_chrom])
        try:
            slice_within_chrom = pos_chrom.l...
Locate slice of index containing all entries within the range
`chrom`:`start`-`stop` **inclusive**.

Parameters
----------
chrom : object
    Chromosome or contig.
start : int, optional
    Position start value.
stop : int, optional
    Position stop...
def set_index(self, index):
    if index is None:
        pass
    elif isinstance(index, str):
        index = SortedIndex(self[index], copy=False)
    elif isinstance(index, (tuple, list)) and len(index) == 2:
        index = SortedMultiIndex(self[index[0]], self[index[1]],
                                 ...
Set or reset the index.

Parameters
----------
index : string or pair of strings, optional
    Names of columns to use for positional index, e.g., 'POS' if table
    contains a 'POS' column and records from a single chromosome/contig,
    or ('CHROM', 'POS') if table cont...
def query_position(self, chrom=None, position=None):
    if self.index is None:
        raise ValueError('no index has been set')
    if isinstance(self.index, SortedIndex):
        # ignore chrom
        loc = self.index.locate_key(position)
    else:
        loc = self.index.locat...
Query the table, returning row or rows matching the given genomic
position.

Parameters
----------
chrom : string, optional
    Chromosome/contig.
position : int, optional
    Position (1-based).

Returns
-------
result : row or VariantTabl...
def query_region(self, chrom=None, start=None, stop=None):
    if self.index is None:
        raise ValueError('no index has been set')
    if isinstance(self.index, SortedIndex):
        # ignore chrom
        loc = self.index.locate_range(start, stop)
    else:
        loc = self....
Query the table, returning row or rows within the given genomic region.

Parameters
----------
chrom : string, optional
    Chromosome/contig.
start : int, optional
    Region start position (1-based).
stop : int, optional
    Region stop position ...
def to_vcf(self, path, rename=None, number=None, description=None, fill=None,
           write_header=True):
    write_vcf(path, callset=self, rename=rename, number=number,
              description=description, fill=fill, write_header=write_header)
Write to a variant call format (VCF) file.

Parameters
----------
path : string
    File path.
rename : dict, optional
    Rename these columns in the VCF.
number : dict, optional
    Override the number specified in INFO headers.
description :...
def to_mask(self, size, start_name='start', stop_name='end'):
    m = np.zeros(size, dtype=bool)
    for start, stop in self[[start_name, stop_name]]:
        m[start-1:stop] = True
    return m
Construct a mask array where elements are True if they fall within
features in the table.

Parameters
----------
size : int
    Size of chromosome/contig.
start_name : string, optional
    Name of column with start coordinates.
stop_name : string, optional
    ...
def from_gff3(path, attributes=None, region=None, score_fill=-1,
              phase_fill=-1, attributes_fill='.', dtype=None):
    a = gff3_to_recarray(path, attributes=attributes, region=region,
                         score_fill=score_fill, phase_fill=phase_fill, at...
Read a feature table from a GFF3 format file.

Parameters
----------
path : string
    File path.
attributes : list of strings, optional
    List of columns to extract from the "attributes" field.
region : string, optional
    Genome region to extract. If ...
def pairwise_distance(x, metric, chunked=False, blen=None):
    import scipy.spatial
    # check inputs
    if not hasattr(x, 'ndim'):
        x = np.asarray(x)
    if x.ndim < 2:
        raise ValueError('array with at least 2 dimensions expected')
    if x.ndim == 2:
        # use scipy to calculate distance, it...
Compute pairwise distance between individuals (e.g., samples or
haplotypes).

Parameters
----------
x : array_like, shape (n, m, ...)
    Array of m observations (e.g., samples or haplotypes) in a space
    with n dimensions (e.g., variants). Note that the order of the
    first two dimensio...
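A hedged usage sketch based on the scikit-allel docs (genotype data made up): compute a condensed distance matrix between samples from alternate-allele counts.

import allel

g = allel.GenotypeArray([[[0, 0], [0, 1]],
                         [[0, 1], [1, 1]],
                         [[0, 2], [2, 2]]])
d = allel.pairwise_distance(g.to_n_alt(), metric='cityblock')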
def pdist(x, metric):
    if isinstance(metric, str):
        import scipy.spatial
        if hasattr(scipy.spatial.distance, metric):
            metric = getattr(scipy.spatial.distance, metric)
        else:
            raise ValueError('metric name not found')
    m = x.shape[1]
    dist = list()
    for i, j i...
Alternative implementation of :func:`scipy.spatial.distance.pdist`
which is slower but more flexible in that arrays with >2 dimensions can
be passed, allowing for multidimensional observations, e.g., diploid
genotype calls or allele counts.

Parameters
----------
x : array_like, shape (n, m, ...
def pairwise_dxy(pos, gac, start=None, stop=None, is_accessible=None):
    if not isinstance(pos, SortedIndex):
        pos = SortedIndex(pos, copy=False)
    gac = asarray_ndim(gac, 3)
    # compute this once here, to avoid repeated evaluation within the loop
    gan = np.sum(gac, axis=2)
    m = gac.shape[1]
    ...
Convenience function to calculate a pairwise distance matrix using
nucleotide divergence (a.k.a. Dxy) as the distance metric.

Parameters
----------
pos : array_like, int, shape (n_variants,)
    Variant positions.
gac : array_like, int, shape (n_variants, n_samples, n_alleles)
    Per-genot...
def pcoa(dist):
    import scipy.linalg
    # This implementation is based on the skbio.math.stats.ordination.PCoA
    # implementation, with some minor adjustments.
    # check inputs
    dist = ensure_square(dist)
    # perform scaling
    e_matrix = (dist ** 2) / -2
    row_means = np.mean(e_matrix, axis=1, kee...
Perform principal coordinate analysis of a distance matrix, a.k.a.
classical multi-dimensional scaling.

Parameters
----------
dist : array_like
    Distance matrix in condensed form.

Returns
-------
coords : ndarray, shape (n_samples, n_dimensions)
    Transformed coordinates for t...
def condensed_coords(i, j, n):
    # guard conditions
    if i == j or i >= n or j >= n or i < 0 or j < 0:
        raise ValueError('invalid coordinates: %s, %s' % (i, j))
    # normalise order
    i, j = sorted([i, j])
    # calculate number of items in rows before this one (sum of arithmetic
    # progression)
    ...
Transform square distance matrix coordinates to the corresponding index
into a condensed, 1D form of the matrix.

Parameters
----------
i : int
    Row index.
j : int
    Column index.
n : int
    Size of the square matrix (length of first or second dimension).

Returns
-----...
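A minimal standalone sketch of the condensed-index arithmetic the truncated function describes (not the library's exact code): for i < j in an n x n matrix, the rows before row i contribute n-1, n-2, ... entries, a sum of an arithmetic progression.

def condensed_index(i, j, n):
    # normalise order, then count entries in all rows above row i
    i, j = sorted([i, j])
    return n * i - i * (i + 1) // 2 + (j - i - 1)

# e.g. for n=4 the pairs (0,1),(0,2),(0,3),(1,2),(1,3),(2,3) map to 0..5
assert condensed_index(1, 2, 4) == 3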
def condensed_coords_within(pop, n):
    return [condensed_coords(i, j, n)
            for i, j in itertools.combinations(sorted(pop), 2)]
Return indices into a condensed distance matrix for all pairwise
comparisons within the given population.

Parameters
----------
pop : array_like, int
    Indices of samples or haplotypes within the population.
n : int
    Size of the square matrix (length of first or second dimension).
...
def condensed_coords_between(pop1, pop2, n):
    return [condensed_coords(i, j, n)
            for i, j in itertools.product(sorted(pop1), sorted(pop2))]
Return indices into a condensed distance matrix for all pairwise
comparisons between two populations.

Parameters
----------
pop1 : array_like, int
    Indices of samples or haplotypes within the first population.
pop2 : array_like, int
    Indices of samples or haplotypes within the second ...
def plot_pairwise_distance(dist, labels=None, colorbar=True, ax=None,
                           imshow_kwargs=None):
    import matplotlib.pyplot as plt
    # check inputs
    dist_square = ensure_square(dist)
    # set up axes
    if ax is None:
        # make a square figure
        x = plt.rcParams['figure.fig...
Plot a pairwise distance matrix.

Parameters
----------
dist : array_like
    The distance matrix in condensed form.
labels : sequence of strings, optional
    Sample labels for the axes.
colorbar : bool, optional
    If True, add a colorbar to the current figure.
ax : axes, optional
    ...
def jackknife(values, statistic):
    if isinstance(values, tuple):
        # multiple input arrays
        n = len(values[0])
        masked_values = [np.ma.asarray(v) for v in values]
        for m in masked_values:
            assert m.ndim == 1, 'only 1D arrays supported'
            assert m.shape[0] == n, 'i...
Estimate standard error for `statistic` computed over `values` using
the jackknife.

Parameters
----------
values : array_like or tuple of array_like
    Input array, or tuple of input arrays.
statistic : function
    The statistic to compute.

Returns
-------
m : float
    M...
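A plain-numpy sketch of the delete-one jackknife idea (not the library code, which also handles tuples of arrays): recompute the statistic with each value left out, then estimate the standard error from the spread of the leave-one-out estimates.

import numpy as np

def jackknife_se(values, statistic):
    values = np.asarray(values)
    n = values.shape[0]
    # leave-one-out estimates of the statistic
    loo = np.array([statistic(np.delete(values, i)) for i in range(n)])
    return np.sqrt((n - 1) / n * np.sum((loo - loo.mean()) ** 2))

se = jackknife_se(np.random.normal(size=100), np.mean)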
def plot_variant_locator(pos, step=None, ax=None, start=None, stop=None,
                         flip=False, line_kwargs=None):
    import matplotlib.pyplot as plt
    # check inputs
    pos = SortedIndex(pos, copy=False)
    # set up axes
    if ax is None:
        x = plt.rcParams['figu...
Plot lines indicating the physical genome location of variants from a
single chromosome/contig. By default the top x axis is in variant index
space, and the bottom x axis is in genome position space.

Parameters
----------
pos : array_like
    A sorted 1-dimensional array of genomic positions f...
def tabulate_state_transitions(x, states, pos=None):
    # check inputs
    x = asarray_ndim(x, 1)
    check_integer_dtype(x)
    x = memoryview_safe(x)
    # find state transitions
    switch_points, transitions, _ = state_transitions(x, states)
    # start to build a dataframe
    items = [('lstate', transitions...
Construct a dataframe where each row provides information about a state
transition.

Parameters
----------
x : array_like, int
    1-dimensional array of state values.
states : set
    Set of states of interest. Any state value not in this set will be
    ignored.
pos : array_like, int, optional
    ...
def tabulate_state_blocks(x, states, pos=None):
    # check inputs
    x = asarray_ndim(x, 1)
    check_integer_dtype(x)
    x = memoryview_safe(x)
    # find state transitions
    switch_points, transitions, observations = state_transitions(x, states)
    # setup some helpers
    t = transitions[1:, 0]
    o = ob...
Construct a dataframe where each row provides information about
continuous state blocks.

Parameters
----------
x : array_like, int
    1-dimensional array of state values.
states : set
    Set of states of interest. Any state value not in this set will be
    ignored.
pos : array_like, int, opt...
def write_vcf(path, callset, rename=None, number=None, description=None,
              fill=None, write_header=True):
    names, callset = normalize_callset(callset)
    with open(path, 'w') as vcf_file:
        if write_header:
            write_vcf_header(vcf_file, names, callset=callset, rename=rename,
                             ...
Preliminary support for writing a VCF file. Currently does not support sample data. Needs further work.
def asarray_ndim(a, *ndims, **kwargs):
    allow_none = kwargs.pop('allow_none', False)
    kwargs.setdefault('copy', False)
    if a is None and allow_none:
        return None
    a = np.array(a, **kwargs)
    if a.ndim not in ndims:
        if len(ndims) > 1:
            expect_str = 'one of %s' % str(ndims)
            ...
Ensure numpy array.

Parameters
----------
a : array_like
*ndims : int, optional
    Allowed values for number of dimensions.
**kwargs
    Passed through to :func:`numpy.array`.

Returns
-------
a : numpy.ndarray
def hdf5_cache(filepath=None, parent=None, group=None, names=None, typed=False,
               hashed_key=False, **h5dcreate_kwargs):
    # initialise HDF5 file path
    if filepath is None:
        import tempfile
        filepath = tempfile.mktemp(prefix='scikit_allel_', suffix='.h5')
        atexit.register(os....
HDF5 cache decorator.

Parameters
----------
filepath : string, optional
    Path to HDF5 file. If None a temporary file name will be used.
parent : string, optional
    Path to group within HDF5 file to use as parent. If None the root
    group will be used.
group : string, optional
    ...
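A hedged usage sketch (assumes the decorator is importable as allel.util.hdf5_cache and that h5py is installed): results are cached to an HDF5 file keyed on the function arguments.

import numpy as np
import allel.util

@allel.util.hdf5_cache()
def expensive(n):
    print('computing')
    return np.arange(n)

expensive(5)  # computes and caches
expensive(5)  # second call is served from the HDF5 cache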
def pca(gn, n_components=10, copy=True, scaler='patterson', ploidy=2):
    # set up the model
    model = GenotypePCA(n_components, copy=copy, scaler=scaler, ploidy=ploidy)
    # fit the model and project the input data onto the new dimensions
    coords = model.fit_transform(gn)
    return coords, model
Perform principal components analysis of genotype data, via singular
value decomposition.

Parameters
----------
gn : array_like, float, shape (n_variants, n_samples)
    Genotypes at biallelic variants, coded as the number of alternate
    alleles per call (i.e., 0 = hom ref, 1 = het, 2 = hom ...
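A hedged usage sketch with made-up genotypes: keep segregating variants, code genotypes as alternate-allele counts, then project samples onto two components.

import numpy as np
import allel

g = allel.GenotypeArray(np.random.randint(0, 2, size=(100, 10, 2)))
# filter to segregating variants to avoid zero-variance rows under the
# Patterson scaler
gn = g.to_n_alt()[g.count_alleles().is_segregating()]
coords, model = allel.pca(gn, n_components=2)
print(coords.shape)  # (n_samples, n_components) == (10, 2)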
def randomized_pca(gn, n_components=10, copy=True, iterated_power=3,
                   random_state=None, scaler='patterson', ploidy=2):
    # set up the model
    model = GenotypeRandomizedPCA(n_components, copy=copy,
                                 iterated_power=iterated_power,
                                 ...
Perform principal components analysis of genotype data, via an
approximate truncated singular value decomposition using randomization
to speed up the computation.

Parameters
----------
gn : array_like, float, shape (n_variants, n_samples)
    Genotypes at biallelic variants, coded as the numbe...
def h_hat(ac):
    # check inputs
    ac = asarray_ndim(ac, 2)
    assert ac.shape[1] == 2, 'only biallelic variants supported'
    # compute allele number
    an = ac.sum(axis=1)
    # compute estimator
    x = (ac[:, 0] * ac[:, 1]) / (an * (an - 1))
    return x
Unbiased estimator for h, where 2*h is the heterozygosity of the
population.

Parameters
----------
ac : array_like, int, shape (n_variants, 2)
    Allele counts array for a single population.

Returns
-------
h_hat : ndarray, float, shape (n_variants,)

Notes
-----
Used in P...
def patterson_f2(aca, acb):
    # check inputs
    aca = AlleleCountsArray(aca, copy=False)
    assert aca.shape[1] == 2, 'only biallelic variants supported'
    acb = AlleleCountsArray(acb, copy=False)
    assert acb.shape[1] == 2, 'only biallelic variants supported'
    check_dim0_aligned(aca, acb)
    # compute...
Unbiased estimator for F2(A, B), the branch length between populations
A and B.

Parameters
----------
aca : array_like, int, shape (n_variants, 2)
    Allele counts for population A.
acb : array_like, int, shape (n_variants, 2)
    Allele counts for population B.

Returns
-------
...
def patterson_f3(acc, aca, acb):
    # check inputs
    aca = AlleleCountsArray(aca, copy=False)
    assert aca.shape[1] == 2, 'only biallelic variants supported'
    acb = AlleleCountsArray(acb, copy=False)
    assert acb.shape[1] == 2, 'only biallelic variants supported'
    acc = AlleleCountsArray(acc, copy=Fal...
Unbiased estimator for F3(C; A, B), the three-population test for
admixture in population C.

Parameters
----------
acc : array_like, int, shape (n_variants, 2)
    Allele counts for the test population (C).
aca : array_like, int, shape (n_variants, 2)
    Allele counts for the first source ...
def patterson_d(aca, acb, acc, acd):
    # check inputs
    aca = AlleleCountsArray(aca, copy=False)
    assert aca.shape[1] == 2, 'only biallelic variants supported'
    acb = AlleleCountsArray(acb, copy=False)
    assert acb.shape[1] == 2, 'only biallelic variants supported'
    acc = AlleleCountsArray(acc, copy...
Unbiased estimator for D(A, B; C, D), the normalised four-population
test for admixture between (A or B) and (C or D), also known as the
"ABBA BABA" test.

Parameters
----------
aca : array_like, int, shape (n_variants, 2)
    Allele counts for population A.
acb : array_like, int, shape (n_...
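A hedged usage sketch (allele counts made up): the per-variant numerators and denominators returned by patterson_d can be summed into a single genome-wide D estimate.

import numpy as np
import allel

aca = np.array([[6, 0], [4, 2], [1, 5]])
acb = np.array([[5, 1], [2, 4], [3, 3]])
acc = np.array([[6, 0], [3, 3], [2, 4]])
acd = np.array([[0, 6], [1, 5], [4, 2]])
num, den = allel.patterson_d(aca, acb, acc, acd)
d = np.nansum(num) / np.nansum(den)  # genome-wide D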
def moving_patterson_f3(acc, aca, acb, size, start=0, stop=None, step=None,
                        normed=True):
    # calculate per-variant values
    T, B = patterson_f3(acc, aca, acb)
    # calculate value of statistic within each block
    if normed:
        T_bsum = moving_statistic(T, statistic=np.nansum, s...
Estimate F3(C; A, B) in moving windows.

Parameters
----------
acc : array_like, int, shape (n_variants, 2)
    Allele counts for the test population (C).
aca : array_like, int, shape (n_variants, 2)
    Allele counts for the first source population (A).
acb : array_like, int, shape (n_varia...
def moving_patterson_d(aca, acb, acc, acd, size, start=0, stop=None,
                       step=None):
    # calculate per-variant values
    num, den = patterson_d(aca, acb, acc, acd)
    # N.B., nans can occur if any of the populations have completely missing
    # genotype calls at a variant (i.e., allele numb...
Estimate D(A, B; C, D) in moving windows.

Parameters
----------
aca : array_like, int, shape (n_variants, 2)
    Allele counts for population A.
acb : array_like, int, shape (n_variants, 2)
    Allele counts for population B.
acc : array_like, int, shape (n_variants, 2)
    Allele coun...
def average_patterson_f3(acc, aca, acb, blen, normed=True):
    # calculate per-variant values
    T, B = patterson_f3(acc, aca, acb)
    # N.B., nans can occur if any of the populations have completely missing
    # genotype calls at a variant (i.e., allele number is zero). Here we
    # assume that is rare enoug...
Estimate F3(C; A, B) and standard error using the block-jackknife.

Parameters
----------
acc : array_like, int, shape (n_variants, 2)
    Allele counts for the test population (C).
aca : array_like, int, shape (n_variants, 2)
    Allele counts for the first source population (A).
acb : arra...
def average_patterson_d(aca, acb, acc, acd, blen):
    # calculate per-variant values
    num, den = patterson_d(aca, acb, acc, acd)
    # N.B., nans can occur if any of the populations have completely missing
    # genotype calls at a variant (i.e., allele number is zero). Here we
    # assume that is rare enough...
Estimate D(A, B; C, D) and standard error using the block-jackknife.

Parameters
----------
aca : array_like, int, shape (n_variants, 2)
    Allele counts for population A.
acb : array_like, int, shape (n_variants, 2)
    Allele counts for population B.
acc : array_like, int, shape (n_varia...
def get_chunks(data, chunks=None):
    if chunks is None:
        if hasattr(data, 'chunklen') and hasattr(data, 'shape'):
            # bcolz carray, chunk first dimension only
            return (data.chunklen,) + data.shape[1:]
        elif hasattr(data, 'chunks') and hasattr(data, 'shape') and \
                ...
Try to guess a reasonable chunk shape to use for block-wise algorithms operating over `data`.
def iter_gff3(path, attributes=None, region=None, score_fill=-1, phase_fill=-1,
              attributes_fill='.', tabix='tabix'):
    # prepare fill values for attributes
    if attributes is not None:
        attributes = list(attributes)
        if isinstance(attributes_fill, (list, tuple)):
            if len(...
Iterate over records in a GFF3 file.

Parameters
----------
path : string
    Path to input file.
attributes : list of strings, optional
    List of columns to extract from the "attributes" field.
region : string, optional
    Genome region to extract. If given, file must be position ...
def gff3_to_recarray(path, attributes=None, region=None, score_fill=-1,
                     phase_fill=-1, attributes_fill='.', tabix='tabix',
                     dtype=None):
    # read records
    recs = list(iter_gff3(path, attributes=attributes, region=region,
                          score_fill=score_fill, phase_fill=phase_fil...
Load data from a GFF3 into a NumPy recarray.

Parameters
----------
path : string
    Path to input file.
attributes : list of strings, optional
    List of columns to extract from the "attributes" field.
region : string, optional
    Genome region to extract. If given, file must be posi...
def gff3_to_dataframe(path, attributes=None, region=None, score_fill=-1,
                      phase_fill=-1, attributes_fill='.', tabix='tabix',
                      **kwargs):
    import pandas
    # read records
    recs = list(iter_gff3(path, attributes=attributes, region=region,
                          score_fill=score_fill, ph...
Load data from a GFF3 into a pandas DataFrame.

Parameters
----------
path : string
    Path to input file.
attributes : list of strings, optional
    List of columns to extract from the "attributes" field.
region : string, optional
    Genome region to extract. If given, file must be po...
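A hedged usage sketch; the file path is made up and 'ID' and 'Parent' are simply common GFF3 attribute keys. Requires pandas.

import allel

df = allel.gff3_to_dataframe('data/genes.gff3', attributes=['ID', 'Parent'])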
def ehh_decay(h, truncate=False):
    # check inputs
    # N.B., ensure int8 so we can use cython optimisation
    h = HaplotypeArray(np.asarray(h), copy=False)
    if h.min() < 0:
        raise NotImplementedError('missing calls are not supported')
    # initialise
    n_variants = h.n_variants  # number of rows,...
Compute the decay of extended haplotype homozygosity (EHH) moving away
from the first variant.

Parameters
----------
h : array_like, int, shape (n_variants, n_haplotypes)
    Haplotype array.
truncate : bool, optional
    If True, the return array will exclude trailing zeros.

Returns
-------
...
def voight_painting(h):
    # check inputs
    # N.B., ensure int8 so we can use cython optimisation
    h = HaplotypeArray(np.asarray(h), copy=False)
    if h.max() > 1:
        raise NotImplementedError('only biallelic variants are supported')
    if h.min() < 0:
        raise NotImplementedError('missing calls ...
Paint haplotypes, assigning a unique integer to each shared haplotype
prefix.

Parameters
----------
h : array_like, int, shape (n_variants, n_haplotypes)
    Haplotype array.

Returns
-------
painting : ndarray, int, shape (n_variants, n_haplotypes)
    Painting array.
indices :...
def plot_voight_painting(painting, palette='colorblind', flank='right',
                         ax=None, height_factor=0.01):
    import seaborn as sns
    from matplotlib.colors import ListedColormap
    import matplotlib.pyplot as plt
    if flank == 'left':
        painting = painting[::-1]
    n_colors = pain...
Plot a painting of shared haplotype prefixes.

Parameters
----------
painting : array_like, int, shape (n_variants, n_haplotypes)
    Painting array.
ax : axes, optional
    The axes on which to draw. If not provided, a new figure will be
    created.
palette : string, optional
    A...
def fig_voight_painting(h, index=None, palette='colorblind',
                        height_factor=0.01, fig=None):
    import matplotlib.pyplot as plt
    from matplotlib.gridspec import GridSpec
    import seaborn as sns
    # check inputs
    h = asarray_ndim(h, 2)
    if index is None:
        # use midpoint
        ...
Make a figure of shared haplotype prefixes for both left and right
flanks, centred on some variant of choice.

Parameters
----------
h : array_like, int, shape (n_variants, n_haplotypes)
    Haplotype array.
index : int, optional
    Index of the variant within the haplotype array to centre ...
def compute_ihh_gaps(pos, map_pos, gap_scale, max_gap, is_accessible):
    # check inputs
    if map_pos is None:
        # integrate over physical distance
        map_pos = pos
    else:
        map_pos = asarray_ndim(map_pos, 1)
        check_dim0_aligned(pos, map_pos)
    # compute physical gaps
    physical_g...
Compute spacing between variants for integrating haplotype
homozygosity.

Parameters
----------
pos : array_like, int, shape (n_variants,)
    Variant positions (physical distance).
map_pos : array_like, float, shape (n_variants,)
    Variant positions (genetic map distance).
gap_scale :...
def ihs(h, pos, map_pos=None, min_ehh=0.05, min_maf=0.05, include_edges=False,
        gap_scale=20000, max_gap=200000, is_accessible=None, use_threads=True):
    # check inputs
    h = asarray_ndim(h, 2)
    check_integer_dtype(h)
    pos = asarray_ndim(pos, 1)
    check_dim0_aligned(h, pos)
    h = memoryview_sa...
Compute the unstandardized integrated haplotype score (IHS) for each
variant, comparing integrated haplotype homozygosity between the
reference (0) and alternate (1) alleles.

Parameters
----------
h : array_like, int, shape (n_variants, n_haplotypes)
    Haplotype array.
pos : array_like, i...
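A hedged sketch of the usual workflow: compute unstandardized scores, then standardize within allele-count bins via standardize_by_allele_count (shown later in this section). The haplotype data and positions here are made up; real data will produce fewer NaNs.

import numpy as np
import allel

h = np.random.randint(0, 2, size=(100, 20)).astype('i1')          # haplotypes
pos = np.sort(np.random.choice(np.arange(1, 100000), 100, replace=False))
score = allel.ihs(h, pos)
aac = allel.HaplotypeArray(h).count_alleles()[:, 1]  # alt allele counts
score_std, bins = allel.standardize_by_allele_count(score, aac,
                                                    diagnostics=False)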
def xpehh(h1, h2, pos, map_pos=None, min_ehh=0.05, include_edges=False,
          gap_scale=20000, max_gap=200000, is_accessible=None,
          use_threads=True):
    # check inputs
    h1 = asarray_ndim(h1, 2)
    check_integer_dtype(h1)
    h2 = asarray_ndim(h2, 2)
    check_integer_dtype(h2)
    pos = asarray_...
Compute the unstandardized cross-population extended haplotype
homozygosity score (XPEHH) for each variant.

Parameters
----------
h1 : array_like, int, shape (n_variants, n_haplotypes)
    Haplotype array for the first population.
h2 : array_like, int, shape (n_variants, n_haplotypes)
    H...
def nsl(h, use_threads=True):
    # check inputs
    h = asarray_ndim(h, 2)
    check_integer_dtype(h)
    h = memoryview_safe(h)
    # # check there are no invariant sites
    # ac = h.count_alleles()
    # assert np.all(ac.is_segregating()), 'please remove non-segregating sites'
    if use_threads and multiproce...
Compute the unstandardized number of segregating sites by length (nSl)
for each variant, comparing the reference and alternate alleles, after
Ferrer-Admetlla et al. (2014).

Parameters
----------
h : array_like, int, shape (n_variants, n_haplotypes)
    Haplotype array.
use_threads : bool, o...
def xpnsl(h1, h2, use_threads=True):
    # check inputs
    h1 = asarray_ndim(h1, 2)
    check_integer_dtype(h1)
    h2 = asarray_ndim(h2, 2)
    check_integer_dtype(h2)
    check_dim0_aligned(h1, h2)
    h1 = memoryview_safe(h1)
    h2 = memoryview_safe(h2)
    if use_threads and multiprocessing.cpu_count() > 1:
        ...
Cross-population version of the NSL statistic.

Parameters
----------
h1 : array_like, int, shape (n_variants, n_haplotypes)
    Haplotype array for the first population.
h2 : array_like, int, shape (n_variants, n_haplotypes)
    Haplotype array for the second population.
use_threads : bool,...
def haplotype_diversity(h):
    # check inputs
    h = HaplotypeArray(h, copy=False)
    # number of haplotypes
    n = h.n_haplotypes
    # compute haplotype frequencies
    f = h.distinct_frequencies()
    # estimate haplotype diversity
    hd = (1 - np.sum(f**2)) * n / (n - 1)
    return hd
Estimate haplotype diversity.

Parameters
----------
h : array_like, int, shape (n_variants, n_haplotypes)
    Haplotype array.

Returns
-------
hd : float
    Haplotype diversity.
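A worked instance of the estimator above (frequencies made up): with n = 4 haplotypes at distinct-haplotype frequencies 0.5, 0.25, 0.25, hd = (1 - sum(f^2)) * n/(n-1).

import numpy as np

f = np.array([0.5, 0.25, 0.25])
n = 4
hd = (1 - np.sum(f ** 2)) * n / (n - 1)
print(hd)  # 0.8333...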
def moving_haplotype_diversity(h, size, start=0, stop=None, step=None):
    hd = moving_statistic(values=h, statistic=haplotype_diversity, size=size,
                          start=start, stop=stop, step=step)
    return hd
Estimate haplotype diversity in moving windows.

Parameters
----------
h : array_like, int, shape (n_variants, n_haplotypes)
    Haplotype array.
size : int
    The window size (number of variants).
start : int, optional
    The index at which to start.
stop : int, optional
    T...
def garud_h(h):
    # check inputs
    h = HaplotypeArray(h, copy=False)
    # compute haplotype frequencies
    f = h.distinct_frequencies()
    # compute H1
    h1 = np.sum(f**2)
    # compute H12
    h12 = np.sum(f[:2])**2 + np.sum(f[2:]**2)
    # compute H123
    h123 = np.sum(f[:3])**2 + np.sum(f[3:]**2)
    ...
Compute the H1, H12, H123 and H2/H1 statistics for detecting signatures
of soft sweeps, as defined in Garud et al. (2015).

Parameters
----------
h : array_like, int, shape (n_variants, n_haplotypes)
    Haplotype array.

Returns
-------
h1 : float
    H1 statistic (sum of squares of...
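A worked instance of the H1 and H12 arithmetic above, with made-up haplotype frequencies sorted in descending order; H12 pools the two most frequent haplotypes.

import numpy as np

f = np.array([0.4, 0.3, 0.2, 0.1])            # descending frequencies
h1 = np.sum(f ** 2)                            # 0.30
h12 = np.sum(f[:2]) ** 2 + np.sum(f[2:] ** 2)  # 0.49 + 0.05 = 0.54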
def moving_garud_h(h, size, start=0, stop=None, step=None):
    gh = moving_statistic(values=h, statistic=garud_h, size=size, start=start,
                          stop=stop, step=step)
    h1 = gh[:, 0]
    h12 = gh[:, 1]
    h123 = gh[:, 2]
    h2_h1 = gh[:, 3]
    return h1, h12, h123, h2_h1
Compute the H1, H12, H123 and H2/H1 statistics for detecting signatures
of soft sweeps, as defined in Garud et al. (2015), in moving windows.

Parameters
----------
h : array_like, int, shape (n_variants, n_haplotypes)
    Haplotype array.
size : int
    The window size (number of variants)....
def plot_haplotype_frequencies(h, palette='Paired', singleton_color='w',
                               ax=None):
    import matplotlib.pyplot as plt
    import seaborn as sns
    # check inputs
    h = HaplotypeArray(h, copy=False)
    # setup figure
    if ax is None:
        width = plt.rcParams['figure.figsize...
Plot haplotype frequencies.

Parameters
----------
h : array_like, int, shape (n_variants, n_haplotypes)
    Haplotype array.
palette : string, optional
    A Seaborn palette name.
singleton_color : string, optional
    Color to paint singleton haplotypes.
ax : axes, optional
    ...
def moving_hfs_rank(h, size, start=0, stop=None):
    # determine windows
    windows = np.asarray(list(index_windows(h, size=size, start=start,
                                            stop=stop, step=None)))
    # setup output
    hr = np.zeros((windows.shape[0], h.shape[1]), dtype='i4')
    # iterate over wi...
Helper function for plotting haplotype frequencies in moving windows.

Parameters
----------
h : array_like, int, shape (n_variants, n_haplotypes)
    Haplotype array.
size : int
    The window size (number of variants).
start : int, optional
    The index at which to start.
stop : i...
def plot_moving_haplotype_frequencies(pos, h, size, start=0, stop=None, n=None,
                                      palette='Paired', singleton_color='w',
                                      ax=None):
    import matplotlib as mpl
    import matplotlib.pyplot as plt
    import seaborn as sns
    # setup figure
    ...
Plot haplotype frequencies in moving windows over the genome.

Parameters
----------
pos : array_like, int, shape (n_items,)
    Variant positions, using 1-based coordinates, in ascending order.
h : array_like, int, shape (n_variants, n_haplotypes)
    Haplotype array.
size : int
    The...
def moving_delta_tajima_d(ac1, ac2, size, start=0, stop=None, step=None):
    d1 = moving_tajima_d(ac1, size=size, start=start, stop=stop, step=step)
    d2 = moving_tajima_d(ac2, size=size, start=start, stop=stop, step=step)
    delta = d1 - d2
    delta_z = (delta - np.mean(delta)) / np.std(delta)
    return del...
Compute the difference in Tajima's D between two populations in moving
windows.

Parameters
----------
ac1 : array_like, int, shape (n_variants, n_alleles)
    Allele counts array for the first population.
ac2 : array_like, int, shape (n_variants, n_alleles)
    Allele counts array for the s...
def make_similar_sized_bins(x, n):
    # copy and sort the array
    y = np.array(x).flatten()
    y.sort()
    # setup bins
    bins = [y[0]]
    # determine step size
    step = len(y) // n
    # add bin edges
    for i in range(step, len(y), step):
        # get value at this index
        v = y[i]
        # on...
Utility function to create a set of bins over the range of values in
`x` such that each bin contains roughly the same number of values.

Parameters
----------
x : array_like
    The values to be binned.
n : int
    The number of bins to create.

Returns
-------
bins : ndarray
    ...
def standardize(score):
    score = asarray_ndim(score, 1)
    return (score - np.nanmean(score)) / np.nanstd(score)
Centre and scale to unit variance.
def standardize_by_allele_count(score, aac, bins=None, n_bins=None,
                                diagnostics=True):
    from scipy.stats import binned_statistic
    # check inputs
    score = asarray_ndim(score, 1)
    aac = asarray_ndim(aac, 1)
    check_dim0_aligned(score, aac)
    # remove nans
    nonan = ~...
Standardize `score` within allele frequency bins.

Parameters
----------
score : array_like, float
    The score to be standardized, e.g., IHS or NSL.
aac : array_like, int
    An array of alternate allele counts.
bins : array_like, int, optional
    Allele count bins, overrides `n_bins`...
def pbs(ac1, ac2, ac3, window_size, window_start=0, window_stop=None,
        window_step=None, normed=True):
    # normalise and check inputs
    ac1 = AlleleCountsArray(ac1)
    ac2 = AlleleCountsArray(ac2)
    ac3 = AlleleCountsArray(ac3)
    check_dim0_aligned(ac1, ac2, ac3)
    # compute fst
    fst12 = movin...
Compute the population branching statistic (PBS) which performs a
comparison of allele frequencies between three populations to detect
genome regions that are unusually differentiated in one population
relative to the other two populations.

Parameters
----------
ac1 : array_like, int
    Allele...
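A sketch of the PBS combination itself (after Yi et al. 2010), with made-up per-window Fst values; the function above computes these via moving-window Fst internally. Branch lengths are log-transformed Fst values.

import numpy as np

fst12 = np.array([0.10, 0.30])
fst13 = np.array([0.20, 0.50])
fst23 = np.array([0.05, 0.10])
t12, t13, t23 = (-np.log(1 - f) for f in (fst12, fst13, fst23))
pbs1 = (t12 + t13 - t23) / 2  # length of the branch leading to population 1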
def moving_statistic(values, statistic, size, start=0, stop=None, step=None,
                     **kwargs):
    windows = index_windows(values, size, start, stop, step)
    # setup output
    out = np.array([statistic(values[i:j], **kwargs) for i, j in windows])
    return out
Calculate a statistic in a moving window over `values`.

Parameters
----------
values : array_like
    The data to summarise.
statistic : function
    The statistic to compute within each window.
size : int
    The window size (number of values).
start : int, optional
    The in...
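A minimal usage sketch (values made up): np.sum over non-overlapping windows of two values.

import numpy as np
import allel

allel.moving_statistic([2, 5, 8, 16], statistic=np.sum, size=2)
# -> array([ 7, 24])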
def index_windows(values, size, start, stop, step):
    # determine step
    if stop is None:
        stop = len(values)
    if step is None:
        # non-overlapping
        step = size
    # iterate over windows
    for window_start in range(start, stop, step):
        window_stop = window_start + size
        ...
Convenience function to construct windows for the :func:`moving_statistic` function.
def position_windows(pos, size, start, stop, step):
    last = False
    # determine start and stop positions
    if start is None:
        start = pos[0]
    if stop is None:
        stop = pos[-1]
    if step is None:
        # non-overlapping
        step = size
    windows = []
    for window_start in range(st...
Convenience function to construct windows for the :func:`windowed_statistic` and :func:`windowed_count` functions.
def window_locations(pos, windows):
    start_locs = np.searchsorted(pos, windows[:, 0])
    stop_locs = np.searchsorted(pos, windows[:, 1], side='right')
    locs = np.column_stack((start_locs, stop_locs))
    return locs
Locate indices in `pos` corresponding to the start and stop positions of `windows`.
def windowed_count(pos, size=None, start=None, stop=None, step=None,
                   windows=None):
    # assume sorted positions
    if not isinstance(pos, SortedIndex):
        pos = SortedIndex(pos, copy=False)
    # setup windows
    if windows is None:
        windows = position_windows(pos, size, start, s...
Count the number of items in windows over a single chromosome/contig.

Parameters
----------
pos : array_like, int, shape (n_items,)
    The item positions in ascending order, using 1-based coordinates.
size : int, optional
    The window size (number of bases).
start : int, optional
    ...
def windowed_statistic(pos, values, statistic, size=None, start=None,
                       stop=None, step=None, windows=None, fill=np.nan):
    # assume sorted positions
    if not isinstance(pos, SortedIndex):
        pos = SortedIndex(pos, copy=False)
    # check lengths are equal
    if isinstance(values, tu...
Calculate a statistic from items in windows over a single
chromosome/contig.

Parameters
----------
pos : array_like, int, shape (n_items,)
    The item positions in ascending order, using 1-based coordinates.
values : array_like, int, shape (n_items,)
    The values to summarise. May also...
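A hedged usage sketch (positions and values made up): count nonzero values in 10 bp windows.

import numpy as np
import allel

pos = [1, 7, 12, 15, 28]
values = [True, True, False, True, False]
nnz, windows, counts = allel.windowed_statistic(
    pos, values, statistic=np.count_nonzero, size=10)
# nnz: per-window statistic; windows: (start, stop) pairs; counts: number
# of items falling in each window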
def per_base(x, windows, is_accessible=None, fill=np.nan):
    # calculate window sizes
    if is_accessible is None:
        # N.B., window stops are included
        n_bases = np.diff(windows, axis=1).reshape(-1) + 1
    else:
        n_bases = np.array([np.count_nonzero(is_accessible[i-1:j])
                    ...
Calculate the per-base value of a windowed statistic.

Parameters
----------
x : array_like, shape (n_windows,)
    The statistic to average per-base.
windows : array_like, int, shape (n_windows, 2)
    The windows used, as an array of (window_start, window_stop)
    positions using 1-based...
def equally_accessible_windows(is_accessible, size, start=0, stop=None,
                               step=None):
    pos_accessible, = np.nonzero(is_accessible)
    pos_accessible += 1  # convert to 1-based coordinates
    # N.B., need some care in handling start and stop positions, these are
    # genomic positions at which to start and stop...
Create windows each containing the same number of accessible bases.

Parameters
----------
is_accessible : array_like, bool, shape (n_bases,)
    Array defining accessible status of all bases on a contig/chromosome.
size : int
    Window size (number of accessible bases).
start : int, option...
def attachment_form(context, obj):
    if context['user'].has_perm('attachments.add_attachment'):
        return {
            'form': AttachmentForm(),
            'form_url': add_url_for_obj(obj),
            'next': context.request.build_absolute_uri(),
        }
    else:
        return {'form': None}
Renders an "upload attachment" form. The user must have the
``attachments.add_attachment`` permission to add attachments.
def attachment_delete_link(context, attachment):
    if context['user'].has_perm('attachments.delete_foreign_attachments') or (
        context['user'] == attachment.creator
        and context['user'].has_perm('attachments.delete_attachment')
    ):
        return {
            'next': context.request.build_absol...
Renders an HTML link to the delete view of the given attachment. Returns
no content if the requesting user has no permission to delete
attachments. The user may delete an attachment if they either have the
``attachments.delete_attachment`` permission and are the creator of the
attachment, or have the ``attachments.delete_foreign_attachments``...
def attachment_upload(instance, filename):
    return 'attachments/{app}_{model}/{pk}/{filename}'.format(
        app=instance.content_object._meta.app_label,
        model=instance.content_object._meta.object_name.lower(),
        pk=instance.content_object.pk,
        filename=filename,
    )
Stores the attachment in a "per module/appname/primary key" folder
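For illustration (hypothetical model): an attachment to a ``blog.Post`` instance with primary key 42 and uploaded file ``notes.pdf`` would be stored at ``attachments/blog_post/42/notes.pdf``.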
def show_heatmap(self, blocking=True, output_file=None, enable_scroll=False):
    if output_file is None:
        if enable_scroll:
            # Add a new axes which will be used as scroll bar.
            axpos = plt.axes([0.12, 0.1, 0.625, 0.03])
            spos = Slider(axpos, "Scroll"...
Method to actually display the heatmap created.

@param blocking: When set to False, shows the plot without blocking.
@param output_file: If not None, the heatmap image is written to this
    file. Supported formats: (eps, pdf, pgf, png, ps, raw, rgba, svg,
    svgz)
@para...
def __profile_file(self):
    self.line_profiler = pprofile.Profile()
    self.line_profiler.runfile(
        open(self.pyfile.path, "r"), {}, self.pyfile.path
    )
Method used to profile the given file line by line.
def __get_line_profile_data(self):
    if self.line_profiler is None:
        return {}
    # the [0] is because pprofile.Profile.file_dict stores the line_dict
    # in a list so that it can be modified in a thread-safe way
    # see https://github.com/vpelletier/pprofile/blob/da3d60a1b59a...
Method to procure line profiles.

@return: Line profiles if the file has been profiled, else an empty
    dictionary.
def __fetch_heatmap_data_from_profile(self):
    # Read lines from file.
    with open(self.pyfile.path, "r") as file_to_read:
        for line in file_to_read:
            # Remove return char from the end of the line and add a
            # space in the beginning for better visibility.
            ...
Method to create heatmap data from profile information.
def __create_heatmap_plot(self):
    # Define the heatmap plot.
    height = len(self.pyfile.lines) / 3
    width = max(map(lambda x: len(x), self.pyfile.lines)) / 8
    self.fig, self.ax = plt.subplots(figsize=(width, height))
    # Set second sub plot to occupy bottom 20%
    plt.subp...
Method to actually create the heatmap from profile stats.
def main():
    # Create command line parser.
    parser = argparse.ArgumentParser()
    # Adding command line arguments.
    parser.add_argument("-o", "--out", help="Output file", default=None)
    parser.add_argument(
        "pyfile", help="Python file to be profiled", default=None
    )
    # Parse command lin...
Starting point for the program execution.
def round(self, ndigits=0):
    if ndigits is None:
        ndigits = 0
    return self.__class__(
        amount=self.amount.quantize(Decimal('1e' + str(-ndigits))),
        currency=self.currency)
Rounds the amount using the current ``Decimal`` rounding algorithm.
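A standalone sketch of the quantize arithmetic the method relies on (values made up; the default Decimal context rounds half-to-even):

from decimal import Decimal

amount = Decimal('123.456')
print(amount.quantize(Decimal('1e-2')))  # 123.46 (ndigits=2)
print(amount.quantize(Decimal('1e0')))   # 123    (ndigits=0)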
def run():
    sys.path.insert(0, os.getcwd())
    logging.basicConfig(level=logging.INFO, handlers=[logging.StreamHandler()])
    parser = argparse.ArgumentParser(description="Manage Application", add_help=False)
    parser.add_argument('app', metavar='app', type=str,
                        help='Application mod...
CLI endpoint.
def command(self, init=False):
    def wrapper(func):
        header = '\n'.join([s for s in (func.__doc__ or '').split('\n')
                            if not s.strip().startswith(':')])
        parser = self.parsers.add_parser(func.__name__, description=header)
        args, vargs, kw, d...
Define CLI command.
def routes_register(app, handler, *paths, methods=None, router=None, name=None):
    if router is None:
        router = app.router
    handler = to_coroutine(handler)
    resources = []
    for path in paths:
        # Register any exception to app
        if isinstance(path, type) and issubclass(path, BaseExcept...
Register routes.
def parse(path):
    parsed = re.sre_parse.parse(path)
    for case, _ in parsed:
        if case not in (re.sre_parse.LITERAL, re.sre_parse.ANY):
            break
    else:
        return path
    path = path.strip('^$')

    def parse_(match):
        [part] = match.groups()
        match = DYNR_RE.match(part)
        ...
Parse URL path and convert it to regexp if needed.
def url_for(self, *subgroups, **groups):
    parsed = re.sre_parse.parse(self._pattern.pattern)
    subgroups = {n: str(v) for n, v in enumerate(subgroups, 1)}
    groups_ = dict(parsed.pattern.groupdict)
    subgroups.update({
        groups_[k0]: str(v0)
        for k0, v0 in groups.it...
Build URL.
def state_not_literal(self, value):
    value = negate = chr(value)
    while value == negate:
        value = choice(self.literals)
    yield value
Parse not literal.
def state_max_repeat(self, value):
    min_, max_, value = value
    value = [val for val in Traverser(value, self.groups)]
    if not min_ and max_:
        for val in value:
            if isinstance(val, required):
                min_ = 1
                break
    for val in...
Parse repeatable parts.
def state_in(self, value):
    value = [val for val in Traverser(value, self.groups)]
    if not value or not value[0]:
        for val in self.literals - set(value):
            return (yield val)
    yield value[0]
Parse ranges.
def state_category(value):
    if value == re.sre_parse.CATEGORY_DIGIT:
        return (yield '0')
    if value == re.sre_parse.CATEGORY_WORD:
        return (yield 'x')
Parse categories.
def state_subpattern(self, value):
    num, *_, parsed = value
    if num in self.groups:
        return (yield required(self.groups[num]))
    yield from Traverser(parsed, groups=self.groups)
Parse subpatterns.