func_code_string
stringlengths
52
1.94M
func_documentation_string
stringlengths
1
47.2k
def vcf_to_csv(input, output, fields=None, exclude_fields=None, types=None, numbers=None, alt_number=DEFAULT_ALT_NUMBER, fills=None, region=None, tabix='tabix', transformers=None, ...
r"""Read data from a VCF file and write out to a comma-separated values (CSV) file. Parameters ---------- input : string {input} output : string {output} fields : list of strings, optional {fields} exclude_fields : list of strings, optional {exclude_fields} t...
def vcf_to_recarray(input, fields=None, exclude_fields=None, types=None, numbers=None, alt_number=DEFAULT_ALT_NUMBER, fills=None, region=None, tabix='tabix', ...
Read data from a VCF file into a NumPy recarray. Parameters ---------- input : string {input} fields : list of strings, optional {fields} exclude_fields : list of strings, optional {exclude_fields} types : dict, optional {types} numbers : dict, optional ...
def write_fasta(path, sequences, names, mode='w', width=80): # check inputs if isinstance(sequences, np.ndarray): # single sequence sequences = [sequences] names = [names] if len(sequences) != len(names): raise ValueError('must provide the same number of sequences and na...
Write nucleotide sequences stored as numpy arrays to a FASTA file. Parameters ---------- path : string File path. sequences : sequence of arrays One or more ndarrays of dtype 'S1' containing the sequences. names : sequence of strings Names of the sequences. mode : strin...
def heterozygosity_observed(g, fill=np.nan): # check inputs if not hasattr(g, 'count_het') or not hasattr(g, 'count_called'): g = GenotypeArray(g, copy=False) # count hets n_het = np.asarray(g.count_het(axis=1)) n_called = np.asarray(g.count_called(axis=1)) # calculate rate of obser...
Calculate the rate of observed heterozygosity for each variant. Parameters ---------- g : array_like, int, shape (n_variants, n_samples, ploidy) Genotype array. fill : float, optional Use this value for variants where all calls are missing. Returns ------- ho : ndarray, f...
def heterozygosity_expected(af, ploidy, fill=np.nan): # check inputs af = asarray_ndim(af, 2) # calculate expected heterozygosity out = 1 - np.sum(np.power(af, ploidy), axis=1) # fill values where allele frequencies could not be calculated af_sum = np.sum(af, axis=1) with ignore_invalid...
Calculate the expected rate of heterozygosity for each variant under Hardy-Weinberg equilibrium. Parameters ---------- af : array_like, float, shape (n_variants, n_alleles) Allele frequencies array. ploidy : int Sample ploidy. fill : float, optional Use this value for v...
def inbreeding_coefficient(g, fill=np.nan): # check inputs if not hasattr(g, 'count_het') or not hasattr(g, 'count_called'): g = GenotypeArray(g, copy=False) # calculate observed and expected heterozygosity ho = heterozygosity_observed(g) af = g.count_alleles().to_frequencies() he =...
Calculate the inbreeding coefficient for each variant. Parameters ---------- g : array_like, int, shape (n_variants, n_samples, ploidy) Genotype array. fill : float, optional Use this value for variants where the expected heterozygosity is zero. Returns ------- f ...
def mendel_errors(parent_genotypes, progeny_genotypes): # setup parent_genotypes = GenotypeArray(parent_genotypes) progeny_genotypes = GenotypeArray(progeny_genotypes) check_ploidy(parent_genotypes.ploidy, 2) check_ploidy(progeny_genotypes.ploidy, 2) # transform into per-call allele counts ...
Locate genotype calls not consistent with Mendelian transmission of alleles. Parameters ---------- parent_genotypes : array_like, int, shape (n_variants, 2, 2) Genotype calls for the two parents. progeny_genotypes : array_like, int, shape (n_variants, n_progeny, 2) Genotype calls fo...
def paint_transmission(parent_haplotypes, progeny_haplotypes): # check inputs parent_haplotypes = HaplotypeArray(parent_haplotypes) progeny_haplotypes = HaplotypeArray(progeny_haplotypes) if parent_haplotypes.n_haplotypes != 2: raise ValueError('exactly two parental haplotypes should be pro...
Paint haplotypes inherited from a single diploid parent according to their allelic inheritance. Parameters ---------- parent_haplotypes : array_like, int, shape (n_variants, 2) Both haplotypes from a single diploid parent. progeny_haplotypes : array_like, int, shape (n_variants, n_progeny) ...
def phase_progeny_by_transmission(g): # setup g = GenotypeArray(g, dtype='i1', copy=True) check_ploidy(g.ploidy, 2) check_min_samples(g.n_samples, 3) # run the phasing # N.B., a copy has already been made, so no need to make memoryview safe is_phased = _opt_phase_progeny_by_transmission...
Phase progeny genotypes from a trio or cross using Mendelian transmission. Parameters ---------- g : array_like, int, shape (n_variants, n_samples, 2) Genotype array, with parents as first two columns and progeny as remaining columns. Returns ------- g : ndarray, int8, shap...
def phase_parents_by_transmission(g, window_size): # setup check_type(g, GenotypeArray) check_dtype(g.values, 'i1') check_ploidy(g.ploidy, 2) if g.is_phased is None: raise ValueError('genotype array must first have progeny phased by transmission') check_min_samples(g.n_samples, 3) ...
Phase parent genotypes from a trio or cross, given progeny genotypes already phased by Mendelian transmission. Parameters ---------- g : GenotypeArray Genotype array, with parents as first two columns and progeny as remaining columns, where progeny genotypes are already phased. wind...
def phase_by_transmission(g, window_size, copy=True): # setup g = np.asarray(g, dtype='i1') g = GenotypeArray(g, copy=copy) g._values = memoryview_safe(g.values) check_ploidy(g.ploidy, 2) check_min_samples(g.n_samples, 3) # phase the progeny is_phased = _opt_phase_progeny_by_transmi...
Phase genotypes in a trio or cross where possible using Mendelian transmission. Parameters ---------- g : array_like, int, shape (n_variants, n_samples, 2) Genotype array, with parents as first two columns and progeny as remaining columns. window_size : int Number of previou...
def get_blen_array(data, blen=None): if blen is None: if hasattr(data, 'chunklen'): # bcolz carray return data.chunklen elif hasattr(data, 'chunks') and \ hasattr(data, 'shape') and \ hasattr(data.chunks, '__len__') and \ h...
Try to guess a reasonable block length to use for block-wise iteration over `data`.
def h5fmem(**kwargs): # need a file name even tho nothing is ever written fn = tempfile.mktemp() # file creation args kwargs['mode'] = 'w' kwargs['driver'] = 'core' kwargs['backing_store'] = False # open HDF5 file h5f = h5py.File(fn, **kwargs) return h5f
Create an in-memory HDF5 file.
def h5ftmp(**kwargs): # create temporary file name suffix = kwargs.pop('suffix', '.h5') prefix = kwargs.pop('prefix', 'scikit_allel_') tempdir = kwargs.pop('dir', None) fn = tempfile.mktemp(suffix=suffix, prefix=prefix, dir=tempdir) atexit.register(os.remove, fn) # file creation args ...
Create an HDF5 file backed by a temporary file.
def store(data, arr, start=0, stop=None, offset=0, blen=None): # setup blen = _util.get_blen_array(data, blen) if stop is None: stop = len(data) else: stop = min(stop, len(data)) length = stop - start if length < 0: raise ValueError('invalid stop/start') # copy b...
Copy `data` block-wise into `arr`.
def copy(data, start=0, stop=None, blen=None, storage=None, create='array', **kwargs): # setup storage = _util.get_storage(storage) blen = _util.get_blen_array(data, blen) if stop is None: stop = len(data) else: stop = min(stop, len(data)) length = stop - start ...
Copy `data` block-wise into a new array.
def copy_table(tbl, start=0, stop=None, blen=None, storage=None, create='table', **kwargs): # setup names, columns = _util.check_table_like(tbl) storage = _util.get_storage(storage) blen = _util.get_blen_table(tbl, blen) if stop is None: stop = len(columns[0]) else: ...
Copy `tbl` block-wise into a new table.
def map_blocks(data, f, blen=None, storage=None, create='array', **kwargs): # setup storage = _util.get_storage(storage) if isinstance(data, tuple): blen = max(_util.get_blen_array(d, blen) for d in data) else: blen = _util.get_blen_array(data, blen) if isinstance(data, tuple): ...
Apply function `f` block-wise over `data`.
def reduce_axis(data, reducer, block_reducer, mapper=None, axis=None, blen=None, storage=None, create='array', **kwargs): # setup storage = _util.get_storage(storage) blen = _util.get_blen_array(data, blen) length = len(data) # normalise axis arg if isinstance(axis, int): ...
Apply an operation to `data` that reduces over one or more axes.
def amax(data, axis=None, mapper=None, blen=None, storage=None, create='array', **kwargs): return reduce_axis(data, axis=axis, reducer=np.amax, block_reducer=np.maximum, mapper=mapper, blen=blen, storage=storage, create=create, **kwargs)
Compute the maximum value.
def amin(data, axis=None, mapper=None, blen=None, storage=None, create='array', **kwargs): return reduce_axis(data, axis=axis, reducer=np.amin, block_reducer=np.minimum, mapper=mapper, blen=blen, storage=storage, create=create, **kwargs)
Compute the minimum value.
def asum(data, axis=None, mapper=None, blen=None, storage=None, create='array', **kwargs): return reduce_axis(data, axis=axis, reducer=np.sum, block_reducer=np.add, mapper=mapper, blen=blen, storage=storage, create=create, **kwargs)
Compute the sum.
def count_nonzero(data, mapper=None, blen=None, storage=None, create='array', **kwargs): return reduce_axis(data, reducer=np.count_nonzero, block_reducer=np.add, mapper=mapper, blen=blen, storage=storage, create=create, **kwargs)
Count the number of non-zero elements.
def compress(condition, data, axis=0, out=None, blen=None, storage=None, create='array', **kwargs): # setup if out is not None: # argument is only there for numpy API compatibility raise NotImplementedError('out argument is not supported') storage = _util.get_storage(storag...
Return selected slices of an array along given axis.
def take(data, indices, axis=0, out=None, mode='raise', blen=None, storage=None, create='array', **kwargs): # setup if out is not None: # argument is only there for numpy API compatibility raise NotImplementedError('out argument is not supported') length = len(data) if axis...
Take elements from an array along an axis.
def compress_table(condition, tbl, axis=None, out=None, blen=None, storage=None, create='table', **kwargs): # setup if axis is not None and axis != 0: raise NotImplementedError('only axis 0 is supported') if out is not None: # argument is only there for numpy API comp...
Return selected rows of a table.
def take_table(tbl, indices, axis=None, out=None, mode='raise', blen=None, storage=None, create='table', **kwargs): # setup if axis is not None and axis != 0: raise NotImplementedError('only axis 0 is supported') if out is not None: # argument is only there for numpy API ...
Return selected rows of a table.
def subset(data, sel0=None, sel1=None, blen=None, storage=None, create='array', **kwargs): # TODO refactor sel0 and sel1 normalization with ndarray.subset # setup storage = _util.get_storage(storage) blen = _util.get_blen_array(data, blen) length = len(data) if sel0 is not None: ...
Return selected rows and columns of an array.
def concatenate_table(tup, blen=None, storage=None, create='table', **kwargs): # setup storage = _util.get_storage(storage) if not isinstance(tup, (tuple, list)): raise ValueError('expected tuple or list, found %r' % tup) if len(tup) < 2: raise ValueError('expected two or more table...
Stack tables in sequence vertically (row-wise).
def concatenate(tup, axis=0, blen=None, storage=None, create='array', **kwargs): # setup storage = _util.get_storage(storage) if not isinstance(tup, (tuple, list)): raise ValueError('expected tuple or list, found %r' % tup) if len(tup) < 2: raise ValueError('expected two or more arr...
Concatenate arrays.
def binary_op(data, op, other, blen=None, storage=None, create='array', **kwargs): # normalise scalars if hasattr(other, 'shape') and len(other.shape) == 0: other = other[()] if np.isscalar(other): def f(block): return op(block, other) return map_blocks...
Compute a binary operation block-wise over `data`.
def eval_table(tbl, expression, vm='python', blen=None, storage=None, create='array', vm_kwargs=None, **kwargs): # setup storage = _util.get_storage(storage) names, columns = _util.check_table_like(tbl) length = len(columns[0]) if vm_kwargs is None: vm_kwargs = dict() ...
Evaluate `expression` against columns of a table.
def create_allele_mapping(ref, alt, alleles, dtype='i1'): ref = asarray_ndim(ref, 1) alt = asarray_ndim(alt, 1, 2) alleles = asarray_ndim(alleles, 1, 2) check_dim0_aligned(ref, alt, alleles) # reshape for convenience ref = ref[:, None] if alt.ndim == 1: alt = alt[:, None] if...
Create an array mapping variant alleles into a different allele index system. Parameters ---------- ref : array_like, S1, shape (n_variants,) Reference alleles. alt : array_like, S1, shape (n_variants, n_alt_alleles) Alternate alleles. alleles : array_like, S1, shape (n_variants...
def locate_fixed_differences(ac1, ac2): # check inputs ac1 = asarray_ndim(ac1, 2) ac2 = asarray_ndim(ac2, 2) check_dim0_aligned(ac1, ac2) ac1, ac2 = ensure_dim1_aligned(ac1, ac2) # stack allele counts for convenience pac = np.dstack([ac1, ac2]) # count numbers of alleles called in e...
Locate variants with no shared alleles between two populations. Parameters ---------- ac1 : array_like, int, shape (n_variants, n_alleles) Allele counts array from the first population. ac2 : array_like, int, shape (n_variants, n_alleles) Allele counts array from the second population. ...
def locate_private_alleles(*acs): # check inputs acs = [asarray_ndim(ac, 2) for ac in acs] check_dim0_aligned(*acs) acs = ensure_dim1_aligned(*acs) # stack allele counts for convenience pac = np.dstack(acs) # count the numbers of populations with each allele npa = np.sum(pac > 0, ax...
Locate alleles that are found only in a single population. Parameters ---------- *acs : array_like, int, shape (n_variants, n_alleles) Allele counts arrays from each population. Returns ------- loc : ndarray, bool, shape (n_variants, n_alleles) Boolean array where elements are ...
def weir_cockerham_fst(g, subpops, max_allele=None, blen=None): # check inputs if not hasattr(g, 'shape') or not hasattr(g, 'ndim'): g = GenotypeArray(g, copy=False) if g.ndim != 3: raise ValueError('g must have three dimensions') if g.shape[2] != 2: raise NotImplementedErro...
Compute the variance components from the analyses of variance of allele frequencies according to Weir and Cockerham (1984). Parameters ---------- g : array_like, int, shape (n_variants, n_samples, ploidy) Genotype array. subpops : sequence of sequences of ints Sample indices for eac...
def hudson_fst(ac1, ac2, fill=np.nan): # flake8: noqa # check inputs ac1 = asarray_ndim(ac1, 2) ac2 = asarray_ndim(ac2, 2) check_dim0_aligned(ac1, ac2) ac1, ac2 = ensure_dim1_aligned(ac1, ac2) # calculate these once only an1 = np.sum(ac1, axis=1) an2 = np.sum(ac2, axis=1) # cal...
Calculate the numerator and denominator for Fst estimation using the method of Hudson (1992) elaborated by Bhatia et al. (2013). Parameters ---------- ac1 : array_like, int, shape (n_variants, n_alleles) Allele counts array from the first population. ac2 : array_like, int, shape (n_variants...
def patterson_fst(aca, acb): from allel.stats.admixture import patterson_f2, h_hat num = patterson_f2(aca, acb) den = num + h_hat(aca) + h_hat(acb) return num, den
Estimator of differentiation between populations A and B based on the F2 parameter. Parameters ---------- aca : array_like, int, shape (n_variants, 2) Allele counts for population A. acb : array_like, int, shape (n_variants, 2) Allele counts for population B. Returns ------...
def windowed_weir_cockerham_fst(pos, g, subpops, size=None, start=None, stop=None, step=None, windows=None, fill=np.nan, max_allele=None): # compute values per-variant a, b, c = weir_cockerham_fst(g, subpops, max_allele=max_allele) # defin...
Estimate average Fst in windows over a single chromosome/contig, following the method of Weir and Cockerham (1984). Parameters ---------- pos : array_like, int, shape (n_items,) Variant positions, using 1-based coordinates, in ascending order. g : array_like, int, shape (n_variants, n_sampl...
def windowed_hudson_fst(pos, ac1, ac2, size=None, start=None, stop=None, step=None, windows=None, fill=np.nan): # compute values per-variants num, den = hudson_fst(ac1, ac2) # define the statistic to compute within each window def average_fst(wn, wd): return np.nansu...
Estimate average Fst in windows over a single chromosome/contig, following the method of Hudson (1992) elaborated by Bhatia et al. (2013). Parameters ---------- pos : array_like, int, shape (n_items,) Variant positions, using 1-based coordinates, in ascending order. ac1 : array_like, int, s...
def moving_weir_cockerham_fst(g, subpops, size, start=0, stop=None, step=None, max_allele=None): # calculate per-variant values a, b, c = weir_cockerham_fst(g, subpops, max_allele=max_allele) # compute the numerator and denominator in moving windows num = moving_statis...
Estimate average Fst in moving windows over a single chromosome/contig, following the method of Weir and Cockerham (1984). Parameters ---------- g : array_like, int, shape (n_variants, n_samples, ploidy) Genotype array. subpops : sequence of sequences of ints Sample indices for each...
def moving_hudson_fst(ac1, ac2, size, start=0, stop=None, step=None): # calculate per-variant values num, den = hudson_fst(ac1, ac2, fill=np.nan) # compute the numerator and denominator in moving windows num_sum = moving_statistic(num, statistic=np.nansum, size=size, ...
Estimate average Fst in moving windows over a single chromosome/contig, following the method of Hudson (1992) elaborated by Bhatia et al. (2013). Parameters ---------- ac1 : array_like, int, shape (n_variants, n_alleles) Allele counts array from the first population. ac2 : array_like, int, ...
def moving_patterson_fst(ac1, ac2, size, start=0, stop=None, step=None): # calculate per-variant values num, den = patterson_fst(ac1, ac2) # compute the numerator and denominator in moving windows num_sum = moving_statistic(num, statistic=np.nansum, size=size, start=s...
Estimate average Fst in moving windows over a single chromosome/contig, following the method of Patterson (2012). Parameters ---------- ac1 : array_like, int, shape (n_variants, n_alleles) Allele counts array from the first population. ac2 : array_like, int, shape (n_variants, n_alleles) ...
def average_weir_cockerham_fst(g, subpops, blen, max_allele=None): # calculate per-variant values a, b, c = weir_cockerham_fst(g, subpops, max_allele=max_allele) # calculate overall estimate a_sum = np.nansum(a) b_sum = np.nansum(b) c_sum = np.nansum(c) fst = a_sum / (a_sum + b_sum + c_...
Estimate average Fst and standard error using the block-jackknife. Parameters ---------- g : array_like, int, shape (n_variants, n_samples, ploidy) Genotype array. subpops : sequence of sequences of ints Sample indices for each subpopulation. blen : int Block size (number of...
def average_hudson_fst(ac1, ac2, blen): # calculate per-variant values num, den = hudson_fst(ac1, ac2, fill=np.nan) # calculate overall estimate fst = np.nansum(num) / np.nansum(den) # compute the numerator and denominator within each block num_bsum = moving_statistic(num, statistic=np.nans...
Estimate average Fst between two populations and standard error using the block-jackknife. Parameters ---------- ac1 : array_like, int, shape (n_variants, n_alleles) Allele counts array from the first population. ac2 : array_like, int, shape (n_variants, n_alleles) Allele counts arr...
def average_patterson_fst(ac1, ac2, blen): # calculate per-variant values num, den = patterson_fst(ac1, ac2) # calculate overall estimate fst = np.nansum(num) / np.nansum(den) # compute the numerator and denominator within each block num_bsum = moving_statistic(num, statistic=np.nansum, siz...
Estimate average Fst between two populations and standard error using the block-jackknife. Parameters ---------- ac1 : array_like, int, shape (n_variants, n_alleles) Allele counts array from the first population. ac2 : array_like, int, shape (n_variants, n_alleles) Allele counts arr...
def rogers_huff_r(gn): # check inputs gn = asarray_ndim(gn, 2, dtype='i1') gn = memoryview_safe(gn) # compute correlation coefficients r = gn_pairwise_corrcoef_int8(gn) # convenience for singletons if r.size == 1: r = r[0] return r
Estimate the linkage disequilibrium parameter *r* for each pair of variants using the method of Rogers and Huff (2008). Parameters ---------- gn : array_like, int8, shape (n_variants, n_samples) Diploid genotypes at biallelic variants, coded as the number of alternate alleles per call (...
def rogers_huff_r_between(gna, gnb): # check inputs gna = asarray_ndim(gna, 2, dtype='i1') gnb = asarray_ndim(gnb, 2, dtype='i1') gna = memoryview_safe(gna) gnb = memoryview_safe(gnb) # compute correlation coefficients r = gn_pairwise2_corrcoef_int8(gna, gnb) # convenience for singl...
Estimate the linkage disequilibrium parameter *r* for each pair of variants between the two input arrays, using the method of Rogers and Huff (2008). Parameters ---------- gna, gnb : array_like, int8, shape (n_variants, n_samples) Diploid genotypes at biallelic variants, coded as the number...
def locate_unlinked(gn, size=100, step=20, threshold=.1, blen=None): # check inputs if not hasattr(gn, 'shape') or not hasattr(gn, 'dtype'): gn = np.asarray(gn, dtype='i1') if gn.ndim != 2: raise ValueError('gn must have two dimensions') # setup output loc = np.ones(gn.shape[0],...
Locate variants in approximate linkage equilibrium, where r**2 is below the given `threshold`. Parameters ---------- gn : array_like, int8, shape (n_variants, n_samples) Diploid genotypes at biallelic variants, coded as the number of alternate alleles per call (i.e., 0 = hom ref, 1 = he...
def windowed_r_squared(pos, gn, size=None, start=None, stop=None, step=None, windows=None, fill=np.nan, percentile=50): # define the statistic function if isinstance(percentile, (list, tuple)): fill = [fill for _ in percentile] def statistic(gnw): r_square...
Summarise linkage disequilibrium in windows over a single chromosome/contig. Parameters ---------- pos : array_like, int, shape (n_items,) The item positions in ascending order, using 1-based coordinates.. gn : array_like, int8, shape (n_variants, n_samples) Diploid genotypes at bia...
def plot_pairwise_ld(m, colorbar=True, ax=None, imshow_kwargs=None): import matplotlib.pyplot as plt # check inputs m_square = ensure_square(m) # blank out lower triangle and flip up/down m_square = np.tril(m_square)[::-1, :] # set up axes if ax is None: # make a square figure w...
Plot a matrix of genotype linkage disequilibrium values between all pairs of variants. Parameters ---------- m : array_like Array of linkage disequilibrium values in condensed form. colorbar : bool, optional If True, add a colorbar to the current figure. ax : axes, optional ...
def array_to_hdf5(a, parent, name, **kwargs): import h5py h5f = None if isinstance(parent, str): h5f = h5py.File(parent, mode='a') parent = h5f try: kwargs.setdefault('chunks', True) # auto-chunking kwargs.setdefault('dtype', a.dtype) kwargs.setdefault('comp...
Write a Numpy array to an HDF5 dataset. Parameters ---------- a : ndarray Data to write. parent : string or h5py group Parent HDF5 file or group. If a string, will be treated as HDF5 file name. name : string Name or path of dataset to write data into. kwargs : ke...
def recarray_from_hdf5_group(*args, **kwargs): import h5py h5f = None if len(args) == 1: group = args[0] elif len(args) == 2: file_path, node_path = args h5f = h5py.File(file_path, mode='r') try: group = h5f[node_path] except Exception as e: ...
Load a recarray from columns stored as separate datasets with an HDF5 group. Either provide an h5py group as a single positional argument, or provide two positional arguments giving the HDF5 file path and the group node path within the file. The following optional parameters may be given. Par...
def recarray_to_hdf5_group(ra, parent, name, **kwargs): import h5py h5f = None if isinstance(parent, str): h5f = h5py.File(parent, mode='a') parent = h5f try: h5g = parent.require_group(name) for n in ra.dtype.names: array_to_hdf5(ra[n], h5g, n, **kwargs)...
Write each column in a recarray to a dataset in an HDF5 group. Parameters ---------- ra : recarray Numpy recarray to store. parent : string or h5py group Parent HDF5 file or group. If a string, will be treated as HDF5 file name. name : string Name or path of group to...
def subset(data, sel0, sel1): # check inputs data = np.asarray(data) if data.ndim < 2: raise ValueError('data must have 2 or more dimensions') sel0 = asarray_ndim(sel0, 1, allow_none=True) sel1 = asarray_ndim(sel1, 1, allow_none=True) # ensure indices if sel0 is not None and sel...
Apply selections on first and second axes.
def eval(self, expression, vm='python'): if vm == 'numexpr': import numexpr as ne return ne.evaluate(expression, local_dict=self) else: if PY2: # locals must be a mapping m = {k: self[k] for k in self.dtype.names} e...
Evaluate an expression against the table columns. Parameters ---------- expression : string Expression to evaluate. vm : {'numexpr', 'python'} Virtual machine to use. Returns ------- result : ndarray
def query(self, expression, vm='python'): condition = self.eval(expression, vm=vm) return self.compress(condition)
Evaluate expression and then use it to extract rows from the table. Parameters ---------- expression : string Expression to evaluate. vm : {'numexpr', 'python'} Virtual machine to use. Returns ------- result : structured array
def concatenate(self, others): if not isinstance(others, (list, tuple)): others = others, tup = (self.values,) + tuple(o.values for o in others) out = np.concatenate(tup, axis=0) out = type(self)(out) return out
Concatenate arrays.
def fill_masked(self, value=-1, copy=True): if self.mask is None: raise ValueError('no mask is set') # apply the mask data = np.array(self.values, copy=copy) data[self.mask, ...] = value if copy: out = type(self)(data) # wrap out.is_p...
Fill masked genotype calls with a given value. Parameters ---------- value : int, optional The fill value. copy : bool, optional If False, modify the array in place. Returns ------- g : GenotypeArray Examples -------- ...
def is_called(self): out = np.all(self.values >= 0, axis=-1) # handle mask if self.mask is not None: out &= ~self.mask return out
Find non-missing genotype calls. Returns ------- out : ndarray, bool, shape (n_variants, n_samples) Array where elements are True if the genotype call matches the condition. Examples -------- >>> import allel >>> g = allel.GenotypeArray(...
def is_missing(self): out = np.any(self.values < 0, axis=-1) # handle mask if self.mask is not None: out |= self.mask return out
Find missing genotype calls. Returns ------- out : ndarray, bool, shape (n_variants, n_samples) Array where elements are True if the genotype call matches the condition. Examples -------- >>> import allel >>> g = allel.GenotypeArray([[[0...
def is_hom(self, allele=None): if allele is None: allele1 = self.values[..., 0, np.newaxis] other_alleles = self.values[..., 1:] tmp = (allele1 >= 0) & (allele1 == other_alleles) out = np.all(tmp, axis=-1) else: out = np.all(self.value...
Find genotype calls that are homozygous. Parameters ---------- allele : int, optional Allele index. Returns ------- out : ndarray, bool, shape (n_variants, n_samples) Array where elements are True if the genotype call matches the cond...
def is_hom_alt(self): allele1 = self.values[..., 0, np.newaxis] other_alleles = self.values[..., 1:] tmp = (allele1 > 0) & (allele1 == other_alleles) out = np.all(tmp, axis=-1) # handle mask if self.mask is not None: out &= ~self.mask return o...
Find genotype calls that are homozygous for any alternate (i.e., non-reference) allele. Returns ------- out : ndarray, bool, shape (n_variants, n_samples) Array where elements are True if the genotype call matches the condition. Examples --------...
def is_het(self, allele=None): allele1 = self.values[..., 0, np.newaxis] # type: np.ndarray other_alleles = self.values[..., 1:] # type: np.ndarray out = np.all(self.values >= 0, axis=-1) & np.any(allele1 != other_alleles, axis=-1) if allele is not None: out &= np....
Find genotype calls that are heterozygous. Returns ------- out : ndarray, bool, shape (n_variants, n_samples) Array where elements are True if the genotype call matches the condition. allele : int, optional Heterozygous allele. Examples ...
def is_call(self, call): # guard conditions if not len(call) == self.shape[-1]: raise ValueError('invalid call ploidy: %s', repr(call)) if self.ndim == 2: call = np.asarray(call)[np.newaxis, :] else: call = np.asarray(call)[np.newaxis, np.newa...
Locate genotypes with a given call. Parameters ---------- call : array_like, int, shape (ploidy,) The genotype call to find. Returns ------- out : ndarray, bool, shape (n_variants, n_samples) Array where elements are True if the genotype is `call...
def count_called(self, axis=None): b = self.is_called() return np.sum(b, axis=axis)
Count called genotypes. Parameters ---------- axis : int, optional Axis over which to count, or None to perform overall count.
def count_missing(self, axis=None): b = self.is_missing() return np.sum(b, axis=axis)
Count missing genotypes. Parameters ---------- axis : int, optional Axis over which to count, or None to perform overall count.
def count_hom(self, allele=None, axis=None): b = self.is_hom(allele=allele) return np.sum(b, axis=axis)
Count homozygous genotypes. Parameters ---------- allele : int, optional Allele index. axis : int, optional Axis over which to count, or None to perform overall count.
def count_hom_ref(self, axis=None): b = self.is_hom_ref() return np.sum(b, axis=axis)
Count homozygous reference genotypes. Parameters ---------- axis : int, optional Axis over which to count, or None to perform overall count.
def count_hom_alt(self, axis=None): b = self.is_hom_alt() return np.sum(b, axis=axis)
Count homozygous alternate genotypes. Parameters ---------- axis : int, optional Axis over which to count, or None to perform overall count.
def count_het(self, allele=None, axis=None): b = self.is_het(allele=allele) return np.sum(b, axis=axis)
Count heterozygous genotypes. Parameters ---------- allele : int, optional Allele index. axis : int, optional Axis over which to count, or None to perform overall count.
def count_call(self, call, axis=None): b = self.is_call(call=call) return np.sum(b, axis=axis)
Count genotypes with a given call. Parameters ---------- call : array_like, int, shape (ploidy,) The genotype call to find. axis : int, optional Axis over which to count, or None to perform overall count.
def to_n_ref(self, fill=0, dtype='i1'): # count number of alternate alleles out = np.empty(self.shape[:-1], dtype=dtype) np.sum(self.values == 0, axis=-1, out=out) # fill missing calls if fill != 0: m = self.is_missing() out[m] = fill # ha...
Transform each genotype call into the number of reference alleles. Parameters ---------- fill : int, optional Use this value to represent missing calls. dtype : dtype, optional Output dtype. Returns ------- out : ndarray, int8, sh...
def to_allele_counts(self, max_allele=None, dtype='u1'): # determine alleles to count if max_allele is None: max_allele = self.max() alleles = list(range(max_allele + 1)) # set up output array outshape = self.shape[:-1] + (len(alleles),) out = np.zero...
Transform genotype calls into allele counts per call. Parameters ---------- max_allele : int, optional Highest allele index. Provide this value to speed up computation. dtype : dtype, optional Output dtype. Returns ------- out : ndarray, ...
def to_gt(self, max_allele=None): # how many characters needed per allele call? if max_allele is None: max_allele = np.max(self) if max_allele <= 0: max_allele = 1 nchar = int(np.floor(np.log10(max_allele))) + 1 # convert to string a = sel...
Convert genotype calls to VCF-style string representation. Returns ------- gt : ndarray, string, shape (n_variants, n_samples) Examples -------- >>> import allel >>> g = allel.GenotypeArray([[[0, 0], [0, 1]], ... [[0, 2], [1, 1]...
def map_alleles(self, mapping, copy=True):
    """Transform alleles via a mapping.

    Parameters
    ----------
    mapping : ndarray, int8, shape (n_variants, max_allele)
        An array defining the allele mapping for each variant.
    copy : bool, optional
        If True, return a new array; if False, apply mapping in place.

    Returns
    -------
    gm : GenotypeVector or GenotypeArray
        Genotype data with alleles transformed.
    """
    # delegate the actual allele mapping to the haplotype representation
    mapped = self.to_haplotypes().map_alleles(mapping, copy=copy)
    # restore the original container shape/type
    if self.ndim == 2:
        return GenotypeVector(mapped)
    return mapped.to_genotypes(ploidy=self.ploidy)
Transform alleles via a mapping. Parameters ---------- mapping : ndarray, int8, shape (n_variants, max_allele) An array defining the allele mapping for each variant. copy : bool, optional If True, return a new array; if False, apply mapping in place (...
def to_packed(self, boundscheck=True): check_ploidy(self.ploidy, 2) if boundscheck: amx = self.max() if amx > 14: raise ValueError('max allele for packing is 14, found %s' % amx) amn = self.min() if amn < -1: raise ...
Pack diploid genotypes into a single byte for each genotype, using the left-most 4 bits for the first allele and the right-most 4 bits for the second allele. Allows single byte encoding of diploid genotypes for variants with up to 15 alleles. Parameters ---------- bounds...
def from_packed(cls, packed):
    """Unpack diploid genotypes that have been bit-packed into single
    bytes.

    Parameters
    ----------
    packed : ndarray, uint8, shape (n_variants, n_samples)
        Bit-packed diploid genotype array.

    Returns
    -------
    g : GenotypeArray, shape (n_variants, n_samples, 2)
    """
    # validate shape and dtype before attempting to unpack
    packed = np.asarray(packed)
    check_ndim(packed, 2)
    check_dtype(packed, 'u1')
    packed = memoryview_safe(packed)
    return cls(genotype_array_unpack_diploid(packed))
Unpack diploid genotypes that have been bit-packed into single bytes. Parameters ---------- packed : ndarray, uint8, shape (n_variants, n_samples) Bit-packed diploid genotype array. Returns ------- g : GenotypeArray, shape (n_variants, n_samples, 2) ...
def to_sparse(self, format='csr', **kwargs):
    """Convert into a sparse matrix.

    Parameters
    ----------
    format : {'coo', 'csc', 'csr', 'dia', 'dok', 'lil'}
        Sparse matrix format.
    kwargs : keyword arguments
        Passed through to sparse matrix constructor.

    Returns
    -------
    m : scipy.sparse.spmatrix
    """
    # sparse conversion is implemented on the haplotype view
    haplotypes = self.to_haplotypes()
    return haplotypes.to_sparse(format=format, **kwargs)
Convert into a sparse matrix. Parameters ---------- format : {'coo', 'csc', 'csr', 'dia', 'dok', 'lil'} Sparse matrix format. kwargs : keyword arguments Passed through to sparse matrix constructor. Returns ------- m : scipy.sparse.spmatri...
def from_sparse(m, ploidy, order=None, out=None):
    """Construct a genotype array from a sparse matrix.

    Parameters
    ----------
    m : scipy.sparse.spmatrix
        Sparse matrix.
    ploidy : int
        The sample ploidy.
    order : {'C', 'F'}, optional
        Whether to store data in C (row-major) or Fortran (column-major)
        order in memory; passed through to HaplotypeArray.from_sparse.
    out : ndarray, optional
        Passed through to HaplotypeArray.from_sparse.

    Returns
    -------
    g : GenotypeArray
    """
    # densify as haplotypes first, then restore the ploidy dimension
    haplotypes = HaplotypeArray.from_sparse(m, order=order, out=out)
    return haplotypes.to_genotypes(ploidy=ploidy)
Construct a genotype array from a sparse matrix. Parameters ---------- m : scipy.sparse.spmatrix Sparse matrix ploidy : int The sample ploidy. order : {'C', 'F'}, optional Whether to store data in C (row-major) or Fortran (column-major) ...
def haploidify_samples(self): # N.B., this implementation is obscure and uses more memory than # necessary, TODO review # define the range of possible indices, e.g., diploid => (0, 1) index_range = np.arange(0, self.ploidy, dtype='u1') # create a random index for each ge...
Construct a pseudo-haplotype for each sample by randomly selecting an allele from each genotype call. Returns ------- h : HaplotypeArray Notes ----- If a mask has been set, it is ignored by this function. Examples -------- >>> import al...
def count_alleles(self, max_allele=None, subpop=None): # check inputs subpop = _normalize_subpop_arg(subpop, self.shape[1]) # determine alleles to count if max_allele is None: max_allele = self.max() # use optimisations values = memoryview_safe(self.v...
Count the number of calls of each allele per variant. Parameters ---------- max_allele : int, optional The highest allele index to count. Alleles above this will be ignored. subpop : sequence of ints, optional Indices of samples to include in count. ...
def count_alleles_subpops(self, subpops, max_allele=None):
    """Count alleles for multiple subpopulations simultaneously.

    Parameters
    ----------
    subpops : dict (string -> sequence of ints)
        Mapping of subpopulation names to sample indices.
    max_allele : int, optional
        The highest allele index to count; if not given, determined
        from the data.

    Returns
    -------
    out : dict (string -> allele counts)
        Mapping of subpopulation names to allele counts.
    """
    # determine the max allele once up front so counts for all
    # subpopulations have the same number of columns
    if max_allele is None:
        max_allele = self.max()
    out = {}
    for name, subpop in subpops.items():
        out[name] = self.count_alleles(max_allele=max_allele, subpop=subpop)
    return out
Count alleles for multiple subpopulations simultaneously. Parameters ---------- subpops : dict (string -> sequence of ints) Mapping of subpopulation names to sample indices. max_allele : int, optional The highest allele index to count. Alleles above this will be ...
def compress(self, condition, axis=0, out=None):
    """Return selected slices of the array along the given axis.

    Parameters
    ----------
    condition : array_like, bool
        Array that selects which entries to return.
    axis : int, optional
        Axis along which to select slices.
    out : ndarray, optional
        Output array, passed through to the underlying implementation.

    Returns
    -------
    out : same type as self
    """
    # delegate to the shared implementation, preserving this array's type
    this_cls = type(self)
    return compress_haplotype_array(self, condition, axis=axis,
                                    cls=this_cls, compress=np.compress,
                                    out=out)
Return selected slices of an array along given axis. Parameters ---------- condition : array_like, bool Array that selects which entries to return. N.B., if len(condition) is less than the size of the given axis, then output is truncated to the length of the ...
def take(self, indices, axis=0, out=None, mode='raise'):
    """Take elements from the array along an axis.

    Parameters
    ----------
    indices : array_like
        The indices of the values to extract.
    axis : int, optional
        The axis over which to select values.
    out : ndarray, optional
        Output array, passed through to the underlying implementation.
    mode : {'raise', 'wrap', 'clip'}, optional
        Passed through to numpy take.

    Returns
    -------
    out : same type as self
    """
    # delegate to the shared implementation, preserving this array's type
    this_cls = type(self)
    return take_haplotype_array(self, indices, axis=axis, cls=this_cls,
                                take=np.take, out=out, mode=mode)
Take elements from an array along an axis. This function does the same thing as "fancy" indexing (indexing arrays using arrays); however, it can be easier to use if you need elements along a given axis. Parameters ---------- indices : array_like The indices ...
def subset(self, sel0=None, sel1=None):
    """Make a sub-selection of variants and haplotypes.

    Parameters
    ----------
    sel0 : array_like, optional
        Boolean array or array of indices selecting variants.
    sel1 : array_like, optional
        Boolean array or array of indices selecting haplotypes.

    Returns
    -------
    out : same type as self
    """
    # delegate to the shared implementation; the bare `subset` name
    # refers to the module-level subset function
    this_cls = type(self)
    return subset_haplotype_array(self, sel0, sel1, cls=this_cls,
                                  subset=subset)
Make a sub-selection of variants and haplotypes. Parameters ---------- sel0 : array_like Boolean array or array of indices selecting variants. sel1 : array_like Boolean array or array of indices selecting haplotypes. Returns ------- out :...
def concatenate(self, others, axis=0):
    """Join a sequence of arrays along an existing axis.

    Parameters
    ----------
    others : sequence of array_like
        The arrays must have the same shape, except in the dimension
        corresponding to `axis`.
    axis : int, optional
        The axis along which the arrays will be joined.

    Returns
    -------
    out : same type as self
    """
    # delegate to the shared implementation, preserving this array's type
    this_cls = type(self)
    return concatenate_haplotype_array(self, others, axis=axis,
                                       cls=this_cls,
                                       concatenate=np.concatenate)
Join a sequence of arrays along an existing axis. Parameters ---------- others : sequence of array_like The arrays must have the same shape, except in the dimension corresponding to `axis` (the first, by default). axis : int, optional The axis along w...
def to_genotypes(self, ploidy, copy=False): # check ploidy is compatible if (self.shape[1] % ploidy) > 0: raise ValueError('incompatible ploidy') # reshape newshape = (self.shape[0], -1, ploidy) data = self.reshape(newshape) # wrap g = Genotyp...
Reshape a haplotype array to view it as genotypes by restoring the ploidy dimension. Parameters ---------- ploidy : int The sample ploidy. copy : bool, optional If True, make a copy of data. Returns ------- g : ndarray, int, shape...
def to_sparse(self, format='csr', **kwargs): import scipy.sparse # check arguments f = { 'bsr': scipy.sparse.bsr_matrix, 'coo': scipy.sparse.coo_matrix, 'csc': scipy.sparse.csc_matrix, 'csr': scipy.sparse.csr_matrix, 'dia': sci...
Convert into a sparse matrix. Parameters ---------- format : {'coo', 'csc', 'csr', 'dia', 'dok', 'lil'} Sparse matrix format. kwargs : keyword arguments Passed through to sparse matrix constructor. Returns ------- m : scipy.sparse.spmatri...
def from_sparse(m, order=None, out=None): import scipy.sparse # check arguments if not scipy.sparse.isspmatrix(m): raise ValueError('not a sparse matrix: %r' % m) # convert to dense array data = m.toarray(order=order, out=out) # wrap h = Haplo...
Construct a haplotype array from a sparse matrix. Parameters ---------- m : scipy.sparse.spmatrix Sparse matrix order : {'C', 'F'}, optional Whether to store data in C (row-major) or Fortran (column-major) order in memory. out : ndarray, shape...
def count_alleles(self, max_allele=None, subpop=None): # check inputs subpop = _normalize_subpop_arg(subpop, self.shape[1]) # determine alleles to count if max_allele is None: max_allele = self.max() # use optimisations values = memoryview_safe(self.v...
Count the number of calls of each allele per variant. Parameters ---------- max_allele : int, optional The highest allele index to count. Alleles greater than this index will be ignored. subpop : array_like, int, optional Indices of haplotypes to incl...
def map_alleles(self, mapping, copy=True): # check inputs mapping = asarray_ndim(mapping, 2) check_dim0_aligned(self, mapping) # use optimisation mapping = np.asarray(mapping, dtype=self.dtype) mapping = memoryview_safe(mapping) values = memoryview_safe(s...
Transform alleles via a mapping. Parameters ---------- mapping : ndarray, int8, shape (n_variants, max_allele) An array defining the allele mapping for each variant. copy : bool, optional If True, return a new array; if False, apply mapping in place (...
def distinct(self): # setup collection d = collections.defaultdict(set) # iterate over haplotypes for i in range(self.shape[1]): # hash the haplotype k = hash(self.values[:, i].tobytes()) # collect d[k].add(i) # extract set...
Return sets of indices for each distinct haplotype.
def distinct_counts(self):
    """Return the number of occurrences of each distinct haplotype,
    sorted in descending order.

    Returns
    -------
    counts : ndarray, int
        Occurrence counts, largest first.
    """
    # hash each haplotype (column) so identical columns collapse
    # to the same key
    keys = [hash(self.values[:, j].tobytes())
            for j in range(self.shape[1])]
    # tally occurrences and sort counts, largest first
    tally = collections.Counter(keys)
    counts = sorted(tally.values(), reverse=True)
    return np.asarray(counts)
Return counts for each distinct haplotype.
def distinct_frequencies(self):
    """Return the frequency of each distinct haplotype.

    Returns
    -------
    freqs : ndarray, float
        Frequencies sorted in descending order.
    """
    counts = self.distinct_counts()
    n_haplotypes = self.shape[1]
    return counts / n_haplotypes
Return frequencies for each distinct haplotype.
def to_frequencies(self, fill=np.nan):
    """Compute allele frequencies.

    Parameters
    ----------
    fill : float, optional
        Value to use when the total number of allele calls is 0.

    Returns
    -------
    af : ndarray, float, shape (n_variants, n_alleles)
    """
    # total allele calls per variant, kept 2-D so division broadcasts
    # across the alleles dimension
    an = np.sum(self, axis=1)[:, None]
    # suppress invalid-value warnings from division where an == 0
    with ignore_invalid():
        return np.where(an > 0, self / an, fill)
Compute allele frequencies. Parameters ---------- fill : float, optional Value to use when number of allele calls is 0. Returns ------- af : ndarray, float, shape (n_variants, n_alleles) Examples -------- >>> import allel >>...
def max_allele(self):
    """Return the highest allele index observed for each variant.

    Returns
    -------
    n : ndarray, int8, shape (n_variants,)
        Allele index array; -1 where no alleles are observed.
    """
    result = np.full(self.shape[0], -1, dtype='i1')
    # walk alleles in ascending order so the last write wins, leaving
    # the highest observed allele index per variant
    for allele in range(self.shape[1]):
        observed = self.values[:, allele] > 0
        result[observed] = allele
    return result
Return the highest allele index for each variant. Returns ------- n : ndarray, int, shape (n_variants,) Allele index array. Examples -------- >>> import allel >>> g = allel.GenotypeArray([[[0, 0], [0, 1]], ... [[0, 2...
def is_non_segregating(self, allele=None):
    """Find non-segregating variants (where at most one allele is
    observed).

    Parameters
    ----------
    allele : int, optional
        Allele index; if given, require that the single observed
        allele is this one.

    Returns
    -------
    out : ndarray, bool, shape (n_variants,)
        Boolean array, True where the variant is non-segregating.
    """
    n_alleles = self.allelism()
    if allele is None:
        return n_alleles <= 1
    # exactly one allele observed, and it is the requested one
    return (n_alleles == 1) & (self.values[:, allele] > 0)
Find non-segregating variants (where at most one allele is observed). Parameters ---------- allele : int, optional Allele index. Returns ------- out : ndarray, bool, shape (n_variants,) Boolean array where elements are True if variant mat...
def is_biallelic_01(self, min_mac=None):
    """Find variants biallelic for the reference (0) and first
    alternate (1) allele.

    Parameters
    ----------
    min_mac : int, optional
        Minimum minor allele count.

    Returns
    -------
    out : ndarray, bool, shape (n_variants,)
        Boolean array, True where the variant matches.
    """
    out = self.is_biallelic() & (self.max_allele() == 1)
    if min_mac is None:
        return out
    # minor allele count is the smaller of the ref/alt counts
    minor_ac = self.values[:, :2].min(axis=1)
    return out & (minor_ac >= min_mac)
Find variants biallelic for the reference (0) and first alternate (1) allele. Parameters ---------- min_mac : int, optional Minimum minor allele count. Returns ------- out : ndarray, bool, shape (n_variants,) Boolean array where elements ...
def map_alleles(self, mapping, max_allele=None): # ensure correct dimensionality and matching dtype mapping = asarray_ndim(mapping, 2, dtype=self.dtype) check_dim0_aligned(self, mapping) check_dim1_aligned(self, mapping) # use optimisation out = allele_counts_arr...
Transform alleles via a mapping. Parameters ---------- mapping : ndarray, int8, shape (n_variants, max_allele) An array defining the allele mapping for each variant. max_allele : int, optional Highest allele index expected in the output. If not provided ...