docstring
stringlengths 52
499
| function
stringlengths 67
35.2k
| __index_level_0__
int64 52.6k
1.16M
|
|---|---|---|
Convert variant information to a VCF formatted string
Args:
variant(dict)
variant_type(str)
Returns:
vcf_variant(str)
|
def format_variant(variant, variant_type='snv'):
    """Convert variant information to a VCF formatted string.

    For SVs the position is the midpoint of the left/right breakpoint
    interval, the reference is set to 'N' and the alt to the symbolic
    SV type.

    Args:
        variant(dict)
        variant_type(str): 'snv' or 'sv'

    Returns:
        vcf_variant(str): a tab separated VCF body line
    """
    chrom = variant.get('chrom')
    pos = variant.get('start')
    ref = variant.get('ref')
    alt = variant.get('alt')
    if variant_type == 'sv':
        # midpoint of the breakpoint confidence interval
        pos = int((variant['pos_left'] + variant['pos_right'])/2)
        ref = 'N'
        alt = f"<{variant['sv_type']}>"
    # dead `info = None` pre-assignment removed; format_info always sets it
    info = format_info(variant, variant_type=variant_type)
    variant_line = f"{chrom}\t{pos}\t.\t{ref}\t{alt}\t.\t.\t{info}"
    return variant_line
| 703,313
|
Creates an objective function and its derivative for M, given W and X
Args:
w (array): clusters x cells
X (array): genes x cells
|
def _create_m_objective(w, X):
    """Build the Poisson objective (with gradient) over M, with W and X fixed.

    Args:
        w (array): clusters x cells
        X (array): genes x cells

    Returns:
        objective (callable): maps a flattened M (genes*clusters,) to a
            (value, gradient) pair, both scaled by 1/genes.
    """
    n_genes = X.shape[0]
    # row sums of W form the constant part of the gradient
    w_row_sums = w.sum(1)

    def objective(m_flat):
        # the optimizer hands M in flattened; restore genes x clusters
        M = m_flat.reshape((X.shape[0], w.shape[0]))
        est = M.dot(w) + eps  # eps keeps log/division away from zero
        grad = w_row_sums - w.dot((X / est).T).T
        value = np.sum(est - X * np.log(est)) / n_genes
        return value, grad.flatten() / n_genes

    return objective
| 703,329
|
Creates a weight initialization matrix from Poisson clustering assignments.
Args:
assignments (array): 1D array of integers, of length cells
k (int): number of states/clusters
max_assign_weight (float, optional): between 0 and 1 - how much weight to assign to the highest cluster. Default: 0.75
Returns:
init_W (array): k x cells
|
def initialize_from_assignments(assignments, k, max_assign_weight=0.75):
    """Build a soft cluster-weight matrix from hard cluster assignments.

    Each cell gets max_assign_weight on its assigned cluster and the
    leftover mass split evenly over the remaining k-1 clusters; columns
    are then renormalized to sum to 1.

    Args:
        assignments (array): 1D array of integers, of length cells
        k (int): number of states/clusters
        max_assign_weight (float, optional): between 0 and 1 - how much
            weight to assign to the highest cluster. Default: 0.75

    Returns:
        init_W (array): k x cells
    """
    n_cells = len(assignments)
    W0 = np.zeros((k, n_cells))
    for col, assigned in enumerate(assignments):
        # note: the even split is somewhat arbitrary - scaling the
        # weights based on k might be better
        for row in range(k):
            if row == assigned:
                W0[row, col] = max_assign_weight
            else:
                W0[row, col] = (1 - max_assign_weight) / (k - 1)
    return W0 / W0.sum(0)
| 703,330
|
Initializes the M matrix given the data and a set of cluster labels.
Cluster centers are set to the mean of each cluster.
Args:
data (array): genes x cells
clusters (array): 1d array of ints (0...k-1)
k (int): number of clusters
|
def initialize_means(data, clusters, k):
    """Initialize cluster means (M) from data and hard cluster labels.

    Each output column is the mean of the data points in that cluster;
    an empty cluster falls back to a randomly chosen data point.

    Args:
        data (array): genes x cells (dense or scipy sparse)
        clusters (array): 1d array of ints (0...k-1)
        k (int): number of clusters

    Returns:
        init_w (array): genes x k matrix of initial means
    """
    means = np.zeros((data.shape[0], k))
    is_sparse = sparse.issparse(data)
    for c in range(k):
        members = data[:, clusters == c]
        if members.shape[1] == 0:
            # empty cluster: seed from a random cell instead (no eps added,
            # matching the historical behavior)
            rand_cell = np.random.randint(0, data.shape[1])
            col = data[:, rand_cell]
            means[:, c] = col.toarray().flatten() if is_sparse else col.flatten()
        elif is_sparse:
            # NOTE(review): densifying the mean may be memory-heavy for
            # very wide matrices
            means[:, c] = np.array(members.mean(1)).flatten() + eps
        else:
            means[:, c] = members.mean(1) + eps
    return means
| 703,331
|
Runs an ensemble method on the list of M results...
Args:
data: genes x cells array
k: number of classes
n_runs (optional): number of random initializations of state estimation
M_list (optional): list of M arrays from state estimation
se_params (optional): optional poisson_estimate_state params
Returns:
M_new
W_new
ll
|
def state_estimation_ensemble(data, k, n_runs=10, M_list=None, **se_params):
    """Runs an ensemble method on the list of M results.

    Runs state estimation n_runs times (unless M_list is provided), stacks
    the resulting M matrices, and runs state estimation once more on the
    stacked matrix to produce a consensus M.

    Args:
        data: genes x cells array
        k: number of classes
        n_runs (optional): number of random initializations of state estimation
        M_list (optional): list of M arrays from state estimation
        se_params (optional): optional poisson_estimate_state params

    Returns:
        M_new: consensus genes x k matrix
        W_new: k x cells weights, columns normalized to sum to 1
        ll: log-likelihood from the consensus estimation
    """
    # BUG FIX: the default was a mutable `M_list=[]` that the appends below
    # mutated, accumulating matrices across calls; use None as the sentinel.
    if not M_list:
        M_list = []
        for _ in range(n_runs):
            M, W, ll = poisson_estimate_state(data, k, **se_params)
            M_list.append(M)
    M_stacked = np.hstack(M_list)
    M_new, W_new, ll = poisson_estimate_state(M_stacked, k, **se_params)
    W_new = np.dot(data.T, M_new)
    W_new = W_new/W_new.sum(0)
    return M_new, W_new, ll
| 703,339
|
Runs an ensemble method on the list of NMF W matrices...
Args:
data: genes x cells array (should be log + cell-normalized)
k: number of classes
n_runs (optional): number of random initializations of state estimation
M_list (optional): list of M arrays from state estimation
se_params (optional): optional poisson_estimate_state params
Returns:
W_new
H_new
|
def nmf_ensemble(data, k, n_runs=10, W_list=None, **nmf_params):
    """Runs an ensemble method on the list of NMF W matrices.

    Runs NMF n_runs times (unless W_list is provided), stacks the W
    factors, factorizes the stack to get consensus components, and uses
    them to warm-start a final NMF on the data.

    Args:
        data: genes x cells array (should be log + cell-normalized)
        k: number of classes
        n_runs (optional): number of random initializations of NMF
        W_list (optional): list of W arrays from NMF
        nmf_params (optional): currently unused extra params

    Returns:
        nmf_w: genes x k consensus factor
        H_new: k x cells components from the warm-started NMF
    """
    nmf = NMF(k)
    # BUG FIX: the default was a mutable `W_list=[]` that the appends below
    # mutated, accumulating matrices across calls; use None as the sentinel.
    if not W_list:
        W_list = []
        for _ in range(n_runs):
            W_list.append(nmf.fit_transform(data))
    W_stacked = np.hstack(W_list)
    nmf_w = nmf.fit_transform(W_stacked)
    # project the data onto the consensus factors for a custom init
    H_new = data.T.dot(nmf_w).T
    nmf2 = NMF(k, init='custom')
    nmf_w = nmf2.fit_transform(data, W=nmf_w, H=H_new)
    H_new = nmf2.components_
    # alternatively, use nmf_w and h_new as initializations for another NMF round?
    return nmf_w, H_new
| 703,340
|
Return ped_parser case from a family file
Create a dictionary with case data. If no family file is given create from VCF
Args:
family_lines (iterator): The family lines
family_type (str): The format of the family lines
vcf_path(str): Path to VCF
Returns:
family (Family): A ped_parser family object
|
def get_case(family_lines, family_type='ped', vcf_path=None):
    """Return a ped_parser case from a family file.

    Args:
        family_lines (iterator): The family lines
        family_type (str): The format of the family lines
        vcf_path(str): Path to VCF.
            NOTE(review): currently unused here - kept for interface
            compatibility; the "create from VCF" fallback is not
            implemented in this function.

    Returns:
        family (Family): A ped_parser family object

    Raises:
        CaseError: if more than one family is found in the family file
    """
    LOG.info("Parsing family information")
    family_parser = FamilyParser(family_lines, family_type)
    families = list(family_parser.families.keys())
    LOG.info("Found families {0}".format(', '.join(families)))
    if len(families) > 1:
        raise CaseError("Only one family per load can be used")
    # dead `family = None` initialization removed: family is always
    # assigned before return
    family = family_parser.families[families[0]]
    return family
| 703,348
|
Update an existing case
This will add paths to VCF files, individuals etc
Args:
case_obj(models.Case)
existing_case(models.Case)
Returns:
updated_case(models.Case): Updated existing case
|
def update_case(case_obj, existing_case):
    """Update an existing case with data from a new case object.

    Copies VCF paths, variant counts and individuals from case_obj onto a
    deep copy of existing_case. Refuses to replace a VCF that the existing
    case already has.

    Args:
        case_obj(models.Case)
        existing_case(models.Case)

    Returns:
        updated_case(models.Case): Updated existing case

    Raises:
        CaseError: if the existing case already has the VCF type being added
    """
    # parallel metadata keys: index 0 is the snv VCF, index 1 the sv VCF
    count_keys = ['nr_variants', 'nr_sv_variants']
    ind_keys = [('individuals', '_inds'), ('sv_individuals', '_sv_inds')]
    updated_case = deepcopy(existing_case)
    for idx, path_key in enumerate(['vcf_path', 'vcf_sv_path']):
        variant_type = 'sv' if path_key == 'vcf_sv_path' else 'snv'
        if not case_obj.get(path_key):
            continue
        if updated_case.get(path_key):
            LOG.warning("VCF of type %s already exists in case", variant_type)
            raise CaseError("Can not replace VCF in existing case")
        updated_case[path_key] = case_obj[path_key]
        updated_case[count_keys[idx]] = case_obj[count_keys[idx]]
        updated_case[ind_keys[idx][0]] = case_obj[ind_keys[idx][0]]
        updated_case[ind_keys[idx][1]] = case_obj[ind_keys[idx][1]]
    return updated_case
| 703,349
|
Convert a variant to a proper update
Args:
variant(dict)
Returns:
update(dict)
|
def _get_update(self, variant):
update = {
'$inc': {
'homozygote': variant.get('homozygote', 0),
'hemizygote': variant.get('hemizygote', 0),
'observations': 1
},
'$set': {
'chrom': variant.get('chrom'),
'start': variant.get('pos'),
'end': variant.get('end'),
'ref': variant.get('ref'),
'alt': variant.get('alt'),
}
}
if variant.get('case_id'):
update['$push'] = {
'families': {
'$each': [variant.get('case_id')],
'$slice': -50
}
}
return update
| 703,357
|
Add a variant to the variant collection
If the variant exists we update the count else we insert a new variant object.
Args:
variant (dict): A variant dictionary
|
def add_variant(self, variant):
    """Upsert a variant into the variant collection.

    If the variant already exists its counters are updated; otherwise a
    new variant document is inserted.

    Args:
        variant (dict): A variant dictionary
    """
    LOG.debug("Upserting variant: {0}".format(variant.get('_id')))
    result = self.db.variant.update_one(
        {'_id': variant['_id']},
        self._get_update(variant),
        upsert=True,
    )
    if result.modified_count == 1:
        LOG.debug("Variant %s was updated", variant.get('_id'))
    else:
        LOG.debug("Variant was added to database for first time")
    return
| 703,358
|
Add a bulk of variants
This could be used for faster inserts
Args:
variants(iterable(dict))
|
def add_variants(self, variants):
    """Bulk-upsert an iterable of variants.

    Upserts are batched into unordered bulk writes every 10000 input
    variants for speed. Falsy entries (variants excluded earlier, e.g.
    for low gq or no called individual) are skipped and not counted.

    Args:
        variants(iterable(dict))

    Returns:
        nr_inserted(int): number of variants actually upserted
    """
    pending = []
    nr_inserted = 0
    for count, variant in enumerate(variants, 1):
        # skip entries that were filtered out upstream
        if not variant:
            continue
        nr_inserted += 1
        pending.append(
            UpdateOne(
                {'_id': variant['_id']},
                self._get_update(variant),
                upsert=True,
            )
        )
        # flush a full batch (counted by input position, as before)
        if count % 10000 == 0:
            self.db.variant.bulk_write(pending, ordered=False)
            pending = []
    if pending:
        self.db.variant.bulk_write(pending, ordered=False)
    return nr_inserted
| 703,359
|
Make a batch search for variants in the database
Args:
variant_ids(list(str)): List of variant ids
Returns:
res(pymongo.Cursor(variant_obj)): The result
|
def search_variants(self, variant_ids):
    """Batch-fetch variants by their ids.

    Args:
        variant_ids(list(str)): List of variant ids

    Returns:
        res(pymongo.Cursor(variant_obj)): The result
    """
    return self.db.variant.find({'_id': {'$in': variant_ids}})
| 703,360
|
Return all variants in the database
If no region is specified all variants will be returned.
Args:
chromosome(str)
start(int)
end(int)
Returns:
variants(Iterable(Variant))
|
def get_variants(self, chromosome=None, start=None, end=None):
    """Return variants in the database, optionally limited to a region.

    If no region is specified all variants are returned.

    Args:
        chromosome(str)
        start(int)
        end(int)

    Returns:
        variants(Iterable(Variant)): cursor sorted by start position
    """
    query = dict()
    if chromosome:
        query.update({'chrom': chromosome})
    if start:
        # NOTE(review): assumes `end` is provided whenever `start` is;
        # a start without an end yields a {'$lte': None} filter - confirm
        # with callers
        query.update({'start': {'$lte': end}, 'end': {'$gte': start}})
    LOG.info("Find all variants {}".format(query))
    return self.db.variant.find(query).sort([('start', ASCENDING)])
| 703,361
|
Delete observation in database
This means that we take down the observations variable with one.
If 'observations' == 1 we remove the variant. If variant was homozygote
we decrease 'homozygote' with one.
Also remove the family from array 'families'.
Args:
variant (dict): A variant dictionary
|
def delete_variant(self, variant):
    """Remove one observation of a variant from the database.

    Decrements 'observations' (plus homozygote/hemizygote counts) and
    pulls the family from the 'families' array; if this was the last
    observation the whole variant document is deleted instead.

    Args:
        variant (dict): A variant dictionary
    """
    mongo_variant = self.get_variant(variant)
    if not mongo_variant:
        return
    if mongo_variant['observations'] == 1:
        # last observation: drop the entire document
        LOG.debug("Removing variant {0}".format(
            mongo_variant.get('_id')
        ))
        message = self.db.variant.delete_one({'_id': variant['_id']})
    else:
        LOG.debug("Decreasing observations for {0}".format(
            mongo_variant.get('_id')
        ))
        decrements = {
            'observations': -1,
            'homozygote': -(variant.get('homozygote', 0)),
            'hemizygote': -(variant.get('hemizygote', 0)),
        }
        message = self.db.variant.update_one(
            {'_id': mongo_variant['_id']},
            {
                '$inc': decrements,
                '$pull': {'families': variant.get('case_id')},
            },
            upsert=False,
        )
    return
| 703,362
|
Return a list of all chromosomes found in database
Args:
sv(bool): if sv variants should be choosen
Returns:
res(iterable(str)): An iterable with all chromosomes in the database
|
def get_chromosomes(self, sv=False):
    """Return a list of all chromosomes found in database.

    Args:
        sv(bool): if True, look at structural variants instead of SNVs

    Returns:
        res(iterable(str)): An iterable with all chromosomes in the database
    """
    collection = self.db.structural_variant if sv else self.db.variant
    return collection.distinct('chrom')
| 703,363
|
Get the last position observed on a chromosome in the database
Args:
chrom(str)
Returns:
end(int): The largest end position found
|
def get_max_position(self, chrom):
    """Get the last position observed on a chromosome in the database.

    Args:
        chrom(str)

    Returns:
        end(int): The largest end position found, or 0 when the
            chromosome has no variants
    """
    cursor = (
        self.db.variant.find({'chrom': chrom}, {'_id': 0, 'end': 1})
        .sort([('end', DESCENDING)])
        .limit(1)
    )
    max_end = 0
    for doc in cursor:
        max_end = doc['end']
    return max_end
| 703,364
|
downsample the data by removing a given percentage of the reads.
Args:
data: genes x cells array or sparse matrix
percent: float between 0 and 1
|
def downsample(data, percent):
    """Downsample the data by removing a given percentage of the reads.

    Reads to remove are allocated to cells in proportion to each cell's
    total count, then within each cell to genes in proportion to their
    expression. Counts are clipped at zero.

    Args:
        data: genes x cells array or sparse matrix
        percent: float between 0 and 1

    Returns:
        new_data: downsampled copy of data (same type/shape)
    """
    new_data = data.copy()
    total_count = float(data.sum())
    # BUG FIX: np.random.multinomial requires an integer trial count;
    # the original float product raises a TypeError on modern numpy.
    to_remove = int(round(total_count*percent))
    # sum of read counts per cell
    cell_sums = data.sum(0).astype(float)
    # probability of selecting genes per cell
    cell_gene_probs = data/cell_sums
    # probability of selecting cells
    cell_probs = np.array(cell_sums/total_count).flatten()
    cells_selected = np.random.multinomial(to_remove, pvals=cell_probs)
    for i, num_selected in enumerate(cells_selected):
        cell_gene = np.array(cell_gene_probs[:,i]).flatten()
        genes_selected = np.random.multinomial(num_selected, pvals=cell_gene)
        if sparse.issparse(data):
            genes_selected = sparse.csc_matrix(genes_selected).T
        new_data[:,i] -= genes_selected
        # removals can exceed the available counts; clip at zero
        new_data[new_data < 0] = 0
    return new_data
| 703,373
|
Creates an objective function and its derivative for W, given M and X (data)
Args:
m (array): genes x clusters
X (array): genes x cells
R (array): 1 x genes
|
def _create_w_objective(m, X, R):
    """Build the NB objective over W and its gradient, with M, X, R fixed.

    Args:
        m (array): genes x clusters
        X (array): genes x cells
        R (array): 1 x genes dispersion parameters

    Returns:
        (objective, deriv): callables taking a flattened W (clusters*cells,)
    """
    genes, clusters = m.shape
    cells = X.shape[1]
    # broadcast R out to genes x cells once, up front
    R1 = R.reshape((genes, 1)).dot(np.ones((1, cells)))

    def objective(w):
        # the optimizer hands W in flattened; restore clusters x cells
        W = w.reshape((m.shape[1], X.shape[1]))
        d = m.dot(W) + eps  # eps keeps the log away from zero
        return np.sum((X + R1) * np.log(d + R1) - X * np.log(d)) / genes

    def deriv(w):
        # gradient wrt each element of w:
        # for w_{ij}, sum over genes of the m terms minus the data term
        W = w.reshape((m.shape[1], X.shape[1]))
        d = m.dot(W) + eps
        grad = m.T.dot((X + R1) / (d + R1)) - m.T.dot(X / d)
        return grad.flatten() / genes

    return objective, deriv
| 703,374
|
Computes the value of the Poisson objective for M and W, given the data X
Args:
X (array): genes x cells
m (array): genes x clusters
w (array): clusters x cells
|
def poisson_objective(X, m, w):
    """Compute the Poisson objective value for M and W given the data X.

    The value is sum(d - X*log(d)) / genes with d = M.W + eps, i.e. the
    negative Poisson log-likelihood up to constants, scaled by the number
    of genes.

    Args:
        X (array): genes x cells
        m (array): genes x clusters
        w (array): clusters x cells

    Returns:
        float: objective value
    """
    genes = X.shape[0]
    # eps keeps the log away from zero; dead commented-out gradient code
    # and unused locals were removed
    d = m.dot(w) + eps
    return np.sum(d - X*np.log(d))/genes
| 703,381
|
Check if a coordinate is in the PAR region
Args:
chrom(str)
pos(int)
Returns:
par(bool)
|
def check_par(chrom, pos):
    """Check if a coordinate is in a pseudoautosomal (PAR) region.

    Args:
        chrom(str)
        pos(int)

    Returns:
        par(bool): True if pos lies within any PAR interval on chrom
    """
    # return as soon as a containing interval is found instead of
    # scanning every interval
    for interval in PAR.get(chrom, []):
        if interval[0] <= pos <= interval[1]:
            return True
    return False
| 703,391
|
Check if position a is greater than position b
This will look at chromosome and position.
For example a position where chrom = 2 and pos = 300 is greater than a position where
chrom = 1 and pos = 1000
If any of the chromosomes is outside [1-22,X,Y,MT] we can not say which is biggest.
Args:
a,b(Position)
Returns:
bool: True if a is greater than b
|
def is_greater(a, b):
    """Check if position a is greater than position b.

    Compares chromosome first (using the 1-22,X,Y,MT ordering), then
    position: e.g. chrom=2,pos=300 is greater than chrom=1,pos=1000.
    If either chromosome is outside [1-22,X,Y,MT] we can not say which
    is biggest, and False is returned.

    Args:
        a,b(Position)

    Returns:
        bool: True if a is greater than b
    """
    a_chrom = CHROM_TO_INT.get(a.chrom, 0)
    b_chrom = CHROM_TO_INT.get(b.chrom, 0)
    if a_chrom == 0 or b_chrom == 0:
        # unknown chromosome: ordering undefined
        return False
    # lexicographic tuple comparison: chromosome first, then position
    return (a_chrom, a.pos) > (b_chrom, b.pos)
| 703,393
|
Returns a dictionary with position information
Args:
variant(cyvcf2.Variant)
Returns:
coordinates(dict)
|
def get_coords(variant):
    """Return a dictionary with position information for a variant.

    Handles both SNVs and SVs: for SVs the end coordinate, SV type and
    length come from the INFO field; BND (translocation) alts are parsed
    for the mate chromosome/position and assigned infinite length. If
    start would sort after end, the two coordinates are swapped.

    Args:
        variant(cyvcf2.Variant)

    Returns:
        coordinates(dict): keys chrom, end_chrom, sv_length, sv_type,
            pos, end
    """
    coordinates = {
        'chrom': None,
        'end_chrom': None,
        'sv_length': None,
        'sv_type': None,
        'pos': None,
        'end': None,
    }
    # normalize away any 'chr' prefix on the chromosome name
    chrom = variant.CHROM
    if chrom.startswith(('chr', 'CHR', 'Chr')):
        chrom = chrom[3:]
    coordinates['chrom'] = chrom
    end_chrom = chrom
    pos = int(variant.POS)
    alt = variant.ALT[0]
    # Get the end position
    # This will be None for non-svs
    end_pos = variant.INFO.get('END')
    if end_pos:
        end = int(end_pos)
    else:
        # fall back to cyvcf2's computed end
        end = int(variant.end)
    coordinates['end'] = end
    sv_type = variant.INFO.get('SVTYPE')
    length = variant.INFO.get('SVLEN')
    if length:
        sv_len = abs(length)
    else:
        # no SVLEN given: derive the length from the coordinates
        sv_len = end - pos
    # Translocations will sometimes have a end chrom that differs from chrom
    if sv_type == 'BND':
        # parse the mate coordinates out of the breakend ALT string
        other_coordinates = alt.strip('ACGTN[]').split(':')
        end_chrom = other_coordinates[0]
        if end_chrom.startswith(('chr', 'CHR', 'Chr')):
            end_chrom = end_chrom[3:]
        end = int(other_coordinates[1])
        #Set 'infinity' to length if translocation
        sv_len = float('inf')
    # Insertions often have length 0 in VCF
    if (sv_len == 0 and alt != '<INS>'):
        sv_len = len(alt)
    if (pos == end) and (sv_len > 0):
        # zero-width interval with a known length: extend the end
        end = pos + sv_len
    position = Position(chrom, pos)
    end_position = Position(end_chrom, end)
    # If 'start' is greater than 'end', switch positions
    if is_greater(position, end_position):
        end_chrom = position.chrom
        end = position.pos
        chrom = end_position.chrom
        pos = end_position.pos
    coordinates['end_chrom'] = end_chrom
    coordinates['pos'] = pos
    coordinates['end'] = end
    coordinates['sv_length'] = sv_len
    coordinates['sv_type'] = sv_type
    return coordinates
| 703,394
|
Return a Variant object
Take a cyvcf2 formatted variant line and return a models.Variant.
If criteria are not fulfilled, e.g. the variant has no gt call or quality
is below the gq threshold, then return None.
Args:
variant(cyvcf2.Variant)
case_obj(Case): We need the case object to check individuals sex
case_id(str): The case id
gq_treshold(int): Genotype Quality treshold
Return:
formated_variant(models.Variant): A variant dictionary
|
def build_variant(variant, case_obj, case_id=None, gq_treshold=None):
    """Return a Variant object built from a cyvcf2 variant line.

    Returns None when no individual in the case carries the variant
    (for SNVs, calls below the GQ threshold are ignored; SVs are always
    considered found).

    Args:
        variant(cyvcf2.Variant)
        case_obj(Case): We need the case object to check individuals sex
        case_id(str): The case id
        gq_treshold(int): Genotype Quality threshold

    Return:
        formated_variant(models.Variant): A variant dictionary, or None
    """
    variant_obj = None
    sv = False
    # Let cyvcf2 tell if it is a Structural Variant or not
    if variant.var_type == 'sv':
        sv = True
    # chrom_pos_ref_alt
    variant_id = get_variant_id(variant)
    ref = variant.REF
    # ALT is an array in cyvcf2
    # We allways assume splitted and normalized VCFs
    alt = variant.ALT[0]
    coordinates = get_coords(variant)
    chrom = coordinates['chrom']
    pos = coordinates['pos']
    # These are integers that will be used when uploading
    found_homozygote = 0
    found_hemizygote = 0
    # Only look at genotypes for the present individuals
    # SVs count as found regardless of individual genotypes
    if sv:
        found_variant = True
    else:
        found_variant = False
    for ind_obj in case_obj['individuals']:
        # NOTE(review): ind_id is assigned but not used below
        ind_id = ind_obj['ind_id']
        # Get the index position for the individual in the VCF
        ind_pos = ind_obj['ind_index']
        gq = int(variant.gt_quals[ind_pos])
        if (gq_treshold and gq < gq_treshold):
            # skip low-quality genotype calls
            continue
        genotype = GENOTYPE_MAP[variant.gt_types[ind_pos]]
        if genotype in ['het', 'hom_alt']:
            LOG.debug("Found variant")
            found_variant = True
            # If variant in X or Y and individual is male,
            # we need to check hemizygosity
            if chrom in ['X','Y'] and ind_obj['sex'] == 1:
                if not check_par(chrom, pos):
                    LOG.debug("Found hemizygous variant")
                    found_hemizygote = 1
            if genotype == 'hom_alt':
                LOG.debug("Found homozygote alternative variant")
                found_homozygote = 1
    if found_variant:
        variant_obj = Variant(
            variant_id=variant_id,
            chrom=chrom,
            pos=pos,
            end=coordinates['end'],
            ref=ref,
            alt=alt,
            end_chrom=coordinates['end_chrom'],
            sv_type = coordinates['sv_type'],
            sv_len = coordinates['sv_length'],
            case_id = case_id,
            homozygote = found_homozygote,
            hemizygote = found_hemizygote,
            is_sv = sv,
            id_column = variant.ID,
        )
    return variant_obj
| 703,395
|
Load a case to the database
Args:
adapter: Connection to database
case_obj: dict
update(bool): If existing case should be updated
Returns:
case_obj(models.Case)
|
def load_case(adapter, case_obj, update=False):
    """Load a case into the database.

    Args:
        adapter: Connection to database
        case_obj: dict
        update(bool): If existing case should be updated

    Returns:
        case_obj(models.Case)

    Raises:
        CaseError: if the case already exists and update is False
    """
    # Check if the case already exists in database.
    existing_case = adapter.case(case_obj)
    if existing_case:
        if not update:
            raise CaseError("Case {0} already exists in database".format(case_obj['case_id']))
        case_obj = update_case(case_obj, existing_case)
    # Add the case to database. The previous try/except that only
    # re-raised CaseError was a no-op and has been removed.
    adapter.add_case(case_obj, update=update)
    return case_obj
| 703,433
|
Load variants for a family into the database.
Args:
adapter (loqusdb.plugins.Adapter): initialized plugin
case_obj(Case): dict with case information
nr_variants(int)
skip_case_id (bool): whether to include the case id on variant level
or not
gq_treshold(int)
max_window(int): Specify the max size for sv windows
variant_type(str): 'sv' or 'snv'
Returns:
nr_inserted(int)
|
def load_variants(adapter, vcf_obj, case_obj, skip_case_id=False, gq_treshold=None,
max_window=3000, variant_type='snv'):
    """Load variants for a family into the database.

    SVs are inserted one at a time via add_structural_variant; SNVs are
    bulk-inserted via adapter.add_variants. Variants that build_variant
    filters out (no gt call, below the GQ threshold) are skipped.

    Args:
        adapter (loqusdb.plugins.Adapter): initialized plugin
        vcf_obj: iterable of cyvcf2 variants
        case_obj(Case): dict with case information
        skip_case_id (bool): whether to include the case id on variant level
            or not
        gq_treshold(int)
        max_window(int): Specify the max size for sv windows
        variant_type(str): 'sv' or 'snv'

    Returns:
        nr_inserted(int)
    """
    if variant_type == 'snv':
        nr_variants = case_obj['nr_variants']
    else:
        nr_variants = case_obj['nr_sv_variants']
    nr_inserted = 0
    case_id = case_obj['case_id']
    if skip_case_id:
        # variants will not be linked back to this case
        case_id = None
    # Loop over the variants in the vcf
    with click.progressbar(vcf_obj, label="Inserting variants",length=nr_variants) as bar:
        # lazily build variant dicts as the progressbar is consumed
        variants = (build_variant(variant,case_obj,case_id, gq_treshold) for variant in bar)
        if variant_type == 'sv':
            for sv_variant in variants:
                if not sv_variant:
                    continue
                adapter.add_structural_variant(variant=sv_variant, max_window=max_window)
                nr_inserted += 1
        if variant_type == 'snv':
            nr_inserted = adapter.add_variants(variants)
    LOG.info("Inserted %s variants of type %s", nr_inserted, variant_type)
    return nr_inserted
| 703,434
|
Loads variants used for profiling
Args:
adapter (loqusdb.plugins.Adapter): initialized plugin
variant_file(str): Path to variant file
|
def load_profile_variants(adapter, variant_file):
    """Load variants used for profiling.

    Args:
        adapter (loqusdb.plugins.Adapter): initialized plugin
        variant_file(str): Path to variant file

    Raises:
        VcfError: if the VCF contains anything other than SNVs
    """
    vcf_info = check_vcf(variant_file)
    # only the variant type is needed here; the unused nr_variants
    # lookup was removed
    variant_type = vcf_info['variant_type']
    if variant_type != 'snv':
        LOG.critical('Variants used for profiling must be SNVs only')
        raise VcfError
    vcf = get_vcf(variant_file)
    profile_variants = [build_profile_variant(variant) for variant in vcf]
    adapter.add_profile_variants(profile_variants)
| 703,435
|
This function identifies the genes that have the max variance
across a number of bins sorted by mean.
Args:
data (array): genes x cells
nbins (int): number of bins to sort genes by mean expression level. Default: 10.
frac (float): fraction of genes to return per bin - between 0 and 1. Default: 0.1
Returns:
list of gene indices (list of ints)
|
def max_variance_genes(data, nbins=5, frac=0.2):
    """Select the highest-variance genes within mean-expression bins.

    Genes are sorted by mean expression and split into nbins bins; within
    each bin the top `frac` fraction by variance is kept, with
    zero-variance genes dropped.

    Args:
        data (array): genes x cells (dense or scipy sparse)
        nbins (int): number of bins to sort genes by mean expression level. Default: 5.
        frac (float): fraction of genes to return per bin - between 0 and 1. Default: 0.2

    Returns:
        list of gene indices (list of ints)
    """
    # TODO: profile, make more efficient for large matrices - something
    # here looks superlinear (possibly the csc->csr conversion)
    if sparse.issparse(data):
        means, var = sparse_mean_var(data)
    else:
        means = data.mean(1)
        var = data.var(1)
    order_by_mean = means.argsort()
    bin_size = int(data.shape[0] / nbins)
    keep_per_bin = int(bin_size * frac)
    selected = []
    for b in range(nbins):
        if b == nbins - 1:
            # the last bin absorbs any remainder genes
            bin_genes = order_by_mean[b * bin_size:]
        else:
            bin_genes = order_by_mean[b * bin_size:(b + 1) * bin_size]
        bin_var = var[bin_genes]
        # positions of the top-variance genes within this bin
        top = bin_var.argsort()[len(bin_genes) - keep_per_bin:]
        # filter out genes with zero variance
        selected.extend(g for g in bin_genes[top] if var[g] > 0)
    return selected
| 703,437
|
Return a dictionary with individual positions
Args:
individuals(list): A list with vcf individuals in correct order
Returns:
ind_pos(dict): Map from ind_id -> index position
|
def get_individual_positions(individuals):
    """Return a dictionary that maps individual ids to column indices.

    Args:
        individuals(list): A list with vcf individuals in correct order

    Returns:
        ind_pos(dict): Map from ind_id -> index position
    """
    # a None/empty argument yields an empty mapping
    return {ind: idx for idx, ind in enumerate(individuals or [])}
| 703,440
|
Generates poisson-distributed data, given a set of means for each cluster.
Args:
centers (array): genes x clusters matrix
n_cells (int): number of output cells
cluster_probs (array): prior probability for each cluster.
Default: uniform.
Returns:
output - array with shape genes x n_cells
labels - array of cluster labels
|
def generate_poisson_data(centers, n_cells, cluster_probs=None):
genes, clusters = centers.shape
output = np.zeros((genes, n_cells))
if cluster_probs is None:
cluster_probs = np.ones(clusters)/clusters
labels = []
for i in range(n_cells):
c = np.random.choice(range(clusters), p=cluster_probs)
labels.append(c)
output[:,i] = np.random.poisson(centers[:,c])
return output, np.array(labels)
| 703,442
|
Generates zero-inflated poisson-distributed data, given a set of means and zero probs for each cluster.
Args:
M (array): genes x clusters matrix
L (array): genes x clusters matrix - zero-inflation parameters
n_cells (int): number of output cells
cluster_probs (array): prior probability for each cluster.
Default: uniform.
Returns:
output - array with shape genes x n_cells
labels - array of cluster labels
|
def generate_zip_data(M, L, n_cells, cluster_probs=None):
genes, clusters = M.shape
output = np.zeros((genes, n_cells))
if cluster_probs is None:
cluster_probs = np.ones(clusters)/clusters
zip_p = np.random.random((genes, n_cells))
labels = []
for i in range(n_cells):
c = np.random.choice(range(clusters), p=cluster_probs)
labels.append(c)
output[:,i] = np.where(zip_p[:,i] < L[:,c], 0, np.random.poisson(M[:,c]))
return output, np.array(labels)
| 703,443
|
Generates data according to the Poisson Convex Mixture Model.
Args:
means (array): Cell types- genes x clusters
weights (array): Cell cluster assignments- clusters x cells
Returns:
data matrix - genes x cells
|
def generate_state_data(means, weights):
    """Sample data from the Poisson Convex Mixture Model.

    Args:
        means (array): Cell types- genes x clusters
        weights (array): Cell cluster assignments- clusters x cells

    Returns:
        data matrix - genes x cells (float)
    """
    true_means = np.dot(means, weights)
    return np.random.poisson(true_means).astype(float)
| 703,444
|
Generates data according to the Zero-inflated Poisson Convex Mixture Model.
Args:
means (array): Cell types- genes x clusters
weights (array): Cell cluster assignments- clusters x cells
z (float): zero-inflation parameter
Returns:
data matrix - genes x cells
|
def generate_zip_state_data(means, weights, z):
    """Sample data from the Zero-inflated Poisson Convex Mixture Model.

    Args:
        means (array): Cell types- genes x clusters
        weights (array): Cell cluster assignments- clusters x cells
        z (float): zero-inflation parameter (probability of a dropout zero)

    Returns:
        data matrix - genes x cells (float)
    """
    x_true = np.dot(means, weights)
    sample = np.random.poisson(x_true)
    random = np.random.random(x_true.shape)
    # BUG FIX: the dropout zeros must be applied to the returned sample;
    # the original zeroed x_true *after* sampling, so z had no effect.
    sample[random < z] = 0
    return sample.astype(float)
| 703,445
|
Generates data according to the Negative Binomial Convex Mixture Model.
Args:
means (array): Cell types- genes x clusters
weights (array): Cell cluster assignments- clusters x cells
R (array): dispersion parameter - 1 x genes
Returns:
data matrix - genes x cells
|
def generate_nb_state_data(means, weights, R):
cells = weights.shape[1]
# x_true = true means
x_true = np.dot(means, weights)
# convert means into P
R_ = np.tile(R, (cells, 1)).T
P_true = x_true/(R_ + x_true)
sample = np.random.negative_binomial(np.tile(R, (cells, 1)).T, P_true)
return sample.astype(float)
| 703,446
|
Generates means and weights for the Negative Binomial Mixture Model.
Weights are distributed Dirichlet(1,1,...), means are rand(0, 1).
Returned values can be passed to generate_state_data(M, W).
Args:
n_states (int): number of states or clusters
n_cells (int): number of cells
n_genes (int): number of genes
Returns:
M - genes x clusters
W - clusters x cells
R - genes x 1 - randint(1, 100)
|
def generate_nb_states(n_states, n_cells, n_genes):
    """Generate random parameters for the Negative Binomial Mixture Model.

    Weights are distributed Dirichlet(1,1,...); means are uniform on
    [0, 100); dispersions are integers in [1, 100). Returned values can
    be passed to generate_state_data(M, W).

    Args:
        n_states (int): number of states or clusters
        n_cells (int): number of cells
        n_genes (int): number of genes

    Returns:
        M - genes x clusters
        W - clusters x cells
        R - genes x 1 - randint(1, 100)
    """
    W = np.random.dirichlet([1] * n_states, size=(n_cells,)).T
    M = 100 * np.random.random((n_genes, n_states))
    R = np.random.randint(1, 100, n_genes)
    return M, W, R
| 703,447
|
Generates means and weights for the Poisson Convex Mixture Model.
Weights are distributed Dirichlet(1,1,...), means are rand(0, 100).
Returned values can be passed to generate_state_data(M, W).
Args:
n_states (int): number of states or clusters
n_cells (int): number of cells
n_genes (int): number of genes
Returns:
M - genes x clusters
W - clusters x cells
|
def generate_poisson_states(n_states, n_cells, n_genes):
    """Generate random parameters for the Poisson Convex Mixture Model.

    Weights are distributed Dirichlet(1,1,...); means are uniform on
    [0, 100). Returned values can be passed to generate_state_data(M, W).

    Args:
        n_states (int): number of states or clusters
        n_cells (int): number of cells
        n_genes (int): number of genes

    Returns:
        M - genes x clusters
        W - clusters x cells
    """
    W = np.random.dirichlet([1] * n_states, size=(n_cells,)).T
    M = 100 * np.random.random((n_genes, n_states))
    return M, W
| 703,448
|
Generates negative binomial data
Args:
P (array): genes x clusters
R (array): genes x clusters
n_cells (int): number of cells
assignments (list): cluster assignment of each cell. Default:
random uniform
Returns:
data array with shape genes x cells
labels - array of cluster labels
|
def generate_nb_data(P, R, n_cells, assignments=None):
    """Sample negative-binomial-distributed cells from per-cluster params.

    Args:
        P (array): genes x clusters success probabilities
        R (array): genes x clusters stopping parameters
        n_cells (int): number of cells
        assignments (list): cluster assignment of each cell. Default:
            random uniform

    Returns:
        data array with shape genes x cells
        labels - array of cluster labels
    """
    n_genes, n_clusters = P.shape
    output = np.zeros((n_genes, n_cells))
    if assignments is None:
        cluster_probs = np.ones(n_clusters) / n_clusters
    labels = []
    for cell in range(n_cells):
        if assignments is None:
            chosen = np.random.choice(range(n_clusters), p=cluster_probs)
        else:
            chosen = assignments[cell]
        labels.append(chosen)
        # because numpy's negative binomial, r is the number of successes
        output[:, cell] = np.random.negative_binomial(R[:, chosen], 1.0 - P[:, chosen])
    return output, np.array(labels)
| 703,450
|
Generates visualization scatters for all the methods.
Args:
methods: follows same format as run_experiments. List of tuples.
data: genes x cells
true_labels: array of integers
base_dir: base directory to save all the plots
figsize: tuple of ints representing size of figure
scatter_options: options for plt.scatter
|
def generate_visualizations(methods, data, true_labels, base_dir = 'visualizations',
figsize=(18,10), **scatter_options):
    """Generate visualization scatters (saved as pngs) for all the methods.

    For each method the data is preprocessed, reduced to 2d (tSNE, with a
    TruncatedSVD pre-step or fallback for large/sparse inputs), then
    plotted once per clustering with cluster labels and once with the
    true labels.

    Args:
        methods: follows same format as run_experiments. List of tuples
            (preprocess, clustering or list of clusterings).
        data: genes x cells
        true_labels: array of integers
        base_dir: base directory to save all the plots
        figsize: tuple of ints representing size of figure
        scatter_options: options for plt.scatter
    """
    plt.figure(figsize=figsize)
    for method in methods:
        preproc= method[0]
        if isinstance(preproc, Preprocess):
            preprocessed, ll = preproc.run(data)
            output_names = preproc.output_names
        else:
            # if the input is a list, only use the first preproc result
            p1 = data
            output_names = ['']
            for p in preproc:
                p1, ll = p.run(p1)
                p1 = p1[0]
                # concatenate the chain's names into one output name
                output_names[0] = output_names[0] + p.output_names[0]
            preprocessed = [p1]
        for r, name in zip(preprocessed, output_names):
            # TODO: cluster labels
            print(name)
            # if it's 2d, just display it... else, do tsne to reduce to 2d
            if r.shape[0]==2:
                r_dim_red = r
            else:
                # sometimes the data is too big to do tsne... (for sklearn)
                if sparse.issparse(r) and r.shape[0] > 100:
                    # reduce to 50 dims with TruncatedSVD before tSNE
                    name = 'tsvd_' + name
                    tsvd = TruncatedSVD(50)
                    r_dim_red = tsvd.fit_transform(r.T)
                    try:
                        tsne = TSNE(2)
                        r_dim_red = tsne.fit_transform(r_dim_red).T
                        name = 'tsne_' + name
                    except:
                        # NOTE(review): bare except - falls back to a 2d
                        # TruncatedSVD when tSNE fails for any reason
                        tsvd2 = TruncatedSVD(2)
                        r_dim_red = tsvd2.fit_transform(r_dim_red).T
                else:
                    name = 'tsne_' + name
                    tsne = TSNE(2)
                    r_dim_red = tsne.fit_transform(r.T).T
            if isinstance(method[1], list):
                # one plot per clustering method
                for clustering_method in method[1]:
                    try:
                        cluster_labels = clustering_method.run(r)
                    except:
                        # NOTE(review): bare except - clustering failures
                        # are reported and skipped
                        print('clustering failed')
                        continue
                    output_path = base_dir + '/{0}_{1}_labels.png'.format(name, clustering_method.name)
                    visualize_dim_red(r_dim_red, cluster_labels, output_path, **scatter_options)
            else:
                clustering_method = method[1]
                try:
                    cluster_labels = clustering_method.run(r)
                except:
                    print('clustering failed')
                    continue
                output_path = base_dir + '/{0}_{1}_labels.png'.format(name, clustering_method.name)
                visualize_dim_red(r_dim_red, cluster_labels, output_path, **scatter_options)
            # also plot the ground-truth labels for reference
            output_path = base_dir + '/{0}_true_labels.png'.format(name)
            visualize_dim_red(r_dim_red, true_labels, output_path, **scatter_options)
| 703,459
|
Given a vcf, get a profile string for each sample in the vcf
based on the profile variants in the database
Args:
adapter(MongoAdapter): Adapter to mongodb
vcf_file(str): Path to vcf file
Returns:
profiles (dict(str)): The profiles (given as strings) for each sample
in vcf.
|
def get_profiles(adapter, vcf_file):
    """Given a vcf, get a profile string for each sample in the vcf.

    The profile is based on the profile variants in the database: for
    each profile variant the matching call in the vcf is looked up and a
    two-letter genotype string is appended per sample; samples without a
    call get a hom-ref genotype.

    Args:
        adapter(MongoAdapter): Adapter to mongodb
        vcf_file(str): Path to vcf file

    Returns:
        profiles (dict(str)): The profiles (given as strings) for each sample
            in vcf.
    """
    vcf = get_file_handle(vcf_file)
    individuals = vcf.samples
    profiles = {individual: [] for individual in individuals}
    for profile_variant in adapter.profile_variants():
        ref = profile_variant['ref']
        alt = profile_variant['alt']
        pos = profile_variant['pos']
        end = pos + 1
        chrom = profile_variant['chrom']
        region = f"{chrom}:{pos}-{end}"
        #Find variants in region
        found_variant = False
        for variant in vcf(region):
            variant_id = get_variant_id(variant)
            #If variant id i.e. chrom_pos_ref_alt matches
            if variant_id == profile_variant['_id']:
                found_variant = True
                #find genotype for each individual in vcf
                for i, individual in enumerate(individuals):
                    genotype = GENOTYPE_MAP[variant.gt_types[i]]
                    if genotype == 'hom_alt':
                        gt_str = f"{alt}{alt}"
                    elif genotype == 'het':
                        gt_str = f"{ref}{alt}"
                    else:
                        gt_str = f"{ref}{ref}"
                    #Append genotype to profile string of individual
                    profiles[individual].append(gt_str)
                #Break loop if variant is found in region
                break
        #If no call was found for variant, give all samples a hom ref genotype
        if not found_variant:
            for individual in individuals: profiles[individual].append(f"{ref}{ref}")
    return profiles
| 703,516
|
Given two profiles, determine the ratio of similarity, i.e.
the hamming distance between the strings.
Args:
profile1/2 (str): profile string
Returns:
similarity_ratio (float): the ratio of similarity (0-1)
|
def compare_profiles(profile1, profile2):
    """Measure similarity of two profiles.

    The ratio is the fraction of positions where the profiles agree,
    i.e. 1 minus the normalized hamming distance.

    Args:
        profile1/2 (str): profile string

    Returns:
        similarity_ratio (float): the ratio of similarity (0-1)
    """
    total = len(profile1)
    chars1 = np.array(list(profile1))
    chars2 = np.array(list(profile2))
    matches = np.sum(chars1 == chars2)
    return matches / total
| 703,518
|
For all cases having vcf_path, update the profile string for the samples
Args:
adapter (MongoAdapter): Adapter to mongodb
|
def update_profiles(adapter):
    """Refresh the profile strings for every case that has a profile VCF.

    Args:
        adapter (MongoAdapter): Adapter to mongodb
    """
    for case in adapter.cases():
        # Only cases with a profile vcf path can be (re-)profiled
        if not case.get('profile_path'):
            continue
        profiles = get_profiles(adapter, case['profile_path'])
        profiled_individuals = deepcopy(case['individuals'])
        for individual in profiled_individuals:
            ind_id = individual['ind_id']
            try:
                individual['profile'] = profiles[ind_id]
            except KeyError:
                # sample missing from the vcf: leave its profile untouched
                LOG.warning(f"sample IDs in vcf does not match for case {case['case_id']}")
        updated_case = deepcopy(case)
        updated_case['individuals'] = profiled_individuals
        adapter.add_case(updated_case, update=True)
| 703,519
|
Calculates the purity score for the given labels.
Args:
labels (array): 1D array of integers
true_labels (array): 1D array of integers - true labels
Returns:
purity score - a float between 0 and 1. Closer to 1 is better.
|
def purity(labels, true_labels):
    """Calculate the purity score for the given cluster labels.

    For each predicted cluster the count of its most common true label is
    accumulated; the total is normalized by the number of points.

    Args:
        labels (array): 1D array of integers
        true_labels (array): 1D array of integers - true labels

    Returns:
        purity score - a float between 0 and 1. Closer to 1 is better.
    """
    total = 0.0
    for cluster in set(labels):
        members = true_labels[labels == cluster]
        if len(members) == 0:
            continue
        # count of the dominant true label within this cluster
        top_label, top_count = Counter(members).most_common()[0]
        total += top_count
    return float(total) / len(labels)
| 703,521
|
Calculates the nearest neighbor accuracy (basically leave-one-out cross
validation with a 1NN classifier).
Args:
dim_red (array): dimensions (k, cells)
true_labels (array): 1d array of integers
Returns:
Nearest neighbor accuracy - fraction of points for which the 1NN
1NN classifier returns the correct value.
|
def nne(dim_red, true_labels):
    """Leave-one-out 1-nearest-neighbour accuracy of a low-dim embedding.

    Args:
        dim_red (array): embedding of shape (k, cells)
        true_labels (array): 1d array of integer labels

    Returns:
        float: fraction of cells whose nearest other cell shares their label
    """
    tree = BallTree(dim_red.T)
    n_correct = 0
    for idx, label in enumerate(true_labels):
        # k=2 because the closest point is the query point itself
        _, neighbors = tree.query([dim_red[:, idx]], k=2)
        if true_labels[neighbors[0, 1]] == label:
            n_correct += 1
    return float(n_correct) / len(true_labels)
| 703,522
|
Returns the negative binomial log-likelihood of the data.
Args:
data (array): genes x cells
P (array): NB success probability param - genes x clusters
R (array): NB stopping param - genes x clusters
Returns:
cells x clusters array of log-likelihoods
|
def nb_ll(data, P, R):
    """Negative binomial log-likelihood of each cell under each cluster.

    Constant (data-only) factorial terms are omitted.

    Args:
        data (array): genes x cells
        P (array): NB success probability - genes x clusters
        R (array): NB stopping parameter - genes x clusters

    Returns:
        (cells, clusters) array of log-likelihoods
    """
    n_genes, n_cells = data.shape
    n_clusters = P.shape[1]
    out = np.zeros((n_cells, n_clusters))
    for k in range(n_clusters):
        p_k = P[:, k].reshape((n_genes, 1))
        r_k = R[:, k].reshape((n_genes, 1))
        ll = gammaln(r_k + data) - gammaln(r_k)
        ll += data * np.log(p_k) + xlog1py(r_k, -p_k)
        out[:, k] = ll.sum(0)
    return out
| 703,526
|
returns the negative LL of a single row.
Args:
params (array) - [p, r]
data_row (array) - 1d array of data
Returns:
LL of row
|
def nb_ll_row(params, data_row):
    """Negative NB log-likelihood of one gene's counts; params = [p, r]."""
    p, r = params
    n_obs = len(data_row)
    total = np.sum(gammaln(data_row + r)) - np.sum(gammaln(data_row + 1))
    total += np.sum(data_row) * np.log(p)
    total += n_obs * (r * np.log(1 - p) - gammaln(r))
    return -total
| 703,528
|
Derivative of log-likelihood wrt r (formula from wikipedia)
Args:
r (float): the R paramemter in the NB distribution
data_row (array): 1d array of length cells
|
def nb_r_deriv(r, data_row):
    """Derivative of the NB log-likelihood w.r.t. r (p profiled out via the mean)."""
    n_obs = len(data_row)
    mean_count = np.mean(data_row)
    return sum(digamma(data_row + r)) - n_obs * digamma(r) + n_obs * np.log(r / (r + mean_count))
| 703,529
|
Fits the NB distribution to data using method of moments.
Args:
data (array): genes x cells
P_init (array, optional): NB success prob param - genes x 1
R_init (array, optional): NB stopping param - genes x 1
Returns:
P, R - fit to data
|
def nb_fit(data, P_init=None, R_init=None, epsilon=1e-8, max_iters=100):
    """Fit per-gene NB(P, R) parameters to count data.

    Moment estimates seed a bounded optimization of each gene's
    negative log-likelihood.

    Args:
        data (array): genes x cells
        P_init (array, optional): NB success prob param - genes x 1
        R_init (array, optional): NB stopping param - genes x 1

    Returns:
        P, R - fitted parameter vectors

    Raises:
        ValueError: if any gene's mean exceeds its variance
    """
    means = data.mean(1)
    variances = data.var(1)
    if (means > variances).any():
        raise ValueError("For NB fit, means must be less than variances")
    genes, cells = data.shape
    # method-of-moments starting point
    P = 1.0 - means/variances
    R = means*(1-P)/P
    for g in range(genes):
        opt = minimize(nb_ll_row, [P[g], R[g]], args=(data[g, :],),
                       bounds=[(0, 1), (eps, None)])
        P[g], R[g] = opt.x
    return P, R
| 703,530
|
Calculates the zero-inflated Poisson log-likelihood.
Args:
data (array): genes x cells
means (array): genes x k
M (array): genes x k - this is the zero-inflation parameter.
Returns:
cells x k array of log-likelihood for each cell/cluster pair.
|
def zip_ll(data, means, M):
    """Zero-inflated Poisson log-likelihood per cell/cluster pair.

    Constant factorial terms are omitted.

    Args:
        data (array): genes x cells
        means (array): genes x k Poisson means
        M (array): genes x k zero-inflation parameter

    Returns:
        (cells, k) array of log-likelihoods
    """
    n_genes, n_cells = data.shape
    n_clusters = means.shape[1]
    out = np.zeros((n_cells, n_clusters))
    zero_mask = (data == 0)
    pos_mask = (data > 0)
    for k in range(n_clusters):
        mu = np.tile(means[:, k], (n_cells, 1)).transpose()
        pi = np.tile(M[:, k], (n_cells, 1)).transpose()
        ll_zero = np.log(pi + (1 - pi) * np.exp(-mu))
        # special-case matching the original formula's pi == 0, mu == 0 branch
        ll_zero = np.where((pi == 0) & (mu == 0), -mu, ll_zero)
        ll_pos = np.log(1 - pi) + xlogy(data, mu) - mu
        ll_zero = np.where(zero_mask, ll_zero, 0.0)
        ll_pos = np.where(pos_mask, ll_pos, 0.0)
        out[:, k] = np.sum(ll_zero + ll_pos, 0)
    return out
| 703,533
|
Returns the negative log-likelihood of a row given ZIP data.
Args:
params (list): [lambda zero-inf]
data_row (array): 1d array
Returns:
negative log-likelihood
|
def zip_ll_row(params, data_row):
    """Negative ZIP log-likelihood of one row; params = [lambda, pi]."""
    lam, pi = params
    zero_mask = (data_row == 0)
    per_point = zero_mask * pi + (1 - pi) * poisson.pmf(data_row, lam)
    return -np.log(per_point + eps).sum()
| 703,534
|
Migrate an old loqusdb instance to 1.0
Args:
adapter
Returns:
nr_updated(int): Number of variants that where updated
|
def migrate_database(adapter):
    """Migrate an old loqusdb instance to the 1.0 schema.

    Pre-1.0 variant documents lacked 'chrom'/'start'/'end' fields; these are
    reconstructed from each variant's '_id', which encodes
    chrom_start_ref_alt.

    Args:
        adapter: adapter connected to the database to migrate

    Returns:
        nr_updated(int): Number of variants that were updated
    """
    all_variants = adapter.get_variants()
    nr_variants = all_variants.count()
    nr_updated = 0
    with progressbar(all_variants, label="Updating variants", length=nr_variants) as bar:
        for variant in bar:
            # Do not update if the variants have the correct format
            if 'chrom' in variant:
                continue
            nr_updated += 1
            splitted_id = variant['_id'].split('_')
            chrom = splitted_id[0]
            start = int(splitted_id[1])
            ref = splitted_id[2]
            alt = splitted_id[3]
            # Calculate end: span of the longest allele so indels cover their range
            end = start + (max(len(ref), len(alt)) - 1)
            adapter.db.variant.find_one_and_update(
                {'_id': variant['_id']},
                {
                    '$set': {
                        'chrom': chrom,
                        'start': start,
                        'end': end
                    }
                }
            )
    return nr_updated
| 703,535
|
Given a data matrix, this returns the per-gene fit error for the
Poisson, Normal, and Log-Normal distributions.
Args:
Dat (array): numpy array with shape (genes, cells)
Returns:
d (dict): 'poiss', 'norm', 'lognorm' give the fit error for each distribution.
|
def DistFitDataset(Dat):
    """Per-gene fit error for the Poisson, Normal and Log-Normal distributions.

    Args:
        Dat (array): numpy array of shape (genes, cells)

    Returns:
        dict: keys 'poiss', 'norm', 'lognorm', each a 1d array of length genes
    """
    n_genes = Dat.shape[0]
    poiss_err = np.zeros(n_genes)
    norm_err = np.zeros(n_genes)
    lognorm_err = np.zeros(n_genes)
    for g in range(n_genes):
        errors = GetDistFitError(Dat[g])
        poiss_err[g] = errors['poiss']
        norm_err[g] = errors['norm']
        lognorm_err[g] = errors['lognorm']
    return {'poiss': poiss_err, 'norm': norm_err, 'lognorm': lognorm_err}
| 703,555
|
Delete a case and all of it's variants from the database.
Args:
adapter: Connection to database
case_obj(models.Case)
update(bool): If we are in the middle of an update
existing_case(models.Case): If something failed during an update we need to revert
to the original case
|
def delete(adapter, case_obj, update=False, existing_case=False):
    """Delete a case and all of its variants from the database.

    Args:
        adapter: Connection to database
        case_obj(models.Case)
        update(bool): If we are in the middle of an update
        existing_case(models.Case): If something failed during an update we
            revert to this original case instead of removing the case
    """
    # This will overwrite the updated case with the previous one
    if update:
        adapter.add_case(existing_case)
    else:
        adapter.delete_case(case_obj)
    # Remove the case's variants from every VCF it was loaded from
    for file_type in ['vcf_path','vcf_sv_path']:
        if not case_obj.get(file_type):
            continue
        variant_file = case_obj[file_type]
        # Get a cyvcf2.VCF object
        vcf_obj = get_vcf(variant_file)
        delete_variants(
            adapter=adapter,
            vcf_obj=vcf_obj,
            case_obj=case_obj,
        )
| 703,556
|
Delete variants for a case in the database
Args:
adapter(loqusdb.plugins.Adapter)
vcf_obj(iterable(dict))
ind_positions(dict)
case_id(str)
Returns:
nr_deleted (int): Number of deleted variants
|
def delete_variants(adapter, vcf_obj, case_obj, case_id=None):
    """Delete all of a case's variants from the database.

    Args:
        adapter(loqusdb.plugins.Adapter)
        vcf_obj(iterable(dict)): variants to delete
        case_obj(models.Case)
        case_id(str): overrides case_obj['case_id'] if given

    Returns:
        nr_deleted (int): Number of deleted variants
    """
    case_id = case_id or case_obj['case_id']
    nr_deleted = 0
    # NOTE(review): start_deleting is never read afterwards
    start_deleting = datetime.now()
    chrom_time = datetime.now()
    current_chrom = None
    new_chrom = None
    for variant in vcf_obj:
        formated_variant = build_variant(
            variant=variant,
            case_obj=case_obj,
            case_id=case_id,
        )
        # variants that could not be built are skipped
        if not formated_variant:
            continue
        new_chrom = formated_variant.get('chrom')
        adapter.delete_variant(formated_variant)
        nr_deleted += 1
        # first deleted variant: start the per-chromosome progress log
        if not current_chrom:
            LOG.info("Start deleting chromosome {}".format(new_chrom))
            current_chrom = new_chrom
            chrom_time = datetime.now()
            continue
        # log when moving on to a new chromosome
        # NOTE(review): chrom_time is not reset here, so later per-chromosome
        # timings accumulate since the first chromosome -- confirm intent
        if new_chrom != current_chrom:
            LOG.info("Chromosome {0} done".format(current_chrom))
            LOG.info("Time to delete chromosome {0}: {1}".format(
                current_chrom, datetime.now()-chrom_time))
            LOG.info("Start deleting chromosome {0}".format(new_chrom))
            current_chrom = new_chrom
    return nr_deleted
| 703,557
|
Generates starting points using binarized data. If qualitative data is missing for a given gene, all of its entries should be -1 in the qualitative matrix.
Args:
data (array): 2d array of genes x cells
qualitative (array): 2d array of numerical data - genes x clusters
Returns:
Array of starting positions for state estimation or
clustering, with shape genes x clusters
|
def qualNorm(data, qualitative):
    """Generate cluster-center starting points from qualitative data.

    Genes whose qualitative row is all -1 are treated as missing and are
    filled in afterwards from a one-iteration Poisson clustering of the
    genes that do have qualitative information.

    Args:
        data (array): genes x cells expression matrix (may be scipy-sparse)
        qualitative (array): genes x clusters matrix; rows of all -1 mark
            genes with no qualitative information

    Returns:
        Array of starting positions for state estimation or clustering,
        with shape genes x clusters
    """
    genes, cells = data.shape
    clusters = qualitative.shape[1]
    output = np.zeros((genes, clusters))
    missing_indices = []
    qual_indices = []
    # midpoint between each gene's min and max qualitative value
    thresholds = qualitative.min(1) + (qualitative.max(1) - qualitative.min(1))/2.0
    for i in range(genes):
        if qualitative[i,:].max() == -1 and qualitative[i,:].min() == -1:
            missing_indices.append(i)
            continue
        qual_indices.append(i)
        threshold = thresholds[i]
        data_i = data[i,:]
        if sparse.issparse(data):
            data_i = data_i.toarray().flatten()
        # split the gene's expression into low/high components
        assignments, means = poisson_cluster(data_i.reshape((1, cells)), 2)
        means = means.flatten()
        high_i = 1
        low_i = 0
        if means[0]>means[1]:
            high_i = 0
            low_i = 1
        high_mean = np.median(data_i[assignments==high_i])
        low_mean = np.median(data_i[assignments==low_i])
        for k in range(clusters):
            if qualitative[i,k]>threshold:
                output[i,k] = high_mean
            else:
                output[i,k] = low_mean
    if missing_indices:
        assignments, means = poisson_cluster(data[qual_indices, :], clusters, output[qual_indices, :], max_iters=1)
        for ind in missing_indices:
            for k in range(clusters):
                # BUG FIX: the original tested len(assignments==k), which is
                # the number of cells and never 0; count cluster members so
                # empty clusters fall back to the gene's overall mean.
                if np.sum(assignments==k) == 0:
                    output[ind, k] = data[ind,:].mean()
                else:
                    output[ind, k] = data[ind, assignments==k].mean()
    return output
| 703,568
|
Generates starting points using binarized data. If qualitative data is missing for a given gene, all of its entries should be -1 in the qualitative matrix.
Args:
data (array): 2d array of genes x cells
qualitative (array): 2d array of numerical data - genes x clusters
Returns:
Array of starting positions for state estimation or
clustering, with shape genes x clusters
|
def qualNormGaussian(data, qualitative):
    """Generate cluster-center starting points from qualitative data (KMeans variant).

    Genes whose qualitative row is all -1 are treated as missing and are
    filled in from a one-iteration KMeans over the genes with qualitative
    information.

    Args:
        data (array): 2d array of genes x cells
        qualitative (array): 2d array genes x clusters; rows of all -1 mark
            genes without qualitative information

    Returns:
        Array of starting positions, shape genes x clusters
    """
    genes, cells = data.shape
    clusters = qualitative.shape[1]
    output = np.zeros((genes, clusters))
    missing_indices = []
    qual_indices = []
    for i in range(genes):
        if qualitative[i,:].max() == -1 and qualitative[i,:].min() == -1:
            missing_indices.append(i)
            continue
        qual_indices.append(i)
        # NOTE(review): unlike qualNorm, this midpoint is (max-min)/2 rather
        # than min + (max-min)/2 -- confirm which threshold is intended
        threshold = (qualitative[i,:].max() - qualitative[i,:].min())/2.0
        # NOTE(review): reshape((1, cells)) gives KMeans a single sample with
        # `cells` features; clustering the cells would need (cells, 1) -- verify
        kmeans = KMeans(n_clusters = 2).fit(data[i,:].reshape((1, cells)))
        assignments = kmeans.labels_
        means = kmeans.cluster_centers_
        high_mean = means.max()
        low_mean = means.min()
        for k in range(clusters):
            if qualitative[i,k]>threshold:
                output[i,k] = high_mean
            else:
                output[i,k] = low_mean
    if missing_indices:
        #generating centers for missing indices
        M_init = output[qual_indices, :]
        # NOTE(review): n_clusters is hard-coded to 2 although M_init has
        # `clusters` columns -- likely should be n_clusters=clusters; confirm
        kmeans = KMeans(n_clusters = 2, init = M_init, max_iter = 1).fit(data[qual_indices, :])
        assignments = kmeans.labels_
        #assignments, means = poisson_cluster(data[qual_indices, :], clusters, output[qual_indices, :], max_iters=1)
        for ind in missing_indices:
            for k in range(clusters):
                output[ind, k] = np.mean(data[ind, assignments==k])
                # TODO: assign to closest
    return output
| 703,569
|
Get current script's directory
Args:
pyobject (Any): Any Python object in the script
follow_symlinks (Optional[bool]): Follow symlinks or not. Defaults to True.
Returns:
str: Current script's directory
|
def script_dir(pyobject, follow_symlinks=True):
    """Directory containing the current script (or frozen executable).

    Args:
        pyobject (Any): Any Python object defined in the script
        follow_symlinks (Optional[bool]): Resolve symlinks. Defaults to True.

    Returns:
        str: Current script's directory
    """
    if getattr(sys, 'frozen', False):  # py2exe, PyInstaller, cx_Freeze
        script_path = abspath(sys.executable)
    else:
        script_path = inspect.getabsfile(pyobject)
    return dirname(realpath(script_path) if follow_symlinks else script_path)
| 703,570
|
Get current script's directory and then append a filename
Args:
filename (str): Filename to append to directory path
pyobject (Any): Any Python object in the script
follow_symlinks (Optional[bool]): Follow symlinks or not. Defaults to True.
Returns:
str: Current script's directory and with filename appended
|
def script_dir_plus_file(filename, pyobject, follow_symlinks=True):
    """Path of `filename` inside the current script's directory.

    Args:
        filename (str): Filename to append to directory path
        pyobject (Any): Any Python object defined in the script
        follow_symlinks (Optional[bool]): Resolve symlinks. Defaults to True.

    Returns:
        str: Current script's directory with filename appended
    """
    directory = script_dir(pyobject, follow_symlinks)
    return join(directory, filename)
| 703,571
|
Annotate a cyvcf variant with observations
Args:
variant(cyvcf2.variant)
var_obj(dict)
Returns:
variant(cyvcf2.variant): Annotated variant
|
def annotate_variant(variant, var_obj=None):
    """Write observation counts from `var_obj` into the variant's INFO field.

    Args:
        variant(cyvcf2.variant)
        var_obj(dict): observation document; ignored when falsy

    Returns:
        variant(cyvcf2.variant): the (possibly annotated) variant
    """
    if not var_obj:
        return variant
    variant.INFO['Obs'] = var_obj['observations']
    homozygotes = var_obj.get('homozygote')
    if homozygotes:
        variant.INFO['Hom'] = homozygotes
    hemizygotes = var_obj.get('hemizygote')
    if hemizygotes:
        variant.INFO['Hem'] = hemizygotes
    return variant
| 703,581
|
Annotate an SNV/INDEL variant
Args:
adapter(loqusdb.plugin.adapter)
variant(cyvcf2.Variant)
|
def annotate_snv(adapter, variant):
    """Annotate a single SNV/INDEL with observation counts from the database.

    Args:
        adapter(loqusdb.plugin.adapter)
        variant(cyvcf2.Variant)

    Returns:
        cyvcf2.Variant: the annotated variant
    """
    variant_id = get_variant_id(variant)
    variant_obj = adapter.get_variant(variant={'_id': variant_id})
    # BUG FIX: the parameter was misspelled 'adpter' while the body used
    # 'adapter' (NameError), and the call below referenced the unbound name
    # 'annotated_variant' instead of the helper annotate_variant().
    annotated = annotate_variant(variant, variant_obj)
    return annotated
| 703,582
|
Annotate all SV variants in a VCF
Args:
adapter(loqusdb.plugin.adapter)
vcf_obj(cyvcf2.VCF)
Yields:
variant(cyvcf2.Variant)
|
def annotate_svs(adapter, vcf_obj):
    """Yield SV variants, annotating those with a matching database cluster.

    Args:
        adapter(loqusdb.plugin.adapter)
        vcf_obj(cyvcf2.VCF)

    Yields:
        variant(cyvcf2.Variant)
    """
    for sv_variant in vcf_obj:
        coords = get_coords(sv_variant)
        hit = adapter.get_structural_variant(coords)
        if hit:
            annotate_variant(sv_variant, hit)
        yield sv_variant
| 703,583
|
Annotate all variants in a VCF
Args:
adapter(loqusdb.plugin.adapter)
vcf_obj(cyvcf2.VCF)
Yields:
variant(cyvcf2.Variant): Annotated variant
|
def annotate_snvs(adapter, vcf_obj):
    """Annotate all SNVs in a VCF with observations from the database.

    Variants are collected into batches of 1000 and looked up with one bulk
    query per batch; each batch is yielded before the next is collected.

    Args:
        adapter(loqusdb.plugin.adapter)
        vcf_obj(cyvcf2.VCF)

    Yields:
        variant(cyvcf2.Variant): Annotated variant
    """
    variants = {}
    for nr_variants, variant in enumerate(vcf_obj, 1):
        # Add the variant to current batch
        variants[get_variant_id(variant)] = variant
        # If batch len == 1000 we annotate the batch
        if (nr_variants % 1000) == 0:
            for var_obj in adapter.search_variants(list(variants.keys())):
                var_id = var_obj['_id']
                if var_id in variants:
                    annotate_variant(variants[var_id], var_obj)
            for variant_id in variants:
                yield variants[variant_id]
            variants = {}
    # annotate and flush the final (partial) batch
    for var_obj in adapter.search_variants(list(variants.keys())):
        var_id = var_obj['_id']
        if var_id in variants:
            annotate_variant(variants[var_id], var_obj)
    for variant_id in variants:
        yield variants[variant_id]
| 703,584
|
Calculates the Poisson log-likelihood.
Args:
data (array): 2d numpy array of genes x cells
means (array): 2d numpy array of genes x k
Returns:
cells x k array of log-likelihood for each cell/cluster pair
|
def poisson_ll(data, means):
    """Poisson log-likelihood of each cell under each cluster's mean vector.

    Constant factorial terms are omitted.

    Args:
        data (array): 2d numpy array of genes x cells
        means (array): 2d numpy array of genes x k

    Returns:
        (cells, k) array of log-likelihoods
    """
    if sparse.issparse(data):
        return sparse_poisson_ll(data, means)
    n_genes, n_cells = data.shape
    n_clusters = means.shape[1]
    out = np.zeros((n_cells, n_clusters))
    for k in range(n_clusters):
        mu = np.tile(means[:, k], (n_cells, 1)).transpose() + eps
        out[:, k] = np.sum(xlogy(data, mu) - mu, 0)
    return out
| 703,645
|
Construct a individual object
Args:
ind_id (str): The individual id
case_id (str): What case it belongs to
mother (str): The mother id
father (str): The father id
sex (str): Sex in ped format
phenotype (str): Phenotype in ped format
ind_index (int): Column in the vcf.
|
def __init__(self, ind_id, case_id=None, mother=None,
             father=None, sex=None, phenotype=None, ind_index=None,
             profile=None, similar_samples=None):
    """Construct an individual object.

    Args:
        ind_id (str): The individual id (also used as 'name')
        case_id (str): What case it belongs to
        mother (str): The mother id
        father (str): The father id
        sex (str): Sex in ped format
        phenotype (str): Phenotype in ped format
        ind_index (int): Column in the vcf
        profile: profile string, stored only when truthy
        similar_samples: stored only when truthy

    NOTE(review): mother, father and phenotype are accepted but not passed
    to the superclass nor stored -- confirm whether that is intentional.
    """
    super(Individual, self).__init__(
        ind_id=ind_id,
        name=ind_id,
        case_id=case_id,
        ind_index=ind_index,
        sex=sex,
    )
    if profile: self['profile'] = profile
    if similar_samples: self['similar_samples'] = similar_samples
| 703,681
|
Check if there are any overlapping sv clusters
Search the sv variants with chrom start end_chrom end and sv_type
Args:
variant (dict): A variant dictionary
Returns:
variant (dict): A variant dictionary
|
def get_structural_variant(self, variant):
    """Find the SV cluster overlapping a variant, preferring the closest one.

    Candidate clusters of the same type on the same chromosomes whose
    pos interval brackets the variant's position are fetched, then filtered
    on the end coordinate; the cluster with the smallest combined distance
    of its interval midpoints to the variant's pos/end wins.

    Args:
        variant (dict): A variant dictionary

    Returns:
        dict or None: the best-matching cluster document, if any
    """
    # Create a query for the database
    # This will include more variants than we want
    # The rest of the calculations will be done in python
    query = {
        'chrom': variant['chrom'],
        'end_chrom': variant['end_chrom'],
        'sv_type': variant['sv_type'],
        '$and': [
            {'pos_left': {'$lte': variant['pos']}},
            {'pos_right': {'$gte': variant['pos']}},
        ]
    }
    res = self.db.structural_variant.find(query).sort('pos_left',1)
    match = None
    distance = None
    closest_hit = None
    # First we check that the coordinates are correct
    # Then we count the distance to mean on both ends to see which variant is closest
    for hit in res:
        # We know from the query that the variants position is larger than the left most part of
        # the cluster.
        # If the right most part of the cluster is smaller than the variant position they do
        # not overlap
        if hit['end_left'] > variant['end']:
            continue
        if hit['end_right'] < variant['end']:
            continue
        # We need to calculate the distance to see what cluster that was closest to the variant
        distance = (abs(variant['pos'] - (hit['pos_left'] + hit['pos_right'])/2) +
                    abs(variant['end'] - (hit['end_left'] + hit['end_right'])/2))
        # If we have no cluster yet we set the curent to be the hit
        if closest_hit is None:
            match = hit
            closest_hit = distance
            continue
        # If the distance is closer than previous we choose current cluster
        if distance < closest_hit:
            # Set match to the current closest hit
            match = hit
            # Update the closest distance
            closest_hit = distance
    return match
| 703,696
|
Return all structural variants in the database
Args:
chromosome (str)
end_chromosome (str)
sv_type (str)
pos (int): Left position of SV
end (int): Right position of SV
Returns:
variants (Iterable(Variant))
|
def get_sv_variants(self, chromosome=None, end_chromosome=None, sv_type=None,
                    pos=None, end=None):
    """Return structural variants matching the given (optional) filters.

    Args:
        chromosome (str)
        end_chromosome (str)
        sv_type (str)
        pos (int): Left position of SV; matches clusters whose
            [pos_left, pos_right] interval contains it
        end (int): Right position of SV; matches clusters whose
            [end_left, end_right] interval contains it

    Returns:
        pymongo cursor sorted by chromosome then pos_left
    """
    query = {}
    if chromosome:
        query['chrom'] = chromosome
    if end_chromosome:
        query['end_chrom'] = end_chromosome
    if sv_type:
        query['sv_type'] = sv_type
    if pos:
        if not '$and' in query:
            query['$and'] = []
        query['$and'].append({'pos_left': {'$lte': pos}})
        query['$and'].append({'pos_right': {'$gte': pos}})
    if end:
        if not '$and' in query:
            query['$and'] = []
        query['$and'].append({'end_left': {'$lte': end}})
        query['$and'].append({'end_right': {'$gte': end}})
    LOG.info("Find all sv variants {}".format(query))
    return self.db.structural_variant.find(query).sort([('chrom', ASCENDING), ('pos_left', ASCENDING)])
| 703,697
|
Search what clusters a variant belongs to
Args:
variant_id(str): From ID column in vcf
Returns:
clusters()
|
def get_clusters(self, variant_id):
    """Return identity documents linking `variant_id` to its clusters.

    Args:
        variant_id(str): From the ID column in the vcf

    Returns:
        cursor over matching identity documents
    """
    return self.db.identity.find({'variant_id': variant_id})
| 703,698
|
Given an undirected adjacency list and a pairwise distance matrix between
all nodes: calculates distances along graph from start node.
Args:
start (int): start node
edges (list): adjacency list of tuples
distances (array): 2d array of distances between nodes
Returns:
dict of node to distance from start
|
def graph_distances(start, edges, distances):
    """Dijkstra shortest-path distances from `start` over an undirected graph.

    Args:
        start (int): start node
        edges (list): undirected edges as (u, v) tuples
        distances (array): 2d matrix of edge weights between nodes

    Returns:
        dict mapping each reached node to its distance from start
    """
    # adjacency list keyed by node index
    neighbors = {node: [] for node in range(len(distances))}
    for u, v in edges:
        neighbors[u].append(v)
        neighbors[v].append(u)
    # seed the frontier with start's direct neighbours
    frontier = [(distances[start, nbr], nbr) for nbr in neighbors[start]]
    heapq.heapify(frontier)
    best = {}
    while frontier:
        dist, node = heapq.heappop(frontier)
        if node in best:
            continue
        best[node] = dist
        for nbr in neighbors[node]:
            if nbr not in best:
                heapq.heappush(frontier, (dist + distances[node, nbr], nbr))
    return best
| 703,707
|
Set up countries data from data in form provided by UNStats and World Bank
Args:
iso3 (str): ISO3 code for country
country (hxl.Row): Country information
Returns:
None
|
def _add_countriesdata(cls, iso3, country):
    # type: (str, hxl.Row) -> None
    """Index one country row into the class-level lookup dictionaries.

    Populates name, ISO2, M49 and alias lookups plus the region,
    sub-region and intermediate-region mappings for the given ISO3 code.

    Args:
        iso3 (str): ISO3 code for country
        country (hxl.Row): Country information

    Returns:
        None
    """
    countryname = country.get('#country+name+preferred')
    cls._countriesdata['countrynames2iso3'][countryname.upper()] = iso3
    iso2 = country.get('#country+code+v_iso2')
    if iso2:
        cls._countriesdata['iso2iso3'][iso2] = iso3
        # different types so keys won't clash
        cls._countriesdata['iso2iso3'][iso3] = iso2
    m49 = country.get('#country+code+num+v_m49')
    if m49:
        m49 = int(m49)
        cls._countriesdata['m49iso3'][m49] = iso3
        # different types so keys won't clash
        cls._countriesdata['m49iso3'][iso3] = m49
    cls._countriesdata['aliases'][iso3] = re.compile(country.get('#country+regex'), re.IGNORECASE)
    regionname = country.get('#region+main+name+preferred')
    sub_regionname = country.get('#region+sub+name+preferred')
    intermediate_regionname = country.get('#region+intermediate+name+preferred')
    regionid = country.get('#region+main+code')
    if regionid:
        regionid = int(regionid)
    sub_regionid = country.get('#region+sub+code')
    if sub_regionid:
        sub_regionid = int(sub_regionid)
    intermediate_regionid = country.get('#region+intermediate+code')
    if intermediate_regionid:
        intermediate_regionid = int(intermediate_regionid)
    # region, subregion and intermediate region codes do not clash so only need one dict
    def add_country_to_set(colname, idval, iso3):
        # NOTE(review): 'colname' is used for the read but the write below
        # hard-codes 'regioncodes2countries'; harmless today because that is
        # the only colname ever passed in -- confirm before reusing
        value = cls._countriesdata[colname].get(idval)
        if value is None:
            value = set()
            cls._countriesdata['regioncodes2countries'][idval] = value
        value.add(iso3)
    if regionname:
        add_country_to_set('regioncodes2countries', regionid, iso3)
        cls._countriesdata['regioncodes2names'][regionid] = regionname
        cls._countriesdata['regionnames2codes'][regionname.upper()] = regionid
    if sub_regionname:
        add_country_to_set('regioncodes2countries', sub_regionid, iso3)
        cls._countriesdata['regioncodes2names'][sub_regionid] = sub_regionname
        cls._countriesdata['regionnames2codes'][sub_regionname.upper()] = sub_regionid
    if intermediate_regionname:
        add_country_to_set('regioncodes2countries', intermediate_regionid, iso3)
        cls._countriesdata['regioncodes2names'][intermediate_regionid] = intermediate_regionname
        cls._countriesdata['regionnames2codes'][intermediate_regionname.upper()] = \
            intermediate_regionid
| 703,711
|
Set up countries data from data in form provided by UNStats and World Bank
Args:
countries (str): Countries data in HTML format provided by UNStats
Returns:
None
|
def set_countriesdata(cls, countries):
    # type: (str) -> None
    """Rebuild all class-level country lookup tables from an hxl dataset.

    Rows without an ISO3 code are skipped; region-to-country sets are
    converted to sorted lists at the end.

    Args:
        countries: iterable of hxl country rows

    Returns:
        None
    """
    cls._countriesdata = dict()
    cls._countriesdata['countries'] = dict()
    cls._countriesdata['iso2iso3'] = dict()
    cls._countriesdata['m49iso3'] = dict()
    cls._countriesdata['countrynames2iso3'] = dict()
    cls._countriesdata['regioncodes2countries'] = dict()
    cls._countriesdata['regioncodes2names'] = dict()
    cls._countriesdata['regionnames2codes'] = dict()
    cls._countriesdata['aliases'] = dict()
    for country in countries:
        iso3 = country.get('#country+code+v_iso3')
        if not iso3:
            continue
        iso3 = iso3.upper()
        cls._add_countriesdata(iso3, country)
        cls._countriesdata['countries'][iso3] = country.dictionary
    def sort_list(colname):
        # convert each set of ISO3 codes to a sorted list for stable output
        for idval in cls._countriesdata[colname]:
            cls._countriesdata[colname][idval] = \
                sorted(list(cls._countriesdata[colname][idval]))
    sort_list('regioncodes2countries')
| 703,712
|
Read countries data from OCHA countries feed (falling back to file)
Args:
use_live (bool): Try to get use latest data from web rather than file in package. Defaults to True.
Returns:
List[Dict[Dict]]: Countries dictionaries
|
def countriesdata(cls, use_live=True):
    # type: (bool) -> List[Dict[Dict]]
    """Lazily load and cache countries data (OCHA feed, else packaged file).

    Args:
        use_live (bool): Try to get latest data from web rather than file in package. Defaults to True.

    Returns:
        List[Dict[Dict]]: Countries dictionaries
    """
    if cls._countriesdata is None:
        countries = None
        if use_live:
            try:
                countries = hxl.data(cls._ochaurl)
            except IOError:
                logger.exception('Download from OCHA feed failed! Falling back to stored file.')
        # fall back to the CSV bundled with the package
        if countries is None:
            countries = hxl.data(
                script_dir_plus_file('Countries & Territories Taxonomy MVP - C&T Taxonomy with HXL Tags.csv',
                                     Country), allow_local=True)
        cls.set_countriesdata(countries)
    return cls._countriesdata
| 703,713
|
Set World Bank url from which to retrieve countries data
Args:
url (str): World Bank url from which to retrieve countries data. Defaults to internal value.
Returns:
None
|
def set_ocha_url(cls, url=None):
    # type: (str) -> None
    """Point the class at a custom (or, when None, the default) OCHA countries URL.

    Args:
        url (str): URL from which to retrieve countries data. Defaults to internal value.

    Returns:
        None
    """
    cls._ochaurl = cls._ochaurl_int if url is None else url
| 703,714
|
Get country information from ISO3 code
Args:
iso3 (str): ISO3 code for which to get country information
use_live (bool): Try to get use latest data from web rather than file in package. Defaults to True.
exception (Optional[ExceptionUpperBound]): An exception to raise if country not found. Defaults to None.
Returns:
Optional[Dict[str]]: country information
|
def get_country_info_from_iso3(cls, iso3, use_live=True, exception=None):
    # type: (str, bool, Optional[ExceptionUpperBound]) -> Optional[Dict[str]]
    """Look up the country record for an ISO3 code (case-insensitive).

    Args:
        iso3 (str): ISO3 code for which to get country information
        use_live (bool): Try to get latest data from web rather than file in package. Defaults to True.
        exception (Optional[ExceptionUpperBound]): An exception to raise if country not found. Defaults to None.

    Returns:
        Optional[Dict[str]]: country information
    """
    data = cls.countriesdata(use_live=use_live)
    info = data['countries'].get(iso3.upper())
    if info is None and exception is not None:
        raise exception
    return info
| 703,715
|
Get country name from ISO3 code
Args:
iso3 (str): ISO3 code for which to get country name
use_live (bool): Try to get use latest data from web rather than file in package. Defaults to True.
exception (Optional[ExceptionUpperBound]): An exception to raise if country not found. Defaults to None.
Returns:
Optional[str]: Country name
|
def get_country_name_from_iso3(cls, iso3, use_live=True, exception=None):
    # type: (str, bool, Optional[ExceptionUpperBound]) -> Optional[str]
    """Resolve an ISO3 code to the preferred country name.

    Args:
        iso3 (str): ISO3 code for which to get country name
        use_live (bool): Try to get latest data from web rather than file in package. Defaults to True.
        exception (Optional[ExceptionUpperBound]): An exception to raise if country not found. Defaults to None.

    Returns:
        Optional[str]: Country name
    """
    info = cls.get_country_info_from_iso3(iso3, use_live=use_live, exception=exception)
    return None if info is None else info.get('#country+name+preferred')
| 703,716
|
Get ISO2 from ISO3 code
Args:
iso3 (str): ISO3 code for which to get ISO2 code
use_live (bool): Try to get use latest data from web rather than file in package. Defaults to True.
exception (Optional[ExceptionUpperBound]): An exception to raise if country not found. Defaults to None.
Returns:
Optional[str]: ISO2 code
|
def get_iso2_from_iso3(cls, iso3, use_live=True, exception=None):
    # type: (str, bool, Optional[ExceptionUpperBound]) -> Optional[str]
    """Map an ISO3 code to its ISO2 code (case-insensitive).

    Args:
        iso3 (str): ISO3 code for which to get ISO2 code
        use_live (bool): Try to get latest data from web rather than file in package. Defaults to True.
        exception (Optional[ExceptionUpperBound]): An exception to raise if country not found. Defaults to None.

    Returns:
        Optional[str]: ISO2 code
    """
    data = cls.countriesdata(use_live=use_live)
    iso2 = data['iso2iso3'].get(iso3.upper())
    if iso2 is None and exception is not None:
        raise exception
    return iso2
| 703,717
|
Get country name from ISO2 code
Args:
iso2 (str): ISO2 code for which to get country information
use_live (bool): Try to get use latest data from web rather than file in package. Defaults to True.
exception (Optional[ExceptionUpperBound]): An exception to raise if country not found. Defaults to None.
Returns:
Optional[Dict[str]]: Country information
|
def get_country_info_from_iso2(cls, iso2, use_live=True, exception=None):
    # type: (str, bool, Optional[ExceptionUpperBound]) -> Optional[Dict[str]]
    """Country record for an ISO2 code, resolved via ISO3.

    Args:
        iso2 (str): ISO2 code for which to get country information
        use_live (bool): Try to get latest data from web rather than file in package. Defaults to True.
        exception (Optional[ExceptionUpperBound]): An exception to raise if country not found. Defaults to None.

    Returns:
        Optional[Dict[str]]: Country information
    """
    iso3 = cls.get_iso3_from_iso2(iso2, use_live=use_live, exception=exception)
    if iso3 is None:
        return None
    return cls.get_country_info_from_iso3(iso3, use_live=use_live, exception=exception)
| 703,718
|
Get country name from ISO2 code
Args:
iso2 (str): ISO2 code for which to get country name
use_live (bool): Try to get use latest data from web rather than file in package. Defaults to True.
exception (Optional[ExceptionUpperBound]): An exception to raise if country not found. Defaults to None.
Returns:
Optional[str]: Country name
|
def get_country_name_from_iso2(cls, iso2, use_live=True, exception=None):
    # type: (str, bool, Optional[ExceptionUpperBound]) -> Optional[str]
    """Country name for an ISO2 code, resolved via ISO3.

    Args:
        iso2 (str): ISO2 code for which to get country name
        use_live (bool): Try to get latest data from web rather than file in package. Defaults to True.
        exception (Optional[ExceptionUpperBound]): An exception to raise if country not found. Defaults to None.

    Returns:
        Optional[str]: Country name
    """
    iso3 = cls.get_iso3_from_iso2(iso2, use_live=use_live, exception=exception)
    if iso3 is None:
        return None
    # note: use_live is intentionally not forwarded, matching the original call
    return cls.get_country_name_from_iso3(iso3, exception=exception)
| 703,719
|
Get M49 from ISO3 code
Args:
iso3 (str): ISO3 code for which to get M49 code
use_live (bool): Try to get use latest data from web rather than file in package. Defaults to True.
exception (Optional[ExceptionUpperBound]): An exception to raise if country not found. Defaults to None.
Returns:
Optional[int]: M49 code
|
def get_m49_from_iso3(cls, iso3, use_live=True, exception=None):
    # type: (str, bool, Optional[ExceptionUpperBound]) -> Optional[int]
    """Get the M49 numeric code for an ISO3 code.

    Args:
        iso3 (str): ISO3 code for which to get M49 code
        use_live (bool): Try to get latest data from web rather than file in package. Defaults to True.
        exception (Optional[ExceptionUpperBound]): An exception to raise if country not found. Defaults to None.

    Returns:
        Optional[int]: M49 code
    """
    countriesdata = cls.countriesdata(use_live=use_live)
    # FIX: normalise case like the sibling ISO3 lookups -- the m49iso3 keys
    # are stored uppercase, so lowercase input previously always missed
    m49 = countriesdata['m49iso3'].get(iso3.upper())
    if m49 is not None:
        return m49
    if exception is not None:
        raise exception
    return None
| 703,720
|
Get country name from M49 code
Args:
m49 (int): M49 numeric code for which to get country information
use_live (bool): Try to get use latest data from web rather than file in package. Defaults to True.
exception (Optional[ExceptionUpperBound]): An exception to raise if country not found. Defaults to None.
Returns:
Optional[Dict[str]]: Country information
|
def get_country_info_from_m49(cls, m49, use_live=True, exception=None):
    # type: (int, bool, Optional[ExceptionUpperBound]) -> Optional[Dict[str]]
    """Country record for an M49 numeric code, resolved via ISO3.

    Args:
        m49 (int): M49 numeric code for which to get country information
        use_live (bool): Try to get latest data from web rather than file in package. Defaults to True.
        exception (Optional[ExceptionUpperBound]): An exception to raise if country not found. Defaults to None.

    Returns:
        Optional[Dict[str]]: Country information
    """
    iso3 = cls.get_iso3_from_m49(m49, use_live=use_live, exception=exception)
    if iso3 is None:
        return None
    # note: use_live is intentionally not forwarded, matching the original call
    return cls.get_country_info_from_iso3(iso3, exception=exception)
| 703,721
|
Get country name from M49 code
Args:
m49 (int): M49 numeric code for which to get country name
use_live (bool): Try to get use latest data from web rather than file in package. Defaults to True.
exception (Optional[ExceptionUpperBound]): An exception to raise if country not found. Defaults to None.
Returns:
Optional[str]: Country name
|
def get_country_name_from_m49(cls, m49, use_live=True, exception=None):
    # type: (int, bool, Optional[ExceptionUpperBound]) -> Optional[str]
    """Country name for an M49 numeric code, resolved via ISO3.

    Args:
        m49 (int): M49 numeric code for which to get country name
        use_live (bool): Try to get latest data from web rather than file in package. Defaults to True.
        exception (Optional[ExceptionUpperBound]): An exception to raise if country not found. Defaults to None.

    Returns:
        Optional[str]: Country name
    """
    iso3 = cls.get_iso3_from_m49(m49, use_live=use_live, exception=exception)
    if iso3 is None:
        return None
    # note: use_live is intentionally not forwarded, matching the original call
    return cls.get_country_name_from_iso3(iso3, exception=exception)
| 703,722
|
Expands abbreviation(s) in country name in various ways (eg. FED -> FEDERATED, FEDERAL etc.)
Args:
country (str): Country with abbreviation(s)to expand
Returns:
List[str]: Uppercase country name with abbreviation(s) expanded in various ways
|
def expand_countryname_abbrevs(cls, country):
    # type: (str) -> List[str]
    """Expand abbreviation(s) in a country name in various ways.

    Single-valued abbreviations (cls.abbreviations) are substituted in
    place; multi-valued ones (cls.multiple_abbreviations) produce one
    extra candidate per possible expansion.

    Args:
        country (str): Country name possibly containing abbreviations

    Returns:
        List[str]: Uppercase candidate names with abbreviations expanded
    """
    def replace_ensure_space(word, replace, replacement):
        # NOTE(review): the trailing .replace(' ', ' ') is a no-op as
        # written; it presumably was meant to collapse double spaces
        # introduced by the substitution -- confirm
        return word.replace(replace, '%s ' % replacement).replace(' ', ' ').strip()
    countryupper = country.upper()
    for abbreviation in cls.abbreviations:
        countryupper = replace_ensure_space(countryupper, abbreviation, cls.abbreviations[abbreviation])
    candidates = [countryupper]
    for abbreviation in cls.multiple_abbreviations:
        if abbreviation in countryupper:
            for expanded in cls.multiple_abbreviations[abbreviation]:
                candidates.append(replace_ensure_space(countryupper, abbreviation, expanded))
    return candidates
| 703,723
|
Simplifies country name by removing descriptive text eg. DEMOCRATIC, REPUBLIC OF etc.
Args:
country (str): Country name to simplify
Returns:
Tuple[str, List[str]]: Uppercase simplified country name and list of removed words
|
def simplify_countryname(cls, country):
    # type: (str) -> (str, List[str])
    """Simplify a country name by removing descriptive text.

    Truncates at the first ',' or ':', strips parenthesised qualifiers and
    any word in cls.simplifications or the abbreviation expansions, then
    keeps only the first remaining word.

    Args:
        country (str): Country name to simplify

    Returns:
        Tuple[str, List[str]]: Uppercase simplified country name and list of removed words
    """
    countryupper = country.upper()
    words = get_words_in_sentence(countryupper)
    # cut off everything after a comma or colon
    index = countryupper.find(',')
    if index != -1:
        countryupper = countryupper[:index]
    index = countryupper.find(':')
    if index != -1:
        countryupper = countryupper[:index]
    # drop parenthesised qualifiers
    regex = re.compile('\(.+?\)')
    countryupper = regex.sub('', countryupper)
    remove = copy.deepcopy(cls.simplifications)
    for simplification1, simplification2 in cls.abbreviations.items():
        countryupper = countryupper.replace(simplification1, '')
        remove.append(simplification2)
    for simplification1, simplifications in cls.multiple_abbreviations.items():
        countryupper = countryupper.replace(simplification1, '')
        for simplification2 in simplifications:
            remove.append(simplification2)
    # strip all collected removal words in one case-insensitive pass
    remove = '|'.join(remove)
    regex = re.compile(r'\b(' + remove + r')\b', flags=re.IGNORECASE)
    countryupper = regex.sub('', countryupper)
    countryupper = countryupper.strip()
    countryupper_words = get_words_in_sentence(countryupper)
    if len(countryupper_words) > 1:
        # keep only the first remaining word
        countryupper = countryupper_words[0]
    if countryupper:
        words.remove(countryupper)
    return countryupper, words
| 703,724
|
Get ISO3 code for cls. Only exact matches or None are returned.
Args:
country (str): Country for which to get ISO3 code
use_live (bool): Try to get use latest data from web rather than file in package. Defaults to True.
exception (Optional[ExceptionUpperBound]): An exception to raise if country not found. Defaults to None.
Returns:
Optional[str]: ISO3 country code or None
|
def get_iso3_country_code(cls, country, use_live=True, exception=None):
    # type: (str, bool, Optional[ExceptionUpperBound]) -> Optional[str]
    """Exact-match lookup of an ISO3 code from a code or country name.

    Tries, in order: the value itself as an ISO3 code, as an ISO2 code,
    as an exact country name, then each abbreviation-expanded variant.

    Args:
        country (str): Country for which to get ISO3 code
        use_live (bool): Try to get latest data from web rather than file in package. Defaults to True.
        exception (Optional[ExceptionUpperBound]): An exception to raise if country not found. Defaults to None.

    Returns:
        Optional[str]: ISO3 country code or None
    """
    data = cls.countriesdata(use_live=use_live)
    candidate = country.upper()
    if len(candidate) == 3 and candidate in data['countries']:
        return candidate
    if len(candidate) == 2:
        iso3 = data['iso2iso3'].get(candidate)
        if iso3 is not None:
            return iso3
    iso3 = data['countrynames2iso3'].get(candidate)
    if iso3 is not None:
        return iso3
    for variant in cls.expand_countryname_abbrevs(candidate):
        iso3 = data['countrynames2iso3'].get(variant)
        if iso3 is not None:
            return iso3
    if exception is not None:
        raise exception
    return None
| 703,725
|
Get countries (ISO3 codes) in region
Args:
region (Union[int,str]): Three digit UNStats M49 region code or region name
use_live (bool): Try to get use latest data from web rather than file in package. Defaults to True.
exception (Optional[ExceptionUpperBound]): An exception to raise if region not found. Defaults to None.
Returns:
List(str): Sorted list of ISO3 country names
|
def get_countries_in_region(cls, region, use_live=True, exception=None):
    # type: (Union[int,str], bool, Optional[ExceptionUpperBound]) -> List[str]
    """Get countries (ISO3 codes) in region.

    Args:
        region (Union[int,str]): Three digit UNStats M49 region code or region name
        use_live (bool): Try to get use latest data from web rather than file in package. Defaults to True.
        exception (Optional[ExceptionUpperBound]): An exception to raise if region not found. Defaults to None.

    Returns:
        List[str]: Sorted list of ISO3 country codes (empty list if region not found and no exception supplied)
    """
    countriesdata = cls.countriesdata(use_live=use_live)
    if isinstance(region, int):
        regioncode = region
    else:
        regionupper = region.upper()
        regioncode = countriesdata['regionnames2codes'].get(regionupper)
    if regioncode is not None:
        # BUGFIX: use .get() rather than [] so an unknown numeric region
        # code follows the same not-found path (raise/empty list) as an
        # unknown region name instead of raising KeyError.
        countries = countriesdata['regioncodes2countries'].get(regioncode)
        if countries is not None:
            return countries
    if exception is not None:
        raise exception
    return list()
| 703,727
|
Add several variants to the profile_variant collection in the
database
Args:
profile_variants(list(models.ProfileVariant))
|
def add_profile_variants(self, profile_variants):
    """Bulk insert variants into the profile_variant collection.

    Args:
        profile_variants(list(models.ProfileVariant))

    Returns:
        pymongo InsertManyResult from the insert
    """
    return self.db.profile_variant.insert_many(profile_variants)
| 703,769
|
Construct a identity object
Args:
cluster_id(str): Ref to a cluster
variant_id (str): ID from variant
case_id (str): What case it belongs to
|
def __init__(self, cluster_id, variant_id, case_id):
    """Build an identity object linking a variant in a case to a cluster.

    Args:
        cluster_id(str): Ref to a cluster
        variant_id (str): ID from variant
        case_id (str): What case it belongs to
    """
    fields = dict(
        cluster_id=cluster_id,
        variant_id=variant_id,
        case_id=case_id,
    )
    super(Identity, self).__init__(**fields)
| 703,770
|
Returns the ZIP parameters that best fit a given data set.
Args:
data (array): 2d array of genes x cells belonging to a given cluster
Returns:
L (array): 1d array of means
M (array): 1d array of zero-inflation parameter
|
def zip_fit_params(data):
    """Fit zero-inflated Poisson parameters to each gene via method of moments.

    Args:
        data (array): 2d array of genes x cells belonging to a given cluster

    Returns:
        L (array): 1d array of Poisson means, one per gene
        M (array): 1d array of zero-inflation parameters, one per gene
    """
    mean = data.mean(1)
    var = data.var(1)
    # zero-inflation fraction estimate, constrained to [0, 1];
    # 0/0 for all-zero genes produces NaN which is mapped to 0
    M = (var - mean) / (mean**2 + var - mean)
    M = np.clip(np.where(np.isnan(M), 0.0, M), 0.0, 1.0)
    # corresponding Poisson mean, constrained to be non-negative
    L = mean + var / mean - 1.0
    L = np.where(np.isnan(L), 0.0, L)
    L = np.maximum(L, 0.0)
    return L, M
| 703,771
|
Dimensionality reduction using MDS, while running diffusion on W.
Args:
means (array): genes x clusters
weights (array): clusters x cells
d (int): desired dimensionality
Returns:
W_reduced (array): array of shape (d, cells)
|
def diffusion_mds(means, weights, d, diffusion_rounds=10):
    """Dimensionality reduction using MDS after sharpening W by diffusion.

    Each round squares the weights elementwise and renormalizes columns,
    concentrating each cell's weight on its dominant cluster.

    Args:
        means (array): genes x clusters
        weights (array): clusters x cells
        d (int): desired dimensionality
        diffusion_rounds (int): number of squaring/renormalization passes

    Returns:
        W_reduced (array): array of shape (d, cells)
    """
    for _ in range(diffusion_rounds):
        weights = weights * weights
        weights = weights / weights.sum(0)
    reduced = dim_reduce(means, weights, d)
    # dim_reduce's output orientation is presumably either (d, clusters) or
    # its transpose — TODO confirm against dim_reduce
    if reduced.shape[0] == 2:
        return reduced.dot(weights)
    return reduced.T.dot(weights)
| 703,795
|
Dimensionality reduction using MDS.
Args:
means (array): genes x clusters
weights (array): clusters x cells
d (int): desired dimensionality
Returns:
W_reduced (array): array of shape (d, cells)
|
def mds(means, weights, d):
    """Dimensionality reduction using MDS.

    Args:
        means (array): genes x clusters
        weights (array): clusters x cells
        d (int): desired dimensionality

    Returns:
        W_reduced (array): array of shape (d, cells)
    """
    reduced = dim_reduce(means, weights, d)
    # dim_reduce's output may come back in either orientation;
    # project the cells accordingly
    return reduced.dot(weights) if reduced.shape[0] == 2 else reduced.T.dot(weights)
| 703,796
|
Does a MDS on the data directly, not on the means.
Args:
data (array): genes x cells
d (int): desired dimensionality
Returns:
X, a cells x d matrix
|
def dim_reduce_data(data, d):
    """Classical MDS on the cells of a data set, using Poisson distances.

    Works directly on the data, not on cluster means.

    Args:
        data (array): genes x cells
        d (int): desired dimensionality

    Returns:
        X, a cells x d matrix
    """
    genes, cells = data.shape
    distances = np.zeros((cells, cells))
    for i in range(cells):
        for j in range(cells):
            distances[i, j] = poisson_dist(data[:, i], data[:, j])
    # do MDS on the distance matrix (procedure from Wikipedia):
    # double-center the squared distances
    proximity = distances**2
    J = np.eye(cells) - 1./cells
    B = -0.5*np.dot(J, np.dot(proximity, J))
    # B should be symmetric, so we can use eigh; eigh returns
    # eigenvalues in ascending order
    e_val, e_vec = np.linalg.eigh(B)
    # take the d largest eigenvalues, largest first; clamp negatives
    # (numerical error / non-Euclidean distances) so sqrt stays real
    lam = np.maximum(e_val[-d:][::-1], 0.0)
    # BUGFIX: the previous code reversed ROWS of the eigenvector matrix
    # (e_vec[:,-d:][::-1]), which permuted which cell received which
    # coordinates; reverse the COLUMNS so coordinate j matches eigenvalue j
    E = e_vec[:, -d:][:, ::-1]
    # scale each eigenvector column by sqrt of its eigenvalue
    X = E * np.sqrt(lam)
    return X
| 703,797
|
Get a case from the database
Search the cases with the case id
Args:
case (dict): A case dictionary
Returns:
mongo_case (dict): A mongo case dictionary
|
def case(self, case):
    """Look up a case in the database by its case_id.

    Args:
        case (dict): A case dictionary

    Returns:
        mongo_case (dict): A mongo case dictionary, or None when absent
    """
    LOG.debug("Getting case {0} from database".format(case.get('case_id')))
    return self.db.case.find_one({'case_id': case['case_id']})
| 703,798
|
Return the number of cases in the database
Args:
snv_cases(bool): If only snv cases should be searched
sv_cases(bool): If only sv cases should be searched
Returns:
cases (Iterable(Case)): A iterable with mongo cases
|
def nr_cases(self, snv_cases=None, sv_cases=None):
    """Return the number of cases in the database.

    Args:
        snv_cases(bool): If only cases with a SNV VCF should be counted
        sv_cases(bool): If only cases with a SV VCF should be counted

    Returns:
        int: the number of matching cases
    """
    query = {}
    if snv_cases:
        query = {'vcf_path': {'$exists': True}}
    if sv_cases:
        query = {'vcf_sv_path': {'$exists': True}}
    if snv_cases and sv_cases:
        # BUGFIX: this branch previously set query = None, but pymongo's
        # count_documents() requires a dict filter and raises on None.
        # An empty filter (count every case) appears to be the intent —
        # TODO confirm whether an $or over both paths was meant instead.
        query = {}
    return self.db.case.count_documents(query)
| 703,799
|
Add a case to the case collection
If the case exists and update is False raise error.
Args:
db (MongoClient): A connection to the mongodb
case (dict): A case dictionary
update(bool): If existing case should be updated
Returns:
    case(dict): The case that was added or used as replacement
|
def add_case(self, case, update=False):
    """Add a case to the case collection.

    An existing case is replaced when update is True; otherwise a
    CaseError is raised.

    Args:
        case (dict): A case dictionary
        update(bool): If existing case should be updated

    Returns:
        case(dict): The case that was added

    Raises:
        CaseError: If the case exists and update is False
    """
    existing_case = self.case(case)
    if existing_case:
        if not update:
            raise CaseError("Case {} already exists".format(case['case_id']))
        self.db.case.find_one_and_replace(
            {'case_id': case['case_id']},
            case,
        )
    else:
        self.db.case.insert_one(case)
    return case
| 703,800
|
Delete case from the database
Delete a case from the database
Args:
case (dict): A case dictionary
|
def delete_case(self, case):
    """Delete a case from the database.

    Args:
        case (dict): A case dictionary

    Raises:
        CaseError: If the case is not found in the database
    """
    existing = self.case(case)
    if not existing:
        raise CaseError(
            "Tried to delete case {0} but could not find case".format(case.get('case_id'))
        )
    LOG.info("Removing case {0} from database".format(existing.get('case_id')))
    self.db.case.delete_one({'_id': existing['_id']})
| 703,801
|
Returns a ProfileVariant object
Args:
variant (cyvcf2.Variant)
Returns:
variant (models.ProfileVariant)
|
def build_profile_variant(variant):
    """Build a ProfileVariant model from a cyvcf2 variant.

    Args:
        variant (cyvcf2.Variant)

    Returns:
        variant (models.ProfileVariant)
    """
    chrom = variant.CHROM
    # strip a leading 'chr' prefix from the chromosome name
    if chrom.startswith(('chr', 'CHR', 'Chr')):
        chrom = chrom[3:]
    return ProfileVariant(
        variant_id=get_variant_id(variant),
        chrom=chrom,
        pos=int(variant.POS),
        ref=variant.REF,
        alt=variant.ALT[0],
        maf=get_maf(variant),
        id_column=variant.ID,
    )
| 703,814
|
Add loqus specific information to a VCF header
Args:
vcf_obj(cyvcf2.VCF)
|
def add_headers(vcf_obj, nr_cases=None, sv=False):
    """Add loqus specific INFO headers to a VCF header.

    Obs is added for all files; Hom and Hem are added only for SNV files
    (sv=False). A ##NrCases meta line is added when nr_cases is given.

    Args:
        vcf_obj(cyvcf2.VCF)
        nr_cases(int): number of cases in the database
        sv(bool): whether the VCF holds structural variants
    """
    info_fields = [("Obs", "The number of observations for the variant")]
    if not sv:
        info_fields.append(("Hom", "The number of observed homozygotes"))
        info_fields.append(("Hem", "The number of observed hemizygotes"))
    for field_id, description in info_fields:
        vcf_obj.add_info_to_header({
            'ID': field_id,
            'Number': '1',
            'Type': 'Integer',
            'Description': description,
        })
    if nr_cases:
        vcf_obj.add_to_header("##NrCases={}".format(nr_cases))
    # head.add_version_tracking("loqusdb", version, datetime.now().strftime("%Y-%m-%d %H:%M"))
| 703,815
|
Return cyvcf2 VCF object
Args:
file_path(str)
Returns:
vcf_obj(cyvcf2.VCF)
|
def get_file_handle(file_path):
    """Open a VCF file and return a cyvcf2 VCF object.

    Args:
        file_path(str)

    Returns:
        vcf_obj(cyvcf2.VCF)

    Raises:
        IOError: If the file does not exist or lacks a valid VCF ending
    """
    LOG.debug("Check if file end is correct")
    if not os.path.exists(file_path):
        raise IOError("No such file:{0}".format(file_path))
    if os.path.splitext(file_path)[-1] not in VALID_ENDINGS:
        raise IOError("Not a valid vcf file name: {}".format(file_path))
    return VCF(file_path)
| 703,816
|
Check if there are any problems with the vcf file
Args:
vcf_path(str)
expected_type(str): 'sv' or 'snv'
Returns:
vcf_info(dict): dict like
{
'nr_variants':<INT>,
'variant_type': <STR> in ['snv', 'sv'],
'individuals': <LIST> individual positions in file
}
|
def check_vcf(vcf_path, expected_type='snv'):
    """Check if there are any problems with the vcf file.

    Verifies the file contains only one kind of variant (matching
    expected_type), that SNVs are unique, and that positions are sorted
    within each chromosome.

    Args:
        vcf_path(str)
        expected_type(str): 'sv' or 'snv'

    Returns:
        vcf_info(dict): dict like
            {
                'nr_variants':<INT>,
                'variant_type': <STR> in ['snv', 'sv'],
                'individuals': <LIST> individual positions in file
            }

    Raises:
        VcfError: on mixed variant types, duplicated SNVs, an unsorted
            file, or a variant type other than expected_type
    """
    LOG.info("Check if vcf is on correct format...")
    vcf = VCF(vcf_path)
    individuals = vcf.samples
    variant_type = None
    previous_pos = None
    previous_chrom = None
    # NOTE(review): 'posititon' is a typo for 'position'; renaming would
    # change code so it is left as-is here
    posititon_variants = set()
    # stays 0 when the vcf has no variants (enumerate never runs)
    nr_variants = 0
    for nr_variants,variant in enumerate(vcf,1):
        # Check the type of variant
        current_type = 'sv' if variant.var_type == 'sv' else 'snv'
        if not variant_type:
            variant_type = current_type
        # Vcf can not include both snvs and svs
        if variant_type != current_type:
            raise VcfError("Vcf includes a mix of snvs and svs")
        current_chrom = variant.CHROM
        current_pos = variant.POS
        # We start with a simple id that can be used by SV:s
        variant_id = "{0}_{1}".format(current_chrom, current_pos)
        # For SNVs we can create a proper variant id with chrom_pos_ref_alt
        if variant_type == 'snv':
            variant_id = get_variant_id(variant)
        # Initiate variables (first variant seen in the file)
        if not previous_chrom:
            previous_chrom = current_chrom
            previous_pos = current_pos
            posititon_variants = set([variant_id])
            continue
        # Update variables if new chromosome
        if current_chrom != previous_chrom:
            previous_chrom = current_chrom
            previous_pos = current_pos
            posititon_variants = set([variant_id])
            continue
        # uniqueness/sortedness is only enforced for SNVs; SV positions
        # are not checked here
        if variant_type == 'snv':
            # Check if variant is unique
            if current_pos == previous_pos:
                if variant_id in posititon_variants:
                    raise VcfError("Variant {0} occurs several times"\
                                   " in vcf".format(variant_id))
                else:
                    posititon_variants.add(variant_id)
            # Check if vcf is sorted
            else:
                if not current_pos >= previous_pos:
                    raise VcfError("Vcf if not sorted in a correct way")
                previous_pos = current_pos
                # Reset posititon_variants since we are on a new position
                posititon_variants = set([variant_id])
    if variant_type != expected_type:
        raise VcfError("VCF file does not only include {0}s, please check vcf {1}".format(
            expected_type.upper(), vcf_path))
    LOG.info("Vcf file %s looks fine", vcf_path)
    LOG.info("Nr of variants in vcf: {0}".format(nr_variants))
    LOG.info("Type of variants in vcf: {0}".format(variant_type))
    vcf_info = {
        'nr_variants': nr_variants,
        'variant_type': variant_type,
        'individuals': individuals,
    }
    return vcf_info
| 703,817
|
Creates an objective function and its derivative for W, given M and X (data)
Args:
m (array): genes x clusters
X (array): genes x cells
Z (array): zero-inflation parameters - genes x 1
|
def _create_w_objective(m, X, Z=None):
    """Create the objective function and its derivative for W, given M and X.

    Args:
        m (array): genes x clusters
        X (array): genes x cells
        Z (array): zero-inflation parameters - genes x 1 (not used here)

    Returns:
        objective: callable taking a flattened W of length clusters*cells
            and returning (value, flattened gradient), both scaled by 1/genes
    """
    genes, clusters = m.shape
    cells = X.shape[1]
    # mask of observed (nonzero) entries of X
    nonzeros = (X != 0)
    def objective(w):
        # the optimizer supplies a flat vector; view as clusters x cells
        W = w.reshape((clusters, cells))
        d = m.dot(W) + eps
        # gradient of sum(d - X*log(d)) wrt each w_{ij}, restricted to
        # the observed entries
        grad = m.T.dot(nonzeros) - m.T.dot(X / d)
        value = np.sum(nonzeros * (d - X * np.log(d))) / genes
        return value, grad.flatten() / genes
    return objective
| 703,839
|
Generates kmeans++ initial centers.
Args:
data (array): A 2d array- genes x cells
k (int): Number of clusters
centers (array, optional): if provided, these are one or more known cluster centers. 2d array of genes x number of centers (<=k).
Returns:
centers - a genes x k array of cluster means.
assignments - a cells x 1 array of cluster assignments
|
def kmeans_pp(data, k, centers=None):
    """Generate kmeans++ initial centers.

    Args:
        data (array): A 2d array - genes x cells (dense or scipy.sparse)
        k (int): Number of clusters
        centers (array, optional): if provided, these are one or more known
            cluster centers. 2d array of genes x number of centers (<=k).

    Returns:
        centers - a genes x k array of cluster means.
        assignments - a cells x 1 array of cluster assignments
    """
    # TODO: what if there is missing data for a given gene?
    # missing data could be if all the entires are -1.
    genes, cells = data.shape
    # column slicing below is cheapest in CSC format
    if sparse.issparse(data) and not sparse.isspmatrix_csc(data):
        data = sparse.csc_matrix(data)
    num_known_centers = 0
    if centers is None:
        centers = np.zeros((genes, k))
    else:
        # keep the provided centers, pad the rest with zeros to fill in
        num_known_centers = centers.shape[1]
        centers = np.concatenate((centers, np.zeros((genes, k-num_known_centers))), 1)
    distances = np.zeros((cells, k))
    distances[:] = np.inf
    if num_known_centers == 0:
        # no known centers: seed the first center with a random cell
        init = np.random.randint(0, cells)
        if sparse.issparse(data):
            centers[:,0] = data[:, init].toarray().flatten()
        else:
            centers[:,0] = data[:, init]
        num_known_centers+=1
    # cells still eligible to be picked as a center (no replacement)
    available_cells = list(range(cells))
    for c in range(num_known_centers, k):
        c2 = c-1
        # use different formulation for distance... if sparse, use lls
        # if not sparse, use poisson_dist
        if sparse.issparse(data):
            # convert log-likelihoods to a normalized pseudo-distance:
            # higher likelihood -> smaller distance
            lls = poisson_ll(data, centers[:,c2:c2+1]).flatten()
            distances[:,c2] = 1 + lls.max() - lls
            distances[:,c2] /= distances[:,c2].max()
        else:
            for cell in range(cells):
                distances[cell, c2] = poisson_dist(data[:,cell], centers[:,c2])
        # choose a new data point as center... probability proportional
        # to distance^2
        min_distances = np.min(distances, 1)
        min_distances = min_distances**2
        min_distances = min_distances[available_cells]
        # should be sampling without replacement
        min_dist = np.random.choice(available_cells,
                p=min_distances/min_distances.sum())
        available_cells.pop(available_cells.index(min_dist))
        if sparse.issparse(data):
            centers[:,c] = data[:, min_dist].toarray().flatten()
        else:
            centers[:,c] = data[:, min_dist]
    # assign every cell to its most likely center
    lls = poisson_ll(data, centers)
    new_assignments = np.argmax(lls, 1)
    # avoid exact zeros in the returned centers (presumably so later
    # log-likelihood computations stay finite — TODO confirm)
    centers[centers==0.0] = eps
    return centers, new_assignments
| 703,841
|
r"""Diagonal of :math:`\mathrm A\mathrm B^\intercal`.
If ``A`` is :math:`n\times p` and ``B`` is :math:`p\times n`, it is done in
:math:`O(pn)`.
Args:
A (array_like): Left matrix.
B (array_like): Right matrix.
out (:class:`numpy.ndarray`, optional): copy result to.
Returns:
:class:`numpy.ndarray`: Resulting diagonal.
|
def dotd(A, B, out=None):
    r"""Diagonal of :math:`\mathrm A\mathrm B^\intercal`.

    If ``A`` is :math:`n\times p` and ``B`` is :math:`p\times n`, it is done in
    :math:`O(pn)`.

    Args:
        A (array_like): Left matrix.
        B (array_like): Right matrix.
        out (:class:`numpy.ndarray`, optional): copy result to.

    Returns:
        :class:`numpy.ndarray`: Resulting diagonal.
    """
    # BUGFIX: a stray bare `r` expression (leftover of a stripped raw
    # docstring) used to sit here and raised NameError at call time;
    # it has been folded back into a proper docstring.
    A = asarray(A, float)
    B = asarray(B, float)
    if A.ndim == 1 and B.ndim == 1:
        # vector-vector case degenerates to an ordinary dot product
        if out is None:
            return dot(A, B)
        return dot(A, B, out)
    if out is None:
        out = empty((A.shape[0],), float)
    # computes sum_j A[i,j]*B[j,i] for each i without forming A @ B
    return einsum("ij,ji->i", A, B, out=out)
| 703,846
|
r"""Dot product of a matrix and a diagonal one.
Args:
L (array_like): Left matrix.
R (array_like): Right matrix.
out (:class:`numpy.ndarray`, optional): copy result to.
Returns:
:class:`numpy.ndarray`: Resulting matrix.
|
def ddot(L, R, left=None, out=None):
    r"""Dot product of a matrix and a diagonal one.

    The 1d operand is interpreted as the diagonal of a diagonal matrix.

    Args:
        L (array_like): Left matrix.
        R (array_like): Right matrix.
        left (bool, optional): whether the diagonal operand is ``L``.
            Inferred from the array dimensions when None.
        out (:class:`numpy.ndarray`, optional): copy result to.

    Returns:
        :class:`numpy.ndarray`: Resulting matrix.

    Raises:
        ValueError: if ``left`` is None and the operands are not one
            1d array and one 2d array.
    """
    # BUGFIX: a stray bare `r` expression (leftover of a stripped raw
    # docstring) used to sit here and raised NameError at call time;
    # it has been folded back into a proper docstring.
    L = asarray(L, float)
    R = asarray(R, float)
    if left is None:
        ok = min(L.ndim, R.ndim) == 1 and max(L.ndim, R.ndim) == 2
        if not ok:
            msg = "Wrong array layout. One array should have"
            msg += " ndim=1 and the other one ndim=2."
            raise ValueError(msg)
        left = L.ndim == 1
    if left:
        if out is None:
            out = copy(R)
        # reshape the diagonal to a column so broadcasting scales rows of R
        L = L.reshape(list(L.shape) + [1] * (R.ndim - 1))
        return multiply(L, R, out=out)
    else:
        if out is None:
            out = copy(L)
        # broadcasting scales the columns of L by the diagonal entries of R
        return multiply(L, R, out=out)
| 703,847
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.