_id stringlengths 2 7 | title stringlengths 1 88 | partition stringclasses 3
values | text stringlengths 75 19.8k | language stringclasses 1
value | meta_information dict |
|---|---|---|---|---|---|
def build_clnsig(clnsig_info):
    """Build a clinical-significance entry from raw ClinVar info.

    Args:
        clnsig_info(dict): must contain 'value'; 'accession' and
            'revstat' are optional and default to None.

    Returns:
        dict: keys 'value', 'accession' and 'revstat'
    """
    return {
        'value': clnsig_info['value'],
        'accession': clnsig_info.get('accession'),
        'revstat': clnsig_info.get('revstat'),
    }
def load_hgnc_bulk(self, gene_objs):
    """Load a bulk of hgnc gene objects.

    Args:
        gene_objs(iterable(scout.models.hgnc_gene))

    Returns:
        result (pymongo.results.InsertManyResult)

    Raises:
        IntegrityError: if there are any write concerns
    """
    LOG.info("Loading gene bulk with length %s", len(gene_objs))
    try:
        # Translate pymongo write failures into the adapter's own error type
        return self.hgnc_collection.insert_many(gene_objs)
    except (DuplicateKeyError, BulkWriteError) as err:
        raise IntegrityError(err)
def load_transcript_bulk(self, transcript_objs):
    """Load a bulk of transcript objects to the database.

    Args:
        transcript_objs(iterable(scout.models.hgnc_transcript))

    Returns:
        result (pymongo.results.InsertManyResult)

    Raises:
        IntegrityError: on duplicate keys or other bulk write errors
    """
    LOG.info("Loading transcript bulk")
    try:
        return self.transcript_collection.insert_many(transcript_objs)
    except (DuplicateKeyError, BulkWriteError) as err:
        raise IntegrityError(err)
def load_exon_bulk(self, exon_objs):
    """Load a bulk of exon objects to the database.

    Args:
        exon_objs(iterable(scout.models.hgnc_exon))

    Returns:
        result (pymongo.results.InsertManyResult)

    Raises:
        IntegrityError: on duplicate keys or other bulk write errors
    """
    try:
        # BUG FIX: the original inserted `transcript_objs`, a name that is
        # not defined in this scope, so every call raised NameError.
        result = self.exon_collection.insert_many(exon_objs)
    except (DuplicateKeyError, BulkWriteError) as err:
        raise IntegrityError(err)
    return result
def hgnc_gene(self, hgnc_identifier, build='37'):
    """Fetch a hgnc gene.

    Args:
        hgnc_identifier(int or str): hgnc_id (int-like) or hgnc_symbol
        build(str): genome build; anything other than '37'/'38' falls back to '37'

    Returns:
        gene_obj(HgncGene): the gene with 'transcripts' attached, or None
    """
    # Idiom fix: `build not in` instead of `not build in`
    if build not in ['37', '38']:
        build = '37'
    query = {'build': build}
    try:
        # If the identifier is an integer we search for hgnc_id
        hgnc_identifier = int(hgnc_identifier)
        query['hgnc_id'] = hgnc_identifier
    except ValueError:
        # Else we search for a hgnc_symbol
        query['hgnc_symbol'] = hgnc_identifier
    LOG.debug("Fetching gene %s" % hgnc_identifier)
    gene_obj = self.hgnc_collection.find_one(query)
    if not gene_obj:
        return None
    # Attach all transcripts. Iterating the cursor directly avoids the
    # extra count() round trip of the original implementation (an empty
    # cursor simply yields an empty list).
    gene_obj['transcripts'] = list(
        self.transcripts(build=build, hgnc_id=gene_obj['hgnc_id']))
    return gene_obj
def hgnc_id(self, hgnc_symbol, build='37'):
    """Query the genes with a hgnc symbol and return the hgnc id.

    Args:
        hgnc_symbol(str)
        build(str)

    Returns:
        hgnc_id(int): or None if the symbol is unknown in this build
    """
    query = {'hgnc_symbol': hgnc_symbol, 'build': build}
    projection = {'hgnc_id': 1, '_id': 0}
    # find_one replaces the original find() + deprecated cursor.count() +
    # index-access dance: one round trip, same result (doc or None).
    res = self.hgnc_collection.find_one(query, projection)
    return res['hgnc_id'] if res else None
def hgnc_genes(self, hgnc_symbol, build='37', search=False):
    """Fetch all hgnc genes that match a hgnc symbol.

    Checks both hgnc_symbol and aliases.

    Args:
        hgnc_symbol(str)
        build(str): The build in which to search
        search(bool): if partial searching should be used

    Returns:
        pymongo.Cursor: matching gene documents
    """
    LOG.debug("Fetching genes with symbol %s" % hgnc_symbol)
    if not search:
        # Plain lookup: exact alias match only
        return self.hgnc_collection.find({'build': build, 'aliases': hgnc_symbol})
    # Searching: first try an exact match on alias or (numeric) hgnc_id
    exact_hits = self.hgnc_collection.find({
        '$or': [
            {'aliases': hgnc_symbol},
            {'hgnc_id': int(hgnc_symbol) if hgnc_symbol.isdigit() else None},
        ],
        'build': build
    })
    if exact_hits.count() != 0:
        return exact_hits
    # No exact hit: fall back to a case-insensitive partial alias match
    return self.hgnc_collection.find({
        'aliases': {'$regex': hgnc_symbol, '$options': 'i'},
        'build': build
    })
def all_genes(self, build='37'):
    """Fetch all hgnc genes for a build, sorted by chromosome.

    Args:
        build(str)

    Returns:
        pymongo.Cursor
    """
    LOG.info("Fetching all genes")
    cursor = self.hgnc_collection.find({'build': build})
    return cursor.sort('chromosome', 1)
def nr_genes(self, build=None):
    """Return the number of hgnc genes in collection.

    If build is used, return the number of genes of a certain build.

    Args:
        build(str): '37'/'38', or None to count genes from all builds

    Returns:
        int: the number of matching gene documents
    """
    if build:
        LOG.info("Fetching all genes from build %s", build)
        query = {'build': build}
    else:
        LOG.info("Fetching all genes")
        # BUG FIX: the original always queried {'build': None} here, which
        # matches only documents whose 'build' is missing or null -- i.e.
        # it returned 0 instead of the total count the log line promises.
        query = {}
    return self.hgnc_collection.find(query).count()
def drop_genes(self, build=None):
    """Delete the genes collection.

    With a build, only that build's genes are removed; otherwise the
    whole collection is dropped.
    """
    if not build:
        LOG.info("Dropping the hgnc_gene collection")
        self.hgnc_collection.drop()
    else:
        LOG.info("Dropping the hgnc_gene collection, build %s", build)
        self.hgnc_collection.delete_many({'build': build})
def drop_transcripts(self, build=None):
    """Delete the transcripts collection.

    With a build, only that build's transcripts are removed; otherwise
    the whole collection is dropped.
    """
    if not build:
        LOG.info("Dropping the transcripts collection")
        self.transcript_collection.drop()
    else:
        LOG.info("Dropping the transcripts collection, build %s", build)
        self.transcript_collection.delete_many({'build': build})
def drop_exons(self, build=None):
    """Delete the exons collection.

    With a build, only that build's exons are removed; otherwise the
    whole collection is dropped.
    """
    if not build:
        LOG.info("Dropping the exons collection")
        self.exon_collection.drop()
    else:
        LOG.info("Dropping the exons collection, build %s", build)
        self.exon_collection.delete_many({'build': build})
def ensembl_transcripts(self, build='37'):
    """Return a dictionary with ensembl ids as keys and transcripts as value.

    Args:
        build(str)

    Returns:
        ensembl_transcripts(dict): {<enst_id>: transcript_obj, ...}
    """
    LOG.info("Fetching all transcripts")
    # One entry per transcript, keyed by its ensembl transcript id
    ensembl_transcripts = {
        tx_obj['transcript_id']: tx_obj
        for tx_obj in self.transcript_collection.find({'build': build})
    }
    LOG.info("Ensembl transcripts fetched")
    return ensembl_transcripts
def hgncsymbol_to_gene(self, build='37', genes=None):
    """Return a dictionary with hgnc_symbol as key and gene_obj as value.

    The result will have ONE entry for each gene in the database
    (for a specific build).

    Args:
        build(str)
        genes(iterable(scout.models.HgncGene)): optional pre-fetched genes

    Returns:
        hgnc_dict(dict): {<hgnc_symbol(str)>: <gene(dict)>}
    """
    LOG.info("Building hgncsymbol_to_gene")
    if genes is None:
        # Explicit None check: the original used `if not genes`, which
        # would also trigger a database fetch when a deliberately empty
        # iterable was passed in.
        genes = self.hgnc_collection.find({'build': build})
    hgnc_dict = {gene_obj['hgnc_symbol']: gene_obj for gene_obj in genes}
    LOG.info("All genes fetched")
    return hgnc_dict
def gene_by_alias(self, symbol, build='37'):
    """Return an iterable with hgnc_genes.

    If the gene symbol is listed as primary, the iterable will only have
    one result. If not, the iterable will include all hgnc genes that
    have the symbol as an alias.

    Args:
        symbol(str)
        build(str)

    Returns:
        res(pymongo.Cursor(dict))
    """
    primary_hits = self.hgnc_collection.find(
        {'hgnc_symbol': symbol, 'build': build})
    if primary_hits.count() != 0:
        return primary_hits
    # No gene uses the symbol as primary -- fall back to alias lookup
    return self.hgnc_collection.find({'aliases': symbol, 'build': build})
def genes_by_alias(self, build='37', genes=None):
    """Return a dictionary with hgnc symbols as keys and a list of hgnc ids as value.

    If a gene symbol is listed as primary, 'true' holds that hgnc_id.
    Otherwise the symbol cannot be resolved unambiguously, 'true' stays
    None and 'ids' collects every candidate hgnc_id.

    Args:
        build(str)
        genes(iterable(scout.models.HgncGene)):

    Returns:
        alias_genes(dict): {<hgnc_alias>: {'true': <hgnc_id>, 'ids': {<hgnc_id_1>, <hgnc_id_2>, ...}}}
    """
    LOG.info("Fetching all genes by alias")
    # Collect one entry for each alias symbol that exists
    alias_genes = {}
    if not genes:
        genes = self.hgnc_collection.find({'build': build})
    for gene in genes:
        hgnc_id = gene['hgnc_id']
        # The true symbol given by hgnc
        hgnc_symbol = gene['hgnc_symbol']
        for alias in gene['aliases']:
            # The id is only known with certainty when the alias IS the
            # primary symbol
            true_id = hgnc_id if alias == hgnc_symbol else None
            if alias in alias_genes:
                alias_genes[alias]['ids'].add(hgnc_id)
                if true_id:
                    alias_genes[alias]['true'] = true_id
            else:
                # BUG FIX: the original stored 'true': hgnc_id here
                # unconditionally, marking every first-seen alias as
                # resolved and making the ambiguity handling downstream
                # (add_hgnc_id) unreachable. Store the computed true_id.
                alias_genes[alias] = {
                    'true': true_id,
                    'ids': {hgnc_id},
                }
    return alias_genes
def ensembl_genes(self, build='37'):
    """Return a dictionary with ensembl ids as keys and gene objects as value.

    Args:
        build(str)

    Returns:
        genes(dict): {<ensg_id>: gene_obj, ...}
    """
    genes = {}
    LOG.info("Fetching all genes")
    for gene_obj in self.hgnc_collection.find({'build': build}):
        # The original also read gene_obj['hgnc_id'] into an unused local;
        # dropped here.
        genes[gene_obj['ensembl_id']] = gene_obj
    LOG.info("Ensembl genes fetched")
    return genes
def to_hgnc(self, hgnc_alias, build='37'):
    """Check if a hgnc symbol is an alias.

    Return the correct hgnc symbol, if not existing return None.

    Args:
        hgnc_alias(str)
        build(str)

    Returns:
        hgnc_symbol(str): or None when no gene matches
    """
    # The original guarded the loop with `if result:`, but a pymongo
    # cursor is always truthy, so the guard never did anything. Returning
    # from the first iteration (or falling through to None) is equivalent
    # and also works for plain iterables.
    for gene in self.hgnc_genes(hgnc_symbol=hgnc_alias, build=build):
        return gene['hgnc_symbol']
    return None
def add_hgnc_id(self, genes):
    """Add the correct hgnc id to a set of genes with hgnc symbols.

    Mutates each gene dict in place, setting 'hgnc_id'.

    Args:
        genes(list(dict)): A set of genes with hgnc symbols only
    """
    genes_by_alias = self.genes_by_alias()
    for gene in genes:
        symbol = gene['hgnc_symbol']
        id_info = genes_by_alias.get(symbol)
        if not id_info:
            LOG.warning("Gene %s does not exist in scout", symbol)
            continue
        gene['hgnc_id'] = id_info['true']
        if id_info['true']:
            continue
        # Symbol is ambiguous: expose every candidate id instead
        if len(id_info['ids']) > 1:
            LOG.warning("Gene %s has ambiguous value, please choose one hgnc id in result", symbol)
        gene['hgnc_id'] = ','.join(str(hgnc_id) for hgnc_id in id_info['ids'])
def get_coding_intervals(self, build='37', genes=None):
    """Return a dictionary with chromosomes as keys and interval trees as values.

    Each interval represents a coding region of overlapping genes.

    Args:
        build(str): The genome build
        genes(iterable(scout.models.HgncGene)):

    Returns:
        intervals(dict): chromosome -> intervaltree.IntervalTree of merged regions
    """
    if not genes:
        genes = self.all_genes(build=build)
    LOG.info("Building interval trees...")
    intervals = {}
    for i, hgnc_obj in enumerate(genes):
        chrom = hgnc_obj['chromosome']
        # Pad each gene by 5 kb on both sides (clamped to position 1)
        start = max(hgnc_obj['start'] - 5000, 1)
        end = hgnc_obj['end'] + 5000
        tree = intervals.get(chrom)
        if tree is None:
            # First gene seen on this chromosome: start a fresh tree
            tree = intervaltree.IntervalTree()
            intervals[chrom] = tree
            tree.addi(start, end, i)
            continue
        overlapping = tree.search(start, end)
        if overlapping:
            # Grow the new interval to cover every overlap, then remove
            # the old intervals it absorbed
            for interval in overlapping:
                start = min(start, interval.begin)
                end = max(end, interval.end)
                tree.remove(interval)
        # Insert the (possibly merged) interval
        tree.addi(start, end, i)
    return intervals
def omim(context, api_key, institute):
    """
    Update the automate generated omim gene panel in the database.
    """
    LOG.info("Running scout update omim")
    adapter = context.obj['adapter']
    # Fall back to the api key stored in the CLI context config
    api_key = api_key or context.obj.get('omim_api_key')
    if not api_key:
        LOG.warning("Please provide a omim api key to load the omim gene panel")
        context.abort()
    institute_obj = adapter.institute(institute)
    if not institute_obj:
        LOG.info("Institute %s could not be found in database", institute)
        LOG.warning("Please specify an existing institute")
        context.abort()
    try:
        adapter.load_omim_panel(api_key, institute=institute)
    except Exception as err:
        # Abort the CLI run but surface the underlying error first
        LOG.error(err)
        context.abort()
def cases(institute_id):
    """Display a list of cases for an institute."""
    institute_obj = institute_and_case(store, institute_id)
    name_query = request.args.get('query')
    # Cap the number of displayed cases; default 100
    limit = int(request.args.get('limit')) if request.args.get('limit') else 100
    skip_assigned = request.args.get('skip_assigned')
    is_research = request.args.get('is_research')
    all_cases = store.cases(collaborator=institute_id, name_query=name_query,
                            skip_assigned=skip_assigned, is_research=is_research)
    data = controllers.cases(store, all_cases, limit)
    # Variants with pending sanger validation for the current user
    sanger_unevaluated = controllers.get_sanger_unevaluated(store, institute_id, current_user.email)
    if sanger_unevaluated:
        data['sanger_unevaluated'] = sanger_unevaluated
    return dict(institute=institute_obj, skip_assigned=skip_assigned,
                is_research=is_research, query=name_query, **data)
def case(institute_id, case_name):
    """Display one case."""
    institute_obj, case_obj = institute_and_case(store, institute_id, case_name)
    case_data = controllers.case(store, institute_obj, case_obj)
    return dict(institute=institute_obj, case=case_obj, **case_data)
def matchmaker_matches(institute_id, case_name):
    """Show all MatchMaker matches for a given case"""
    # Only users with the mme_submitter role may see MME patient matches
    user_obj = store.user(current_user.email)
    if 'mme_submitter' not in user_obj['roles']:
        flash('unauthorized request', 'warning')
        return redirect(request.referrer)
    # Connection parameters required for querying the MME server
    mme_base_url = current_app.config.get('MME_URL')
    mme_token = current_app.config.get('MME_TOKEN')
    if not (mme_base_url and mme_token):
        flash('An error occurred reading matchmaker connection parameters. Please check config file!', 'danger')
        return redirect(request.referrer)
    institute_obj, case_obj = institute_and_case(store, institute_id, case_name)
    data = controllers.mme_matches(case_obj, institute_obj, mme_base_url, mme_token)
    if data and data.get('server_errors'):
        flash('MatchMaker server returned error:{}'.format(data['server_errors']), 'danger')
        return redirect(request.referrer)
    if not data:
        # No match data: render the page with just the case/institute info
        data = {'institute': institute_obj, 'case': case_obj}
    return data
def matchmaker_match(institute_id, case_name, target):
    """Starts an internal match or a match against one or all MME external nodes"""
    institute_obj, case_obj = institute_and_case(store, institute_id, case_name)
    # check that only authorized users can run matches
    user_obj = store.user(current_user.email)
    if 'mme_submitter' not in user_obj['roles']:
        flash('unauthorized request', 'warning')
        return redirect(request.referrer)
    # Required params for sending an add request to MME:
    mme_base_url = current_app.config.get('MME_URL')
    mme_accepts = current_app.config.get('MME_ACCEPTS')
    mme_token = current_app.config.get('MME_TOKEN')
    nodes = current_app.mme_nodes
    if not mme_base_url or not mme_token or not mme_accepts:
        flash('An error occurred reading matchmaker connection parameters. Please check config file!', 'danger')
        return redirect(request.referrer)
    match_results = controllers.mme_match(case_obj, target, mme_base_url, mme_token, nodes, mme_accepts)
    ok_responses = 0
    # BUG FIX: the original loop reused the name `match_results` for the
    # loop variable (clobbering the results list) and evaluated
    # `match_results['status_code'] == 200` as a bare, discarded
    # expression, so EVERY response -- including failures -- was counted
    # as successful.
    for match_result in match_results:
        if match_result['status_code'] == 200:
            ok_responses += 1
    if ok_responses:
        flash("Match request sent. Look for eventual matches in 'Matches' page.", 'info')
    else:
        flash('An error occurred while sending match request.', 'danger')
    return redirect(request.referrer)
def matchmaker_delete(institute_id, case_name):
    """Remove a case from MatchMaker"""
    # Only users with the mme_submitter role may delete MME patients
    user_obj = store.user(current_user.email)
    if 'mme_submitter' not in user_obj['roles']:
        flash('unauthorized request', 'warning')
        return redirect(request.referrer)
    institute_obj, case_obj = institute_and_case(store, institute_id, case_name)
    # Connection parameters required for a delete request to MME
    mme_base_url = current_app.config.get('MME_URL')
    mme_token = current_app.config.get('MME_TOKEN')
    if not mme_base_url or not mme_token:
        flash('An error occurred reading matchmaker connection parameters. Please check config file!', 'danger')
        return redirect(request.referrer)
    delete_result = controllers.mme_delete(case_obj, mme_base_url, mme_token)
    n_deleted = 0
    category = 'warning'
    for resp in delete_result:
        if resp['status_code'] != 200:
            # Surface each failed deletion to the user
            flash(resp['message'], category)
        else:
            n_deleted += 1
    if n_deleted:
        category = 'success'
        # update case by removing mme submission
        # and create events for patients deletion from MME
        user_obj = store.user(current_user.email)
        store.case_mme_delete(case_obj=case_obj, user_obj=user_obj)
    flash('Number of patients deleted from Matchmaker: {} out of {}'.format(n_deleted, len(delete_result)), category)
    return redirect(request.referrer)
def case_report(institute_id, case_name):
    """Visualize case report"""
    institute_obj, case_obj = institute_and_case(store, institute_id, case_name)
    report_data = controllers.case_report_content(store, institute_obj, case_obj)
    return dict(institute=institute_obj, case=case_obj, format='html', **report_data)
def pdf_case_report(institute_id, case_name):
    """Download a pdf report for a case"""
    institute_obj, case_obj = institute_and_case(store, institute_id, case_name)
    data = controllers.case_report_content(store, institute_obj, case_obj)
    # add coverage report on the bottom of this report
    if current_app.config.get('SQLALCHEMY_DATABASE_URI'):
        data['coverage_report'] = controllers.coverage_report_contents(store, institute_obj, case_obj, request.url_root)
    # workaround to be able to print the case pedigree to pdf
    if case_obj.get('madeline_info') is not None:
        with open(os.path.join(cases_bp.static_folder, 'madeline.svg'), 'w') as temp_madeline:
            temp_madeline.write(case_obj['madeline_info'])
    html_report = render_template('cases/case_report.html', institute=institute_obj, case=case_obj, format='pdf', **data)
    download_name = case_obj['display_name'] + '_' + datetime.datetime.now().strftime("%Y-%m-%d") + '_scout.pdf'
    return render_pdf(HTML(string=html_report), download_filename=download_name)
def case_diagnosis(institute_id, case_name):
    """Add or remove a diagnosis for a case."""
    institute_obj, case_obj = institute_and_case(store, institute_id, case_name)
    user_obj = store.user(current_user.email)
    link = url_for('.case', institute_id=institute_id, case_name=case_name)
    # The presence of the 'phenotype' form field decides the diagnosis level
    level = 'phenotype' if 'phenotype' in request.form else 'gene'
    omim_id = request.form['omim_id']
    remove = request.args.get('remove') == 'yes'
    store.diagnose(institute_obj, case_obj, user_obj, link, level=level,
                   omim_id=omim_id, remove=remove)
    return redirect(request.referrer)
def phenotypes(institute_id, case_name, phenotype_id=None):
    """Handle phenotypes."""
    institute_obj, case_obj = institute_and_case(store, institute_id, case_name)
    case_url = url_for('.case', institute_id=institute_id, case_name=case_name)
    is_group = request.args.get('is_group') == 'yes'
    user_obj = store.user(current_user.email)
    if phenotype_id:
        # DELETE a phenotype item/group from case
        store.remove_phenotype(institute_obj, case_obj, user_obj, case_url,
                               phenotype_id, is_group=is_group)
        return redirect(case_url)
    try:
        # add a new phenotype item/group to the case
        phenotype_term = request.form['hpo_term']
        if phenotype_term.startswith('HP:') or len(phenotype_term) == 7:
            # Strip any trailing description ("HP:0000001 | desc")
            hpo_term = phenotype_term.split(' | ', 1)[0]
            store.add_phenotype(institute_obj, case_obj, user_obj, case_url,
                                hpo_term=hpo_term, is_group=is_group)
        else:
            # assume omim id
            store.add_phenotype(institute_obj, case_obj, user_obj, case_url,
                                omim_term=phenotype_term)
    except ValueError:
        return abort(400, ("unable to add phenotype: {}".format(phenotype_term)))
    return redirect(case_url)
def phenotypes_actions(institute_id, case_name):
    """Perform actions on multiple phenotypes."""
    institute_obj, case_obj = institute_and_case(store, institute_id, case_name)
    case_url = url_for('.case', institute_id=institute_id, case_name=case_name)
    action = request.form['action']
    hpo_ids = request.form.getlist('hpo_id')
    user_obj = store.user(current_user.email)
    if action == 'DELETE':
        # Remove each selected phenotype from the case
        for hpo_id in hpo_ids:
            store.remove_phenotype(institute_obj, case_obj, user_obj, case_url, hpo_id)
    elif action == 'PHENOMIZER':
        # No explicit selection means: use every phenotype on the case
        if not hpo_ids:
            hpo_ids = [term['phenotype_id'] for term in case_obj.get('phenotype_terms', [])]
        username = current_app.config['PHENOMIZER_USERNAME']
        password = current_app.config['PHENOMIZER_PASSWORD']
        diseases = controllers.hpo_diseases(username, password, hpo_ids)
        return render_template('cases/diseases.html', diseases=diseases,
                               institute=institute_obj, case=case_obj)
    elif action == 'GENES':
        hgnc_symbols = set()
        for raw_symbols in request.form.getlist('genes'):
            # avoid empty lists
            if raw_symbols:
                # Each entry is '|'-separated; keep the symbol before the first space
                hgnc_symbols.update(raw_symbol.split(' ', 1)[0]
                                    for raw_symbol in raw_symbols.split('|'))
        store.update_dynamic_gene_list(case_obj, hgnc_symbols=hgnc_symbols)
    elif action == 'GENERATE':
        if not hpo_ids:
            hpo_ids = [term['phenotype_id'] for term in case_obj.get('phenotype_terms', [])]
        results = store.generate_hpo_gene_list(*hpo_ids)
        # determine how many HPO terms each gene must match
        hpo_count = int(request.form.get('min_match') or 1)
        hgnc_ids = [result[0] for result in results if result[1] >= hpo_count]
        store.update_dynamic_gene_list(case_obj, hgnc_ids=hgnc_ids, phenotype_ids=hpo_ids)
    return redirect(case_url)
def events(institute_id, case_name, event_id=None):
    """Handle events."""
    institute_obj, case_obj = institute_and_case(store, institute_id, case_name)
    link = request.form.get('link')
    content = request.form.get('content')
    variant_id = request.args.get('variant_id')
    user_obj = store.user(current_user.email)
    if event_id:
        # An event id means this request deletes the event
        store.delete_event(event_id)
    elif variant_id:
        # create a variant comment
        variant_obj = store.variant(variant_id)
        level = request.form.get('level', 'specific')
        store.comment(institute_obj, case_obj, user_obj, link,
                      variant=variant_obj, content=content, comment_level=level)
    else:
        # create a case comment
        store.comment(institute_obj, case_obj, user_obj, link, content=content)
    return redirect(request.referrer)
def status(institute_id, case_name):
    """Update status of a specific case."""
    institute_obj, case_obj = institute_and_case(store, institute_id, case_name)
    user_obj = store.user(current_user.email)
    # Default to the current status when the form carries none
    new_status = request.form.get('status', case_obj['status'])
    link = url_for('.case', institute_id=institute_id, case_name=case_name)
    if new_status == 'archive':
        # Archiving has its own store operation
        store.archive_case(institute_obj, case_obj, user_obj, new_status, link)
    else:
        store.update_status(institute_obj, case_obj, user_obj, new_status, link)
    return redirect(request.referrer)
def assign(institute_id, case_name, user_id=None):
    """Assign and unassign a user from a case."""
    institute_obj, case_obj = institute_and_case(store, institute_id, case_name)
    link = url_for('.case', institute_id=institute_id, case_name=case_name)
    # Without an explicit user id, act on the logged-in user
    user_obj = store.user(user_id) if user_id else store.user(current_user.email)
    if request.form.get('action') == 'DELETE':
        store.unassign(institute_obj, case_obj, user_obj, link)
    else:
        store.assign(institute_obj, case_obj, user_obj, link)
    return redirect(request.referrer)
def hpoterms():
    """Search for HPO terms."""
    query = request.args.get('query')
    if query is None:
        return abort(500)
    terms = sorted(store.hpo_terms(query=query), key=itemgetter('hpo_number'))
    # Return at most the first 7 terms, formatted for the typeahead widget
    json_terms = []
    for term in terms[:7]:
        json_terms.append({
            'name': '{} | {}'.format(term['_id'], term['description']),
            'id': term['_id'],
        })
    return jsonify(json_terms)
def mark_validation(institute_id, case_name, variant_id):
    """Mark a variant as sanger validated."""
    institute_obj, case_obj = institute_and_case(store, institute_id, case_name)
    user_obj = store.user(current_user.email)
    variant_obj = store.variant(variant_id)
    # Empty form value is normalized to None
    validate_type = request.form['type'] or None
    link = url_for('variants.variant', institute_id=institute_id,
                   case_name=case_name, variant_id=variant_id)
    store.validate(institute_obj, case_obj, user_obj, link, variant_obj, validate_type)
    return redirect(request.referrer or link)
def mark_causative(institute_id, case_name, variant_id):
    """Mark a variant as confirmed causative."""
    institute_obj, case_obj = institute_and_case(store, institute_id, case_name)
    variant_obj = store.variant(variant_id)
    user_obj = store.user(current_user.email)
    link = url_for('variants.variant', institute_id=institute_id,
                   case_name=case_name, variant_id=variant_id)
    action = request.form['action']
    if action == 'ADD':
        store.mark_causative(institute_obj, case_obj, user_obj, link, variant_obj)
    elif action == 'DELETE':
        store.unmark_causative(institute_obj, case_obj, user_obj, link, variant_obj)
    # send the user back to the case that was marked as solved
    return redirect(url_for('.case', institute_id=institute_id, case_name=case_name))
def delivery_report(institute_id, case_name):
    """Display delivery report."""
    institute_obj, case_obj = institute_and_case(store, institute_id, case_name)
    if case_obj.get('delivery_report') is None:
        return abort(404)
    date_str = request.args.get('date')
    if date_str:
        # A specific analysis date was requested: locate its report
        report_path = None
        wanted_date = parse_date(date_str)
        for analysis_data in case_obj['analyses']:
            if analysis_data['date'] == wanted_date:
                report_path = analysis_data['delivery_report']
        if report_path is None:
            return abort(404)
    else:
        report_path = case_obj['delivery_report']
    return send_from_directory(os.path.dirname(report_path),
                               os.path.basename(report_path))
def share(institute_id, case_name):
    """Share a case with a different institute."""
    institute_obj, case_obj = institute_and_case(store, institute_id, case_name)
    user_obj = store.user(current_user.email)
    collaborator_id = request.form['collaborator']
    link = url_for('.case', institute_id=institute_id, case_name=case_name)
    # A 'revoke' form field flips this into an unshare request
    if 'revoke' in request.form:
        store.unshare(institute_obj, case_obj, collaborator_id, user_obj, link)
    else:
        store.share(institute_obj, case_obj, collaborator_id, user_obj, link)
    return redirect(request.referrer)
def rerun(institute_id, case_name):
    """Request a case to be rerun."""
    # The rerun request goes out by mail from the configured sender to the
    # ticket system address
    sender = current_app.config['MAIL_USERNAME']
    recipient = current_app.config['TICKET_SYSTEM_EMAIL']
    controllers.rerun(store, mail, current_user, institute_id, case_name,
                      sender, recipient)
    return redirect(request.referrer)
def research(institute_id, case_name):
    """Open the research list for a case."""
    institute_obj, case_obj = institute_and_case(store, institute_id, case_name)
    user_obj = store.user(current_user.email)
    case_link = url_for('.case', institute_id=institute_id, case_name=case_name)
    store.open_research(institute_obj, case_obj, user_obj, case_link)
    return redirect(request.referrer)
def vcf2cytosure(institute_id, case_name, individual_id):
    """Download vcf2cytosure file for individual."""
    display_name, vcf2cytosure_path = controllers.vcf2cytosure(
        store, institute_id, case_name, individual_id)
    outdir = os.path.abspath(os.path.dirname(vcf2cytosure_path))
    filename = os.path.basename(vcf2cytosure_path)
    log.debug("Attempt to deliver file {0} from dir {1}".format(filename, outdir))
    # Serve the file as a download with a human-readable name
    return send_from_directory(
        outdir, filename,
        attachment_filename=display_name + ".vcf2cytosure.cgh",
        as_attachment=True)
def multiqc(institute_id, case_name):
    """Load multiqc report for the case."""
    data = controllers.multiqc(store, institute_id, case_name)
    report_path = data['case'].get('multiqc')
    if report_path is None:
        # Case has no multiqc report configured
        return abort(404)
    return send_from_directory(os.path.abspath(os.path.dirname(report_path)),
                               os.path.basename(report_path))
def cases(store, case_query, limit=100):
    """Preprocess case objects.

    Add the necessary information to display the 'cases' view.

    Args:
        store(adapter.MongoAdapter)
        case_query(pymongo.Cursor)
        limit(int): Maximum number of cases to display

    Returns:
        data(dict): includes the cases, how many there are and the limit.
    """
    case_groups = {status: [] for status in CASE_STATUSES}
    for case_obj in case_query.limit(limit):
        case_obj['analysis_types'] = list(
            {ind['analysis_type'] for ind in case_obj['individuals']})
        case_obj['assignees'] = [store.user(user_email)
                                 for user_email in case_obj.get('assignees', [])]
        # Group the case under its status; the dict entry keeps a reference,
        # so the decorations below are still visible in the groups
        case_groups[case_obj['status']].append(case_obj)
        case_obj['is_rerun'] = len(case_obj.get('analyses', [])) > 0
        case_obj['clinvar_variants'] = store.case_to_clinVars(case_obj['_id'])
        case_obj['display_track'] = TRACKS[case_obj.get('track', 'rare')]
    return {
        'cases': [(status, case_groups[status]) for status in CASE_STATUSES],
        'found_cases': case_query.count(),
        'limit': limit,
    }
def case_report_content(store, institute_obj, case_obj):
    """Gather contents to be visualized in a case report.

    Args:
        store(adapter.MongoAdapter)
        institute_obj(models.Institute)
        case_obj(models.Case)

    Returns:
        data(dict)
    """
    # Report section name -> variant key that selects variants for it
    variant_types = {
        'causatives_detailed': 'causatives',
        'suspects_detailed': 'suspects',
        'classified_detailed': 'acmg_classification',
        'tagged_detailed': 'manual_rank',
        'dismissed_detailed': 'dismiss_variant',
        'commented_detailed': 'is_commented',
    }
    data = case_obj
    for individual in data['individuals']:
        try:
            sex = int(individual.get('sex', 0))
        except ValueError:
            # Non-numeric sex codes fall back to 0 ('unknown')
            sex = 0
        individual['sex_human'] = SEX_MAP[sex]
        individual['phenotype_human'] = PHENOTYPE_MAP.get(individual['phenotype'])
    # Add the case comments
    data['comments'] = store.events(institute_obj, case=case_obj, comments=True)
    data['manual_rank_options'] = MANUAL_RANK_OPTIONS
    data['dismissed_options'] = DISMISS_VARIANT_OPTIONS
    data['genetic_models'] = dict(GENETIC_MODELS)
    data['report_created_at'] = datetime.datetime.now().strftime("%Y-%m-%d %H:%M")
    evaluated_variants = {vt: [] for vt in variant_types}
    # Causatives and suspects are stored on the case itself as variant ids,
    # so they are handled separately from the evaluated-variant query
    for var_type in ['causatives', 'suspects']:
        vt = '_'.join([var_type, 'detailed'])
        for var_id in case_obj.get(var_type, []):
            variant_obj = store.variant(var_id)
            if variant_obj:
                evaluated_variants[vt].append(variant_obj)
    # Variants for this case that are classified, commented, tagged or dismissed
    for var_obj in store.evaluated_variants(case_id=case_obj['_id']):
        for vt, keyword in variant_types.items():
            # Each variant can belong to multiple categories
            if keyword in var_obj:
                evaluated_variants[vt].append(var_obj)
    for var_type, var_objs in evaluated_variants.items():
        decorated_variants = []
        for var_obj in var_objs:
            # Decorate each variant with the extra display information the
            # report template expects
            if var_obj['category'] == 'snv':
                decorated_info = variant_decorator(
                    store=store,
                    institute_obj=institute_obj,
                    case_obj=case_obj,
                    variant_id=None,
                    variant_obj=var_obj,
                    add_case=False,
                    add_other=False,
                    get_overlapping=False
                )
            else:
                decorated_info = sv_variant(
                    store=store,
                    institute_id=institute_obj['_id'],
                    case_name=case_obj['display_name'],
                    variant_obj=var_obj,
                    add_case=False,
                    get_overlapping=False
                )
            decorated_variants.append(decorated_info['variant'])
        # Add the decorated variants to the case data
        data[var_type] = decorated_variants
    return data
def coverage_report_contents(store, institute_obj, case_obj, base_url):
    """Ask chanjo-report for a coverage report and return its HTML body.

    Posts a request to chanjo-report and captures the body of the returned
    response to include it in the case report.

    Args:
        store(adapter.MongoAdapter)
        institute_obj(models.Institute)
        case_obj(models.Case)
        base_url(str): base url of server

    Returns:
        coverage_data(str): string rendering of the content between
            <body></body> tags of a coverage report, with links stripped
            for the printed version
    """
    request_data = {}
    # extract sample ids from case_obj and add them to the request object
    request_data['sample_id'] = [ind['individual_id'] for ind in case_obj['individuals']]
    # extract default panel names (with versions) for the report header
    panel_names = []
    for panel_info in case_obj.get('panels', []):
        if not panel_info.get('is_default'):
            continue
        panel_obj = store.gene_panel(panel_info['panel_name'], version=panel_info.get('version'))
        full_name = "{} ({})".format(panel_obj['display_name'], panel_obj['version'])
        panel_names.append(full_name)
    request_data['panel_name'] = ' ,'.join(panel_names)
    # add institute-specific cutoff level to the request object
    request_data['level'] = institute_obj.get('coverage_cutoff', 15)
    # send get request to chanjo report
    resp = requests.get(base_url + 'reports/report', params=request_data)
    # Pass an explicit parser: relying on bs4's auto-detection emits a
    # warning and may pick different parsers (different output) per machine.
    soup = BeautifulSoup(resp.text, 'html.parser')
    # remove links in the printed version of coverage report
    for tag in soup.find_all('a'):
        tag.replaceWith('')
    # extract body content using BeautifulSoup
    coverage_data = ''.join(['%s' % x for x in soup.body.contents])
    return coverage_data
def clinvar_submissions(store, user_id, institute_id):
    """Return every ClinVar submission for a user and an institute as a list.

    Args:
        store(adapter.MongoAdapter)
        user_id(str)
        institute_id(str)

    Returns:
        list: the materialized submissions
    """
    return [submission for submission in
            store.clinvar_submissions(user_id, institute_id)]
def mt_excel_files(store, case_obj, temp_excel_dir):
    """Collect MT variants and write one excel report per case individual.

    Args:
        store(adapter.MongoAdapter)
        case_obj(models.Case)
        temp_excel_dir(os.Path): folder where the temp excel files are written to

    Returns:
        written_files(int): the number of files written to temp_excel_dir
    """
    today = datetime.datetime.now().strftime('%Y-%m-%d')
    samples = case_obj.get('individuals')
    # Fetch every mitochondrial variant of the case, sorted by position
    query = {'chrom':'MT'}
    mt_variants = list(store.variants(case_id=case_obj['_id'], query=query, nr_of_variants= -1, sort_key='position'))
    written_files = 0
    for sample in samples:
        sample_id = sample['individual_id']
        sample_lines = export_mt_variants(variants=mt_variants, sample_id=sample_id)
        # set up document name: <case display name>.<sample>.<date>.xlsx
        document_name = '.'.join([case_obj['display_name'], sample_id, today]) + '.xlsx'
        workbook = Workbook(os.path.join(temp_excel_dir,document_name))
        Report_Sheet = workbook.add_worksheet()
        # Write the column header
        row = 0
        for col,field in enumerate(MT_EXPORT_HEADER):
            Report_Sheet.write(row,col,field)
        # Write variant lines, after header (start at line 1)
        for row, line in enumerate(sample_lines,1): # each line becomes a row in the document
            for col, field in enumerate(line): # each field in line becomes a cell
                Report_Sheet.write(row,col,field)
        workbook.close()
        # Only count the file if it actually landed on disk
        if os.path.exists(os.path.join(temp_excel_dir,document_name)):
            written_files += 1
    return written_files
def update_synopsis(store, institute_obj, case_obj, user_obj, new_synopsis):
    """Update the case synopsis, creating an event only on an actual change."""
    if case_obj['synopsis'] == new_synopsis:
        # Nothing changed - do not log a spurious event
        return
    link = url_for('cases.case', institute_id=institute_obj['_id'],
                   case_name=case_obj['display_name'])
    store.update_synopsis(institute_obj, case_obj, user_obj, link,
                          content=new_synopsis)
def hpo_diseases(username, password, hpo_ids, p_value_treshold=1):
    """Return the diseases whose annotated HPO terms match the query terms.

    Args:
        username (str): username to use for phenomizer connection
        password (str): password to use for phenomizer connection
        hpo_ids (list): HPO term identifiers to query with
        p_value_treshold (float): keep only hits with p_value at or below this

    Returns:
        list of dicts on the form
        {
            'p_value': float,
            'disease_source': str,
            'disease_nr': int,
            'gene_symbols': list(str),
            'description': str,
            'raw_line': str
        }
        or None when the phenomizer query aborts.
    """
    try:
        hits = query_phenomizer.query(username, password, *hpo_ids)
        return [hit for hit in hits if hit['p_value'] <= p_value_treshold]
    except SystemExit:
        # query_phenomizer bails out with SystemExit on failure
        return None
def vcf2cytosure(store, institute_id, case_name, individual_id):
    """vcf2cytosure CGH file for individual.

    Args:
        store(adapter.MongoAdapter)
        institute_id(str)
        case_name(str)
        individual_id(str)

    Returns:
        tuple: (display_name, path to the individual's vcf2cytosure file)

    Raises:
        ValueError: if no individual in the case matches ``individual_id``
    """
    institute_obj, case_obj = institute_and_case(store, institute_id, case_name)
    for individual in case_obj['individuals']:
        if individual['individual_id'] == individual_id:
            return (individual['display_name'], individual['vcf2cytosure'])
    # Previously a missing match crashed with an unbound-variable NameError;
    # raise an explicit error instead.
    raise ValueError("individual {} not found in case {}".format(
        individual_id, case_name))
def multiqc(store, institute_id, case_name):
    """Find MultiQC report for the case.

    Returns:
        dict with the resolved institute and case objects
    """
    institute_obj, case_obj = institute_and_case(store, institute_id, case_name)
    data = {
        'institute': institute_obj,
        'case': case_obj,
    }
    return data
def get_sanger_unevaluated(store, institute_id, user_id):
    """Get all variants for an institute having Sanger validations ordered but still not evaluated

    Args:
        store(scout.adapter.MongoAdapter)
        institute_id(str)
        user_id(str): only consider orders placed by this user

    Returns:
        unevaluated: a list that looks like this: [ {'case1': [varID_1, varID_2, .., varID_n]}, {'case2' : [varID_1, varID_2, .., varID_n]} ],
        where the keys are case display names and the values are lists of variants with Sanger ordered but not yet validated
    """
    # Retrieve a list of ids for variants with Sanger ordered grouped by case from the 'event' collection
    # This way is much faster than querying over all variants in all cases of an institute
    sanger_ordered_by_case = store.sanger_ordered(institute_id, user_id)
    unevaluated = []
    # for each object where key==case and value==[variant_id with Sanger ordered]
    for item in sanger_ordered_by_case:
        case_id = item['_id']
        # Get the case to collect display name
        case_obj = store.case(case_id=case_id)
        if not case_obj: # the case might have been removed
            continue
        case_display_name = case_obj.get('display_name')
        # List of variant document ids
        varid_list = item['vars']
        unevaluated_by_case = {}
        unevaluated_by_case[case_display_name] = []
        for var_id in varid_list:
            # For each variant with sanger validation ordered
            variant_obj = store.variant(document_id=var_id, case_id=case_id)
            # Double check that Sanger was ordered (and not canceled) for the variant
            if variant_obj is None or variant_obj.get('sanger_ordered') is None or variant_obj.get('sanger_ordered') is False:
                continue
            validation = variant_obj.get('validation', 'not_evaluated')
            # Check that the variant is not evaluated
            if validation in ['True positive', 'False positive']:
                continue
            unevaluated_by_case[case_display_name].append(variant_obj['_id'])
        # If for a case there is at least one Sanger validation to evaluate add the object to the unevaluated objects list
        if len(unevaluated_by_case[case_display_name]) > 0:
            unevaluated.append(unevaluated_by_case)
    return unevaluated
def mme_add(store, user_obj, case_obj, add_gender, add_features, add_disorders, genes_only,
            mme_base_url, mme_accepts, mme_token):
    """Add a patient to MatchMaker server

    One POST request is sent per affected individual of the case.

    Args:
        store(adapter.MongoAdapter)
        user_obj(dict) a scout user object (to be added as matchmaker contact)
        case_obj(dict) a scout case object
        add_gender(bool) if True case gender will be included in matchmaker
        add_features(bool) if True HPO features will be included in matchmaker
        add_disorders(bool) if True OMIM diagnoses will be included in matchmaker
        genes_only(bool) if True only genes and not variants will be shared
        mme_base_url(str) base url of the MME server
        mme_accepts(str) request content accepted by MME server
        mme_token(str) auth token of the MME server

    Returns:
        submitted_info(dict) info submitted to MatchMaker and its responses,
        or an error string when connection parameters are missing
    """
    if not mme_base_url or not mme_accepts or not mme_token:
        return 'Please check that Matchmaker connection parameters are valid'
    url = ''.join([mme_base_url, '/patient/add'])
    features = [] # this is the list of HPO terms
    disorders = [] # this is the list of OMIM diagnoses
    g_features = []
    # create contact dictionary
    contact_info = {
        'name' : user_obj['name'],
        'href' : ''.join( ['mailto:',user_obj['email']] ),
        'institution' : 'Scout software user, Science For Life Laboratory, Stockholm, Sweden'
    }
    if add_features: # create features dictionaries
        features = hpo_terms(case_obj)
    if add_disorders: # create OMIM disorders dictionaries
        disorders = omim_terms(case_obj)
    # send a POST request and collect response for each affected individual in case
    server_responses = []
    submitted_info = {
        'contact' : contact_info,
        'sex' : add_gender,
        'features' : features,
        'disorders' : disorders,
        'genes_only' : genes_only,
        'patient_id' : []
    }
    for individual in case_obj.get('individuals'):
        if not individual['phenotype'] in [2, 'affected']: # include only affected individuals
            continue
        patient = {
            'contact' : contact_info,
            'id' : '.'.join([case_obj['_id'], individual.get('individual_id')]), # This is a required field form MME
            'label' : '.'.join([case_obj['display_name'], individual.get('display_name')]),
            'features' : features,
            'disorders' : disorders
        }
        if add_gender:
            # ped-style coding: '1' == male, anything else is treated as female
            if individual['sex'] == '1':
                patient['sex'] = 'MALE'
            else:
                patient['sex'] = 'FEMALE'
        # genomic features are only shared when the case has pinned variants
        if case_obj.get('suspects'):
            g_features = genomic_features(store, case_obj, individual.get('display_name'), genes_only)
            patient['genomicFeatures'] = g_features
        # send add request to server and capture response
        resp = matchmaker_request(url=url, token=mme_token, method='POST', content_type=mme_accepts,
                                  accept='application/json', data={'patient':patient})
        server_responses.append({
            'patient': patient,
            'message': resp.get('message'),
            'status_code' : resp.get('status_code')
        })
    submitted_info['server_responses'] = server_responses
    return submitted_info
def mme_delete(case_obj, mme_base_url, mme_token):
    """Delete all affected samples of a case from MatchMaker.

    Args:
        case_obj(dict) a scout case object
        mme_base_url(str) base url of the MME server
        mme_token(str) auth token of the MME server

    Returns:
        server_responses(list): one dict per deleted patient:
            {
                'patient_id': patient_id
                'message': server_message,
                'status_code': server_status_code
            }
        or an error string when connection parameters are missing
    """
    if not (mme_base_url and mme_token):
        return 'Please check that Matchmaker connection parameters are valid'
    server_responses = []
    # issue one DELETE request per patient submitted for this case
    for patient in case_obj['mme_submission']['patients']:
        patient_id = patient['id']
        delete_url = ''.join([mme_base_url, '/patient/delete/', patient_id])
        resp = matchmaker_request(url=delete_url, token=mme_token, method='DELETE', )
        server_responses.append({
            'patient_id': patient_id,
            'message': resp.get('message'),
            'status_code': resp.get('status_code'),
        })
    return server_responses
def mme_matches(case_obj, institute_obj, mme_base_url, mme_token):
    """Show Matchmaker submission data for a sample and eventual matches.

    Args:
        case_obj(dict): a scout case object
        institute_obj(dict): an institute object
        mme_base_url(str) base url of the MME server
        mme_token(str) auth token of the MME server

    Returns:
        data(dict): data to display in the html template, with one matches
        entry per submitted patient; None if the case was never submitted
    """
    data = {
        'institute' : institute_obj,
        'case' : case_obj,
        'server_errors' : []
    }
    matches = {}
    # loop over the submitted samples and get matches from the MatchMaker server
    if not case_obj.get('mme_submission'):
        return None
    for patient in case_obj['mme_submission']['patients']:
        patient_id = patient['id']
        matches[patient_id] = None
        url = ''.join([ mme_base_url, '/matches/', patient_id])
        server_resp = matchmaker_request(url=url, token=mme_token, method='GET')
        if 'status_code' in server_resp: # the server returned a valid response
            # and this will be a list of match objects sorted by desc date
            pat_matches = []
            if server_resp.get('matches'):
                pat_matches = parse_matches(patient_id, server_resp['matches'])
            matches[patient_id] = pat_matches
        else:
            # no status code -> the request itself failed; surface the error
            LOG.warning('Server returned error message: {}'.format(server_resp['message']))
            data['server_errors'].append(server_resp['message'])
    data['matches'] = matches
    return data
def mme_match(case_obj, match_type, mme_base_url, mme_token, nodes=None, mme_accepts=None):
    """Initiate a MatchMaker match against either other Scout patients or external nodes

    Args:
        case_obj(dict): a scout case object already submitted to MME
        match_type(str): 'internal', or an external node id, or 'external' for all nodes
        mme_base_url(str): base url of the MME server
        mme_token(str): auth token of the MME server
        nodes(list): list of connected external node dicts (external matches only)
        mme_accepts(str): request content accepted by MME server (only for internal matches)

    Returns:
        server_responses(list): one response object per (patient, server) pair
    """
    query_patients = []
    server_responses = []
    url = None
    # list of patient dictionaries is required for internal matching
    query_patients = case_obj['mme_submission']['patients']
    if match_type=='internal':
        url = ''.join([mme_base_url,'/match'])
        for patient in query_patients:
            json_resp = matchmaker_request(url=url, token=mme_token, method='POST',
                                           content_type=mme_accepts, accept=mme_accepts, data={'patient':patient})
            resp_obj = {
                'server' : 'Local MatchMaker node',
                'patient_id' : patient['id'],
                'results' : json_resp.get('results'),
                'status_code' : json_resp.get('status_code'),
                'message' : json_resp.get('message') # None if request was successful
            }
            server_responses.append(resp_obj)
    else: # external matching
        # external matching requires only patient ID
        query_patients = [ patient['id'] for patient in query_patients]
        node_ids = [ node['id'] for node in nodes ]
        if match_type in node_ids: # match is against a specific external node
            node_ids = [match_type]
        # Match every affected patient
        for patient in query_patients:
            # Against every node
            for node in node_ids:
                url = ''.join([mme_base_url,'/match/external/', patient, '?node=', node])
                json_resp = matchmaker_request(url=url, token=mme_token, method='POST')
                resp_obj = {
                    'server' : node,
                    'patient_id' : patient,
                    'results' : json_resp.get('results'),
                    'status_code' : json_resp.get('status_code'),
                    'message' : json_resp.get('message') # None if request was successful
                }
                server_responses.append(resp_obj)
    return server_responses
def genes(context, build, api_key):
    """
    Load the hgnc aliases to the mongo database.

    Drops existing gene and transcript information for the given build(s),
    then re-fetches and loads genes and transcripts from HGNC, OMIM, ExAC,
    HPO and Ensembl. Aborts the CLI context on missing API key or on a
    failed OMIM download.
    """
    LOG.info("Running scout update genes")
    adapter = context.obj['adapter']
    # Fetch the omim information
    api_key = api_key or context.obj.get('omim_api_key')
    if not api_key:
        LOG.warning("Please provide a omim api key to load the omim gene panel")
        context.abort()
    try:
        mim_files = fetch_mim_files(api_key, mim2genes=True, morbidmap=True, genemap2=True)
    except Exception as err:
        LOG.warning(err)
        context.abort()
    LOG.warning("Dropping all gene information")
    adapter.drop_genes(build)
    LOG.info("Genes dropped")
    LOG.warning("Dropping all transcript information")
    adapter.drop_transcripts(build)
    LOG.info("transcripts dropped")
    hpo_genes = fetch_hpo_genes()
    # No explicit build means both supported genome builds are reloaded
    if build:
        builds = [build]
    else:
        builds = ['37', '38']
    hgnc_lines = fetch_hgnc()
    exac_lines = fetch_exac_constraint()
    for build in builds:
        ensembl_genes = fetch_ensembl_genes(build=build)
        # load the genes
        hgnc_genes = load_hgnc_genes(
            adapter=adapter,
            ensembl_lines=ensembl_genes,
            hgnc_lines=hgnc_lines,
            exac_lines=exac_lines,
            mim2gene_lines=mim_files['mim2genes'],
            genemap_lines=mim_files['genemap2'],
            hpo_lines=hpo_genes,
            build=build,
        )
        # Index the loaded genes on their ensembl id for transcript loading
        ensembl_genes = {}
        for gene_obj in hgnc_genes:
            ensembl_id = gene_obj['ensembl_id']
            ensembl_genes[ensembl_id] = gene_obj
        # Fetch the transcripts from ensembl
        ensembl_transcripts = fetch_ensembl_transcripts(build=build)
        transcripts = load_transcripts(adapter, ensembl_transcripts, build, ensembl_genes)
    adapter.update_indexes()
    LOG.info("Genes, transcripts and Exons loaded")
def parse_callers(variant, category='snv'):
    """Parse how the different variant callers have performed

    Args:
        variant (cyvcf2.Variant): A variant object
        category (str): variant category used to pick the relevant callers

    Returns:
        callers (dict): caller id -> 'Pass', 'Filtered' or None, e.g.
            {'gatk': <filter>, 'freebayes': <filter>, 'samtools': <filter>}
    """
    callers = {}
    for caller in CALLERS[category]:
        callers[caller['id']] = None
    # GATK CombineVariants style annotation: 'set' describes caller agreement
    set_info = variant.INFO.get('set')
    if set_info:
        for entry in set_info.split('-'):
            if entry == 'FilteredInAll':
                for caller_id in callers:
                    callers[caller_id] = 'Filtered'
            elif entry == 'Intersection':
                for caller_id in callers:
                    callers[caller_id] = 'Pass'
            elif 'filterIn' in entry:
                for caller_id in callers:
                    if caller_id in entry:
                        callers[caller_id] = 'Filtered'
            elif entry in callers:
                callers[entry] = 'Pass'
    # The following is parsing of a custom made merge:
    # FOUND_IN=<caller>|...,<caller>|...
    found_in = variant.INFO.get('FOUND_IN')
    if found_in:
        for entry in found_in.split(','):
            caller_id = entry.split('|')[0]
            callers[caller_id] = 'Pass'
    return callers
def build_transcript(transcript_info, build='37'):
    """Build a hgnc_transcript object

    Args:
        transcript_info(dict): Transcript information
        build(str): genome build, '37' or '38'

    Returns:
        transcript_obj(HgncTranscript)
        {
            transcript_id: str, required
            hgnc_id: int, required
            build: str, required
            refseq_id: str,
            chrom: str, required
            start: int, required
            end: int, required
            is_primary: bool
        }

    Raises:
        KeyError: if a required field is missing from transcript_info
        TypeError: if a coordinate or hgnc id is not castable to int
    """
    try:
        transcript_id = transcript_info['ensembl_transcript_id']
    except KeyError:
        raise KeyError("Transcript has to have ensembl id")
    # (removed a redundant ``build = build`` self-assignment)
    is_primary = transcript_info.get('is_primary', False)
    refseq_id = transcript_info.get('refseq_id')
    refseq_identifiers = transcript_info.get('refseq_identifiers')
    try:
        chrom = transcript_info['chrom']
    except KeyError:
        raise KeyError("Transcript has to have a chromosome")
    try:
        start = int(transcript_info['transcript_start'])
    except KeyError:
        raise KeyError("Transcript has to have start")
    except TypeError:
        raise TypeError("Transcript start has to be integer")
    try:
        end = int(transcript_info['transcript_end'])
    except KeyError:
        raise KeyError("Transcript has to have end")
    except TypeError:
        raise TypeError("Transcript end has to be integer")
    try:
        hgnc_id = int(transcript_info['hgnc_id'])
    except KeyError:
        raise KeyError("Transcript has to have a hgnc id")
    except TypeError:
        raise TypeError("hgnc id has to be integer")
    transcript_obj = HgncTranscript(
        transcript_id=transcript_id,
        hgnc_id=hgnc_id,
        chrom=chrom,
        start=start,
        end=end,
        is_primary=is_primary,
        refseq_id=refseq_id,
        refseq_identifiers=refseq_identifiers,
        build=build
    )
    # Strip keys whose value is None so optional fields are simply absent
    for key in list(transcript_obj):
        if transcript_obj[key] is None:
            transcript_obj.pop(key)
    return transcript_obj
def load_institute(adapter, internal_id, display_name, sanger_recipients=None):
    """Build an institute object and insert it into the database.

    Args:
        adapter(MongoAdapter)
        internal_id(str)
        display_name(str)
        sanger_recipients(list(email))
    """
    institute_obj = build_institute(
        internal_id=internal_id,
        display_name=display_name,
        sanger_recipients=sanger_recipients,
    )
    log.info("Loading institute %s with display name %s into database",
             internal_id, display_name)
    adapter.add_institute(institute_obj)
def parse_cadd(variant, transcripts):
    """Return the CADD phred score for a variant.

    Prefer a score annotated directly on the variant (INFO keys 'CADD' or
    'CADD_PHRED'); otherwise fall back to the highest transcript-level score.

    Args:
        variant: object with an ``INFO`` mapping (e.g. cyvcf2.Variant)
        transcripts (iterable[dict]): parsed transcripts, may carry 'cadd'

    Returns:
        float score from the variant annotation, or the max transcript
        score (0 when no score is found anywhere)
    """
    for info_key in ('CADD', 'CADD_PHRED'):
        annotated = variant.INFO.get(info_key, 0)
        if annotated:
            return float(annotated)
    best = 0
    for transcript in transcripts:
        score = transcript.get('cadd')
        if score and score > best:
            best = score
    return best
def case(context, vcf, vcf_sv, vcf_cancer, vcf_str, owner, ped, update, config,
         no_variants, peddy_ped, peddy_sex, peddy_check):
    """Load a case into the database.

    A case can be loaded without specifying vcf files and/or bam files.
    Either a scout config or a ped file must be provided; otherwise the
    CLI context is aborted.
    """
    adapter = context.obj['adapter']
    if config is None and ped is None:
        LOG.warning("Please provide either scout config or ped file")
        context.abort()
    # Scout needs a config object with the neccessary information
    # If no config is used create a dictionary
    # NOTE(review): yaml.load without an explicit Loader is deprecated and can
    # execute arbitrary tags -- presumably config files are trusted; confirm.
    config_raw = yaml.load(config) if config else {}
    try:
        config_data = parse_case_data(
            config=config_raw,
            ped=ped,
            owner=owner,
            vcf_snv=vcf,
            vcf_sv=vcf_sv,
            vcf_str=vcf_str,
            vcf_cancer=vcf_cancer,
            peddy_ped=peddy_ped,
            peddy_sex=peddy_sex,
            peddy_check=peddy_check
        )
    except SyntaxError as err:
        # parse_case_data signals malformed input with SyntaxError
        LOG.warning(err)
        context.abort()
    LOG.info("Use family %s" % config_data['family'])
    try:
        case_obj = adapter.load_case(config_data, update)
    except Exception as err:
        LOG.error("Something went wrong during loading")
        LOG.warning(err)
        context.abort()
def update_variant(self, variant_obj):
    """Replace one variant document in the database with ``variant_obj``.

    Args:
        variant_obj(dict)

    Returns:
        new_variant(dict): the document as stored after the replacement
    """
    LOG.debug('Updating variant %s', variant_obj.get('simple_id'))
    query = {'_id': variant_obj['_id']}
    return self.variant_collection.find_one_and_replace(
        query,
        variant_obj,
        return_document=pymongo.ReturnDocument.AFTER,
    )
def update_variant_rank(self, case_obj, variant_type='clinical', category='snv'):
    """Updates the manual rank for all variants in a case

    Add a variant rank based on the rank score.
    Whenever variants are added or removed from a case we need to update the
    variant rank. Writes are batched (5000 ops per bulk_write) to keep
    memory and round trips bounded.

    Args:
        case_obj(Case)
        variant_type(str)
        category(str)

    Raises:
        BulkWriteError: if any of the bulk updates fails
    """
    # Get all variants sorted by rank score
    variants = self.variant_collection.find({
        'case_id': case_obj['_id'],
        'category': category,
        'variant_type': variant_type,
    }).sort('rank_score', pymongo.DESCENDING)
    LOG.info("Updating variant_rank for all variants")
    requests = []
    for index, var_obj in enumerate(variants):
        # Flush the accumulated updates before the batch grows too large
        if len(requests) > 5000:
            try:
                self.variant_collection.bulk_write(requests, ordered=False)
                requests = []
            except BulkWriteError as err:
                LOG.warning("Updating variant rank failed")
                raise err
        # Rank is 1-based: highest rank score gets variant_rank == 1
        operation = pymongo.UpdateOne(
            {'_id': var_obj['_id']},
            {
                '$set': {
                    'variant_rank': index + 1,
                }
            })
        requests.append(operation)
    # Update the final bulk
    try:
        self.variant_collection.bulk_write(requests, ordered=False)
    except BulkWriteError as err:
        LOG.warning("Updating variant rank failed")
        raise err
    LOG.info("Updating variant_rank done")
def update_variant_compounds(self, variant, variant_objs = None):
    """Update compounds for a variant.

    This will add all the necessary information of a variant on a compound object.

    Args:
        variant(scout.models.Variant)
        variant_objs(dict): A dictionary with _ids as keys and variant objs as
            values; used as an in-memory cache to avoid one DB lookup per
            compound when provided.

    Returns:
        compound_objs(list(dict)): A list of updated compound objects.
    """
    compound_objs = []
    for compound in variant.get('compounds', []):
        not_loaded = True
        gene_objs = []
        # Check if the compound variant exists
        if variant_objs:
            variant_obj = variant_objs.get(compound['variant'])
        else:
            variant_obj = self.variant_collection.find_one({'_id': compound['variant']})
        if variant_obj:
            # If the variant exists we try to collect as much info as possible
            not_loaded = False
            compound['rank_score'] = variant_obj['rank_score']
            for gene in variant_obj.get('genes', []):
                gene_obj = {
                    'hgnc_id': gene['hgnc_id'],
                    'hgnc_symbol': gene.get('hgnc_symbol'),
                    'region_annotation': gene.get('region_annotation'),
                    'functional_annotation': gene.get('functional_annotation'),
                }
                gene_objs.append(gene_obj)
            compound['genes'] = gene_objs
        compound['not_loaded'] = not_loaded
        compound_objs.append(compound)
    return compound_objs
def update_compounds(self, variants):
    """Update the compounds for a set of variants.

    Args:
        variants(dict): A dictionary with _ids as keys and variant objs as values

    Returns:
        variants(dict): the same mapping, with compounds refreshed in place
    """
    LOG.debug("Updating compound objects")
    for variant_obj in variants.values():
        if not variant_obj.get('compounds'):
            continue
        variant_obj['compounds'] = self.update_variant_compounds(
            variant_obj, variants)
    LOG.debug("Compounds updated")
    return variants
def update_mongo_compound_variants(self, bulk):
    """Update the compound information for a bulk of variants in the database

    Only variants that actually carry compounds generate a write; when none
    do, no request is sent at all.

    Args:
        bulk(dict): {'_id': scout.models.Variant}

    Raises:
        BulkWriteError: if the bulk update fails
    """
    requests = []
    for var_id in bulk:
        var_obj = bulk[var_id]
        if not var_obj.get('compounds'):
            continue
        # Add a request to update compounds
        operation = pymongo.UpdateOne(
            {'_id': var_obj['_id']},
            {
                '$set': {
                    'compounds': var_obj['compounds']
                }
            })
        requests.append(operation)
    if not requests:
        return
    try:
        self.variant_collection.bulk_write(requests, ordered=False)
    except BulkWriteError as err:
        LOG.warning("Updating compounds failed")
        raise err
def update_case_compounds(self, case_obj, build='37'):
    """Update the compounds for a case

    Loop over all coding intervals to get coordinates for all potential
    compound positions. Variants that share a coding region are batched and
    their compounds updated with one bulk operation per region.

    Args:
        case_obj(Case)
        build(str): genome build, '37' or '38'
    """
    case_id = case_obj['_id']
    # Possible categories 'snv', 'sv', 'str', 'cancer':
    categories = set()
    # Possible variant types 'clinical', 'research':
    variant_types = set()
    # Derive the categories/types to process from the VCFs the case has loaded
    for file_type in FILE_TYPE_MAP:
        if case_obj.get('vcf_files',{}).get(file_type):
            categories.add(FILE_TYPE_MAP[file_type]['category'])
            variant_types.add(FILE_TYPE_MAP[file_type]['variant_type'])
    coding_intervals = self.get_coding_intervals(build=build)
    # Loop over all intervals
    for chrom in CHROMOSOMES:
        intervals = coding_intervals.get(chrom, IntervalTree())
        for var_type in variant_types:
            for category in categories:
                LOG.info("Updating compounds on chromosome:{0}, type:{1}, category:{2} for case:{3}".format(
                    chrom, var_type, category, case_id))
                # Fetch all variants from a chromosome
                query = {
                    'variant_type': var_type,
                    'chrom': chrom,
                }
                # Get all variants from the database of the specific type,
                # sorted by position so region membership changes monotonically
                variant_objs = self.variants(
                    case_id=case_id,
                    query=query,
                    category=category,
                    nr_of_variants=-1,
                    sort_key='position'
                )
                # Initiate a bulk
                bulk = {}
                current_region = None
                # NOTE(review): 'special' is assigned but never used
                special = False
                # Loop over the variants and check if they are in a coding region
                for var_obj in variant_objs:
                    var_id = var_obj['_id']
                    var_chrom = var_obj['chromosome']
                    var_start = var_obj['position']
                    var_end = var_obj['end'] + 1
                    update_bulk = True
                    new_region = None
                    # Check if the variant is in a coding region
                    genomic_regions = coding_intervals.get(var_chrom, IntervalTree()).search(var_start, var_end)
                    # If the variant is in a coding region
                    if genomic_regions:
                        # We know there is data here so get the interval id
                        new_region = genomic_regions.pop().data
                    if new_region and (new_region == current_region):
                        # If the variant is in the same region as previous
                        # we add it to the same bulk
                        update_bulk = False
                    current_region = new_region
                    # If the variant is not in a current region we update the compounds
                    # from the previous region, if any. Otherwise continue
                    if update_bulk and bulk:
                        self.update_compounds(bulk)
                        self.update_mongo_compound_variants(bulk)
                        bulk = {}
                    if new_region:
                        bulk[var_id] = var_obj
                # Flush the final region's bulk, if any
                if not bulk:
                    continue
                self.update_compounds(bulk)
                self.update_mongo_compound_variants(bulk)
    LOG.info("All compounds updated")
    return
def load_variant(self, variant_obj):
    """Load a variant object

    Args:
        variant_obj(dict)

    Returns:
        result: pymongo InsertOneResult

    Raises:
        IntegrityError: if the variant already exists in the database
    """
    try:
        result = self.variant_collection.insert_one(variant_obj)
    except DuplicateKeyError as err:
        # The message used to be passed printf-style ("%s", arg), which left
        # the placeholder unformatted in the raised exception text.
        raise IntegrityError(
            "Variant {} already exists in database".format(variant_obj['_id']))
    return result
def upsert_variant(self, variant_obj):
    """Load a variant object; if it already exists, update its compounds.

    Args:
        variant_obj(dict)

    Returns:
        result: pymongo InsertOneResult on insert, or the pre-update
            document when the variant already existed
    """
    LOG.debug("Upserting variant %s", variant_obj['_id'])
    try:
        result = self.variant_collection.insert_one(variant_obj)
    except DuplicateKeyError as err:
        LOG.debug("Variant %s already exists in database", variant_obj['_id'])
        result = self.variant_collection.find_one_and_update(
            {'_id': variant_obj['_id']},
            {
                '$set': {
                    'compounds': variant_obj.get('compounds', [])
                }
            }
        )
        # NOTE: a redundant find_one() re-fetch of the document was removed
        # here; its result was assigned to a local that was never used.
    return result
def load_variant_bulk(self, variants):
    """Load a bulk of variants

    Falls back to upserting one by one when the bulk insert collides with
    existing documents, so the rest of the bulk still loads.

    Args:
        variants(iterable(scout.models.Variant))

    Returns:
        None
    """
    if not variants:
        return
    LOG.debug("Loading variant bulk")
    try:
        result = self.variant_collection.insert_many(variants)
    except (DuplicateKeyError, BulkWriteError) as err:
        # If the bulk write fails there are probably some variants already
        # existing in the database, so insert each variant individually.
        for var_obj in variants:
            try:
                self.upsert_variant(var_obj)
            except IntegrityError as err:
                pass
    return
"""Assign a user to a case.
This function will create an Event to log that a person has been assigned
to a case. Also the user will be added to case "assignees".
Arguments:
institute (dict): A institute
case (dict): A case
user (dict): A User object
link (str): The url to be used in the event
Returns:
updated_case(dict)
"""
LOG.info("Creating event for assigning {0} to {1}"
.format(user['name'].encode('utf-8'), case['display_name']))
self.create_event(
institute=institute,
case=case,
user=user,
link=link,
category='case',
verb='assign',
subject=case['display_name']
)
LOG.info("Updating {0} to be assigned with {1}"
.format(case['display_name'], user['name']))
updated_case = self.case_collection.find_one_and_update(
{'_id': case['_id']},
{'$addToSet': {'assignees': user['_id']}},
return_document=pymongo.ReturnDocument.AFTER
)
return updated_case | python | {
"resource": ""
} |
q273473 | CaseEventHandler.share | test | def share(self, institute, case, collaborator_id, user, link):
"""Share a case with a new institute.
Arguments:
institute (dict): A Institute object
case (dict): Case object
collaborator_id (str): A instute id
user (dict): A User object
link (str): The url to be used in the event
Return:
updated_case
"""
if collaborator_id in case.get('collaborators', []):
raise ValueError('new customer is already a collaborator')
self.create_event(
institute=institute,
case=case,
user=user,
link=link,
category='case',
verb='share',
subject=collaborator_id
)
updated_case = self.case_collection.find_one_and_update(
{'_id': case['_id']},
{
'$push': {'collaborators': collaborator_id}
},
return_document=pymongo.ReturnDocument.AFTER
)
LOG.debug("Case updated")
return updated_case | python | {
"resource": ""
} |
q273474 | CaseEventHandler.diagnose | test | def diagnose(self, institute, case, user, link, level, omim_id, remove=False):
"""Diagnose a case using OMIM ids.
Arguments:
institute (dict): A Institute object
case (dict): Case object
user (dict): A User object
link (str): The url to be used in the event
level (str): choices=('phenotype','gene')
Return:
updated_case
"""
if level == 'phenotype':
case_key = 'diagnosis_phenotypes'
elif level == 'gene':
case_key = 'diagnosis_genes'
else:
raise TypeError('wrong level')
diagnosis_list = case.get(case_key, [])
omim_number = int(omim_id.split(':')[-1])
updated_case = None
if remove and omim_number in diagnosis_list:
updated_case = self.case_collection.find_one_and_update(
{'_id': case['_id']},
{'$pull': {case_key: omim_number}},
return_document=pymongo.ReturnDocument.AFTER
)
elif omim_number not in diagnosis_list:
updated_case = self.case_collection.find_one_and_update(
{'_id': case['_id']},
{'$push': {case_key: omim_number}},
return_document=pymongo.ReturnDocument.AFTER
)
if updated_case:
self.create_event(
institute=institute,
case=case,
user=user,
link=link,
category='case',
verb='update_diagnosis',
subject=case['display_name'],
content=omim_id
)
return updated_case | python | {
"resource": ""
} |
q273475 | CaseEventHandler.mark_checked | test | def mark_checked(self, institute, case, user, link,
unmark=False):
"""Mark a case as checked from an analysis point of view.
Arguments:
institute (dict): A Institute object
case (dict): Case object
user (dict): A User object
link (str): The url to be used in the event
unmark (bool): If case should ve unmarked
Return:
updated_case
"""
LOG.info("Updating checked status of {}"
.format(case['display_name']))
status = 'not checked' if unmark else 'checked'
self.create_event(
institute=institute,
case=case,
user=user,
link=link,
category='case',
verb='check_case',
subject=status
)
LOG.info("Updating {0}'s checked status {1}"
.format(case['display_name'], status))
analysis_checked = False if unmark else True
updated_case = self.case_collection.find_one_and_update(
{'_id': case['_id']},
{
'$set': {'analysis_checked': analysis_checked}
},
return_document=pymongo.ReturnDocument.AFTER
)
LOG.debug("Case updated")
return updated_case | python | {
"resource": ""
} |
q273476 | VariantEventHandler.order_verification | test | def order_verification(self, institute, case, user, link, variant):
"""Create an event for a variant verification for a variant
and an event for a variant verification for a case
Arguments:
institute (dict): A Institute object
case (dict): Case object
user (dict): A User object
link (str): The url to be used in the event
variant (dict): A variant object
Returns:
updated_variant(dict)
"""
LOG.info("Creating event for ordering validation for variant" \
" {0}".format(variant['display_name']))
updated_variant = self.variant_collection.find_one_and_update(
{'_id': variant['_id']},
{'$set': {'sanger_ordered': True}},
return_document=pymongo.ReturnDocument.AFTER
)
self.create_event(
institute=institute,
case=case,
user=user,
link=link,
category='variant',
verb='sanger',
variant=variant,
subject=variant['display_name'],
)
LOG.info("Creating event for ordering sanger for case" \
" {0}".format(case['display_name']))
self.create_event(
institute=institute,
case=case,
user=user,
link=link,
category='case',
verb='sanger',
variant=variant,
subject=variant['display_name'],
)
return updated_variant | python | {
"resource": ""
} |
q273477 | VariantEventHandler.sanger_ordered | test | def sanger_ordered(self, institute_id=None, user_id=None):
"""Get all variants with validations ever ordered.
Args:
institute_id(str) : The id of an institute
user_id(str) : The id of an user
Returns:
sanger_ordered(list) : a list of dictionaries, each with "case_id" as keys and list of variant ids as values
"""
query = {'$match': {
'$and': [
{'verb': 'sanger'},
],
}}
if institute_id:
query['$match']['$and'].append({'institute': institute_id})
if user_id:
query['$match']['$and'].append({'user_id': user_id})
# Get all sanger ordered variants grouped by case_id
results = self.event_collection.aggregate([
query,
{'$group': {
'_id': "$case",
'vars': {'$addToSet' : '$variant_id'}
}}
])
sanger_ordered = [item for item in results]
return sanger_ordered | python | {
"resource": ""
} |
q273478 | VariantEventHandler.validate | test | def validate(self, institute, case, user, link, variant, validate_type):
"""Mark validation status for a variant.
Arguments:
institute (dict): A Institute object
case (dict): Case object
user (dict): A User object
link (str): The url to be used in the event
variant (dict): A variant object
validate_type(str): The outcome of validation.
choices=('True positive', 'False positive')
Returns:
updated_variant(dict)
"""
if not validate_type in SANGER_OPTIONS:
LOG.warning("Invalid validation string: %s", validate_type)
LOG.info("Validation options: %s", ', '.join(SANGER_OPTIONS))
return
updated_variant = self.variant_collection.find_one_and_update(
{'_id': variant['_id']},
{'$set': {'validation': validate_type}},
return_document=pymongo.ReturnDocument.AFTER
)
self.create_event(
institute=institute,
case=case,
user=user,
link=link,
category='variant',
verb='validate',
variant=variant,
subject=variant['display_name'],
)
return updated_variant | python | {
"resource": ""
} |
q273479 | VariantEventHandler.mark_causative | test | def mark_causative(self, institute, case, user, link, variant):
"""Create an event for marking a variant causative.
Arguments:
institute (dict): A Institute object
case (dict): Case object
user (dict): A User object
link (str): The url to be used in the event
variant (variant): A variant object
Returns:
updated_case(dict)
"""
display_name = variant['display_name']
LOG.info("Mark variant {0} as causative in the case {1}".format(
display_name, case['display_name']))
LOG.info("Adding variant to causatives in case {0}".format(
case['display_name']))
LOG.info("Marking case {0} as solved".format(
case['display_name']))
updated_case = self.case_collection.find_one_and_update(
{'_id': case['_id']},
{
'$push': {'causatives': variant['_id']},
'$set': {'status': 'solved'}
},
return_document=pymongo.ReturnDocument.AFTER
)
LOG.info("Creating case event for marking {0}" \
" causative".format(variant['display_name']))
self.create_event(
institute=institute,
case=case,
user=user,
link=link,
category='case',
verb='mark_causative',
variant=variant,
subject=variant['display_name'],
)
LOG.info("Creating variant event for marking {0}" \
" causative".format(case['display_name']))
self.create_event(
institute=institute,
case=case,
user=user,
link=link,
category='variant',
verb='mark_causative',
variant=variant,
subject=variant['display_name'],
)
return updated_case | python | {
"resource": ""
} |
q273480 | VariantEventHandler.update_dismiss_variant | test | def update_dismiss_variant(self, institute, case, user, link, variant,
dismiss_variant):
"""Create an event for updating the manual dismiss variant entry
This function will create a event and update the dismiss variant
field of the variant.
Arguments:
institute (dict): A Institute object
case (dict): Case object
user (dict): A User object
link (str): The url to be used in the event
variant (dict): A variant object
dismiss_variant (list): The new dismiss variant list
Return:
updated_variant
"""
LOG.info("Creating event for updating dismiss variant for "
"variant {0}".format(variant['display_name']))
self.create_event(
institute=institute,
case=case,
user=user,
link=link,
category='variant',
verb='dismiss_variant',
variant=variant,
subject=variant['display_name'],
)
if dismiss_variant:
LOG.info("Setting dismiss variant to {0} for variant {1}"
.format(dismiss_variant, variant['display_name']))
action = '$set'
else:
LOG.info("Reset dismiss variant from {0} for variant {1}"
.format(variant['dismiss_variant'], variant['display_name']))
action = '$unset'
updated_variant = self.variant_collection.find_one_and_update(
{'_id': variant['_id']},
{action: {'dismiss_variant': dismiss_variant}},
return_document=pymongo.ReturnDocument.AFTER
)
LOG.debug("Variant updated")
return updated_variant | python | {
"resource": ""
} |
q273481 | VariantEventHandler.update_acmg | test | def update_acmg(self, institute_obj, case_obj, user_obj, link, variant_obj, acmg_str):
"""Create an event for updating the ACMG classification of a variant.
Arguments:
institute_obj (dict): A Institute object
case_obj (dict): Case object
user_obj (dict): A User object
link (str): The url to be used in the event
variant_obj (dict): A variant object
acmg_str (str): The new ACMG classification string
Returns:
updated_variant
"""
self.create_event(
institute=institute_obj,
case=case_obj,
user=user_obj,
link=link,
category='variant',
verb='acmg',
variant=variant_obj,
subject=variant_obj['display_name'],
)
LOG.info("Setting ACMG to {} for: {}".format(acmg_str, variant_obj['display_name']))
if acmg_str is None:
updated_variant = self.variant_collection.find_one_and_update(
{'_id': variant_obj['_id']},
{'$unset': {'acmg_classification': 1}},
return_document=pymongo.ReturnDocument.AFTER
)
else:
updated_variant = self.variant_collection.find_one_and_update(
{'_id': variant_obj['_id']},
{'$set': {'acmg_classification': REV_ACMG_MAP[acmg_str]}},
return_document=pymongo.ReturnDocument.AFTER
)
LOG.debug("Variant updated")
return updated_variant | python | {
"resource": ""
} |
q273482 | parse_ids | test | def parse_ids(chrom, pos, ref, alt, case_id, variant_type):
"""Construct the necessary ids for a variant
Args:
chrom(str): Variant chromosome
pos(int): Variant position
ref(str): Variant reference
alt(str): Variant alternative
case_id(str): Unique case id
variant_type(str): 'clinical' or 'research'
Returns:
ids(dict): Dictionary with the relevant ids
"""
ids = {}
pos = str(pos)
ids['simple_id'] = parse_simple_id(chrom, pos, ref, alt)
ids['variant_id'] = parse_variant_id(chrom, pos, ref, alt, variant_type)
ids['display_name'] = parse_display_name(chrom, pos, ref, alt, variant_type)
ids['document_id'] = parse_document_id(chrom, pos, ref, alt, variant_type, case_id)
return ids | python | {
"resource": ""
} |
q273483 | parse_simple_id | test | def parse_simple_id(chrom, pos, ref, alt):
"""Parse the simple id for a variant
Simple id is used as a human readable reference for a position, it is
in no way unique.
Args:
chrom(str)
pos(str)
ref(str)
alt(str)
Returns:
simple_id(str): The simple human readable variant id
"""
return '_'.join([chrom, pos, ref, alt]) | python | {
"resource": ""
} |
q273484 | parse_document_id | test | def parse_document_id(chrom, pos, ref, alt, variant_type, case_id):
"""Parse the unique document id for a variant.
This will always be unique in the database.
Args:
chrom(str)
pos(str)
ref(str)
alt(str)
variant_type(str): 'clinical' or 'research'
case_id(str): unqiue family id
Returns:
document_id(str): The unique document id in an md5 string
"""
return generate_md5_key([chrom, pos, ref, alt, variant_type, case_id]) | python | {
"resource": ""
} |
q273485 | convert | test | def convert(context, panel):
"""Convert a gene panel with hgnc symbols to a new one with hgnc ids."""
adapter = context.obj['adapter']
new_header = ["hgnc_id","hgnc_symbol","disease_associated_transcripts",
"reduced_penetrance", "genetic_disease_models", "mosaicism",
"database_entry_version"]
genes = parse_genes(panel)
adapter.add_hgnc_id(genes)
click.echo("#{0}".format('\t'.join(new_header)))
for gene in genes:
if gene.get('hgnc_id'):
print_info = []
for head in new_header:
print_info.append(str(gene[head]) if gene.get(head) else '')
click.echo('\t'.join(print_info)) | python | {
"resource": ""
} |
q273486 | get_variantid | test | def get_variantid(variant_obj, family_id):
"""Create a new variant id.
Args:
variant_obj(dict)
family_id(str)
Returns:
new_id(str): The new variant id
"""
new_id = parse_document_id(
chrom=variant_obj['chromosome'],
pos=str(variant_obj['position']),
ref=variant_obj['reference'],
alt=variant_obj['alternative'],
variant_type=variant_obj['variant_type'],
case_id=family_id,
)
return new_id | python | {
"resource": ""
} |
q273487 | CaseHandler.nr_cases | test | def nr_cases(self, institute_id=None):
"""Return the number of cases
This function will change when we migrate to 3.7.1
Args:
collaborator(str): Institute id
Returns:
nr_cases(int)
"""
query = {}
if institute_id:
query['collaborators'] = institute_id
LOG.debug("Fetch all cases with query {0}".format(query))
nr_cases = self.case_collection.find(query).count()
return nr_cases | python | {
"resource": ""
} |
q273488 | CaseHandler.update_dynamic_gene_list | test | def update_dynamic_gene_list(self, case, hgnc_symbols=None, hgnc_ids=None,
phenotype_ids=None, build='37'):
"""Update the dynamic gene list for a case
Adds a list of dictionaries to case['dynamic_gene_list'] that looks like
{
hgnc_symbol: str,
hgnc_id: int,
description: str
}
Arguments:
case (dict): The case that should be updated
hgnc_symbols (iterable): A list of hgnc_symbols
hgnc_ids (iterable): A list of hgnc_ids
Returns:
updated_case(dict)
"""
dynamic_gene_list = []
res = []
if hgnc_ids:
LOG.info("Fetching genes by hgnc id")
res = self.hgnc_collection.find({'hgnc_id': {'$in': hgnc_ids}, 'build': build})
elif hgnc_symbols:
LOG.info("Fetching genes by hgnc symbols")
res = []
for symbol in hgnc_symbols:
for gene_obj in self.gene_by_alias(symbol=symbol, build=build):
res.append(gene_obj)
for gene_obj in res:
dynamic_gene_list.append(
{
'hgnc_symbol': gene_obj['hgnc_symbol'],
'hgnc_id': gene_obj['hgnc_id'],
'description': gene_obj['description'],
}
)
LOG.info("Update dynamic gene panel for: %s", case['display_name'])
updated_case = self.case_collection.find_one_and_update(
{'_id': case['_id']},
{'$set': {'dynamic_gene_list': dynamic_gene_list,
'dynamic_panel_phenotypes': phenotype_ids or []}},
return_document=pymongo.ReturnDocument.AFTER
)
LOG.debug("Case updated")
return updated_case | python | {
"resource": ""
} |
q273489 | CaseHandler.case | test | def case(self, case_id=None, institute_id=None, display_name=None):
"""Fetches a single case from database
Use either the _id or combination of institute_id and display_name
Args:
case_id(str): _id for a caes
institute_id(str):
display_name(str)
Yields:
A single Case
"""
query = {}
if case_id:
query['_id'] = case_id
LOG.info("Fetching case %s", case_id)
else:
if not (institute_id and display_name):
raise ValueError("Have to provide both institute_id and display_name")
LOG.info("Fetching case %s institute %s", display_name, institute_id)
query['owner'] = institute_id
query['display_name'] = display_name
return self.case_collection.find_one(query) | python | {
"resource": ""
} |
q273490 | CaseHandler.delete_case | test | def delete_case(self, case_id=None, institute_id=None, display_name=None):
"""Delete a single case from database
Args:
institute_id(str)
case_id(str)
Returns:
case_obj(dict): The case that was deleted
"""
query = {}
if case_id:
query['_id'] = case_id
LOG.info("Deleting case %s", case_id)
else:
if not (institute_id and display_name):
raise ValueError("Have to provide both institute_id and display_name")
LOG.info("Deleting case %s institute %s", display_name, institute_id)
query['owner'] = institute_id
query['display_name'] = display_name
result = self.case_collection.delete_one(query)
return result | python | {
"resource": ""
} |
q273491 | CaseHandler._add_case | test | def _add_case(self, case_obj):
"""Add a case to the database
If the case already exists exception is raised
Args:
case_obj(Case)
"""
if self.case(case_obj['_id']):
raise IntegrityError("Case %s already exists in database" % case_obj['_id'])
return self.case_collection.insert_one(case_obj) | python | {
"resource": ""
} |
q273492 | CaseHandler.replace_case | test | def replace_case(self, case_obj):
"""Replace a existing case with a new one
Keeps the object id
Args:
case_obj(dict)
Returns:
updated_case(dict)
"""
# Todo: Figure out and describe when this method destroys a case if invoked instead of
# update_case
LOG.info("Saving case %s", case_obj['_id'])
# update updated_at of case to "today"
case_obj['updated_at'] = datetime.datetime.now(),
updated_case = self.case_collection.find_one_and_replace(
{'_id': case_obj['_id']},
case_obj,
return_document=pymongo.ReturnDocument.AFTER
)
return updated_case | python | {
"resource": ""
} |
q273493 | CaseHandler.update_caseid | test | def update_caseid(self, case_obj, family_id):
"""Update case id for a case across the database.
This function is used when a case is a rerun or updated for another reason.
Args:
case_obj(dict)
family_id(str): The new family id
Returns:
new_case(dict): The updated case object
"""
new_case = deepcopy(case_obj)
new_case['_id'] = family_id
# update suspects and causatives
for case_variants in ['suspects', 'causatives']:
new_variantids = []
for variant_id in case_obj.get(case_variants, []):
case_variant = self.variant(variant_id)
if not case_variant:
continue
new_variantid = get_variantid(case_variant, family_id)
new_variantids.append(new_variantid)
new_case[case_variants] = new_variantids
# update ACMG
for acmg_obj in self.acmg_collection.find({'case_id': case_obj['_id']}):
LOG.info("update ACMG classification: %s", acmg_obj['classification'])
acmg_variant = self.variant(acmg_obj['variant_specific'])
new_specific_id = get_variantid(acmg_variant, family_id)
self.acmg_collection.find_one_and_update(
{'_id': acmg_obj['_id']},
{'$set': {'case_id': family_id, 'variant_specific': new_specific_id}},
)
# update events
institute_obj = self.institute(case_obj['owner'])
for event_obj in self.events(institute_obj, case=case_obj):
LOG.info("update event: %s", event_obj['verb'])
self.event_collection.find_one_and_update(
{'_id': event_obj['_id']},
{'$set': {'case': family_id}},
)
# insert the updated case
self.case_collection.insert_one(new_case)
# delete the old case
self.case_collection.find_one_and_delete({'_id': case_obj['_id']})
return new_case | python | {
"resource": ""
} |
q273494 | ACMGHandler.submit_evaluation | test | def submit_evaluation(self, variant_obj, user_obj, institute_obj, case_obj, link, criteria):
"""Submit an evaluation to the database
Get all the relevant information, build a evaluation_obj
Args:
variant_obj(dict)
user_obj(dict)
institute_obj(dict)
case_obj(dict)
link(str): variant url
criteria(list(dict)):
[
{
'term': str,
'comment': str,
'links': list(str)
},
.
.
]
"""
variant_specific = variant_obj['_id']
variant_id = variant_obj['variant_id']
user_id = user_obj['_id']
user_name = user_obj.get('name', user_obj['_id'])
institute_id = institute_obj['_id']
case_id = case_obj['_id']
evaluation_terms = [evluation_info['term'] for evluation_info in criteria]
classification = get_acmg(evaluation_terms)
evaluation_obj = build_evaluation(
variant_specific=variant_specific,
variant_id=variant_id,
user_id=user_id,
user_name=user_name,
institute_id=institute_id,
case_id=case_id,
classification=classification,
criteria=criteria
)
self._load_evaluation(evaluation_obj)
# Update the acmg classification for the variant:
self.update_acmg(institute_obj, case_obj, user_obj, link, variant_obj, classification)
return classification | python | {
"resource": ""
} |
q273495 | ACMGHandler.get_evaluations | test | def get_evaluations(self, variant_obj):
"""Return all evaluations for a certain variant.
Args:
variant_obj (dict): variant dict from the database
Returns:
pymongo.cursor: database cursor
"""
query = dict(variant_id=variant_obj['variant_id'])
res = self.acmg_collection.find(query).sort([('created_at', pymongo.DESCENDING)])
return res | python | {
"resource": ""
} |
q273496 | parse_transcripts | test | def parse_transcripts(transcript_lines):
"""Parse and massage the transcript information
There could be multiple lines with information about the same transcript.
This is why it is necessary to parse the transcripts first and then return a dictionary
where all information has been merged.
Args:
transcript_lines(): This could be an iterable with strings or a pandas.DataFrame
Returns:
parsed_transcripts(dict): Map from enstid -> transcript info
"""
LOG.info("Parsing transcripts")
# Parse the transcripts, we need to check if it is a request or a file handle
if isinstance(transcript_lines, DataFrame):
transcripts = parse_ensembl_transcript_request(transcript_lines)
else:
transcripts = parse_ensembl_transcripts(transcript_lines)
# Since there can be multiple lines with information about the same transcript
# we store transcript information in a dictionary for now
parsed_transcripts = {}
# Loop over the parsed transcripts
for tx in transcripts:
tx_id = tx['ensembl_transcript_id']
ens_gene_id = tx['ensembl_gene_id']
# Check if the transcript has been added
# If not, create a new transcript
if not tx_id in parsed_transcripts:
tx_info = {
'chrom': tx['chrom'],
'transcript_start': tx['transcript_start'],
'transcript_end': tx['transcript_end'],
'mrna': set(),
'mrna_predicted': set(),
'nc_rna': set(),
'ensembl_gene_id': ens_gene_id,
'ensembl_transcript_id': tx_id,
}
parsed_transcripts[tx_id] = tx_info
tx_info = parsed_transcripts[tx_id]
# Add the ref seq information
if tx.get('refseq_mrna_predicted'):
tx_info['mrna_predicted'].add(tx['refseq_mrna_predicted'])
if tx.get('refseq_mrna'):
tx_info['mrna'].add(tx['refseq_mrna'])
if tx.get('refseq_ncrna'):
tx_info['nc_rna'].add(tx['refseq_ncrna'])
return parsed_transcripts | python | {
"resource": ""
} |
q273497 | parse_ensembl_gene_request | test | def parse_ensembl_gene_request(result):
"""Parse a dataframe with ensembl gene information
Args:
res(pandas.DataFrame)
Yields:
gene_info(dict)
"""
LOG.info("Parsing genes from request")
for index, row in result.iterrows():
# print(index, row)
ensembl_info = {}
# Pandas represents missing data with nan which is a float
if type(row['hgnc_symbol']) is float:
# Skip genes without hgnc information
continue
ensembl_info['chrom'] = row['chromosome_name']
ensembl_info['gene_start'] = int(row['start_position'])
ensembl_info['gene_end'] = int(row['end_position'])
ensembl_info['ensembl_gene_id'] = row['ensembl_gene_id']
ensembl_info['hgnc_symbol'] = row['hgnc_symbol']
hgnc_id = row['hgnc_id']
if type(hgnc_id) is float:
hgnc_id = int(hgnc_id)
else:
hgnc_id = int(hgnc_id.split(':')[-1])
ensembl_info['hgnc_id'] = hgnc_id
yield ensembl_info | python | {
"resource": ""
} |
q273498 | parse_ensembl_transcript_request | test | def parse_ensembl_transcript_request(result):
"""Parse a dataframe with ensembl transcript information
Args:
res(pandas.DataFrame)
Yields:
transcript_info(dict)
"""
LOG.info("Parsing transcripts from request")
keys = [
'chrom',
'ensembl_gene_id',
'ensembl_transcript_id',
'transcript_start',
'transcript_end',
'refseq_mrna',
'refseq_mrna_predicted',
'refseq_ncrna',
]
# for res in result.itertuples():
for index, row in result.iterrows():
ensembl_info = {}
ensembl_info['chrom'] = str(row['chromosome_name'])
ensembl_info['ensembl_gene_id'] = row['ensembl_gene_id']
ensembl_info['ensembl_transcript_id'] = row['ensembl_transcript_id']
ensembl_info['transcript_start'] = int(row['transcript_start'])
ensembl_info['transcript_end'] = int(row['transcript_end'])
# Check if refseq data is annotated
# Pandas represent missing data with nan
for key in keys[-3:]:
if type(row[key]) is float:
ensembl_info[key] = None
else:
ensembl_info[key] = row[key]
yield ensembl_info | python | {
"resource": ""
} |
q273499 | parse_ensembl_line | test | def parse_ensembl_line(line, header):
"""Parse an ensembl formated line
Args:
line(list): A list with ensembl gene info
header(list): A list with the header info
Returns:
ensembl_info(dict): A dictionary with the relevant info
"""
line = line.rstrip().split('\t')
header = [head.lower() for head in header]
raw_info = dict(zip(header, line))
ensembl_info = {}
for word in raw_info:
value = raw_info[word]
if not value:
continue
if 'chromosome' in word:
ensembl_info['chrom'] = value
if 'gene' in word:
if 'id' in word:
ensembl_info['ensembl_gene_id'] = value
elif 'start' in word:
ensembl_info['gene_start'] = int(value)
elif 'end' in word:
ensembl_info['gene_end'] = int(value)
if 'hgnc symbol' in word:
ensembl_info['hgnc_symbol'] = value
if "gene name" in word:
ensembl_info['hgnc_symbol'] = value
if 'hgnc id' in word:
ensembl_info['hgnc_id'] = int(value.split(':')[-1])
if 'transcript' in word:
if 'id' in word:
ensembl_info['ensembl_transcript_id'] = value
elif 'start' in word:
ensembl_info['transcript_start'] = int(value)
elif 'end' in word:
ensembl_info['transcript_end'] = int(value)
if 'exon' in word:
if 'start' in word:
ensembl_info['exon_start'] = int(value)
elif 'end' in word:
ensembl_info['exon_end'] = int(value)
elif 'rank' in word:
ensembl_info['exon_rank'] = int(value)
if 'utr' in word:
if 'start' in word:
if '5' in word:
ensembl_info['utr_5_start'] = int(value)
elif '3' in word:
ensembl_info['utr_3_start'] = int(value)
elif 'end' in word:
if '5' in word:
ensembl_info['utr_5_end'] = int(value)
elif '3' in word:
ensembl_info['utr_3_end'] = int(value)
if 'strand' in word:
ensembl_info['strand'] = int(value)
if 'refseq' in word:
if 'mrna' in word:
if 'predicted' in word:
ensembl_info['refseq_mrna_predicted'] = value
else:
ensembl_info['refseq_mrna'] = value
if 'ncrna' in word:
ensembl_info['refseq_ncrna'] = value
return ensembl_info | python | {
"resource": ""
} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.