_id stringlengths 2 7 | title stringlengths 1 88 | partition stringclasses 3
values | text stringlengths 75 19.8k | language stringclasses 1
value | meta_information dict |
|---|---|---|---|---|---|
def find_bai_file(bam_file):
    """Find the BAI index file by extension given the BAM file.

    Tries the "sibling" convention first (``sample.bam`` -> ``sample.bai``)
    and falls back to the appended-extension convention
    (``sample.bam`` -> ``sample.bam.bai``).

    Args:
        bam_file (str): path to a BAM file

    Returns:
        str: path to the BAI file; the fallback path is returned even when
            neither candidate exists on disk
    """
    # Only rewrite a trailing '.bam' so a '.bam' occurring elsewhere in the
    # path (e.g. '/runs/old.bam.dir/sample.bam') is left untouched --
    # str.replace() would rewrite every occurrence.
    if bam_file.endswith('.bam'):
        bai_file = bam_file[:-len('.bam')] + '.bai'
    else:
        bai_file = bam_file + '.bai'
    if not os.path.exists(bai_file):
        # try the other convention
        bai_file = "{}.bai".format(bam_file)
    return bai_file
"resource": ""
} |
def observations(store, loqusdb, case_obj, variant_obj):
    """Query observations for a variant."""
    # loqusdb keys variants on a composite id: chrom_pos_ref_alt
    composite_id = "{0}_{1}_{2}_{3}".format(
        variant_obj['chromosome'],
        variant_obj['position'],
        variant_obj['reference'],
        variant_obj['alternative'],
    )
    obs_data = loqusdb.get_variant({'_id': composite_id}) or {}
    obs_data['total'] = loqusdb.case_count()
    obs_data['cases'] = []
    institute_id = variant_obj['institute']
    for other_case_id in obs_data.get('families', []):
        # Skip the case the variant came from and cases from other institutes
        if other_case_id == variant_obj['case_id']:
            continue
        if not other_case_id.startswith(institute_id):
            continue
        obs_data['cases'].append(dict(
            case=store.case(other_case_id),
            variant=store.variant(variant_obj['variant_id'], case_id=other_case_id),
        ))
    return obs_data
"resource": ""
} |
def parse_gene(gene_obj, build=None):
    """Parse variant genes.

    Adds external gene links, parses every transcript in place and collects
    the transcripts carrying a RefSeq id as the gene's "primary" transcripts.

    Args:
        gene_obj (dict): gene annotation on a variant, mutated in place
        build (int): genome build, defaults to 37
    """
    build = build or 37
    if gene_obj.get('common'):
        add_gene_links(gene_obj, build)
        refseq_transcripts = []
        for tx_obj in gene_obj['transcripts']:
            parse_transcript(gene_obj, tx_obj, build)
            # select refseq transcripts as "primary"
            if tx_obj.get('refseq_id'):
                refseq_transcripts.append(tx_obj)
        # refseq_transcripts is already [] when nothing matched, so the
        # previous `x if x else []` conditional was redundant
        gene_obj['primary_transcripts'] = refseq_transcripts
"resource": ""
} |
def transcript_str(transcript_obj, gene_name=None):
    """Generate amino acid change as a string."""
    # Work out which part of the gene the variant hits
    exon = transcript_obj.get('exon')
    intron = transcript_obj.get('intron')
    if exon:
        gene_part, part_count_raw = 'exon', exon
    elif intron:
        gene_part, part_count_raw = 'intron', intron
    else:
        # variant between genes
        gene_part, part_count_raw = 'intergenic', '0'
    # Keep only the part number, dropping any '/total' suffix
    part_count = part_count_raw.rpartition('/')[0]
    # NOTE: gene part and its number are concatenated without a separator
    location = "{}{}".format(gene_part, part_count)
    change_str = ":".join([
        transcript_obj.get('refseq_id', ''),
        location,
        transcript_obj.get('coding_sequence_name', 'NA'),
        transcript_obj.get('protein_sequence_name', 'NA'),
    ])
    if gene_name:
        change_str = "{}:".format(gene_name) + change_str
    return change_str
"resource": ""
} |
def end_position(variant_obj):
    """Calculate end position for a variant."""
    # The longer of ref/alt determines how many bases the variant spans
    span = max(
        len(variant_obj['reference']),
        len(variant_obj['alternative']),
    )
    return variant_obj['position'] + span - 1
"resource": ""
} |
def frequency(variant_obj):
    """Returns a judgement on the overall frequency of the variant.
    Combines multiple metrics into a single call.
    """
    # Missing annotations count as frequency 0
    thousand_g = variant_obj.get('thousand_genomes_frequency') or 0
    exac = variant_obj.get('exac_frequency') or 0
    most_common = max(thousand_g, exac)
    if most_common > 0.05:
        return 'common'
    if most_common > 0.01:
        return 'uncommon'
    return 'rare'
"resource": ""
} |
def clinsig_human(variant_obj):
    """Convert to human readable version of CLINSIG evaluation."""
    new_link = "https://www.ncbi.nlm.nih.gov/clinvar/variation/{}"
    old_link = "https://www.ncbi.nlm.nih.gov/clinvar/{}"
    for clinsig_obj in variant_obj['clnsig']:
        # Every clinsig object carries an accession; an integer accession
        # means the new ClinVar format, anything else the old one
        is_new = isinstance(clinsig_obj['accession'], int)
        link = new_link if is_new else old_link
        human_str = 'not provided'
        value = clinsig_obj.get('value')
        if value:
            try:
                # Old version stores numeric significance codes
                int(value)
                human_str = CLINSIG_MAP.get(value, 'not provided')
            except ValueError:
                # New version stores the human readable term itself
                human_str = value
        clinsig_obj['human'] = human_str
        clinsig_obj['link'] = link.format(clinsig_obj['accession'])
        yield clinsig_obj
"resource": ""
} |
def thousandg_link(variant_obj, build=None):
    """Compose link to 1000G page for detailed information."""
    dbsnp_id = variant_obj.get('dbsnp_id')
    build = build or 37
    # Without a dbSNP id there is nothing to link to
    if not dbsnp_id:
        return None
    host = "http://grch37.ensembl.org" if build == 37 else "http://www.ensembl.org"
    return "{}/Homo_sapiens/Variation/Explore?v={};vdb=variation".format(host, dbsnp_id)
"resource": ""
} |
def cosmic_link(variant_obj):
    """Compose link to the COSMIC database.

    Args:
        variant_obj(scout.models.Variant)

    Returns:
        str or None: link to COSMIC if a cosmic id is present
    """
    cosmic_ids = variant_obj.get('cosmic_ids')
    if not cosmic_ids:
        return None
    # Link only to the first cosmic id
    return "https://cancer.sanger.ac.uk/cosmic/mutation/overview?id={}".format(cosmic_ids[0])
"resource": ""
} |
def beacon_link(variant_obj, build=None):
    """Compose link to Beacon Network."""
    build = build or 37
    # NOTE: the Beacon Network does not support build 38 at the moment,
    # so the link always claims GRCh37 regardless of `build`
    query = "pos={}&chrom={}&allele={}&ref={}&rs=GRCh37".format(
        variant_obj['position'],
        variant_obj['chromosome'],
        variant_obj['alternative'],
        variant_obj['reference'],
    )
    return "https://beacon-network.org/#/search?" + query
"resource": ""
} |
def ucsc_link(variant_obj, build=None):
    """Compose link to the UCSC genome browser.

    Args:
        variant_obj (dict): needs 'chromosome' and 'position'
        build (int): genome build, 37 (default) or 38

    Returns:
        str: URL pointing at the variant's position
    """
    build = build or 37
    # UCSC assembly names: GRCh37 -> hg19, GRCh38 -> hg38.
    # BUG FIX: the previous code used "hg20", which is not a UCSC db and
    # produced a broken link for build 38.
    db = 'hg38' if build == 38 else 'hg19'
    url_template = ("http://genome.ucsc.edu/cgi-bin/hgTracks?db={db}&"
                    "position=chr{this[chromosome]}:{this[position]}"
                    "-{this[position]}&dgv=pack&knownGene=pack&omimGene=pack")
    return url_template.format(db=db, this=variant_obj)
"resource": ""
} |
def spidex_human(variant_obj):
    """Translate SPIDEX annotation to human readable string."""
    spidex = variant_obj.get('spidex')
    if spidex is None:
        return 'not_reported'
    score = abs(spidex)
    # Thresholds are the upper bounds of the positive score ranges
    if score < SPIDEX_HUMAN['low']['pos'][1]:
        return 'low'
    if score < SPIDEX_HUMAN['medium']['pos'][1]:
        return 'medium'
    return 'high'
"resource": ""
} |
def expected_inheritance(variant_obj):
    """Gather information from common gene information."""
    # Union of all manually set inheritance models over the variant's genes
    manual_models = set()
    for gene_obj in variant_obj.get('genes', []):
        for model in gene_obj.get('manual_inheritance', []):
            manual_models.add(model)
    return list(manual_models)
"resource": ""
} |
def callers(variant_obj, category='snv'):
    """Return info about callers."""
    # Deduplicate (name, call) pairs via a set comprehension
    calls = {
        (caller['name'], variant_obj[caller['id']])
        for caller in CALLERS[category]
        if variant_obj.get(caller['id'])
    }
    return list(calls)
"resource": ""
} |
def cancer_variants(store, request_args, institute_id, case_name):
    """Fetch data related to cancer variants for a case."""
    institute_obj, case_obj = institute_and_case(store, institute_id, case_name)
    form = CancerFiltersForm(request_args)
    variants_query = store.variants(
        case_obj['_id'], category='cancer', query=form.data
    ).limit(50)
    # Generator so variants are only parsed when actually rendered
    parsed_variants = (
        parse_variant(store, institute_obj, case_obj, variant_obj, update=True)
        for variant_obj in variants_query
    )
    return dict(
        institute=institute_obj,
        case=case_obj,
        variants=parsed_variants,
        form=form,
        variant_type=request_args.get('variant_type', 'clinical'),
    )
"resource": ""
} |
def clinvar_export(store, institute_id, case_name, variant_id):
    """Gather the required data for creating the clinvar submission form

    Args:
        store(scout.adapter.MongoAdapter)
        institute_id(str): Institute ID
        case_name(str): case ID
        variant_id(str): variant._id

    Returns:
        dict: all the required data (case and variant level) to pre-fill
            fields in the clinvar submission form
    """
    institute_obj, case_obj = institute_and_case(store, institute_id, case_name)
    # Pinned/suspect variants; fall back to the raw id when not in the db
    pinned = []
    for suspect_id in case_obj.get('suspects', []):
        pinned.append(store.variant(suspect_id) or suspect_id)
    return dict(
        today=str(date.today()),
        institute=institute_obj,
        case=case_obj,
        variant=store.variant(variant_id),
        pinned_vars=pinned,
    )
"resource": ""
} |
def get_clinvar_submission(store, institute_id, case_name, variant_id, submission_id):
    """Collects all variants from the clinvar submission collection with a specific submission_id

    Args:
        store(scout.adapter.MongoAdapter)
        institute_id(str): Institute ID
        case_name(str): case ID
        variant_id(str): variant._id
        submission_id(str): clinvar submission id, i.e. SUB76578

    Returns:
        dict: all the data to display the clinvar_update.html template page
    """
    institute_obj, case_obj = institute_and_case(store, institute_id, case_name)
    # Pinned/suspect variants; fall back to the raw id when not in the db
    pinned = []
    for suspect_id in case_obj.get('suspects', []):
        pinned.append(store.variant(suspect_id) or suspect_id)
    return dict(
        today=str(date.today()),
        institute=institute_obj,
        case=case_obj,
        variant=store.variant(variant_id),
        pinned_vars=pinned,
        clinvars=store.clinvars(submission_id=submission_id),
    )
"resource": ""
} |
def variant_acmg(store, institute_id, case_name, variant_id):
    """Collect data relevant for rendering ACMG classification form."""
    institute_obj, case_obj = institute_and_case(store, institute_id, case_name)
    return dict(
        institute=institute_obj,
        case=case_obj,
        variant=store.variant(variant_id),
        CRITERIA=ACMG_CRITERIA,
        ACMG_OPTIONS=ACMG_OPTIONS,
    )
"resource": ""
} |
def variant_acmg_post(store, institute_id, case_name, variant_id, user_email, criteria):
    """Calculate an ACMG classification based on a list of criteria."""
    institute_obj, case_obj = institute_and_case(store, institute_id, case_name)
    variant_obj = store.variant(variant_id)
    user_obj = store.user(user_email)
    # Deep link back to the variant page, stored with the evaluation
    variant_link = url_for(
        'variants.variant',
        institute_id=institute_id,
        case_name=case_name,
        variant_id=variant_id,
    )
    # Persist the evaluation and hand back the resulting classification
    return store.submit_evaluation(
        institute_obj=institute_obj,
        case_obj=case_obj,
        variant_obj=variant_obj,
        user_obj=user_obj,
        link=variant_link,
        criteria=criteria,
    )
"resource": ""
} |
def evaluation(store, evaluation_obj):
    """Fetch and fill-in evaluation object."""
    evaluation_obj['institute'] = store.institute(evaluation_obj['institute_id'])
    evaluation_obj['case'] = store.case(evaluation_obj['case_id'])
    evaluation_obj['variant'] = store.variant(evaluation_obj['variant_specific'])
    # Re-index criteria on their term for easy lookup in templates
    criteria_by_term = {}
    for criterion in evaluation_obj['criteria']:
        criteria_by_term[criterion['term']] = criterion
    evaluation_obj['criteria'] = criteria_by_term
    evaluation_obj['classification'] = ACMG_COMPLETE_MAP[evaluation_obj['classification']]
    return evaluation_obj
"resource": ""
} |
def upload_panel(store, institute_id, case_name, stream):
    """Parse out HGNC symbols from a stream."""
    institute_obj, case_obj = institute_and_case(store, institute_id, case_name)
    raw_symbols = []
    for line in stream:
        # Skip empty lines and comment lines
        if not line or line.startswith('#'):
            continue
        # The first tab separated column holds the symbol
        raw_symbols.append(line.strip().split('\t')[0])
    # check if supplied gene symbols exist
    hgnc_symbols = []
    for raw_symbol in raw_symbols:
        if store.hgnc_genes(raw_symbol).count() == 0:
            flash("HGNC symbol not found: {}".format(raw_symbol), 'warning')
        else:
            hgnc_symbols.append(raw_symbol)
    return hgnc_symbols
"resource": ""
} |
def verified_excel_file(store, institute_list, temp_excel_dir):
    """Collect all verified variants in a list on institutes and save them to file
    Args:
        store(adapter.MongoAdapter)
        institute_list(list): a list of institute ids
        temp_excel_dir(os.Path): folder where the temp excel files are written to
    Returns:
        written_files(int): the number of files written to temp_excel_dir
    """
    # NOTE(review): document_lines is never used below -- candidate for removal
    document_lines = []
    written_files = 0
    today = datetime.datetime.now().strftime('%Y-%m-%d')
    LOG.info('Creating verified variant document..')
    # One workbook per institute ("customer")
    for cust in institute_list:
        verif_vars = store.verified(institute_id=cust)
        LOG.info('Found {} verified variants for customer {}'.format(len(verif_vars), cust))
        if not verif_vars:
            # Nothing verified for this institute -> no file
            continue
        # Collect the ids of every caller over all variant categories;
        # these become extra columns after the fixed header
        unique_callers = set()
        for var_type, var_callers in CALLERS.items():
            for caller in var_callers:
                unique_callers.add(caller.get('id'))
        cust_verified = export_verified_variants(verif_vars, unique_callers)
        document_name = '.'.join([cust, '_verified_variants', today]) + '.xlsx'
        workbook = Workbook(os.path.join(temp_excel_dir,document_name))
        Report_Sheet = workbook.add_worksheet()
        # Write the column header
        row = 0
        for col,field in enumerate(VERIFIED_VARIANTS_HEADER + list(unique_callers)):
            Report_Sheet.write(row,col,field)
        # Write variant lines, after header (start at line 1)
        for row, line in enumerate(cust_verified,1): # each line becomes a row in the document
            for col, field in enumerate(line): # each field in line becomes a cell
                Report_Sheet.write(row,col,field)
        workbook.close()
        # Count the file only if it actually landed on disk
        if os.path.exists(os.path.join(temp_excel_dir,document_name)):
            written_files += 1
    return written_files
"resource": ""
} |
def export_genes(adapter, build='37'):
    """Export all genes from the database"""
    LOG.info("Exporting all genes to .bed format")
    # Delegate straight to the adapter's gene iterator
    yield from adapter.all_genes(build=build)
"resource": ""
} |
def parse_clnsig(acc, sig, revstat, transcripts):
    """Get the clnsig information

    Args:
        acc(str): The clnsig accession number, raw from vcf
        sig(str): The clnsig significance score, raw from vcf
        revstat(str): The clnsig revstat, raw from vcf
        transcripts(iterable(dict))

    Returns:
        list: clnsig accession entries
    """
    if acc:
        # New format of clinvar always has integers as accession numbers
        try:
            acc = int(acc)
        except ValueError:
            pass
        if isinstance(acc, int):
            # --- new format ---
            revstat_groups = []
            if revstat:
                revstat_groups = [group.lstrip('_') for group in revstat.split(',')]
            revstat_str = ', '.join(revstat_groups)
            entries = []
            if sig:
                for significance in sig.split('/'):
                    # Keep at most the first two words of the term
                    term = ' '.join(significance.split('_')[:2])
                    entries.append({
                        'value': term,
                        'accession': acc,
                        'revstat': revstat_str,
                    })
            return entries
        # --- old format ---
        # There are sometimes different separators so we need to check
        # which one to use: '|' between groups, ',' within a group
        entries = []
        grouped = zip(acc.split('|'), sig.split('|'), revstat.split('|'))
        for acc_group, sig_group, revstat_group in grouped:
            triples = zip(acc_group.split(','),
                          sig_group.split(','),
                          revstat_group.split(','))
            for accession, significance, rev in triples:
                entries.append({
                    'value': int(significance),
                    'accession': accession,
                    'revstat': rev,
                })
        return entries
    if transcripts:
        # No top level annotation: fall back to transcript level clinsig
        annotations = set()
        for transcript in transcripts:
            annotations.update(transcript.get('clinsig', []))
        return [{'value': annotation} for annotation in annotations]
    return []
"resource": ""
} |
def parse_compounds(compound_info, case_id, variant_type):
    """Get a list with compounds objects for this variant.

    Arguments:
        compound_info(str): raw compound VCF entry on the form
            "<family_id>:<name>><score>|<name>><score>,..."
        case_id (str): unique family id
        variant_type(str): 'research' or 'clinical'

    Returns:
        compounds(list(dict)): A list of compounds
    """
    # We need the case to construct the correct id
    compounds = []
    if not compound_info:
        return compounds
    for family_info in compound_info.split(','):
        splitted_entry = family_info.split(':')
        # Only keep compounds that belong to this family
        if splitted_entry[0] != case_id:
            continue
        for compound in splitted_entry[1].split('|'):
            splitted_compound = compound.split('>')
            compound_name = splitted_compound[0]
            try:
                compound_score = float(splitted_compound[1])
            except (TypeError, IndexError, ValueError):
                # Missing or malformed score defaults to 0.0.
                # BUG FIX: ValueError (non-numeric score string) was not
                # caught before and crashed the parser.
                compound_score = 0.0
            compounds.append({
                # md5 key ties the compound to the matching variant document
                'variant': generate_md5_key(compound_name.split('_') +
                                            [variant_type, case_id]),
                'score': compound_score,
                'display_name': compound_name,
            })
    return compounds
"resource": ""
} |
def genes(context, build, json):
    """Export all genes from a build"""
    LOG.info("Running scout export genes")
    adapter = context.obj['adapter']
    result = adapter.all_genes(build=build)
    if json:
        # JSON output: dump everything in one go and stop
        click.echo(dumps(result))
        return
    click.echo("#Chromosom\tStart\tEnd\tHgnc_id\tHgnc_symbol")
    for gene_obj in result:
        # One tab separated line per gene
        fields = (gene_obj['chromosome'], gene_obj['start'], gene_obj['end'],
                  gene_obj['hgnc_id'], gene_obj['hgnc_symbol'])
        click.echo("{0}\t{1}\t{2}\t{3}\t{4}".format(*fields))
"resource": ""
} |
def build_individual(ind):
    """Build a Individual object

    Args:
        ind (dict): A dictionary with individual information

    Returns:
        ind_obj (dict): A Individual object
        dict(
            individual_id = str, # required
            display_name = str,
            sex = str,
            phenotype = int,
            father = str, # Individual id of father
            mother = str, # Individual id of mother
            capture_kits = list, # List of names of capture kits
            bam_file = str, # Path to bam file
            vcf2cytosure = str, # Path to CGH file
            analysis_type = str, # choices=ANALYSIS_TYPES
        )

    Raises:
        PedigreeError: on missing individual_id, unknown sex or phenotype,
            or a disallowed analysis type
    """
    try:
        ind_obj = dict(
            individual_id=ind['individual_id']
        )
        log.info("Building Individual with id:{0}".format(ind['individual_id']))
    except KeyError as err:
        raise PedigreeError("Individual is missing individual_id")
    ind_obj['display_name'] = ind.get('display_name', ind_obj['individual_id'])
    sex = ind.get('sex', 'unknown')
    # Convert sex to .ped
    try:
        # Check if sex is coded as an integer
        int(sex)
        ind_obj['sex'] = str(sex)
    except ValueError as err:
        try:
            # Sex are numbers in the database
            ind_obj['sex'] = REV_SEX_MAP[sex]
        except KeyError as err:
            raise PedigreeError("Unknown sex: %s" % sex)
    phenotype = ind.get('phenotype', 'unknown')
    # Make the phenotype integers
    try:
        ped_phenotype = REV_PHENOTYPE_MAP[phenotype]
        if ped_phenotype == -9:
            # ped encodes "missing" as -9; scout stores that as 0
            ped_phenotype = 0
        ind_obj['phenotype'] = ped_phenotype
    except KeyError as err:
        raise PedigreeError("Unknown phenotype: %s" % phenotype)
    ind_obj['father'] = ind.get('father')
    ind_obj['mother'] = ind.get('mother')
    ind_obj['capture_kits'] = ind.get('capture_kits', [])
    ind_obj['bam_file'] = ind.get('bam_file')
    ind_obj['mt_bam'] = ind.get('mt_bam')
    ind_obj['vcf2cytosure'] = ind.get('vcf2cytosure')
    ind_obj['confirmed_sex'] = ind.get('confirmed_sex')
    ind_obj['confirmed_parent'] = ind.get('confirmed_parent')
    ind_obj['predicted_ancestry'] = ind.get('predicted_ancestry')
    # Check if the analysis type is ok
    # Can be anyone of ('wgs', 'wes', 'mixed', 'unknown')
    analysis_type = ind.get('analysis_type', 'unknown')
    if analysis_type not in ANALYSIS_TYPES:
        # BUG FIX: the message previously passed logging-style arguments
        # ("...%s...", value) to the exception, so the analysis type was
        # never interpolated into the message
        raise PedigreeError("Analysis type %s not allowed" % analysis_type)
    ind_obj['analysis_type'] = analysis_type
    # Optional cancer specific metrics, only set when present
    for key in ('tmb', 'msi', 'tumor_purity', 'tumor_type'):
        if key in ind:
            ind_obj[key] = ind[key]
    return ind_obj
"resource": ""
} |
def variants(context, case_id, institute, force, cancer, cancer_research, sv,
             sv_research, snv, snv_research, str_clinical, chrom, start, end, hgnc_id,
             hgnc_symbol, rank_treshold):
    """Upload variants to a case
    Note that the files has to be linked with the case,
    if they are not use 'scout update case'.
    """
    LOG.info("Running scout load variants")
    adapter = context.obj['adapter']
    # Case ids are on the form "<institute>-<family>"; derive whichever
    # half was not given explicitly
    if institute:
        case_id = "{0}-{1}".format(institute, case_id)
    else:
        institute = case_id.split('-')[0]
    case_obj = adapter.case(case_id=case_id)
    if case_obj is None:
        LOG.info("No matching case found")
        context.abort()
    # One entry per uploadable file type; 'upload' is the CLI flag that
    # decides whether this category/type combination should be loaded
    files = [
        {'category': 'cancer', 'variant_type': 'clinical', 'upload': cancer},
        {'category': 'cancer', 'variant_type': 'research', 'upload': cancer_research},
        {'category': 'sv', 'variant_type': 'clinical', 'upload': sv},
        {'category': 'sv', 'variant_type': 'research', 'upload': sv_research},
        {'category': 'snv', 'variant_type': 'clinical', 'upload': snv},
        {'category': 'snv', 'variant_type': 'research', 'upload': snv_research},
        {'category': 'str', 'variant_type': 'clinical', 'upload': str_clinical},
    ]
    # Optional gene filter: resolve hgnc id and/or symbol to a gene object
    gene_obj = None
    if (hgnc_id or hgnc_symbol):
        if hgnc_id:
            gene_obj = adapter.hgnc_gene(hgnc_id)
        if hgnc_symbol:
            # A symbol may alias several genes; the last match wins
            for res in adapter.gene_by_alias(hgnc_symbol):
                gene_obj = res
        if not gene_obj:
            LOG.warning("The gene could not be found")
            context.abort()
    # Counts how many file types were actually requested
    i = 0
    for file_type in files:
        variant_type = file_type['variant_type']
        category = file_type['category']
        if file_type['upload']:
            i += 1
            # Research variants need an explicit request (or --force)
            if variant_type == 'research':
                if not (force or case_obj['research_requested']):
                    LOG.warn("research not requested, use '--force'")
                    context.abort()
            # Replace existing variants of this type/category for the case
            LOG.info("Delete {0} {1} variants for case {2}".format(
                variant_type, category, case_id))
            adapter.delete_variants(case_id=case_obj['_id'],
                                    variant_type=variant_type,
                                    category=category)
            LOG.info("Load {0} {1} variants for case {2}".format(
                variant_type, category, case_id))
            try:
                adapter.load_variants(
                    case_obj=case_obj,
                    variant_type=variant_type,
                    category=category,
                    rank_threshold=rank_treshold,
                    chrom=chrom,
                    start=start,
                    end=end,
                    gene_obj=gene_obj
                )
            except Exception as e:
                # Any load failure aborts the whole CLI command
                LOG.warning(e)
                context.abort()
    if i == 0:
        LOG.info("No files where specified to upload variants from")
"resource": ""
} |
def case(institute_id, case_name):
    """Return a case as a JSON response (404 when not found)."""
    institute_obj, case_obj = institute_and_case(store, institute_id, case_name)
    if case_obj is None:
        return abort(404)
    # bson-aware encoder handles ObjectId, datetime etc.
    return Response(json_util.dumps(case_obj), mimetype='application/json')
"resource": ""
} |
def collections(context):
    """Show all collections in the database"""
    LOG.info("Running scout view collections")
    adapter = context.obj['adapter']
    # One collection name per output line
    for name in adapter.collections():
        click.echo(name)
"resource": ""
} |
def institute(ctx, internal_id, display_name, sanger_recipients):
    """
    Create a new institute and add it to the database
    """
    adapter = ctx.obj['adapter']
    # The internal id is mandatory
    if not internal_id:
        logger.warning("A institute has to have an internal id")
        ctx.abort()
    # Fall back to the internal id as display name
    display_name = display_name or internal_id
    if sanger_recipients:
        sanger_recipients = list(sanger_recipients)
    try:
        load_institute(
            adapter=adapter,
            internal_id=internal_id,
            display_name=display_name,
            sanger_recipients=sanger_recipients,
        )
    except Exception as e:
        logger.warning(e)
        ctx.abort()
"resource": ""
} |
def institute(context, institute_id, sanger_recipient, coverage_cutoff, frequency_cutoff,
              display_name, remove_sanger):
    """
    Update an institute
    """
    adapter = context.obj['adapter']
    LOG.info("Running scout update institute")
    # Forward everything to the adapter; any failure aborts the command
    try:
        adapter.update_institute(
            internal_id=institute_id,
            sanger_recipient=sanger_recipient,
            coverage_cutoff=coverage_cutoff,
            frequency_cutoff=frequency_cutoff,
            display_name=display_name,
            remove_sanger=remove_sanger,
        )
    except Exception as err:
        LOG.warning(err)
        context.abort()
"resource": ""
} |
def get_file_handle(file_path):
    """Return a opened file"""
    # gzipped files are opened in binary mode and wrapped in a utf-8
    # decoder that replaces undecodable bytes
    if file_path.endswith('.gz'):
        return getreader('utf-8')(gzip.open(file_path, 'r'), errors='replace')
    return open(file_path, 'r', encoding='utf-8')
"resource": ""
} |
def get_net(req):
    """Get the net of any 'next' and 'prev' querystrings."""
    try:
        nxt = int(req.GET.get('cal_next', 0))
        prev = int(req.GET.get('cal_prev', 0))
        net = nxt - prev
    except Exception:
        # Malformed query strings simply mean "no offset"
        net = 0
    return net
"resource": ""
} |
def get_next_and_prev(net):
    """Returns what the next and prev querystrings should be."""
    if net == 0:
        return 1, 1
    if net > 0:
        return net + 1, -(net - 1)
    # net < 0
    return net + 1, abs(net) + 1
"resource": ""
} |
def _check_year(year, month, error, error_msg):
    """Checks that the year is within 50 years from now."""
    # NOTE(review): `now` is a module-level datetime defined elsewhere, and
    # `xrange` implies this function targets Python 2 -- confirm before
    # porting the module to Python 3 (where this raises NameError).
    if year not in xrange((now.year - 50), (now.year + 51)):
        # Out-of-range years fall back to the current year/month and the
        # supplied error message is passed back to the caller
        year = now.year
        month = now.month
        error = error_msg
    return year, month, error
"resource": ""
} |
def check_weekday(year, month, day, reverse=False):
    """
    Make sure any event day we send back for weekday repeating
    events is not a weekend.
    """
    # Walk backwards when `reverse` is set, otherwise forwards
    step = timedelta(days=-1 if reverse else 1)
    d = date(year, month, day)
    # weekday() 5 and 6 are Saturday and Sunday
    while d.weekday() > 4:
        d += step
    return d.year, d.month, d.day
"resource": ""
} |
def parse_case_data(config=None, ped=None, owner=None, vcf_snv=None,
                    vcf_sv=None, vcf_cancer=None, vcf_str=None, peddy_ped=None,
                    peddy_sex=None, peddy_check=None, delivery_report=None, multiqc=None):
    """Parse all data necessary for loading a case into scout
    This can be done either by providing a VCF file and other information
    on the command line. Or all the information can be specified in a config file.
    Please see Scout documentation for further instructions.
    Args:
        config(dict): A yaml formatted config file
        ped(iterable(str)): A ped formatted family file
        owner(str): The institute that owns a case
        vcf_snv(str): Path to a vcf file
        vcf_str(str): Path to a VCF file
        vcf_sv(str): Path to a vcf file
        vcf_cancer(str): Path to a vcf file
        peddy_ped(str): Path to a peddy ped
        multiqc(str): Path to dir with multiqc information
    Returns:
        config_data(dict): Holds all the necessary information for loading
            Scout
    Raises:
        SyntaxError: if no case owner could be determined
    """
    # Work on a deep copy so the caller's config dict is never mutated
    config_data = copy.deepcopy(config) or {}
    # Default the analysis date to now if not specified in load config
    if 'analysis_date' not in config_data:
        config_data['analysis_date'] = datetime.datetime.now()
    # If the family information is in a ped file we need to parse that
    if ped:
        family_id, samples = parse_ped(ped)
        config_data['family'] = family_id
        config_data['samples'] = samples
    # Each case has to have a owner. If not provided in config file it needs
    # to be given as a argument
    if 'owner' not in config_data:
        if not owner:
            raise SyntaxError("Case has no owner")
        else:
            config_data['owner'] = owner
    if 'gene_panels' in config_data:
        # handle whitespace in gene panel names
        config_data['gene_panels'] = [panel.strip() for panel in
                                      config_data['gene_panels']]
        config_data['default_gene_panels'] = [panel.strip() for panel in
                                              config_data['default_gene_panels']]
    ##################### Add information from peddy if existing #####################
    # Command line arguments take precedence over config values
    config_data['peddy_ped'] = peddy_ped or config_data.get('peddy_ped')
    config_data['peddy_sex_check'] = peddy_sex or config_data.get('peddy_sex')
    config_data['peddy_ped_check'] = peddy_check or config_data.get('peddy_check')
    # This will add information from peddy to the individuals
    add_peddy_information(config_data)
    ##################### Add multiqc information #####################
    config_data['multiqc'] = multiqc or config_data.get('multiqc')
    config_data['vcf_snv'] = vcf_snv if vcf_snv else config_data.get('vcf_snv')
    config_data['vcf_sv'] = vcf_sv if vcf_sv else config_data.get('vcf_sv')
    config_data['vcf_str'] = vcf_str if vcf_str else config_data.get('vcf_str')
    log.debug("Config vcf_str set to {0}".format(config_data['vcf_str']))
    config_data['vcf_cancer'] = vcf_cancer if vcf_cancer else config_data.get('vcf_cancer')
    config_data['delivery_report'] = delivery_report if delivery_report else config_data.get('delivery_report')
    config_data['rank_model_version'] = config_data.get('rank_model_version')
    config_data['rank_score_threshold'] = config_data.get('rank_score_threshold', 0)
    # Default to the 'rare' disease track; a cancer VCF switches to 'cancer'
    config_data['track'] = config_data.get('track', 'rare')
    if config_data['vcf_cancer']:
        config_data['track'] = 'cancer'
    return config_data
"resource": ""
} |
def add_peddy_information(config_data):
    """Add information from peddy outfiles to the individuals

    Reads the optional peddy ped / ped-check / sex-check files referenced in
    ``config_data`` and annotates the samples in ``config_data['samples']``
    with predicted ancestry, confirmed sex and confirmed parenthood.
    Mutates ``config_data`` in place.

    Args:
        config_data(dict): case config with 'samples' and optional
            'peddy_ped', 'peddy_ped_check', 'peddy_sex_check' paths
    """
    ped_info = {}
    ped_check = {}
    sex_check = {}
    # BUG FIX: the file handles were previously opened without ever being
    # closed; 'with' guarantees they are released.
    if config_data.get('peddy_ped'):
        with open(config_data['peddy_ped'], 'r') as file_handle:
            for ind_info in parse_peddy_ped(file_handle):
                ped_info[ind_info['sample_id']] = ind_info
    if config_data.get('peddy_ped_check'):
        with open(config_data['peddy_ped_check'], 'r') as file_handle:
            for pair_info in parse_peddy_ped_check(file_handle):
                ped_check[(pair_info['sample_a'], pair_info['sample_b'])] = pair_info
    if config_data.get('peddy_sex_check'):
        with open(config_data['peddy_sex_check'], 'r') as file_handle:
            for ind_info in parse_peddy_sex_check(file_handle):
                sex_check[ind_info['sample_id']] = ind_info
    if not ped_info:
        # Nothing to annotate with
        return
    # Index the analysis individuals on their sample id
    analysis_inds = {}
    for ind in config_data['samples']:
        analysis_inds[ind['sample_id']] = ind
    for ind_id, ind in analysis_inds.items():
        # Check if peddy has inferred the ancestry
        if ind_id in ped_info:
            ind['predicted_ancestry'] = ped_info[ind_id].get('ancestry-prediction', 'UNKNOWN')
        # Check if peddy has inferred the sex
        if ind_id in sex_check:
            ind['confirmed_sex'] = not sex_check[ind_id]['error']
        # Check if peddy has confirmed parental relations
        for parent in ['mother', 'father']:
            if ind[parent] == '0':
                # Individual has no such parent in the pedigree
                continue
            # Look for the child/parent pair in the peddy ped-check data
            for pair in ped_check:
                if not (ind_id in pair and ind[parent] in pair):
                    continue
                if ped_check[pair]['parent_error']:
                    # A parent error always marks the relation unconfirmed
                    analysis_inds[ind[parent]]['confirmed_parent'] = False
                elif 'confirmed_parent' not in analysis_inds[ind[parent]]:
                    # Only confirm if no earlier decision was made
                    analysis_inds[ind[parent]]['confirmed_parent'] = True
"resource": ""
} |
def parse_individual(sample):
    """Parse individual information
    Args:
        sample (dict)
    Returns:
        {
            'individual_id': str,
            'father': str,
            'mother': str,
            'display_name': str,
            'sex': str,
            'phenotype': str,
            'bam_file': str,
            'vcf2cytosure': str,
            'analysis_type': str,
            'capture_kits': list(str),
        }
    Raises:
        PedigreeError: if 'sample_id', 'sex' or 'phenotype' is missing or
            carries a value outside the allowed maps
    """
    ind_info = {}
    if 'sample_id' not in sample:
        raise PedigreeError("One sample is missing 'sample_id'")
    sample_id = sample['sample_id']
    # Check the sex
    if 'sex' not in sample:
        raise PedigreeError("Sample %s is missing 'sex'" % sample_id)
    sex = sample['sex']
    if sex not in REV_SEX_MAP:
        log.warning("'sex' is only allowed to have values from {}"
                    .format(', '.join(list(REV_SEX_MAP.keys()))))
        raise PedigreeError("Individual %s has wrong formated sex" % sample_id)
    # Check the phenotype
    if 'phenotype' not in sample:
        raise PedigreeError("Sample %s is missing 'phenotype'"
                            % sample_id)
    phenotype = sample['phenotype']
    if phenotype not in REV_PHENOTYPE_MAP:
        log.warning("'phenotype' is only allowed to have values from {}"
                    .format(', '.join(list(REV_PHENOTYPE_MAP.keys()))))
        raise PedigreeError("Individual %s has wrong formated phenotype" % sample_id)
    ind_info['individual_id'] = sample_id
    # Fall back to the sample id when no display name is given
    ind_info['display_name'] = sample.get('sample_name', sample['sample_id'])
    ind_info['sex'] = sex
    ind_info['phenotype'] = phenotype
    ind_info['father'] = sample.get('father')
    ind_info['mother'] = sample.get('mother')
    ind_info['confirmed_parent'] = sample.get('confirmed_parent')
    ind_info['confirmed_sex'] = sample.get('confirmed_sex')
    ind_info['predicted_ancestry'] = sample.get('predicted_ancestry')
    # Optional alignment files are only added when present
    bam_file = sample.get('bam_path')
    if bam_file:
        ind_info['bam_file'] = bam_file
    mt_bam = sample.get('mt_bam')
    if mt_bam:
        ind_info['mt_bam'] = mt_bam
    analysis_type = sample.get('analysis_type')
    if analysis_type:
        ind_info['analysis_type'] = analysis_type
    # A single configured capture kit becomes a one-element list
    ind_info['capture_kits'] = ([sample.get('capture_kit')]
                                if 'capture_kit' in sample else [])
    # Path to downloadable vcf2cytosure file
    vcf2cytosure = sample.get('vcf2cytosure')
    if vcf2cytosure:
        ind_info['vcf2cytosure'] = vcf2cytosure
    # Cancer specific values
    tumor_type = sample.get('tumor_type')
    if tumor_type:
        ind_info['tumor_type'] = tumor_type
    tumor_mutational_burden = sample.get('tmb')
    if tumor_mutational_burden:
        ind_info['tmb'] = tumor_mutational_burden
    msi = sample.get('msi')
    if msi:
        ind_info['msi'] = msi
    tumor_purity = sample.get('tumor_purity')
    if tumor_purity:
        ind_info['tumor_purity'] = tumor_purity
    return ind_info
"resource": ""
} |
def parse_individuals(samples):
    """Parse and validate a list of raw sample dictionaries.

    Each sample is reformatted into an individual and parental links are
    checked against the ids present in the family.

    Args:
        samples(list(dict))

    Returns:
        individuals(list(dict))

    Raises:
        PedigreeError: if no samples are given or a parent id is unknown
    """
    if not samples:
        raise PedigreeError("No samples could be found")

    individuals = [parse_individual(sample_info) for sample_info in samples]
    ind_ids = {ind['individual_id'] for ind in individuals}

    # Every referenced parent ('0' means unknown) must exist in the family
    for parsed_ind in individuals:
        for parent_key in ('father', 'mother'):
            parent_id = parsed_ind[parent_key]
            if parent_id and parent_id != '0' and parent_id not in ind_ids:
                raise PedigreeError('%s %s does not exist in family'
                                    % (parent_key, parent_id))
    return individuals
"resource": ""
} |
def parse_case(config):
    """Parse case information from config or PED files.

    Args:
        config (dict): case config with detailed information

    Returns:
        dict: parsed case data

    Raises:
        ConfigError: if 'owner' or 'family' is missing
        ValueError: if the madeline file path does not exist
    """
    if 'owner' not in config:
        raise ConfigError("A case has to have a owner")
    if 'family' not in config:
        raise ConfigError("A case has to have a 'family'")

    # Validate and reformat the samples before anything else
    individuals = parse_individuals(config['samples'])

    vcf_keys = ('vcf_snv', 'vcf_sv', 'vcf_str', 'vcf_cancer',
                'vcf_snv_research', 'vcf_sv_research', 'vcf_cancer_research')
    case_data = {
        'owner': config['owner'],
        'collaborators': [config['owner']],
        'case_id': config['family'],
        'display_name': config.get('family_name', config['family']),
        'genome_build': config.get('human_genome_build'),
        'rank_model_version': config.get('rank_model_version'),
        'rank_score_threshold': config.get('rank_score_threshold', 0),
        'analysis_date': config['analysis_date'],
        'individuals': individuals,
        'vcf_files': {key: config.get(key) for key in vcf_keys},
        'default_panels': config.get('default_gene_panels', []),
        'gene_panels': config.get('gene_panels', []),
        'assignee': config.get('assignee'),
        'peddy_ped': config.get('peddy_ped'),
        'peddy_sex': config.get('peddy_sex'),
        'peddy_check': config.get('peddy_check'),
        'delivery_report': config.get('delivery_report'),
        'multiqc': config.get('multiqc'),
        'track': config.get('track', 'rare'),
    }

    # The madeline pedigree figure is an xml document dumped straight into the db
    if 'madeline' in config:
        mad_path = Path(config['madeline'])
        if not mad_path.exists():
            raise ValueError("madeline path not found: {}".format(mad_path))
        with mad_path.open('r') as in_handle:
            case_data['madeline_info'] = in_handle.read()

    # Any cancer VCF forces the case onto the cancer track
    vcf_files = case_data['vcf_files']
    if vcf_files['vcf_cancer'] or vcf_files['vcf_cancer_research']:
        case_data['track'] = 'cancer'

    return case_data
"resource": ""
} |
def parse_ped(ped_stream, family_type='ped'):
    """Parse out minimal family information from a PED file.

    Args:
        ped_stream(iterable(str))
        family_type(str): Format of the pedigree information

    Returns:
        tuple: family_id(str), samples(list[dict])

    Raises:
        PedigreeError: if the file holds more than one family
    """
    pedigree = FamilyParser(ped_stream, family_type=family_type)
    if len(pedigree.families) != 1:
        raise PedigreeError("Only one case per ped file is allowed")

    family_id, family = next(iter(pedigree.families.items()))

    samples = []
    for ind_id, individual in family.individuals.items():
        samples.append({
            'sample_id': ind_id,
            'father': individual.father,
            'mother': individual.mother,
            # Convert coded sex/phenotype to human readable values
            'sex': SEX_MAP[individual.sex],
            'phenotype': PHENOTYPE_MAP[int(individual.phenotype)],
        })
    return family_id, samples
"resource": ""
} |
def build_evaluation(variant_specific, variant_id, user_id, user_name,
                     institute_id, case_id, classification, criteria):
    """Build an evaluation object ready to be inserted into the database.

    Args:
        variant_specific(str): md5 string for the specific variant
        variant_id(str): md5 string for the common variant
        user_id(str)
        user_name(str)
        institute_id(str)
        case_id(str)
        classification(str): The ACMG classification
        criteria(list(dict)): ACMG criteria dicts with 'term' and optionally
            'comment' and 'links'

    Returns:
        evaluation_obj(dict): Correctly formatted evaluation object
    """
    evaluation_obj = {
        'variant_specific': variant_specific,
        'variant_id': variant_id,
        'institute_id': institute_id,
        'case_id': case_id,
        'classification': classification,
        'user_id': user_id,
        'user_name': user_name,
        'created_at': datetime.datetime.now(),
    }

    criteria_objs = []
    for info in criteria or []:
        # 'term' always has to exist; 'comment' and 'links' are optional
        criterion = {'term': info['term']}
        for optional_key in ('comment', 'links'):
            if optional_key in info:
                criterion[optional_key] = info[optional_key]
        criteria_objs.append(criterion)

    evaluation_obj['criteria'] = criteria_objs
    return evaluation_obj
"resource": ""
} |
def mt_report(context, case_id, test, outpath=None):
    """Export all mitochondrial variants for each sample of a case
    and write them to an excel file.

    Args:
        context: click context holding the adapter in context.obj
        case_id(str)
        test(bool): True if the function is called for testing purposes
        outpath(str): path to output file

    Returns:
        written_files(int): number of written or simulated files
    """
    LOG.info('exporting mitochondrial variants for case "{}"'.format(case_id))
    adapter = context.obj['adapter']

    case_obj = adapter.case(case_id=case_id)
    if not case_obj:
        LOG.warning('Could not find a scout case with id "{}". No report was created.'.format(case_id))
        context.abort()

    mt_variants = list(adapter.variants(case_id=case_id, query={'chrom': 'MT'},
                                        nr_of_variants= -1, sort_key='position'))
    if not mt_variants:
        LOG.warning('There are no MT variants associated to case {} in database!'.format(case_id))
        context.abort()

    today = datetime.datetime.now().strftime('%Y-%m-%d')
    # Default output folder is the current working directory
    outpath = outpath or str(os.getcwd())

    # One excel document is produced per sample of the case
    written_files = 0
    for sample in case_obj.get('individuals'):
        sample_id = sample['individual_id']
        sample_lines = export_mt_variants(variants=mt_variants, sample_id=sample_id)

        document_name = '.'.join([case_obj['display_name'], sample_id, today]) + '.xlsx'
        workbook = Workbook(os.path.join(outpath, document_name))
        report_sheet = workbook.add_worksheet()

        if test and sample_lines and workbook:
            # Dry run: count the file as writable without writing it out
            written_files += 1
            continue

        # Header goes on row 0, variant lines follow from row 1
        for col, field in enumerate(MT_EXPORT_HEADER):
            report_sheet.write(0, col, field)
        for row, line in enumerate(sample_lines, 1):
            for col, field in enumerate(line):
                report_sheet.write(row, col, field)
        workbook.close()

        if os.path.exists(os.path.join(outpath, document_name)):
            written_files += 1

    if test:
        LOG.info("Number of excel files that can be written to folder {0}: {1}".format(outpath, written_files))
    else:
        LOG.info("Number of excel files written to folder {0}: {1}".format(outpath, written_files))
    return written_files
"resource": ""
} |
def is_pathogenic(pvs, ps_terms, pm_terms, pp_terms):
    """Check if the criteria for Pathogenic are fulfilled.

    From the ACMG paper, a variant is Pathogenic given:
    (i) 1 Very strong (PVS1) AND
        (a) >=1 Strong (PS1-PS4) OR
        (b) >=2 Moderate (PM1-PM6) OR
        (c) 1 Moderate (PM1-PM6) and 1 supporting (PP1-PP5) OR
        (d) >=2 Supporting (PP1-PP5)
    (ii) >=2 Strong (PS1-PS4) OR
    (iii) 1 Strong (PS1-PS4) AND
        (a) >=3 Moderate (PM1-PM6) OR
        (b) 2 Moderate (PM1-PM6) AND >=2 Supporting (PP1-PP5) OR
        (c) 1 Moderate (PM1-PM6) AND >=4 supporting (PP1-PP5)

    Args:
        pvs(bool): Pathogenic Very Strong
        ps_terms(list(str)): Pathogenic Strong terms
        pm_terms(list(str)): Pathogenic Moderate terms
        pp_terms(list(str)): Pathogenic Supporting terms

    Returns:
        bool: if classification indicates Pathogenic level
    """
    n_ps = len(ps_terms)
    n_pm = len(pm_terms)
    n_pp = len(pp_terms)

    if pvs:
        if n_ps >= 1:                 # Pathogenic (i)(a)
            return True
        if n_pm >= 1 and n_pp >= 1:   # Pathogenic (i)(c)
            return True
        if n_pm >= 2:                 # Pathogenic (i)(b)
            return True
        if n_pp >= 2:                 # Pathogenic (i)(d)
            return True

    if n_ps >= 2:                     # Pathogenic (ii)
        return True

    if n_ps >= 1:
        if n_pm >= 3:                 # Pathogenic (iii)(a)
            return True
        if n_pm == 2 and n_pp >= 2:   # Pathogenic (iii)(b)
            return True
        if n_pm == 1 and n_pp >= 4:   # Pathogenic (iii)(c)
            return True

    return False
"resource": ""
} |
def is_likely_pathogenic(pvs, ps_terms, pm_terms, pp_terms):
    """Check if the criteria for Likely Pathogenic are fulfilled.

    From the ACMG paper, a variant is Likely Pathogenic given:
    (i) 1 Very strong (PVS1) AND 1 moderate (PM1-PM6) OR
    (ii) 1 Strong (PS1-PS4) AND 1-2 moderate (PM1-PM6) OR
    (iii) 1 Strong (PS1-PS4) AND >=2 supporting (PP1-PP5) OR
    (iv) >=3 Moderate (PM1-PM6) OR
    (v) 2 Moderate (PM1-PM6) AND >=2 supporting (PP1-PP5) OR
    (vi) 1 Moderate (PM1-PM6) AND >=4 supporting (PP1-PP5)

    Args:
        pvs(bool): Pathogenic Very Strong
        ps_terms(list(str)): Pathogenic Strong terms
        pm_terms(list(str)): Pathogenic Moderate terms
        pp_terms(list(str)): Pathogenic Supporting terms

    Returns:
        bool: if classification indicates Likely Pathogenic level
    """
    n_pm = len(pm_terms)
    n_pp = len(pp_terms)

    if pvs and n_pm >= 1:             # Likely Pathogenic (i)
        return True

    if ps_terms:
        if n_pm >= 1:                 # Likely Pathogenic (ii)
            return True
        if n_pp >= 2:                 # Likely Pathogenic (iii)
            return True

    if n_pm >= 3:                     # Likely Pathogenic (iv)
        return True
    if n_pm == 2 and n_pp >= 2:       # Likely Pathogenic (v)
        return True
    if n_pm == 1 and n_pp >= 4:       # Likely Pathogenic (vi)
        return True

    return False
"resource": ""
} |
def is_likely_benign(bs_terms, bp_terms):
    """Check if the criteria for Likely Benign are fulfilled.

    From the ACMG paper, a variant is Likely Benign given:
    (i) 1 Strong (BS1-BS4) and 1 supporting (BP1-BP7) OR
    (ii) >=2 Supporting (BP1-BP7)

    Args:
        bs_terms(list(str)): Terms that indicate strong evidence for benign variant
        bp_terms(list(str)): Terms that indicate supporting evidence for benign variant

    Returns:
        bool: if classification indicates Likely Benign level
    """
    # Likely Benign (i): one strong term plus at least one supporting term
    if bs_terms and bp_terms:
        return True
    # Likely Benign (ii): at least two supporting terms
    return len(bp_terms) >= 2
"resource": ""
} |
def get_acmg(acmg_terms):
    """Use the algorithm described in the ACMG paper to get an ACMG classification.

    Args:
        acmg_terms(set(str)): A collection of prediction terms

    Returns:
        prediction(str): one of 'uncertain_significance', 'benign',
            'likely_benign', 'likely_pathogenic', 'pathogenic'
            (the original docstring claimed an int 0-4; the function has
            always returned these strings)
    """
    # Sort the terms into the evidence buckets defined by the ACMG paper
    pvs = False     # Pathogenic Very Strong present
    ps_terms = []   # Pathogenic Strong
    pm_terms = []   # Pathogenic Moderate
    pp_terms = []   # Pathogenic Supporting
    ba = False      # Benign stand-alone present
    bs_terms = []   # Benign Strong
    bp_terms = []   # Benign Supporting
    for term in acmg_terms:
        if term.startswith('PVS'):
            pvs = True
        elif term.startswith('PS'):
            ps_terms.append(term)
        elif term.startswith('PM'):
            pm_terms.append(term)
        elif term.startswith('PP'):
            pp_terms.append(term)
        elif term.startswith('BA'):
            ba = True
        elif term.startswith('BS'):
            bs_terms.append(term)
        elif term.startswith('BP'):
            bp_terms.append(term)

    # We need to start by checking for Pathogenecity
    pathogenic = is_pathogenic(pvs, ps_terms, pm_terms, pp_terms)
    likely_pathogenic = is_likely_pathogenic(pvs, ps_terms, pm_terms, pp_terms)
    benign = is_benign(ba, bs_terms)
    likely_benign = is_likely_benign(bs_terms, bp_terms)

    if pathogenic or likely_pathogenic:
        if benign or likely_benign:
            # Contradictory evidence defaults to uncertain significance
            return 'uncertain_significance'
        return 'pathogenic' if pathogenic else 'likely_pathogenic'
    if benign:
        # Bugfix: previously a second plain 'if likely_benign' overwrote the
        # stronger 'benign' call with 'likely_benign' when both criteria sets
        # were fulfilled (e.g. BA1 together with BS+BP terms)
        return 'benign'
    if likely_benign:
        return 'likely_benign'
    return 'uncertain_significance'
"resource": ""
} |
def add_gene_info(self, variant_obj, gene_panels=None):
    """Add extra information about genes from gene panels.

    Args:
        variant_obj(dict): A variant from the database
        gene_panels(list(dict)): List of panels from database

    Returns:
        variant_obj(dict): the same variant with gene/transcript info filled in
    """
    gene_panels = gene_panels or []
    # Tracks whether any transcript of any gene carries a RefSeq id
    variant_obj['has_refseq'] = False

    # Collect panel specific gene information, keyed on hgnc_id
    extra_info = {}
    for panel_obj in gene_panels:
        for gene_info in panel_obj['genes']:
            extra_info.setdefault(gene_info['hgnc_id'], []).append(gene_info)

    # Loop over the genes in the variant object to add information
    # from hgnc_genes and panel genes to the variant object
    for variant_gene in variant_obj.get('genes', []):
        hgnc_id = variant_gene['hgnc_id']
        hgnc_gene = self.hgnc_gene(hgnc_id)
        if not hgnc_gene:
            continue

        # Index this gene's transcripts on ensembl transcript id
        transcripts_dict = {}
        for transcript in hgnc_gene.get('transcripts', []):
            transcripts_dict[transcript['ensembl_transcript_id']] = transcript
        hgnc_gene['transcripts_dict'] = transcripts_dict

        if hgnc_gene.get('incomplete_penetrance'):
            variant_gene['omim_penetrance'] = True

        ############# PANEL SPECIFIC INFORMATION #############
        # Panels can have extra information about genes and transcripts
        panel_info = extra_info.get(hgnc_id, [])
        # Manually annotated disease associated transcripts
        disease_associated = set()
        # Same ids but with the version stripped, for comparisons
        disease_associated_no_version = set()
        manual_penetrance = False
        mosaicism = False
        manual_inheritance = set()
        # There can be information from multiple panels
        for gene_info in panel_info:
            for tx in gene_info.get('disease_associated_transcripts', []):
                # Bugfix: the old pattern r'\.[0-9]' removed only a single
                # version digit and mangled ids like NM_000059.12 -> NM_0000592
                stripped = re.sub(r'\.[0-9]+', '', tx)
                disease_associated_no_version.add(stripped)
                disease_associated.add(tx)
            if gene_info.get('reduced_penetrance'):
                manual_penetrance = True
            if gene_info.get('mosaicism'):
                mosaicism = True
            manual_inheritance.update(gene_info.get('inheritance_models', []))
        variant_gene['disease_associated_transcripts'] = list(disease_associated)
        variant_gene['manual_penetrance'] = manual_penetrance
        variant_gene['mosaicism'] = mosaicism
        variant_gene['manual_inheritance'] = list(manual_inheritance)

        # Enrich the variant's own transcripts with the hgnc/panel information
        for transcript in variant_gene.get('transcripts', []):
            tx_id = transcript['transcript_id']
            if tx_id not in transcripts_dict:
                continue
            hgnc_transcript = transcripts_dict[tx_id]
            if hgnc_transcript.get('is_primary'):
                transcript['is_primary'] = True
            # If the transcript has a RefSeq identifier we add it too
            if not hgnc_transcript.get('refseq_id'):
                continue
            refseq_id = hgnc_transcript['refseq_id']
            transcript['refseq_id'] = refseq_id
            variant_obj['has_refseq'] = True
            # Check if the refseq id is disease associated
            if refseq_id in disease_associated_no_version:
                transcript['is_disease_associated'] = True
            # One ensembl transcript can map to several refseq identifiers
            transcript['refseq_identifiers'] = hgnc_transcript.get('refseq_identifiers', [])

        variant_gene['common'] = hgnc_gene
        # Add the associated disease terms
        variant_gene['disease_terms'] = self.disease_terms(hgnc_id)

    return variant_obj
"resource": ""
} |
def variants(self, case_id, query=None, variant_ids=None, category='snv',
             nr_of_variants=10, skip=0, sort_key='variant_rank'):
    """Return variants for a specific case.

    If skip is not equal to 0, skip the first n variants.

    Arguments:
        case_id(str): A string that represents the case
        query(dict): A dictionary with queries for the database
        variant_ids(List[str])
        category(str): 'sv', 'str', 'snv' or 'cancer'
        nr_of_variants(int): if -1 return all variants
        skip(int): How many variants to skip
        sort_key: ['variant_rank', 'rank_score', 'position']

    Yields:
        result(Iterable[Variant])
    """
    LOG.debug("Fetching variants from {0}".format(case_id))

    if variant_ids:
        nr_of_variants = len(variant_ids)
    elif nr_of_variants == -1:
        # limit=0 means "no limit" in pymongo, i.e. return all variants
        nr_of_variants = 0
    else:
        nr_of_variants = skip + nr_of_variants

    mongo_query = self.build_query(case_id, query=query,
                                   variant_ids=variant_ids,
                                   category=category)

    sort_options = {
        'variant_rank': [('variant_rank', pymongo.ASCENDING)],
        'rank_score': [('rank_score', pymongo.DESCENDING)],
        'position': [('position', pymongo.ASCENDING)],
    }
    sorting = sort_options.get(sort_key, [])

    return self.variant_collection.find(
        mongo_query,
        skip=skip,
        limit=nr_of_variants
    ).sort(sorting)
"resource": ""
} |
def sanger_variants(self, institute_id=None, case_id=None):
    """Return all variants with sanger information.

    Args:
        institute_id(str): optional institute filter
        case_id(str): optional case filter

    Returns:
        res(pymongo.Cursor): A Cursor with all variants with sanger activity
    """
    query = {'validation': {'$exists': True}}
    # Only narrow the query with the filters that were actually provided
    for field, value in (('institute_id', institute_id), ('case_id', case_id)):
        if value:
            query[field] = value
    return self.variant_collection.find(query)
"resource": ""
} |
def variant(self, document_id, gene_panels=None, case_id=None):
    """Return the specified variant.

    Arguments:
        document_id: A md5 key that represents the variant or "variant_id"
        gene_panels(List[GenePanel])
        case_id(str): case id (will search with "variant_id")

    Returns:
        variant_object(Variant): A odm variant object, or None if not found
    """
    if case_id:
        # Search for the variant within a specific case
        query = {'case_id': case_id, 'variant_id': document_id}
    else:
        # Search on the unique document id
        query = {'_id': document_id}

    variant_obj = self.variant_collection.find_one(query)
    if not variant_obj:
        return variant_obj

    variant_obj = self.add_gene_info(variant_obj, gene_panels)
    if variant_obj['chromosome'] in ('X', 'Y'):
        ## TODO add the build here
        variant_obj['is_par'] = is_par(variant_obj['chromosome'],
                                       variant_obj['position'])
    return variant_obj
"resource": ""
} |
def gene_variants(self, query=None,
                  category='snv', variant_type=None,
                  nr_of_variants=50, skip=0):
    """Return all variants seen in a given gene.

    If skip is not equal to 0, skip the first n variants.

    Arguments:
        query(dict): A dictionary with queries for the database, including
            variant_type: 'clinical', 'research'
        category(str): 'sv', 'str', 'snv' or 'cancer'
        variant_type(list(str)): defaults to ['clinical']
        nr_of_variants(int): if -1 return all variants
        skip(int): How many variants to skip
    """
    # Bugfix: the default used to be the mutable list literal ['clinical'],
    # shared between calls; use a None sentinel with the same effective value
    if variant_type is None:
        variant_type = ['clinical']

    mongo_variant_query = self.build_variant_query(query=query,
                                                   category=category, variant_type=variant_type)

    sorting = [('rank_score', pymongo.DESCENDING)]

    if nr_of_variants == -1:
        nr_of_variants = 0  # limit=0 returns all variants in pymongo
    else:
        nr_of_variants = skip + nr_of_variants

    result = self.variant_collection.find(
        mongo_variant_query
    ).sort(sorting).skip(skip).limit(nr_of_variants)
    return result
"resource": ""
} |
def verified(self, institute_id):
    """Return all verified variants for a given institute.

    Args:
        institute_id(str): institute id

    Returns:
        res(list): a list with validated variants
    """
    query = {'verb': 'validate', 'institute': institute_id}

    verified_variants = []
    for validated in list(self.event_collection.find(query)):
        case_id = validated['case']
        var_obj = self.variant(case_id=case_id, document_id=validated['variant_id'])
        case_obj = self.case(case_id=case_id)
        if not case_obj or not var_obj:
            # Case or variant may have been removed since the validation event
            continue
        var_obj['case_obj'] = {
            'display_name': case_obj['display_name'],
            'individuals': case_obj['individuals'],
        }
        verified_variants.append(var_obj)
    return verified_variants
"resource": ""
} |
def get_causatives(self, institute_id, case_id=None):
    """Return all causative variants for an institute or a single case.

    Args:
        institute_id(str)
        case_id(str): when given, only this case's causatives are returned

    Returns:
        causatives(list): causative variant document ids
    """
    causatives = []
    if case_id:
        case_obj = self.case_collection.find_one(
            {"_id": case_id}
        )
        # Robustness fix: a missing case (None) or a case without a
        # 'causatives' field previously raised TypeError/KeyError here
        if case_obj:
            causatives = list(case_obj.get('causatives', []))
    elif institute_id:
        query = self.case_collection.aggregate([
            {'$match': {'collaborators': institute_id, 'causatives': {'$exists': True}}},
            {'$unwind': '$causatives'},
            {'$group': {'_id': '$causatives'}}
        ])
        causatives = [item['_id'] for item in query]
    return causatives
"resource": ""
} |
def check_causatives(self, case_obj=None, institute_obj=None):
    """Check if there are any variants that are previously marked causative.

    Loop through all variants that are marked 'causative' for an
    institute and check if any of the variants are present in the
    current case.

    Args:
        case_obj (dict): A Case object
        institute_obj (dict): check across the whole institute

    Returns:
        causatives(iterable(Variant))
    """
    institute_id = case_obj['owner'] if case_obj else institute_obj['_id']
    causative_ids = self.get_causatives(institute_id)
    if not causative_ids:
        return []

    if case_obj:
        # Exclude the variants already marked causative in this very case
        own_causatives = set(case_obj.get('causatives', []))
        causative_ids = list(set(causative_ids) - own_causatives)

    # Translate the unique ids into positional "variant_id"s
    id_query = self.variant_collection.find(
        {'_id': {'$in': causative_ids}},
        {'variant_id': 1}
    )
    positional_variant_ids = [item['variant_id'] for item in id_query]

    filters = {'variant_id': {'$in': positional_variant_ids}}
    if case_obj:
        filters['case_id'] = case_obj['_id']
    else:
        filters['institute'] = institute_obj['_id']
    return self.variant_collection.find(filters)
"resource": ""
} |
def other_causatives(self, case_obj, variant_obj):
    """Find the same variant in other cases marked causative.

    Args:
        case_obj(dict)
        variant_obj(dict)

    Yields:
        other_variant(dict)
    """
    # variant id without the trailing "_[variant_type]" suffix
    variant_id = variant_obj['display_name'].rsplit('_', 1)[0]

    for causative_id in self.get_causatives(variant_obj['institute']):
        other_variant = self.variant(causative_id)
        if not other_variant:
            continue
        # Skip causatives from the very same case
        if other_variant['case_id'] == case_obj['_id']:
            continue
        if other_variant['display_name'].startswith(variant_id):
            yield other_variant
"resource": ""
} |
def delete_variants(self, case_id, variant_type, category=None):
    """Delete variants of one type for a case.

    This is used when a case is reanalyzed.

    Args:
        case_id(str): The case id
        variant_type(str): 'research' or 'clinical'
        category(str): 'snv', 'sv' or 'cancer'
    """
    category = category or ''
    LOG.info("Deleting old {0} {1} variants for case {2}".format(
        variant_type, category, case_id))

    query = dict(case_id=case_id, variant_type=variant_type)
    if category:
        query['category'] = category

    deleted = self.variant_collection.delete_many(query).deleted_count
    LOG.info("{0} variants deleted".format(deleted))
"resource": ""
} |
def overlapping(self, variant_obj):
    """Return overlapping variants of the other category.

    Look at the genes that a variant overlaps, then return all variants of
    the opposite category (snv vs sv) that overlap those genes.

    Args:
        variant_obj(dict)

    Returns:
        variants(iterable(dict))
    """
    # SVs overlap SNVs and vice versa
    other_category = 'snv' if variant_obj['category'] == 'sv' else 'sv'

    query = {'$and': [
        {'case_id': variant_obj['case_id']},
        {'category': other_category},
        {'hgnc_ids': {'$in': variant_obj['hgnc_ids']}},
    ]}

    # Keep only the 30 most severe hits: huge SVs can overlap a very large
    # number of variants
    return (self.variant_collection.find(query)
            .sort([('rank_score', pymongo.DESCENDING)])
            .limit(30))
"resource": ""
} |
def evaluated_variants(self, case_id):
    """Return variants of a case that have been evaluated.

    Covers snvs/indels and svs that have an 'acmg_classification',
    'manual_rank' or 'dismiss_variant' entry, or that have been commented on.

    Args:
        case_id(str)

    Returns:
        variants(iterable(Variant))
    """
    query = {'$and': [
        {'case_id': case_id},
        {'$or': [
            {'acmg_classification': {'$exists': True}},
            {'manual_rank': {'$exists': True}},
            {'dismiss_variant': {'$exists': True}},
        ]},
    ]}

    # Collect the result keyed on variant_id to avoid duplicates
    evaluated = {}
    for var in self.variant_collection.find(query):
        evaluated[var['variant_id']] = self.add_gene_info(var)

    # All variant comment events for the case
    event_query = {'$and': [
        {'case': case_id},
        {'category': 'variant'},
        {'verb': 'comment'},
    ]}
    commented_ids = {event['variant_id']
                     for event in self.event_collection.find(event_query)}

    for var_id in commented_ids:
        if var_id in evaluated:
            continue
        # Get the variant with variant_id (not _id!)
        variant_obj = self.variant(var_id, case_id=case_id)
        # Comments may refer to variants removed by a reanalysis
        if not variant_obj:
            continue
        variant_obj['is_commented'] = True
        evaluated[var_id] = variant_obj

    return evaluated.values()
"resource": ""
} |
def get_region_vcf(self, case_obj, chrom=None, start=None, end=None,
                   gene_obj=None, variant_type='clinical', category='snv',
                   rank_threshold=None):
    """Produce a reduced vcf with variants from the specified coordinates.

    This is used for the alignment viewer.

    Args:
        case_obj(dict): A case from the scout database
        variant_type(str): 'clinical' or 'research'. Default: 'clinical'
        category(str): 'snv' or 'sv'. Default: 'snv'
        rank_threshold(float): Only load variants above this score. Default: 5
        chrom(str): Load variants from a certain chromosome
        start(int): Specify the start position
        end(int): Specify the end position
        gene_obj(dict): A gene object from the database

    Returns:
        file_name(str): Path to the temporary file
    """
    # NOTE(review): rank_threshold is computed but never used for filtering
    # below — confirm whether score based filtering was intended
    rank_threshold = rank_threshold or -100

    vcf_file_keys = {
        ('clinical', 'snv'): 'vcf_snv',
        ('clinical', 'sv'): 'vcf_sv',
        ('clinical', 'str'): 'vcf_str',
        ('research', 'snv'): 'vcf_snv_research',
        ('research', 'sv'): 'vcf_sv_research',
    }
    file_key = vcf_file_keys.get((variant_type, category))
    variant_file = case_obj['vcf_files'].get(file_key) if file_key else None
    if not variant_file:
        raise SyntaxError("Vcf file does not seem to exist")

    vcf_obj = VCF(variant_file)

    # A gene object overrides any explicit coordinates
    if gene_obj:
        chrom = gene_obj['chromosome']
        start = gene_obj['start']
        end = gene_obj['end']

    region = ""
    if chrom:
        if start and end:
            region = "{0}:{1}-{2}".format(chrom, start, end)
        else:
            region = "{0}".format(chrom)
    else:
        # NOTE(review): dead assignment — rank_threshold is already truthy
        # (-100) at this point, so the default of 5 never applies
        rank_threshold = rank_threshold or 5

    with tempfile.NamedTemporaryFile(mode='w', delete=False) as temp:
        file_name = str(pathlib.Path(temp.name))
        # Copy over the vcf header (skipping blank/short lines)
        for header_line in vcf_obj.raw_header.split('\n'):
            if len(header_line) > 3:
                temp.write(header_line + '\n')
        # An empty region string makes cyvcf2 iterate the whole file
        for variant in vcf_obj(region):
            temp.write(str(variant))

    return file_name
"resource": ""
} |
def sample_variants(self, variants, sample_name, category='snv'):
    """Given a list of variant ids, get those called in a specific patient.

    Args:
        variants(list): a list of variant ids
        sample_name(str): a sample display name
        category(str): 'snv', 'sv' ..

    Returns:
        result(iterable(Variant))
    """
    LOG.info('Retrieving variants for subject : {0}'.format(sample_name))

    # A non wild-type allele is called at least once in this sample
    has_allele = re.compile('1|2')

    query = {'$and': [
        {'_id': {'$in': variants}},
        {'category': category},
        {'samples': {'$elemMatch': {
            'display_name': sample_name,
            'genotype_call': {'$regex': has_allele},
        }}},
    ]}
    return self.variant_collection.find(query)
"resource": ""
} |
def get_connection(host='localhost', port=27017, username=None, password=None,
                   uri=None, mongodb=None, authdb=None, timeout=20, *args, **kwargs):
    """Get a client to the mongo database.

    Args:
        host(str): Host of database
        port(int): Port of database
        username(str)
        password(str)
        uri(str)
        mongodb(str): name of the database
        authdb(str): database to use for authentication
        timeout(int): How long should the client try to connect
            # NOTE(review): passed straight to serverSelectionTimeoutMS which
            # is in *milliseconds* — confirm callers do not expect seconds

    Returns:
        MongoClient

    Raises:
        pymongo.errors.ConnectionFailure
    """
    authdb = authdb or mongodb
    if uri is None:
        if username and password:
            uri = ("mongodb://{}:{}@{}:{}/{}"
                   .format(quote_plus(username), quote_plus(password), host, port, authdb))
            # Never log credentials
            log_uri = ("mongodb://{}:****@{}:{}/{}"
                       .format(quote_plus(username), host, port, authdb))
        else:
            log_uri = uri = "mongodb://%s:%s" % (host, port)
    else:
        # Bugfix: when an explicit uri was passed, log_uri was never assigned
        # and the LOG.info call below raised NameError
        log_uri = uri

    LOG.info("Try to connect to %s" % log_uri)
    try:
        client = MongoClient(uri, serverSelectionTimeoutMS=timeout)
    except ServerSelectionTimeoutError as err:
        LOG.warning("Connection Refused")
        # Chain the original error instead of silently discarding it
        raise ConnectionFailure from err

    LOG.info("Connection established")
    return client
"resource": ""
} |
def get_objects_from_form(variant_ids, form_fields, object_type):
    """Extract the objects to be saved in the clinvar database collection.

    Args:
        variant_ids(list): list of database variant ids
        form_fields(dict): the submission form dictionary; keys have the same
            names as CLINVAR_HEADER and CASEDATA_HEADER
        object_type(str): 'variant' or 'casedata'
            # NOTE(review): the code tests for the literal 'casedata', while
            # the old docstring said 'case_data' — confirm which spelling
            # callers actually pass

    Returns:
        submission_objects(list): list of submission objects of either type
    """
    submission_fields = (CLINVAR_HEADER if object_type == 'variant'
                         else CASEDATA_HEADER)

    submission_objects = []
    for variant_id in variant_ids:
        # Casedata for a variant is only included when the user requested it
        if object_type == 'casedata' and 'casedata_' + variant_id not in form_fields:
            continue

        subm_obj = {
            'csv_type': object_type,
            'case_id': form_fields.get('case_id'),
            'category': form_fields.get('category@' + variant_id),
        }

        for key in submission_fields:
            field_value = form_fields.get(key + '@' + variant_id)
            # '-' is the form's placeholder for "no value"
            if not field_value or field_value == '-':
                continue
            if key == 'ref_seq':
                # This field packs "refseq|hgvs" into a single value
                refseq_raw = field_value.split('|')
                subm_obj['ref_seq'] = refseq_raw[0]
                subm_obj['hgvs'] = refseq_raw[1]
            else:
                subm_obj[key] = field_value

        # Unique database id:
        # casedata: caseID_variantID_sampleID, variants: caseID_variantID
        if object_type == 'casedata':
            subm_obj['_id'] = str(subm_obj['case_id']) + '_' + variant_id + '_' + str(subm_obj['individual_id'])
        else:
            subm_obj['_id'] = str(subm_obj['case_id']) + '_' + variant_id
        submission_objects.append(subm_obj)

    return submission_objects
"resource": ""
} |
def clinvar_submission_header(submission_objs, csv_type):
    """Determine which fields to include in a csv header by checking a list
    of submission objects.

    Args:
        submission_objs(list): objects (variants or casedata) to include in a csv file
        csv_type(str): 'variant_data' or 'case_data'

    Returns:
        custom_header(dict): fields required in the csv header. Keys and
            values are specified in CLINVAR_HEADER and CASEDATA_HEADER
    """
    # Header containing all available fields for this csv type
    complete_header = CLINVAR_HEADER if csv_type == 'variant_data' else CASEDATA_HEADER

    # Header reflecting the data actually present in the submission objects
    custom_header = {}
    for header_key, header_value in complete_header.items():
        # Include a field as soon as any submission object provides it.
        # (A membership test replaces the original scan over obj.items().)
        for clinvar_obj in submission_objs:
            if header_key in clinvar_obj:
                custom_header[header_key] = header_value
                break
    return custom_header
def clinvar_submission_lines(submission_objs, submission_header):
    """Create the lines of a Clinvar submission csv file.

    Args:
        submission_objs(list): objects (variants or casedata) to include
        submission_header(dict): as in constants CLINVAR_HEADER and
            CASEDATA_HEADER, but restricted to the required fields

    Returns:
        submission_lines(list): strings, one per line of the clinvar csv file
    """
    submission_lines = []
    for submission_obj in submission_objs:  # each object becomes one csv line
        # Quote every value; fields missing from the object become ""
        # (only the header keys matter; its values are display labels)
        csv_line = ['"{}"'.format(submission_obj.get(header_key, ''))
                    for header_key in submission_header]
        submission_lines.append(','.join(csv_line))
    return submission_lines
def load_transcripts(adapter, transcripts_lines=None, build='37', ensembl_genes=None):
    """Load all the transcripts

    Transcript information is from ensembl.

    Args:
        adapter(MongoAdapter)
        transcripts_lines(iterable): iterable with ensembl transcript lines;
            fetched from ensembl when None
        build(str): genome build, '37' or '38'
        ensembl_genes(dict): Map from ensembl_id -> HgncGene; fetched from the
            adapter when None

    Returns:
        transcript_objs(list): A list with all transcript objects
    """
    # Fetch all genes with ensemblid as keys
    ensembl_genes = ensembl_genes or adapter.ensembl_genes(build)

    if transcripts_lines is None:
        transcripts_lines = fetch_ensembl_transcripts(build=build)

    # Map with all transcripts enstid -> parsed transcript
    transcripts_dict = parse_transcripts(transcripts_lines)

    # Iterate over a copy of the keys (list(...)) because entries are popped
    # from the dict while iterating
    for ens_tx_id in list(transcripts_dict):
        parsed_tx = transcripts_dict[ens_tx_id]
        # Get the ens gene id
        ens_gene_id = parsed_tx['ensembl_gene_id']

        # Fetch the internal gene object to find out the correct hgnc id
        gene_obj = ensembl_genes.get(ens_gene_id)

        # If the gene is non existing in scout we skip the transcript
        if not gene_obj:
            transcripts_dict.pop(ens_tx_id)
            LOG.debug("Gene %s does not exist in build %s", ens_gene_id, build)
            continue

        # Add the correct hgnc id
        parsed_tx['hgnc_id'] = gene_obj['hgnc_id']
        # Primary transcript information is collected from HGNC
        parsed_tx['primary_transcripts'] = set(gene_obj.get('primary_transcripts', []))

    # Counters for the summary log messages at the end
    ref_seq_transcripts = 0
    nr_primary_transcripts = 0
    nr_transcripts = len(transcripts_dict)

    transcript_objs = []

    with progressbar(transcripts_dict.values(), label="Building transcripts", length=nr_transcripts) as bar:
        for tx_data in bar:
            #################### Get the correct refseq identifier ####################
            # We need to decide one refseq identifier for each transcript, if there are any to
            # choose from. The algorithm is as follows:
            # If there is ONE mrna this is choosen
            # If there are several mrna the one that is in 'primary_transcripts' is choosen
            # Else one is choosen at random
            # The same follows for the other categories where nc_rna has precedense over mrna_predicted
            # We will store all refseq identifiers in a "refseq_identifiers" list as well
            tx_data['is_primary'] = False
            primary_transcripts = tx_data['primary_transcripts']
            refseq_identifier = None
            refseq_identifiers = []
            # TRANSCRIPT_CATEGORIES is ordered by precedence; once an
            # identifier is picked, later categories only contribute to the
            # refseq_identifiers list, never to refseq_identifier itself
            for category in TRANSCRIPT_CATEGORIES:
                identifiers = tx_data[category]
                if not identifiers:
                    continue

                for refseq_id in identifiers:
                    # Add all refseq identifiers to refseq_identifiers
                    refseq_identifiers.append(refseq_id)
                    ref_seq_transcripts += 1
                    if refseq_id in primary_transcripts:
                        refseq_identifier = refseq_id
                        tx_data['is_primary'] = True
                        nr_primary_transcripts += 1

                # Fallback: reuse the leaked loop variable, i.e. the last
                # identifier of this category ("choosen at random" above)
                if not refseq_identifier:
                    refseq_identifier = refseq_id

            if refseq_identifier:
                tx_data['refseq_id'] = refseq_identifier
            if refseq_identifiers:
                tx_data['refseq_identifiers'] = refseq_identifiers
            #################### #################### ####################

            # Build the transcript object
            tx_obj = build_transcript(tx_data, build)
            transcript_objs.append(tx_obj)

    # Load all transcripts
    LOG.info("Loading transcripts...")
    if len(transcript_objs) > 0:
        adapter.load_transcript_bulk(transcript_objs)

    LOG.info('Number of transcripts in build %s: %s', build, nr_transcripts)
    LOG.info('Number of transcripts with refseq identifier: %s', ref_seq_transcripts)
    LOG.info('Number of primary transcripts: %s', nr_primary_transcripts)
    return transcript_objs
"resource": ""
} |
def panel(context, path, date, display_name, version, panel_type, panel_id, institute, omim, api_key, panel_app):
    """Add a gene panel to the database."""
    adapter = context.obj['adapter']
    institute = institute or 'cust000'

    if omim:
        # Building the OMIM panel needs a valid OMIM API key
        api_key = api_key or context.obj.get('omim_api_key')
        if not api_key:
            LOG.warning("Please provide a omim api key to load the omim gene panel")
            context.abort()
        # Do not create a duplicate OMIM-AUTO panel
        if adapter.gene_panel(panel_id='OMIM-AUTO'):
            LOG.warning("OMIM-AUTO already exists in database")
            LOG.info("To create a new version use scout update omim")
            return
        # No OMIM panel loaded yet, build it from the OMIM API
        try:
            adapter.load_omim_panel(api_key, institute=institute)
        except Exception as err:
            LOG.error(err)
            context.abort()

    if panel_app:
        load_panel_app(adapter, panel_id, institute=institute)

    if omim or panel_app:
        # The special panels were handled above; nothing more to do
        return

    if path is None:
        LOG.info("Please provide a panel")
        return

    try:
        load_panel(path, adapter, date, display_name, version, panel_type, panel_id, institute)
    except Exception as err:
        LOG.warning(err)
        context.abort()
"resource": ""
} |
def build_exon(exon_info, build='37'):
    """Build an Exon object

    Args:
        exon_info(dict): Exon information
        build(str): Genome build

    Returns:
        exon_obj(Exon)

        "exon_id": str, # str(chrom-start-end)
        "chrom": str,
        "start": int,
        "end": int,
        "transcript": str, # ENST ID
        "hgnc_id": int, # HGNC_id
        "rank": int, # Order of exon in transcript
        "build": str, # Genome build

    Raises:
        KeyError: if a mandatory field is missing
        TypeError: if a numeric field cannot be coerced to int
    """
    def _require(key, message):
        # Fetch a mandatory key, raising a descriptive KeyError when absent
        try:
            return exon_info[key]
        except KeyError:
            raise KeyError(message)

    def _require_int(key, missing_message, type_message):
        # Fetch a mandatory key and coerce it to int with descriptive errors.
        # Only TypeError is translated (matching the original behavior);
        # a non-numeric string still propagates ValueError.
        try:
            return int(exon_info[key])
        except KeyError:
            raise KeyError(missing_message)
        except TypeError:
            raise TypeError(type_message)

    chrom = _require('chrom', "Exons has to have a chromosome")
    start = _require_int('start', "Exon has to have a start", "Exon start has to be integer")
    end = _require_int('end', "Exon has to have a end", "Exon end has to be integer")
    rank = _require_int('rank', "Exon has to have a rank", "Exon rank has to be integer")
    exon_id = _require('exon_id', "Exons has to have a id")
    transcript = _require('transcript', "Exons has to have a transcript")
    hgnc_id = _require_int('hgnc_id', "Exons has to have a hgnc_id", "hgnc_id has to be integer")

    exon_obj = Exon(
        exon_id=exon_id,
        chrom=chrom,
        start=start,
        end=end,
        rank=rank,
        transcript=transcript,
        hgnc_id=hgnc_id,
        build=build,
    )
    return exon_obj
"resource": ""
} |
def panel(context, panel_id, version):
    """Delete a version of a gene panel or all versions of a gene panel"""
    LOG.info("Running scout delete panel")
    adapter = context.obj['adapter']

    matching_panels = adapter.gene_panels(panel_id=panel_id, version=version)
    if matching_panels.count() == 0:
        LOG.info("No panels found")
    # Delete every panel matching the query (all versions when version is None)
    for panel_obj in matching_panels:
        adapter.delete_panel(panel_obj)
def index(context):
    """Delete all indexes in the database"""
    LOG.info("Running scout delete index")
    adapter = context.obj['adapter']

    # Drop the indexes of every collection in the database
    for collection_name in adapter.db.collection_names():
        adapter.db[collection_name].drop_indexes()
    LOG.info("All indexes deleted")
def user(context, mail):
    """Delete a user from the database"""
    LOG.info("Running scout delete user")
    adapter = context.obj['adapter']

    user_obj = adapter.user(mail)
    if user_obj:
        adapter.delete_user(mail)
    else:
        LOG.warning("User {0} could not be found in database".format(mail))
def genes(context, build):
    """Delete all genes in the database"""
    LOG.info("Running scout delete genes")
    adapter = context.obj['adapter']

    if build:
        LOG.info("Dropping genes collection for build: %s", build)
    else:
        LOG.info("Dropping genes collection")
    # Bug fix: the requested build was logged but never passed on, so genes
    # for all builds were always dropped. Forward it like `exons` does.
    adapter.drop_genes(build)
def exons(context, build):
    """Delete all exons in the database"""
    LOG.info("Running scout delete exons")

    # Drop the exons for the requested build (or all builds when build is None)
    context.obj['adapter'].drop_exons(build)
def case(context, institute, case_id, display_name):
    """Delete a case and it's variants from the database"""
    adapter = context.obj['adapter']

    if not (case_id or display_name):
        click.echo("Please specify what case to delete")
        context.abort()

    if display_name:
        # A display name is only unique together with its institute
        if not institute:
            click.echo("Please specify the owner of the case that should be "
                       "deleted with flag '-i/--institute'.")
            context.abort()
        case_id = "{0}-{1}".format(institute, display_name)

    LOG.info("Running deleting case {0}".format(case_id))
    result = adapter.delete_case(
        case_id=case_id,
        institute_id=institute,
        display_name=display_name
    )
    if result.deleted_count != 1:
        LOG.warning("Case does not exist in database")
        context.abort()

    # The case is gone; remove both its clinical and research variants
    for variant_type in ('clinical', 'research'):
        adapter.delete_variants(case_id=case_id, variant_type=variant_type)
def individuals(context, institute, causatives, case_id):
    """Show all individuals from all cases in the database"""
    LOG.info("Running scout view individuals")
    adapter = context.obj['adapter']

    if case_id:
        case_obj = adapter.case(case_id=case_id)
        if case_obj:
            cases = [case_obj]
        else:
            LOG.info("Could not find case %s", case_id)
            return
    else:
        cases = [case_obj for case_obj in
                 adapter.cases(
                     collaborator=institute,
                     has_causatives=causatives)]
        if len(cases) == 0:
            LOG.info("Could not find cases that match criteria")
            return

    # Cleanup: the original built an `individuals` list/generator that was
    # never used; the loop below iterates the cases directly.
    click.echo("#case_id\tind_id\tdisplay_name\tsex\tphenotype\tmother\tfather")
    for case in cases:
        for ind_obj in case['individuals']:
            ind_info = [
                case['_id'], ind_obj['individual_id'],
                ind_obj['display_name'], SEX_MAP[int(ind_obj['sex'])],
                PHENOTYPE_MAP[ind_obj['phenotype']], ind_obj['mother'],
                ind_obj['father']
            ]
            click.echo('\t'.join(ind_info))
def parse_matches(patient_id, match_objs):
    """Parse a list of matchmaker matches objects and returns
    a readable list of matches to display in matchmaker matches view.

    Args:
        patient_id(str): id of a mme patient
        match_objs(list): list of match objs returned by MME server for the patient
            # match_objs looks like this:
            [
                {
                    'node' : { id : node_id , label: node_label},
                    'patients' : [
                        { 'patient': {patient1_data} },
                        { 'patient': {patient2_data} },
                        ..
                    ]
                },
                ..
            ]

    Returns:
        parsed_matches(list): a list of parsed match objects, sorted by
            descending match date
    """
    LOG.info('Parsing MatchMaker matches for patient {}'.format(patient_id))
    parsed_matches = []

    for match_obj in match_objs:
        # convert match date from millisecond to readable date
        milliseconds_date = match_obj['created']['$date']
        mdate = datetime.datetime.fromtimestamp(milliseconds_date/1000.0)
        match_type = 'external'
        matching_patients = []

        parsed_match = {
            'match_oid' : match_obj['_id']['$oid'],  # save match object ID
            'match_date' : mdate
        }

        # if patient was used as query patient:
        if match_obj['data']['patient']['id'] == patient_id:
            match_results = match_obj['results'] # List of matching patients
            for node_result in match_results:
                if match_obj['match_type'] == 'internal':
                    match_type = 'internal'

                for patient in node_result['patients']:
                    match_patient = {
                        'patient_id' : patient['patient']['id'],
                        'score' : patient['score'],
                        'patient' : patient['patient'],
                        'node' : node_result['node']
                    }
                    matching_patients.append(match_patient)

        else: # else if patient was returned as a match result for another patient
            m_patient = match_obj['data']['patient']
            contact_institution = m_patient['contact'].get('institution')
            # Matches from other Scout instances are treated as internal
            if contact_institution and 'Scout software user' in contact_institution:
                match_type = 'internal'

            # loop over match results to capture score for matching
            score = None
            for res in match_obj['results']:
                for patient in res['patients']:
                    # NOTE(review): info-level log below looks like a
                    # debugging leftover; consider demoting to debug
                    LOG.info('Looping in else, patient:{}'.format(patient['patient']['id']))
                    if patient['patient']['id'] == patient_id:
                        score = patient['score']
                        match_patient = {
                            'patient_id' : m_patient['id'],
                            'score' : score,
                            'patient' : m_patient,
                            'node' : res['node']
                        }
                        matching_patients.append(match_patient)

        parsed_match['match_type'] = match_type
        parsed_match['patients'] = matching_patients
        parsed_matches.append(parsed_match)

    # sort results by descending match date (the original comment claimed
    # "descending score", but the sort key is 'match_date')
    parsed_matches = sorted(parsed_matches, key=lambda k: k['match_date'], reverse=True)
    return parsed_matches
def cases(context, institute, display_name, case_id, nr_variants, variants_treshold):
    """Display cases from the database"""
    LOG.info("Running scout view institutes")
    adapter = context.obj['adapter']

    if case_id:
        case_obj = adapter.case(case_id=case_id)
        models = [case_obj] if case_obj else []
    else:
        models = list(adapter.cases(collaborator=institute, name_query=display_name))

    if not models:
        LOG.info("No cases could be found")
        return

    if variants_treshold:
        LOG.info("Only show cases with more than %s variants", variants_treshold)
        # A threshold implies that variants have to be counted
        nr_variants = True

    header = ['case_id', 'display_name', 'institute']
    if nr_variants:
        LOG.info("Displaying number of variants for each case")
        header.extend(['clinical', 'research'])
    click.echo("#" + '\t'.join(header))

    for model in models:
        output_str = "{:<12}\t{:<12}\t{:<12}"
        output_values = [model['_id'], model['display_name'], model['owner']]
        if nr_variants:
            output_str += "\t{:<12}\t{:<12}"
            nr_clinical = 0
            nr_research = 0
            nr_total = 0
            # Count clinical vs research variants for this case
            for nr_total, variant in enumerate(
                    adapter.variant_collection.find({'case_id': model['_id']}), 1):
                if variant['variant_type'] == 'clinical':
                    nr_clinical += 1
                else:
                    nr_research += 1
            output_values.extend([nr_clinical, nr_research])
            if variants_treshold and nr_total < variants_treshold:
                LOG.debug("Case %s had to few variants, skipping", model['_id'])
                continue
        click.echo(output_str.format(*output_values))
def load_user(user_email):
    """Returns the currently active user as an object."""
    user_obj = store.user(user_email)
    if not user_obj:
        # Unknown email: no user to log in
        return None
    return LoginUser(user_obj)
def login():
    """Login a user if they have access."""
    # remember where to redirect after a successful login
    if 'next' in request.args:
        session['next_url'] = request.args['next']

    if current_app.config.get('GOOGLE'):
        # delegate authentication to the Google OAuth flow
        return google.authorize(callback=url_for('.authorized', _external=True))

    user_email = request.args.get('email')
    if store.user(user_email) is None:
        flash("email not whitelisted: {}".format(user_email), 'warning')
        return redirect(url_for('public.index'))
    return perform_login(store.user(user_email))
def build_institute(internal_id, display_name, sanger_recipients=None,
                    coverage_cutoff=None, frequency_cutoff=None):
    """Build a institute object

    Args:
        internal_id(str)
        display_name(str)
        sanger_recipients(list(str)): List with email addresses
        coverage_cutoff(int)
        frequency_cutoff(float)

    Returns:
        institute_obj(scout.models.Institute)
    """
    LOG.info("Building institute %s with display name %s", internal_id, display_name)
    institute_obj = Institute(
        internal_id=internal_id,
        display_name=display_name,
        sanger_recipients=sanger_recipients,
        coverage_cutoff=coverage_cutoff,
        frequency_cutoff=frequency_cutoff,
    )
    # Strip unset (None) fields so they are not stored in the database
    unset_keys = [key for key in institute_obj if institute_obj[key] is None]
    for key in unset_keys:
        institute_obj.pop(key)
    return institute_obj
def delete_event(self, event_id):
    """Delete a event

    Arguments:
        event_id (str): The database key for the event
    """
    # Fixed log message: "Deleting event{0}" was missing a space
    LOG.info("Deleting event {0}".format(event_id))
    # The collection stores ObjectId keys; coerce strings before querying
    if not isinstance(event_id, ObjectId):
        event_id = ObjectId(event_id)
    self.event_collection.delete_one({'_id': event_id})
    LOG.debug("Event {0} deleted".format(event_id))
def create_event(self, institute, case, user, link, category, verb,
                 subject, level='specific', variant=None, content=None,
                 panel=None):
    """Create a Event with the parameters given.

    Arguments:
        institute (dict): A institute
        case (dict): A case
        user (dict): A User
        link (str): The url to be used in the event
        category (str): case or variant
        verb (str): What type of event
        subject (str): What is operated on
        level (str): 'specific' or 'global'. Default is 'specific'
        variant (dict): A variant
        content (str): The content of the comment
        panel (str): A panel name

    Returns:
        event(dict): The inserted event
    """
    variant = variant or {}
    # Use a single timestamp so created_at and updated_at are identical
    # at creation time (two datetime.now() calls would differ slightly)
    now = datetime.now()
    event = dict(
        institute=institute['_id'],
        case=case['_id'],
        user_id=user['_id'],
        user_name=user['name'],
        link=link,
        category=category,
        verb=verb,
        subject=subject,
        level=level,
        variant_id=variant.get('variant_id'),
        content=content,
        panel=panel,
        created_at=now,
        updated_at=now,
    )
    LOG.debug("Saving Event")
    self.event_collection.insert_one(event)
    LOG.debug("Event Saved")
    return event
def events(self, institute, case=None, variant_id=None, level=None,
           comments=False, panel=None):
    """Fetch events from the database.

    Args:
        institute (dict): A institute
        case (dict): A case
        variant_id (str, optional): global variant id
        level (str, optional): restrict comments to 'specific' or 'global'
            (NOTE(review): this parameter is currently unused in the query)
        comments (bool, optional): restrict events to include only comments
        panel (str): A panel name

    Returns:
        pymongo.Cursor: Query result, newest events first
    """
    query = {}

    if variant_id:
        if comments:
            # If it's comment-related event collect global and variant-specific comment events
            LOG.debug("Fetching all comments for institute {0} case {1} variant {2}".format(
                institute['_id'], case['_id'], variant_id))
            query = {
                '$or': [
                    {
                        # global comments are visible for every case
                        'category' : 'variant',
                        'variant_id' : variant_id,
                        'verb' : 'comment',
                        'level' : 'global'
                    },
                    {
                        # specific comments belong to this institute + case
                        'category' : 'variant',
                        'variant_id' : variant_id,
                        'institute' : institute['_id'],
                        'case' : case['_id'],
                        'verb' : 'comment',
                        'level' : 'specific'
                    }
                ]
            }
        else: # Collect other variant-specific events which are not comments
            query['institute'] = institute['_id']
            query['category'] = 'variant'
            query['variant_id'] = variant_id
            query['case'] = case['_id']
    else:
        query['institute'] = institute['_id']
        if panel:
            query['panel'] = panel
        # If no variant_id or panel we know that it is a case level comment
        else:
            query['category'] = 'case'
            if case:
                query['case'] = case['_id']

        if comments:
            query['verb'] = 'comment'

    # Newest first
    return self.event_collection.find(query).sort('created_at', pymongo.DESCENDING)
def user_events(self, user_obj=None):
    """Fetch all events by a specific user."""
    if user_obj:
        query = {'user_id': user_obj['_id']}
    else:
        # No user given: return every event
        query = {}
    return self.event_collection.find(query)
def add_phenotype(self, institute, case, user, link, hpo_term=None,
                  omim_term=None, is_group=False):
    """Add a new phenotype term to a case

    Create a phenotype term and event with the given information

    Args:
        institute (Institute): A Institute object
        case (Case): Case object
        user (User): A User object
        link (str): The url to be used in the event
        hpo_term (str): A hpo id
        omim_term (str): A omim id
        is_group (bool): is phenotype term a group?

    Returns:
        updated_case(dict)

    Raises:
        ValueError: If neither hpo_term nor omim_term is given, or if an
            hpo term does not exist in the database
    """
    hpo_results = []
    if hpo_term:
        hpo_results = [hpo_term]
    elif omim_term:
        # An omim term is expanded to all hpo terms of that disease
        LOG.debug("Fetching info for mim term {0}".format(omim_term))
        disease_obj = self.disease_term(omim_term)
        if disease_obj:
            for hpo_term in disease_obj.get('hpo_terms', []):
                hpo_results.append(hpo_term)
    else:
        # The original wrapped this raise in a try/except ValueError that
        # only re-raised the same exception; that no-op handler was removed.
        raise ValueError('Must supply either hpo or omim term')

    existing_terms = set(term['phenotype_id'] for term in
                         case.get('phenotype_terms', []))

    updated_case = case
    phenotype_terms = []
    for hpo_term in hpo_results:
        LOG.debug("Fetching info for hpo term {0}".format(hpo_term))
        hpo_obj = self.hpo_term(hpo_term)
        if hpo_obj is None:
            raise ValueError("Hpo term: %s does not exist in database" % hpo_term)

        phenotype_id = hpo_obj['_id']
        description = hpo_obj['description']
        # Only add terms that are not already on the case
        if phenotype_id not in existing_terms:
            phenotype_term = dict(phenotype_id=phenotype_id, feature=description)
            phenotype_terms.append(phenotype_term)

            LOG.info("Creating event for adding phenotype term for case"
                     " {0}".format(case['display_name']))
            self.create_event(
                institute=institute,
                case=case,
                user=user,
                link=link,
                category='case',
                verb='add_phenotype',
                subject=case['display_name'],
                content=phenotype_id
            )

    # Groups are stored both as phenotype terms and as phenotype groups;
    # the duplicated find_one_and_update branches were merged into one call
    update = {'$addToSet': {'phenotype_terms': {'$each': phenotype_terms}}}
    if is_group:
        update['$addToSet']['phenotype_groups'] = {'$each': phenotype_terms}
    updated_case = self.case_collection.find_one_and_update(
        {'_id': case['_id']},
        update,
        return_document=pymongo.ReturnDocument.AFTER
    )

    LOG.debug("Case updated")
    return updated_case
def remove_phenotype(self, institute, case, user, link, phenotype_id,
                     is_group=False):
    """Remove an existing phenotype from a case

    Args:
        institute (dict): A Institute object
        case (dict): Case object
        user (dict): A User object
        link (dict): The url to be used in the event
        phenotype_id (str): A phenotype id
        is_group (bool): whether the term is also a phenotype group

    Returns:
        updated_case(dict)
    """
    LOG.info("Removing HPO term from case {0}".format(case['display_name']))

    # Groups are stored both in phenotype_terms and phenotype_groups; the
    # two duplicated update branches were merged into one call
    pull = {'phenotype_terms': {'phenotype_id': phenotype_id}}
    if is_group:
        pull['phenotype_groups'] = {'phenotype_id': phenotype_id}
    updated_case = self.case_collection.find_one_and_update(
        {'_id': case['_id']},
        {'$pull': pull},
        return_document=pymongo.ReturnDocument.AFTER
    )

    LOG.info("Creating event for removing phenotype term {0}"
             " from case {1}".format(phenotype_id, case['display_name']))
    self.create_event(
        institute=institute,
        case=case,
        user=user,
        link=link,
        category='case',
        verb='remove_phenotype',
        subject=case['display_name']
    )
    LOG.debug("Case updated")
    return updated_case
def comment(self, institute, case, user, link, variant=None,
            content="", comment_level="specific"):
    """Add a comment to a variant or a case.

    This function will create an Event to log that a user have commented on
    a variant. If a variant id is given it will be a variant comment.
    A variant comment can be 'global' or specific. The global comments will
    be shown for this variation in all cases while the specific comments
    will only be shown for a specific case.

    Arguments:
        institute (dict): A Institute object
        case (dict): A Case object
        user (dict): A User object
        link (str): The url to be used in the event
        variant (dict): A variant object
        content (str): The content of the comment
        comment_level (str): Any one of 'specific' or 'global'.
                             Default is 'specific'

    Return:
        comment(dict): The comment event that was inserted
    """
    # NOTE(review): SyntaxError is an odd exception type for argument
    # validation, but it is kept since callers may already catch it
    if comment_level not in COMMENT_LEVELS:
        raise SyntaxError("Comment levels can only be in {}".format(','.join(COMMENT_LEVELS)))

    if variant:
        LOG.info("Creating event for a {0} comment on variant {1}".format(
            comment_level, variant['display_name']))
        return self.create_event(
            institute=institute,
            case=case,
            user=user,
            link=link,
            category='variant',
            verb='comment',
            level=comment_level,
            variant=variant,
            subject=variant['display_name'],
            content=content
        )

    LOG.info("Creating event for a comment on case {0}".format(
        case['display_name']))
    return self.create_event(
        institute=institute,
        case=case,
        user=user,
        link=link,
        category='case',
        verb='comment',
        subject=case['display_name'],
        content=content
    )
def parse_genotypes(variant, individuals, individual_positions):
    """Parse the genotype calls for a variant

    Args:
        variant(cyvcf2.Variant)
        individuals: List[dict]
        individual_positions(dict)

    Returns:
        genotypes(list(dict)): A list of genotypes, one per individual
    """
    return [
        parse_genotype(variant, ind, individual_positions[ind['individual_id']])
        for ind in individuals
    ]
def check_coordinates(chromosome, pos, coordinates):
    """Check if the variant is in the interval given by the coordinates

    Args:
        chromosome(str): Variant chromosome
        pos(int): Variant position
        coordinates(dict): Dictionary with the region of interest
            ('chrom', 'start', 'end')

    Returns:
        bool: True if (chromosome, pos) falls inside the region
    """
    # Strip any chromosome prefix (e.g. 'chr') before comparing names
    chrom_match = CHR_PATTERN.match(chromosome)
    chrom = chrom_match.group(2)
    if chrom != coordinates['chrom']:
        return False
    # Inclusive interval check, expressed as a chained comparison
    return coordinates['start'] <= pos <= coordinates['end']
def hpo_terms():
    """Render search box and view for HPO phenotype terms"""
    if request.method == 'GET':
        return controllers.hpo_terms(store=store, limit=100)

    # POST: the user is searching for a specific term or phenotype
    search_term = request.form.get('hpo_term')
    limit = request.form.get('limit')
    data = controllers.hpo_terms(store=store, query=search_term, limit=limit)
    return dict(data, query=search_term, limit=limit)
def transcripts(context, build):
    """Export all transcripts to .bed like format"""
    LOG.info("Running scout export transcripts")
    adapter = context.obj['adapter']

    # Header line of the .bed-like output
    click.echo("#Chrom\tStart\tEnd\tTranscript\tRefSeq\tHgncID")
    for tx_obj in export_transcripts(adapter):
        click.echo("{0}\t{1}\t{2}\t{3}\t{4}\t{5}".format(
            tx_obj['chrom'],
            tx_obj['start'],
            tx_obj['end'],
            tx_obj['ensembl_transcript_id'],
            tx_obj.get('refseq_id', ''),
            tx_obj['hgnc_id'],
        ))
def exons(context, build):
    """Load exons into the scout database"""
    adapter = context.obj['adapter']
    start = datetime.now()

    # Drop any previously loaded exons for this build before reloading
    if adapter.exons(build=build).count():
        LOG.warning("Dropping all exons ")
        adapter.drop_exons(build=build)
        LOG.info("Exons dropped")

    # Fetch the exons from ensembl and load them
    ensembl_exons = fetch_ensembl_exons(build=build)
    load_exons(adapter, ensembl_exons, build)
    adapter.update_indexes()

    LOG.info("Time to load exons: {0}".format(datetime.now() - start))
def region(context, hgnc_id, case_id, chromosome, start, end):
    """Load all variants in a region to a existing case"""
    load_region(
        adapter=context.obj['adapter'],
        case_id=case_id,
        hgnc_id=hgnc_id,
        chrom=chromosome,
        start=start,
        end=end,
    )
def all_month_events(self, year, month, category=None, tag=None,
                     loc=False, cncl=False):
    """
    Returns all events that have an occurrence within the given
    month & year.

    category/tag narrow the queryset via self._get_kwargs; loc and cncl
    prefetch the 'location' and 'cancellations' relations respectively.
    """
    kwargs = self._get_kwargs(category, tag)
    # First and last datetime of the requested month
    ym_first, ym_last = self.get_first_and_last(year, month)
    pref = []
    if loc:
        pref.append("location")
    if cncl:
        pref.append("cancellations")
    # for yearly repeat, we need to check the start and end date months
    # b/c yearly events should occur every year in the same month
    r = Q(repeat="YEARLY")
    dstart_mo = Q(start_date__month=month)
    dend_mo = Q(end_date__month=month)
    dstart_yr = Q(start_date__year=year)
    dend_yr = Q(end_date__year=year)
    return self.model.objects.filter(
        # only events that are still repeating
        r & (dstart_mo | dend_mo) |  # yearly repeat
        (~Q(repeat="NEVER")) |  # all other repeats
        # NOTE(review): the second 'dend_yr' below looks like it may have
        # been meant to be 'dend_mo' — confirm against the intended logic
        ((dstart_yr | dend_yr) & (dstart_mo | dend_yr)),  # non-repeating
        Q(end_repeat=None) | Q(end_repeat__gte=ym_first),
        start_date__lte=ym_last  # no events that haven't started yet
    ).filter(**kwargs).prefetch_related(*pref).order_by('start_date').distinct()
def live(self, now):
    """
    Returns a queryset of events that will occur again after 'now'.
    Used to help generate a list of upcoming events.

    Args:
        now: the reference datetime; events entirely in the past that
            never repeat are excluded
    """
    return self.model.objects.filter(
        # still repeating, or starts/ends in the future
        Q(end_repeat=None) | Q(end_repeat__gte=now) |
        Q(start_date__gte=now) | Q(end_date__gte=now)
    ).exclude(  # exclude single day events that won't occur again
        start_date__lt=now, end_date__lt=now,
        repeat="NEVER", end_repeat=None,
    ).prefetch_related('cancellations')
def parse_reqs(req_path='./requirements.txt'):
    """Recursively parse requirements from nested pip files.

    Args:
        req_path(str): path to a requirements file, relative to ``here``

    Returns:
        list: requirement strings
    """
    install_requires = []
    # Bug fix: the file named by ``req_path`` is now actually opened; the
    # original always re-read 'requirements.txt', so nested '-r' includes
    # were never followed and would recurse on the same file.
    with io.open(os.path.join(here, req_path), encoding='utf-8') as handle:
        # remove comments and empty lines
        lines = (line.strip() for line in handle
                 if line.strip() and not line.startswith('#'))

        for line in lines:
            # check for nested requirements files
            if line.startswith('-r'):
                # recursively call this function ('-r ' prefix stripped)
                install_requires += parse_reqs(req_path=line[3:])
            else:
                # add the line as a new requirement
                install_requires.append(line)
    return install_requires
def existing_gene(store, panel_obj, hgnc_id):
    """Check if gene is already added to a panel."""
    # Last entry wins when the same hgnc_id occurs more than once,
    # mirroring the original dict-comprehension lookup.
    match = None
    for gene in panel_obj['genes']:
        if gene['hgnc_id'] == hgnc_id:
            match = gene
    return match
q273299 | update_panel | test | def update_panel(store, panel_name, csv_lines, option):
"""Update an existing gene panel with genes.
Args:
store(scout.adapter.MongoAdapter)
panel_name(str)
csv_lines(iterable(str)): Stream with genes
option(str): 'add' or 'replace'
Returns:
panel_obj(dict)
"""
new_genes= []
panel_obj = store.gene_panel(panel_name)
if panel_obj is None:
return None
try:
new_genes = parse_genes(csv_lines) # a list of gene dictionaries containing gene info
except SyntaxError as error:
flash(error.args[0], 'danger')
return None
# if existing genes are to be replaced by those in csv_lines
if option == 'replace':
# all existing genes should be deleted
for gene in panel_obj['genes']:
#create extra key to use in pending actions:
gene['hgnc_symbol'] = gene['symbol']
store.add_pending(panel_obj, gene, action='delete', info=None)
for new_gene in new_genes:
if not new_gene['hgnc_id']:
flash("gene missing hgnc id: {}".format(new_gene['hgnc_symbol']),'danger')
continue
gene_obj = store.hgnc_gene(new_gene['hgnc_id'])
if gene_obj is None:
flash("gene not found: {} - {}".format(new_gene['hgnc_id'], new_gene['hgnc_symbol']),'danger')
continue
if new_gene['hgnc_symbol'] and gene_obj['hgnc_symbol'] != new_gene['hgnc_symbol']:
flash("symbol mis-match: {0} | {1}".format(
gene_obj['hgnc_symbol'], new_gene['hgnc_symbol']), 'warning')
info_data = {
'disease_associated_transcripts': new_gene['transcripts'],
'reduced_penetrance': new_gene['reduced_penetrance'],
'mosaicism': new_gene['mosaicism'],
'inheritance_models': new_gene['inheritance_models'],
'database_entry_version': new_gene['database_entry_version'],
}
if option == 'replace': # there will be no existing genes for sure, because we're replacing them all
action = 'add'
else: # add option. Add if genes is not existing. otherwise edit it
existing_genes = {gene['hgnc_id'] for gene in panel_obj['genes']}
action = 'edit' if gene_obj['hgnc_id'] in existing_genes else 'add'
store.add_pending(panel_obj, gene_obj, action=action, info=info_data)
return panel_obj | python | {
"resource": ""
} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.