_id stringlengths 2 7 | title stringlengths 1 88 | partition stringclasses 3
values | text stringlengths 75 19.8k | language stringclasses 1
value | meta_information dict |
|---|---|---|---|---|---|
def new_panel(store, institute_id, panel_name, display_name, csv_lines):
    """Create a new gene panel.

    Args:
        store(scout.adapter.MongoAdapter)
        institute_id(str): id of the institute that will own the panel
        panel_name(str): unique name of the new panel
        display_name(str): human friendly name shown in the UI
        csv_lines(iterable(str)): Stream with genes

    Returns:
        panel_id: the ID of the new panel document created or None
    """
    institute_obj = store.institute(institute_id)
    if institute_obj is None:
        flash("{}: institute not found".format(institute_id))
        return None

    # refuse to create a second panel with the same name
    existing_panel = store.gene_panel(panel_name)
    if existing_panel:
        flash("panel already exists: {} - {}".format(
            existing_panel['panel_name'], existing_panel['display_name']))
        return None

    log.debug("parse genes from CSV input")
    try:
        new_genes = parse_genes(csv_lines)
    except SyntaxError as error:
        flash(error.args[0], 'danger')
        return None

    log.debug("build new gene panel")
    panel_id = None
    panel_info = dict(
        panel_name=panel_name,
        institute=institute_obj['_id'],
        version=1.0,
        date=dt.datetime.now(),
        display_name=display_name,
        genes=new_genes,
    )
    try:
        panel_id = store.add_gene_panel(build_panel(panel_info, store))
    except Exception as err:
        log.error('An error occurred while adding the gene panel {}'.format(err))
    return panel_id
def panel_export(store, panel_obj):
    """Preprocess a panel of genes."""
    # swap the institute id for the full institute document
    panel_obj['institute'] = store.institute(panel_obj['institute'])
    panel_obj['name_and_version'] = "{}({})".format(
        panel_obj['display_name'], panel_obj['version'])
    return dict(panel=panel_obj)
def archive_info(database: Database, archive_case: dict) -> dict:
    """Get information about a case from archive.

    Args:
        database: handle to the archive mongo database
        archive_case: raw case document from the archive

    Returns:
        dict: assignee emails, coordinates for suspected/causative variants
            and phenotype terms/groups extracted from the archive case
    """
    data = {
        'collaborators': archive_case['collaborators'],
        'synopsis': archive_case.get('synopsis'),
        'assignees': [],
        'suspects': [],
        'causatives': [],
        'phenotype_terms': [],
        'phenotype_groups': [],
    }
    if archive_case.get('assignee'):
        archive_user = database.user.find_one({'_id': archive_case['assignee']})
        # BUGFIX: the dict key is 'assignees' (plural); the old code appended
        # to a non-existing 'assignee' key and raised KeyError
        data['assignees'].append(archive_user['email'])
    for key in ['suspects', 'causatives']:
        for variant_id in archive_case.get(key, []):
            archive_variant = database.variant.find_one({'_id': variant_id})
            data[key].append({
                'chromosome': archive_variant['chromosome'],
                'position': archive_variant['position'],
                'reference': archive_variant['reference'],
                'alternative': archive_variant['alternative'],
                'variant_type': archive_variant['variant_type'],
            })
    for key in ['phenotype_terms', 'phenotype_groups']:
        for archive_term in archive_case.get(key, []):
            data[key].append({
                'phenotype_id': archive_term['phenotype_id'],
                'feature': archive_term['feature'],
            })
    return data
def migrate_case(adapter: MongoAdapter, scout_case: dict, archive_data: dict):
    """Migrate case information from archive.

    Args:
        adapter: adapter connected to the scout database
        scout_case: the case document updated in place and persisted
        archive_data: output of ``archive_info`` for the matching archive case
    """
    # update collaborators
    collaborators = list(set(scout_case['collaborators'] + archive_data['collaborators']))
    if collaborators != scout_case['collaborators']:
        LOG.info(f"set collaborators: {', '.join(collaborators)}")
        scout_case['collaborators'] = collaborators
    # update assignees
    if len(scout_case.get('assignees', [])) == 0:
        # BUGFIX: archive_info stores a list under 'assignees'; the old code
        # read a non-existing singular 'assignee' key and raised KeyError
        for archive_assignee in archive_data['assignees']:
            scout_user = adapter.user(archive_assignee)
            if scout_user:
                scout_case.setdefault('assignees', []).append(archive_assignee)
            else:
                LOG.warning(f"{archive_assignee}: unable to find assigned user")
    # add/update suspected/causative variants
    for key in ['suspects', 'causatives']:
        scout_case[key] = scout_case.get(key, [])
        for archive_variant in archive_data[key]:
            variant_id = get_variantid(archive_variant, scout_case['_id'])
            scout_variant = adapter.variant(variant_id)
            if scout_variant:
                if scout_variant['_id'] in scout_case[key]:
                    LOG.info(f"{scout_variant['_id']}: variant already in {key}")
                else:
                    LOG.info(f"{scout_variant['_id']}: add to {key}")
                    # BUGFIX: append to the case list (was scout_variant[key])
                    scout_case[key].append(scout_variant['_id'])
            else:
                # BUGFIX: scout_variant is None in this branch - the old code
                # crashed on scout_variant['_id']; log/append the computed id
                LOG.warning(f"{variant_id}: unable to find variant ({key})")
                scout_case[key].append(variant_id)
    if not scout_case.get('synopsis'):
        # update synopsis
        scout_case['synopsis'] = archive_data['synopsis']
    scout_case['is_migrated'] = True
    adapter.case_collection.find_one_and_replace(
        {'_id': scout_case['_id']},
        scout_case,
    )
    # add/update phenotype groups/terms
    scout_institute = adapter.institute(scout_case['owner'])
    scout_user = adapter.user('mans.magnusson@scilifelab.se')
    for key in ['phenotype_terms', 'phenotype_groups']:
        for archive_term in archive_data[key]:
            adapter.add_phenotype(
                institute=scout_institute,
                case=scout_case,
                user=scout_user,
                link=f"/{scout_case['owner']}/{scout_case['display_name']}",
                hpo_term=archive_term['phenotype_id'],
                is_group=key == 'phenotype_groups',
            )
def migrate(uri: str, archive_uri: str, case_id: str, dry: bool, force: bool):
    """Update all information that was manually annotated from a old instance."""
    scout_client = MongoClient(uri)
    scout_db_name = uri.rsplit('/', 1)[-1]
    scout_adapter = MongoAdapter(database=scout_client[scout_db_name])
    scout_case = scout_adapter.case(case_id)
    if scout_case.get('is_migrated') and not force:
        print("case already migrated")
        return
    archive_client = MongoClient(archive_uri)
    archive_db_name = archive_uri.rsplit('/', 1)[-1]
    archive_database = archive_client[archive_db_name]
    # match the archive case on owner + display name
    archive_case = archive_database.case.find_one({
        'owner': scout_case['owner'],
        'display_name': scout_case['display_name']
    })
    archive_data = archive_info(archive_database, archive_case)
    if dry:
        print(ruamel.yaml.safe_dump(archive_data))
        return
    # NOTE(review): the actual migration call is currently disabled
    #migrate_case(scout_adapter, scout_case, archive_data)
def research(context, case_id, institute, force):
    """Upload research variants to cases

    If a case is specified, all variants found for that case will be
    uploaded.

    If no cases are specified then all cases that have 'research_requested'
    will have there research variants uploaded

    Args:
        context: click context holding the adapter
        case_id(str): upload research variants only for this case
        institute(str): institute id, parsed from case_id when omitted
        force(bool): upload even when research was not requested
    """
    LOG.info("Running scout load research")
    adapter = context.obj['adapter']
    if case_id:
        if not institute:
            # There was an old way to create case ids so we need a special case to handle this
            # Assume institute-case combo; split only on the first dash so a
            # case name that itself contains dashes is kept intact (BUGFIX)
            splitted_case = case_id.split('-', 1)
            # Check if first part is institute, then we know it is the old format
            if len(splitted_case) > 1:
                institute_obj = adapter.institute(splitted_case[0])
                if institute_obj:
                    institute = institute_obj['_id']
                    case_id = splitted_case[1]
        case_obj = adapter.case(institute_id=institute, case_id=case_id)
        if case_obj is None:
            LOG.warning("No matching case found")
            context.abort()
        case_objs = [case_obj]
    else:
        # Fetch all cases that have requested research
        case_objs = adapter.cases(research_requested=True)

    default_threshold = 8
    files = False
    # (variant category, key in case_obj['vcf_files'], label for log messages)
    research_vcfs = (
        ('snv', 'vcf_snv_research', 'SNV'),
        ('sv', 'vcf_sv_research', 'SV'),
        ('cancer', 'vcf_cancer_research', 'cancer'),
    )
    for case_obj in case_objs:
        # .get() so a case document without the flag does not crash (BUGFIX)
        if not (force or case_obj.get('research_requested')):
            # BUGFIX: LOG.warn is a deprecated alias for LOG.warning
            LOG.warning("research not requested, use '--force'")
            continue
        for category, vcf_key, label in research_vcfs:
            if not case_obj['vcf_files'].get(vcf_key):
                continue
            files = True
            # replace any previously loaded research variants of this category
            adapter.delete_variants(case_id=case_obj['_id'],
                                    variant_type='research',
                                    category=category)
            LOG.info("Load research %s for: %s", label, case_obj['_id'])
            adapter.load_variants(
                case_obj=case_obj,
                variant_type='research',
                category=category,
                rank_threshold=default_threshold,
            )
        if not files:
            LOG.warning("No research files found for case %s", case_id)
            context.abort()
        case_obj['is_research'] = True
        case_obj['research_requested'] = False
        adapter.update_case(case_obj)
def load_hgnc_genes(adapter, genes = None, ensembl_lines=None, hgnc_lines=None, exac_lines=None, mim2gene_lines=None,
                    genemap_lines=None, hpo_lines=None, build='37', omim_api_key=''):
    """Load genes into the database

    link_genes will collect information from all the different sources and
    merge it into a dictionary with hgnc_id as key and gene information as values.

    Args:
        adapter(scout.adapter.MongoAdapter)
        genes(dict): If genes are already parsed
        ensembl_lines(iterable(str)): Lines formated with ensembl gene information
        hgnc_lines(iterable(str)): Lines with gene information from genenames.org
        exac_lines(iterable(str)): Lines with information pLi-scores from ExAC
        mim2gene_lines(iterable(str)): Lines with map from omim id to gene symbol
        genemap_lines(iterable(str)): Lines with information of omim entries
        hpo_lines(iterable(str)): Lines information about map from hpo terms to genes
        build(str): What build to use. Defaults to '37'
        omim_api_key(str): Used to fetch the OMIM files when mim2gene_lines or
            genemap_lines are not provided

    Returns:
        gene_objects(list): A list with all gene_objects that was loaded into database
    """
    gene_objects = list()
    if not genes:
        # Fetch the resources if not provided
        if ensembl_lines is None:
            ensembl_lines = fetch_ensembl_genes(build=build)
        hgnc_lines = hgnc_lines or fetch_hgnc()
        exac_lines = exac_lines or fetch_exac_constraint()
        # both OMIM files are needed; download them via the OMIM API otherwise
        if not (mim2gene_lines and genemap_lines):
            if not omim_api_key:
                raise SyntaxError("Need to provide omim api key")
            mim_files = fetch_mim_files(omim_api_key, mim2genes=True, genemap2=True)
            mim2gene_lines = mim_files['mim2genes']
            genemap_lines = mim_files['genemap2']
        if not hpo_lines:
            hpo_files = fetch_hpo_files(hpogenes=True)
            hpo_lines = hpo_files['hpogenes']
        # Link the resources
        genes = link_genes(
            ensembl_lines=ensembl_lines,
            hgnc_lines=hgnc_lines,
            exac_lines=exac_lines,
            mim2gene_lines=mim2gene_lines,
            genemap_lines=genemap_lines,
            hpo_lines=hpo_lines
        )
    non_existing = 0
    nr_genes = len(genes)
    with progressbar(genes.values(), label="Building genes", length=nr_genes) as bar:
        for gene_data in bar:
            # genes without coordinates cannot be stored in the database
            if not gene_data.get('chromosome'):
                LOG.debug("skipping gene: %s. No coordinates found", gene_data.get('hgnc_symbol', '?'))
                non_existing += 1
                continue
            gene_obj = build_hgnc_gene(gene_data, build=build)
            gene_objects.append(gene_obj)
    LOG.info("Loading genes build %s", build)
    adapter.load_hgnc_bulk(gene_objects)
    LOG.info("Loading done. %s genes loaded", len(gene_objects))
    LOG.info("Nr of genes without coordinates in build %s: %s", build,non_existing)
    return gene_objects
def hpo(context, term, description):
    """Show all hpo terms in the database"""
    LOG.info("Running scout view hpo")
    adapter = context.obj['adapter']
    if term:
        term = term.upper()
        if not term.startswith('HP:'):
            # zero-pad the numeric part to seven digits and add the prefix
            term = 'HP:' + term.zfill(7)
        LOG.info("Searching for term %s", term)
        hpo_terms = adapter.hpo_terms(hpo_term=term)
    elif description:
        matches = sorted(adapter.hpo_terms(query=description),
                         key=itemgetter('hpo_number'))
        for matching_term in matches:
            matching_term.pop('genes')
            print("name: {} | {} | {}".format(
                matching_term['_id'],
                matching_term['description'],
                matching_term['hpo_number']))
        context.abort()
    else:
        hpo_terms = adapter.hpo_terms()
    if hpo_terms.count() == 0:
        LOG.warning("No matching terms found")
        return
    click.echo("hpo_id\tdescription\tnr_genes")
    for hpo_obj in hpo_terms:
        click.echo("{0}\t{1}\t{2}".format(
            hpo_obj['hpo_id'],
            hpo_obj['description'],
            len(hpo_obj.get('genes', []))))
def create_app(config_file=None, config=None):
    """Flask app factory function.

    Args:
        config_file(str): path to a python config file, applied last
        config(dict): config values applied before config_file

    Returns:
        app(Flask): the configured application
    """
    app = Flask(__name__)
    # packaged defaults first; `config` and then `config_file` override them
    app.config.from_pyfile('config.py')
    app.jinja_env.add_extension('jinja2.ext.do')
    if config:
        app.config.update(config)
    if config_file:
        app.config.from_pyfile(config_file)
    # If there is a MatchMaker Exchange server
    # collect the connected external nodes
    app.mme_nodes = mme_nodes(app.config.get('MME_URL'), app.config.get('MME_TOKEN'))
    app.config["JSON_SORT_KEYS"] = False
    current_log_level = logger.getEffectiveLevel()
    # force verbose logging when running in debug mode
    coloredlogs.install(level='DEBUG' if app.debug else current_log_level)
    configure_extensions(app)
    register_blueprints(app)
    register_filters(app)
    if not (app.debug or app.testing) and app.config.get('MAIL_USERNAME'):
        # setup email logging of errors
        configure_email_logging(app)
    @app.before_request
    def check_user():
        # redirect unauthenticated users to the login page, except for
        # static/report/public endpoints or when login is disabled
        if not app.config.get('LOGIN_DISABLED') and request.endpoint:
            # check if the endpoint requires authentication
            static_endpoint = 'static' in request.endpoint or 'report' in request.endpoint
            public_endpoint = getattr(app.view_functions[request.endpoint],
                                      'is_public', False)
            relevant_endpoint = not (static_endpoint or public_endpoint)
            # if endpoint requires auth, check if user is authenticated
            if relevant_endpoint and not current_user.is_authenticated:
                # combine visited URL (convert byte string query string to unicode!)
                next_url = "{}?{}".format(request.path, request.query_string.decode())
                login_url = url_for('login.login', next=next_url)
                return redirect(login_url)
    return app
def configure_extensions(app):
    """Configure Flask extensions."""
    # initialise every unconditional extension in the original order
    for extension in (extensions.toolbar, extensions.bootstrap,
                      extensions.mongo, extensions.store,
                      extensions.login_manager, extensions.oauth,
                      extensions.mail):
        extension.init_app(app)
    Markdown(app)
    if app.config.get('SQLALCHEMY_DATABASE_URI'):
        # a chanjo coverage database is configured
        configure_coverage(app)
    if app.config.get('LOQUSDB_SETTINGS'):
        # setup LoqusDB
        extensions.loqusdb.init_app(app)
def register_blueprints(app):
    """Register Flask blueprints."""
    blueprints = (
        public.public_bp,
        genes.genes_bp,
        cases.cases_bp,
        login.login_bp,
        variants.variants_bp,
        panels.panels_bp,
        dashboard.dashboard_bp,
        api.api_bp,
        alignviewers.alignviewers_bp,
        phenotypes.hpo_bp,
        institutes.overview,
    )
    for blueprint in blueprints:
        app.register_blueprint(blueprint)
def configure_coverage(app):
    """Setup coverage related extensions."""
    # setup chanjo report
    app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = True if app.debug else False
    if chanjo_api:
        chanjo_api.init_app(app)
        configure_template_filters(app)
        # register chanjo report blueprint
        app.register_blueprint(report_bp, url_prefix='/reports')
    babel = Babel(app)
    @babel.localeselector
    def get_locale():
        """Determine locale to use for translations."""
        accept_languages = current_app.config.get('ACCEPT_LANGUAGES', ['en'])
        # first check request args
        session_language = request.args.get('lang')
        if session_language in accept_languages:
            current_app.logger.info("using session language: %s", session_language)
            return session_language
        # language can be forced in config
        user_language = current_app.config.get('REPORT_LANGUAGE')
        if user_language:
            return user_language
        # try to guess the language from the user accept header that
        # the browser transmits. We support de/fr/en in this example.
        # The best match wins.
        return request.accept_languages.best_match(accept_languages)
def aliases(context, build, symbol):
    """Show all alias symbols and how they map to ids

    Args:
        context: click context holding the adapter
        build(str): genome build ('37'/'38')
        symbol(str): restrict output to genes matching this alias symbol
    """
    LOG.info("Running scout view aliases")
    adapter = context.obj['adapter']
    if symbol:
        alias_genes = {}
        res = adapter.gene_by_alias(symbol, build=build)
        for gene_obj in res:
            hgnc_id = gene_obj['hgnc_id']
            # Collect the true symbol given by hgnc
            hgnc_symbol = gene_obj['hgnc_symbol']
            # Loop over all aliases
            for alias in gene_obj['aliases']:
                true_id = None
                # If the alias is the same as hgnc symbol we know the true id
                if alias == hgnc_symbol:
                    true_id = hgnc_id
                # If the alias is already in the list we add the id
                if alias in alias_genes:
                    alias_genes[alias]['ids'].add(hgnc_id)
                    if true_id:
                        alias_genes[alias]['true'] = hgnc_id
                else:
                    alias_genes[alias] = {
                        # BUGFIX: only record a true id when the alias equals
                        # the official symbol; the old code always stored
                        # hgnc_id, so 'true' could never be None as the
                        # output below (`... or 'None'`) expects
                        'true': true_id,
                        'ids': set([hgnc_id])
                    }
    else:
        alias_genes = adapter.genes_by_alias(build=build)
    if len(alias_genes) == 0:
        LOG.info("No gene found for build %s", build)
        return
    click.echo("#hgnc_symbol\ttrue_id\thgnc_ids")
    for alias_symbol in alias_genes:
        info = alias_genes[alias_symbol]
        click.echo("{0}\t{1}\t{2}\t".format(
            alias_symbol,
            (info['true'] or 'None'),
            ', '.join(str(gene_id) for gene_id in info['ids'])
        ))
def build_panel(panel_info, adapter):
    """Build a gene_panel object

    Args:
        panel_info(dict): A dictionary with panel information
        adapter (scout.adapter.MongoAdapter)

    Returns:
        panel_obj(dict)

    gene_panel = dict(
        panel_id = str, # required
        institute = str, # institute_id, required
        version = float, # required
        date = datetime, # required
        display_name = str, # default is panel_name
        genes = list, # list of panel genes, sorted on panel_gene['symbol']
    )
    """
    panel_name = panel_info.get('panel_id', panel_info.get('panel_name'))
    if not panel_name:
        raise KeyError("Panel has to have a id")
    panel_obj = {'panel_name': panel_name}
    LOG.info("Building panel with name: {0}".format(panel_name))

    if 'institute' not in panel_info:
        raise KeyError("Panel has to have a institute")
    institute_id = panel_info['institute']
    # Check if institute exists in database
    if adapter.institute(institute_id) is None:
        raise IntegrityError("Institute %s could not be found" % institute_id)
    panel_obj['institute'] = institute_id

    panel_obj['version'] = float(panel_info['version'])
    if 'date' not in panel_info:
        raise KeyError("Panel has to have a date")
    panel_obj['date'] = panel_info['date']
    panel_obj['display_name'] = panel_info.get('display_name', panel_name)

    # build all genes; collect failures so every bad gene gets logged
    gene_objs = []
    failed = False
    for gene_info in panel_info.get('genes', []):
        try:
            gene_objs.append(build_gene(gene_info, adapter))
        except IntegrityError as err:
            LOG.warning(err)
            failed = True
    if failed:
        raise IntegrityError("Some genes did not exist in database. Please see log messages.")
    panel_obj['genes'] = gene_objs
    return panel_obj
def verified(context, collaborator, test, outpath=None):
    """Export variants which have been verified for an institute
    and write them to an excel file.

    Args:
        collaborator(str): institute id
        test(bool): True if the function is called for testing purposes
        outpath(str): path to output file

    Returns:
        written_files(int): number of written or simulated files
    """
    written_files = 0
    collaborator = collaborator or 'cust000'
    LOG.info('Exporting verified variants for cust {}'.format(collaborator))
    adapter = context.obj['adapter']
    verified_vars = adapter.verified(institute_id=collaborator)
    LOG.info('FOUND {} verified variants for institute {}'.format(len(verified_vars), collaborator))
    if not verified_vars:
        LOG.warning('There are no verified variants for institute {} in database!'.format(collaborator))
        return None

    document_lines = export_verified_variants(verified_vars)
    today = datetime.datetime.now().strftime('%Y-%m-%d')
    document_name = 'verified_variants.{}.{}.xlsx'.format(collaborator, today)

    # If this was a test and lines are created return success
    if test and document_lines:
        written_files += 1
        LOG.info('Success. Verified variants file contains {} lines'.format(len(document_lines)))
        return written_files

    # create workbook and new sheet in the output folder (cwd by default)
    outpath = outpath or str(os.getcwd())
    document_path = os.path.join(outpath, document_name)
    workbook = Workbook(document_path)
    report_sheet = workbook.add_worksheet()
    # header row first (row 0) ...
    for col, field in enumerate(VERIFIED_VARIANTS_HEADER):
        report_sheet.write(0, col, field)
    # ... then one row per variant line, one cell per field
    for row, line in enumerate(document_lines, 1):
        for col, field in enumerate(line):
            report_sheet.write(row, col, field)
    workbook.close()

    if os.path.exists(document_path):
        LOG.info('Success. Verified variants file of {} lines was written to disk'.format(len(document_lines)))
        written_files += 1
    return written_files
def variants(context, collaborator, document_id, case_id, json):
    """Export causatives for a collaborator in .vcf format

    Args:
        context: click context holding the adapter
        collaborator(str): institute id, defaults to 'cust000'
        document_id(str): export a single variant document
        case_id(str): export variants for one case; adds FORMAT/GT columns
        json(bool): dump the raw variant documents as JSON instead of VCF
    """
    LOG.info("Running scout export variants")
    adapter = context.obj['adapter']
    collaborator = collaborator or 'cust000'
    variants = export_variants(
        adapter,
        collaborator,
        document_id=document_id,
        case_id=case_id
    )
    if json:
        click.echo(dumps([var for var in variants]))
        return
    # BUGFIX: copy the header; the old code aliased the module-level
    # VCF_HEADER constant and mutated it when appending FORMAT/sample
    # columns, corrupting it for every later call
    vcf_header = list(VCF_HEADER)
    #If case_id is given, print more complete vcf entries, with INFO,
    #and genotypes
    if case_id:
        vcf_header[-1] = vcf_header[-1] + "\tFORMAT"
        case_obj = adapter.case(case_id=case_id)
        for individual in case_obj['individuals']:
            vcf_header[-1] = vcf_header[-1] + "\t" + individual['individual_id']
    #print header
    for line in vcf_header:
        click.echo(line)
    for variant_obj in variants:
        variant_string = get_vcf_entry(variant_obj, case_id=case_id)
        click.echo(variant_string)
def get_vcf_entry(variant_obj, case_id=None):
    """
    Get vcf entry from variant object

    Args:
        variant_obj(dict)
        case_id(str): when given, FORMAT/genotype columns are appended

    Returns:
        variant_string(str): string representing variant in vcf format
    """
    # SNVs annotate the variant type under TYPE, structural variants under SVTYPE
    type_key = 'SVTYPE' if variant_obj['category'] != 'snv' else 'TYPE'
    info_field = 'END={0};{1}={2}'.format(
        variant_obj['end'], type_key, variant_obj['sub_category'].upper())
    columns = [
        variant_obj['chromosome'],
        variant_obj['position'],
        variant_obj['dbsnp_id'],
        variant_obj['reference'],
        variant_obj['alternative'],
        variant_obj['quality'],
        ';'.join(variant_obj['filters']),
        info_field,
    ]
    variant_string = '\t'.join(str(col) for col in columns)
    if case_id:
        genotypes = [sample['genotype_call'] for sample in variant_obj['samples']]
        variant_string = '\t'.join([variant_string, 'GT'] + genotypes)
    return variant_string
def serve(context, config, host, port, debug, livereload):
    """Start the web server."""
    cli_settings = context.obj
    pymongo_config = dict(
        MONGO_HOST=cli_settings['host'],
        MONGO_PORT=cli_settings['port'],
        MONGO_DBNAME=cli_settings['mongodb'],
        MONGO_USERNAME=cli_settings['username'],
        MONGO_PASSWORD=cli_settings['password'],
    )
    # make sure mongod is reachable before starting the app
    valid_connection = check_connection(
        host=pymongo_config['MONGO_HOST'],
        port=pymongo_config['MONGO_PORT'],
        username=pymongo_config['MONGO_USERNAME'],
        password=pymongo_config['MONGO_PASSWORD'],
        authdb=cli_settings['authdb'],
    )
    log.info("Test if mongod is running")
    if not valid_connection:
        log.warning("Connection could not be established")
        log.info("Is mongod running?")
        context.abort()

    config_path = os.path.abspath(config) if config else None
    app = create_app(config=pymongo_config, config_file=config_path)
    if livereload:
        # wrap the WSGI app in a livereload server for development
        Server(app.wsgi_app).serve(host=host, port=port, debug=debug)
    else:
        app.run(host=host, port=port, debug=debug)
def generate_md5_key(list_of_arguments):
    """
    Generate an md5-key from a list of arguments.

    Args:
        list_of_arguments: A list of strings

    Returns:
        str: hex digest of the md5 hash of the space-joined arguments

    Raises:
        SyntaxError: if any argument is not a string
    """
    for arg in list_of_arguments:
        # `str` replaces six's py2-era string_types; this file already uses
        # f-strings, so it only runs on Python 3 where they are equivalent
        if not isinstance(arg, str):
            raise SyntaxError("Error in generate_md5_key: "
                              "Argument: {0} is a {1}".format(arg, type(arg)))
    # renamed local so the builtin `hash` is not shadowed
    md5_hash = hashlib.md5()
    md5_hash.update(' '.join(list_of_arguments).encode('utf-8'))
    return md5_hash.hexdigest()
def init_app(self, app):
    """Setup via Flask."""
    config = app.config
    log.info(
        "connecting to database: %s:%s/%s",
        config.get('MONGO_HOST', 'localhost'),
        config.get('MONGO_PORT', 27017),
        config['MONGO_DBNAME'],
    )
    self.setup(config['MONGO_DATABASE'])
q273320 | MongoAdapter.setup | test | def setup(self, database):
"""Setup connection to database."""
self.db = database
self.hgnc_collection = database.hgnc_gene
self.user_collection = database.user
self.whitelist_collection = database.whitelist
self.institute_collection = database.institute
self.event_collection = database.event
self.case_collection = database.case
self.panel_collection = database.gene_panel
self.hpo_term_collection = database.hpo_term
self.disease_term_collection = database.disease_term
self.variant_collection = database.variant
self.acmg_collection = database.acmg
self.clinvar_collection = database.clinvar
self.clinvar_submission_collection = database.clinvar_submission
self.exon_collection = database.exon
self.transcript_collection = database.transcript | python | {
"resource": ""
} |
def index(context, update):
    """Create indexes for the database"""
    LOG.info("Running scout index")
    adapter = context.obj['adapter']
    # --update only refreshes indexes; otherwise (re)create them all
    if update:
        adapter.update_indexes()
    else:
        adapter.load_indexes()
def database(context, institute_name, user_name, user_mail, api_key):
    """Setup a scout database."""
    LOG.info("Running scout setup database")
    # Fetch the omim information
    api_key = api_key or context.obj.get('omim_api_key')
    if not api_key:
        LOG.warning("Please provide a omim api key with --api-key")
        context.abort()
    # fall back to values from the loaded config for anything not given
    setup_args = {
        'adapter': context.obj['adapter'],
        'institute_id': institute_name or context.obj['institute_name'],
        'user_name': user_name or context.obj['user_name'],
        'user_mail': user_mail or context.obj['user_mail'],
        'api_key': api_key,
    }
    LOG.info("Setting up database %s", context.obj['mongodb'])
    setup_scout(**setup_args)
def demo(context):
    """Setup a scout demo instance. This instance will be populated with a
    case, a gene panel and some variants.
    """
    LOG.info("Running scout setup demo")
    settings = context.obj
    LOG.info("Setting up database %s", settings['mongodb'])
    setup_scout(
        adapter=settings['adapter'],
        institute_id=settings['institute_name'],
        user_name=settings['user_name'],
        user_mail=settings['user_mail'],
        demo=True,
    )
def setup(context, institute, user_mail, user_name):
    """
    Setup scout instances.

    Stores the CLI options in the click context and connects to mongodb so
    that the subcommands (e.g. `database`, `demo`) get a ready MongoAdapter.
    """
    context.obj['institute_name'] = institute
    context.obj['user_name'] = user_name
    context.obj['user_mail'] = user_mail
    if context.invoked_subcommand == 'demo':
        # Update context.obj settings here
        LOG.debug("Change database name to scout-demo")
        context.obj['mongodb'] = 'scout-demo'
    LOG.info("Setting database name to %s", context.obj['mongodb'])
    LOG.debug("Setting host to %s", context.obj['host'])
    LOG.debug("Setting port to %s", context.obj['port'])
    try:
        client = get_connection(
            host=context.obj['host'],
            port=context.obj['port'],
            username=context.obj['username'],
            password=context.obj['password'],
            mongodb=context.obj['mongodb']
        )
    except ConnectionFailure:
        context.abort()
    LOG.info("connecting to database %s", context.obj['mongodb'])
    database = client[context.obj['mongodb']]
    # BUGFIX: "Test if mongod is running" was logged twice in a row
    LOG.info("Test if mongod is running")
    try:
        database.test.find_one()
    except ServerSelectionTimeoutError as err:
        LOG.warning("Connection could not be established")
        LOG.warning("Please check if mongod is running")
        context.abort()
    LOG.info("Setting up a mongo adapter")
    mongo_adapter = MongoAdapter(database)
    context.obj['adapter'] = mongo_adapter
def institutes(context, institute_id, json):
    """Show all institutes in the database

    Args:
        context: click context holding the adapter
        institute_id(str): show only this institute
        json(bool): print raw documents instead of a tab separated table
    """
    LOG.info("Running scout view institutes")
    adapter = context.obj['adapter']
    if institute_id:
        institute_objs = []
        institute_obj = adapter.institute(institute_id)
        if not institute_obj:
            # BUGFIX: corrected typo in log message ("exost" -> "exist")
            LOG.info("Institute %s does not exist", institute_id)
            return
        institute_objs.append(institute_obj)
    else:
        institute_objs = [ins_obj for ins_obj in adapter.institutes()]
    if len(institute_objs) == 0:
        click.echo("No institutes found")
        context.abort()
    header = ''
    if not json:
        # the first institute's keys define the table header
        for key in institute_objs[0].keys():
            header = header + "{0}\t".format(key)
        click.echo(header)
    for institute_obj in institute_objs:
        if json:
            click.echo(institute_obj)
            continue
        row = ''
        for value in institute_obj.values():
            row = row + "{0}\t".format(value)
        click.echo(row)
def parse_genetic_models(models_info, case_id):
    """Parse the genetic models entry of a vcf

    Args:
        models_info(str): The raw vcf information
        case_id(str)

    Returns:
        genetic_models(list)
    """
    genetic_models = []
    if not models_info:
        return genetic_models
    # entries look like "<case_id>:<model>|<model>|...", comma separated
    for family_info in models_info.split(','):
        family_id, *model_part = family_info.split(':')
        if family_id == case_id:
            genetic_models = model_part[0].split('|')
    return genetic_models
def panels(context, institute):
    """Show all gene panels in the database"""
    LOG.info("Running scout view panels")
    adapter = context.obj['adapter']
    panel_objs = adapter.gene_panels(institute_id=institute)
    if panel_objs.count() == 0:
        LOG.info("No panels found")
        context.abort()
    click.echo("#panel_name\tversion\tnr_genes\tdate")
    for panel_obj in panel_objs:
        columns = (
            panel_obj['panel_name'],
            str(panel_obj['version']),
            str(len(panel_obj['genes'])),
            panel_obj['date'].strftime('%Y-%m-%d'),
        )
        click.echo('\t'.join(columns))
def add_institute(self, institute_obj):
    """Add a institute to the database

    Args:
        institute_obj(Institute): institute document, must contain 'internal_id'

    Raises:
        IntegrityError: if an institute with the same internal id already exists
    """
    internal_id = institute_obj['internal_id']
    # BUGFIX: display_name was mistakenly copied from the 'internal_id' key;
    # fall back to the internal id only when no display name is present
    display_name = institute_obj.get('display_name', internal_id)
    # Check if institute already exists
    if self.institute(institute_id=internal_id):
        raise IntegrityError("Institute {0} already exists in database"
                             .format(display_name))
    LOG.info("Adding institute with internal_id: {0} and "
             "display_name: {1}".format(internal_id,
                                        display_name))
    insert_info = self.institute_collection.insert_one(institute_obj)
    ##TODO check if insert info was ok
    LOG.info("Institute saved")
def update_institute(self, internal_id, sanger_recipient=None, coverage_cutoff=None,
                     frequency_cutoff=None, display_name=None, remove_sanger=None,
                     phenotype_groups=None, group_abbreviations=None, add_groups=None):
    """Update the information for an institute

    Args:
        internal_id(str): The internal institute id
        sanger_recipient(str): Email adress to add for sanger order
        coverage_cutoff(int): Update coverage cutoff
        frequency_cutoff(float): New frequency cutoff
        display_name(str): New display name
        remove_sanger(str): Email adress for sanger user to be removed
        phenotype_groups(iterable(str)): New phenotype groups
        group_abbreviations(iterable(str)): abbreviations, parallel to phenotype_groups
        add_groups(bool): If groups should be added. If False replace groups

    Returns:
        updated_institute(dict)

    Raises:
        IntegrityError: if the institute, a sanger user or a hpo term is unknown
    """
    add_groups = add_groups or False
    institute_obj = self.institute(internal_id)
    if not institute_obj:
        raise IntegrityError("Institute {} does not exist in database".format(internal_id))

    # accumulate every field change in one '$set' document; the old code
    # re-assigned updates['$set'] in each branch, silently dropping the
    # values collected by earlier branches (BUGFIX)
    updates = {'$set': {}}
    updated_institute = institute_obj

    if sanger_recipient:
        user_obj = self.user(sanger_recipient)
        if not user_obj:
            raise IntegrityError("user {} does not exist in database".format(sanger_recipient))
        LOG.info("Updating sanger recipients for institute: {0} with {1}".format(
            internal_id, sanger_recipient))
        # BUGFIX: push the new recipient; the old code pushed remove_sanger
        updates['$push'] = {'sanger_recipients': sanger_recipient}
    if remove_sanger:
        LOG.info("Removing sanger recipient {0} from institute: {1}".format(
            remove_sanger, internal_id))
        updates['$pull'] = {'sanger_recipients': remove_sanger}
    if coverage_cutoff:
        LOG.info("Updating coverage cutoff for institute: {0} to {1}".format(
            internal_id, coverage_cutoff))
        updates['$set']['coverage_cutoff'] = coverage_cutoff
    if frequency_cutoff:
        LOG.info("Updating frequency cutoff for institute: {0} to {1}".format(
            internal_id, frequency_cutoff))
        updates['$set']['frequency_cutoff'] = frequency_cutoff
    if display_name:
        LOG.info("Updating display name for institute: {0} to {1}".format(
            internal_id, display_name))
        updates['$set']['display_name'] = display_name
    if phenotype_groups:
        if group_abbreviations:
            group_abbreviations = list(group_abbreviations)
        existing_groups = {}
        if add_groups:
            # extend the current groups instead of replacing them
            existing_groups = institute_obj.get('phenotype_groups', PHENOTYPE_GROUPS)
        for i, hpo_term in enumerate(phenotype_groups):
            hpo_obj = self.hpo_term(hpo_term)
            if not hpo_obj:
                raise IntegrityError("Term {} does not exist".format(hpo_term))
            description = hpo_obj['description']
            abbreviation = None
            if group_abbreviations:
                abbreviation = group_abbreviations[i]
            existing_groups[hpo_term] = {'name': description, 'abbr': abbreviation}
        updates['$set']['phenotype_groups'] = existing_groups

    # only hit the database when some update was actually requested
    if len(updates) > 1 or updates['$set']:
        updates['$set']['updated_at'] = datetime.now()
        updated_institute = self.institute_collection.find_one_and_update(
            {'_id': internal_id}, updates, return_document=pymongo.ReturnDocument.AFTER)
        LOG.info("Institute updated")
    return updated_institute
def institute(self, institute_id):
    """Fetch a single institute document by its id.

    Args:
        institute_id(str): unique id of the institute

    Returns:
        dict or None: the institute, or None when no match exists
    """
    LOG.debug("Fetch institute {}".format(institute_id))
    result = self.institute_collection.find_one({'_id': institute_id})
    if result is None:
        LOG.debug("Could not find institute {0}".format(institute_id))
    return result
"resource": ""
} |
def match_date(date):
    """Check if a string is a valid date

    Accepts YYYY-MM-DD style dates where the separator is one of
    '-', ' ', '.' or '/'.

    Args:
        date(str): candidate date string

    Returns:
        bool: True when the string starts with a valid date
    """
    date_pattern = re.compile(r"^(19|20)\d\d[- /.](0[1-9]|1[012])[- /.](0[1-9]|[12][0-9]|3[01])")
    return bool(date_pattern.match(date))
"resource": ""
} |
def get_date(date, date_format=None):
    """Return a datetime object if there is a valid date.

    Falls back to today's date when no date is given; raises for strings
    that do not look like dates.

    Args:
        date(str): the date string to parse, may be empty/None
        date_format(str): optional explicit strptime format

    Returns:
        date_obj(datetime.datetime)

    Raises:
        ValueError: if the date string is not a recognizable date
    """
    if not date:
        # No date given: default to the current timestamp
        return datetime.datetime.now()

    if date_format:
        return datetime.datetime.strptime(date, date_format)

    if not match_date(date):
        raise ValueError("Date %s is invalid" % date)

    # Accept '-', ' ' or '.' as separators, with '/' as the fallback
    for separator in ('-', ' ', '.'):
        parts = date.split(separator)
        if len(parts) == 3:
            break
    else:
        parts = date.split('/')
    return datetime.datetime(*(int(number) for number in parts))
"resource": ""
} |
def hpo_genes(context, hpo_term):
    """Export a list of genes based on hpo terms"""
    LOG.info("Running scout export hpo_genes")
    adapter = context.obj['adapter']

    # At least one term is required to build a gene list
    if not hpo_term:
        LOG.warning("Please use at least one hpo term")
        context.abort()

    # Print the header followed by one tab separated row per gene
    click.echo("#Gene_id\tCount")
    for term in adapter.generate_hpo_gene_list(*hpo_term):
        click.echo("{0}\t{1}".format(term[0], term[1]))
"resource": ""
} |
def parse_rank_score(rank_score_entry, case_id):
    """Parse the rank score for one case out of a raw rank score entry.

    The entry holds comma separated `<case_id>:<score>` pairs.

    Args:
        rank_score_entry(str): The raw rank score entry
        case_id(str): id of the case of interest

    Returns:
        rank_score(float): the parsed score, or None when the case has no entry
    """
    rank_score = None
    if not rank_score_entry:
        return rank_score
    for family_entry in rank_score_entry.split(','):
        entry_parts = family_entry.split(':')
        if entry_parts[0] == case_id:
            rank_score = float(entry_parts[1])
    return rank_score
"resource": ""
} |
def user(context, institute_id, user_name, user_mail, admin):
    """Add a user to the database.

    Aborts the CLI run when an institute is unknown or the insert fails.
    """
    adapter = context.obj['adapter']

    institutes = []
    for institute in institute_id:
        # Every institute the user is granted access to must already exist
        institute_obj = adapter.institute(institute_id=institute)
        if not institute_obj:
            # BUGFIX: the placeholder was '%', which logged the literal
            # string without interpolating the institute id
            LOG.warning("Institute %s does not exist", institute)
            context.abort()
        institutes.append(institute)

    roles = []
    if admin:
        LOG.info("User is admin")
        roles.append('admin')

    user_info = dict(email=user_mail.lower(), name=user_name, roles=roles, institutes=institutes)
    user_obj = build_user(user_info)
    try:
        adapter.add_user(user_obj)
    except Exception as err:
        LOG.warning(err)
        context.abort()
"resource": ""
} |
def check_connection(host='localhost', port=27017, username=None, password=None,
                     authdb=None, max_delay=1):
    """Check if a connection could be made to the mongo process specified

    Args:
        host(str)
        port(int)
        username(str)
        password(str)
        authdb (str): database to use for authentication
        max_delay(int): Number of milliseconds to wait for connection

    Returns:
        bool: If connection could be established
    """
    # uri format:
    # mongodb://[username:password@]host1[:port1][,host2[:port2],...[,hostN[:portN]]][/[database][?options]]
    if username and password:
        uri = "mongodb://{}:{}@{}:{}/{}".format(
            quote_plus(username), quote_plus(password), host, port, authdb)
        # Keep the password out of the logs
        log_uri = "mongodb://{}:****@{}:{}/{}".format(
            quote_plus(username), host, port, authdb)
    else:
        uri = "mongodb://{}:{}".format(host, port)
        log_uri = uri

    LOG.info("Test connection with uri: %s", log_uri)
    client = MongoClient(uri, serverSelectionTimeoutMS=max_delay)
    try:
        client.server_info()
    except (ServerSelectionTimeoutError, OperationFailure) as err:
        LOG.warning(err)
        return False
    return True
"resource": ""
} |
def init_app(self, app):
    """Initialize the extension from a Flask app's configuration.

    Reads the MONGO_* settings, opens a client connection and stores both
    the client and the database handle back in ``app.config``.

    Raises:
        ConnectionFailure: if the mongod process cannot be reached
    """
    uri = app.config.get("MONGO_URI", None)
    db_name = app.config.get("MONGO_DBNAME", 'scout')
    try:
        client = get_connection(
            host=app.config.get("MONGO_HOST", 'localhost'),
            port=app.config.get("MONGO_PORT", 27017),
            username=app.config.get("MONGO_USERNAME", None),
            password=app.config.get("MONGO_PASSWORD", None),
            uri=uri,
            mongodb=db_name,
        )
    except ConnectionFailure:
        # BUGFIX: this handler called `context.abort()` but `context` is not
        # defined here (copied from a CLI command, raising NameError instead).
        # Re-raise so the caller sees the real connection error.
        raise
    app.config["MONGO_DATABASE"] = client[db_name]
    app.config['MONGO_CLIENT'] = client
"resource": ""
} |
def load_delivery_report(adapter: MongoAdapter,
                         report_path: str,
                         case_id: str,
                         update: bool = False):
    """ Load a delivery report into a case in the database

    If the report already exists the function will exit unless the caller
    explicitly asks for a replacement with ``update=True``.

    Args:
        adapter (MongoAdapter): Connection to the database
        report_path (string): Path to delivery report
        case_id (string): Optional case identifier
        update (bool): If an existing report should be replaced

    Returns:
        updated_case(dict)

    Raises:
        DataNotFoundError: if no case matches `case_id`
        IntegrityError: when a report exists and `update` is False
    """
    case_obj = adapter.case(case_id=case_id)
    if case_obj is None:
        raise DataNotFoundError("no case found")

    if case_obj.get('delivery_report') and not update:
        raise IntegrityError('Existing delivery report found, use update = True to '
                             'overwrite')

    _put_report_in_case_root(case_obj, report_path)
    logger.info('Saving report for case {} in database'.format(case_obj['_id']))
    return adapter.replace_case(case_obj)
"resource": ""
} |
def add_user(self, user_obj):
    """Add a user object to the database

    Args:
        user_obj(scout.models.User): A dictionary with user information

    Returns:
        user_obj(dict): the inserted object (with its '_id' filled in)

    Raises:
        IntegrityError: if a user with the same email already exists
    """
    LOG.info("Adding user %s to the database", user_obj['email'])
    # The email address doubles as the primary key
    if '_id' not in user_obj:
        user_obj['_id'] = user_obj['email']

    try:
        self.user_collection.insert_one(user_obj)
        LOG.debug("User inserted")
    except DuplicateKeyError as err:
        # Chain the original error so the duplicate-key cause is preserved
        raise IntegrityError(
            "User {} already exists in database".format(user_obj['email'])) from err

    return user_obj
"resource": ""
} |
def pileup():
    """Visualize BAM alignments.

    Reads alignment files, region coordinates and an optional VCF from the
    request arguments and renders the pileup viewer template.
    """
    vcf_file = request.args.get('vcf')
    bam_files = request.args.getlist('bam')
    bai_files = request.args.getlist('bai')
    samples = request.args.getlist('sample')
    # One alignment entry per (bam, bai, sample) triple
    alignments = [{'bam': bam, 'bai': bai, 'sample': sample}
                  for bam, bai, sample in zip(bam_files, bai_files, samples)]

    position = {
        'contig': request.args['contig'],
        'start': request.args['start'],
        'stop': request.args['stop']
    }

    genome = current_app.config.get('PILEUP_GENOME')
    if genome:
        if not os.path.isfile(genome):
            flash("The pilup genome path ({}) provided does not exist".format(genome))
            genome = None
        LOG.debug("Use pileup genome %s", genome)

    exons = current_app.config.get('PILEUP_EXONS')
    if exons:
        if not os.path.isfile(exons):
            flash("The pilup exons path ({}) provided does not exist".format(exons))
            # BUGFIX: a missing exons file previously reset `genome` instead
            # of `exons` (copy-paste error)
            exons = None
        LOG.debug("Use pileup exons %s", exons)

    LOG.debug("View alignment for positions Chrom:{0}, Start:{1}, End: {2}".format(
        position['contig'], position['start'], position['stop']))
    LOG.debug("Use alignment files {}".format(alignments))
    return render_template('alignviewers/pileup.html', alignments=alignments,
                           position=position, vcf_file=vcf_file,
                           genome=genome, exons=exons)
"resource": ""
} |
def load_exons(adapter, exon_lines, build='37', ensembl_genes=None):
    """Load all the exons

    Transcript information is from ensembl.
    Check that the transcript that the exon belongs to exists in the database

    Args:
        adapter(MongoAdapter)
        exon_lines(iterable): iterable with ensembl exon lines, or a pandas
            DataFrame from an ensembl biomart request
        build(str): genome build, e.g. '37' or '38'
        ensembl_genes(dict): Existing ensembl genes keyed by ensembl gene id;
            fetched from the adapter when not provided
    """
    # Fetch all genes with ensemblid as keys
    ensembl_genes = ensembl_genes or adapter.ensembl_genes(build)
    # Map hgnc_id -> known transcript ids so exons on unknown transcripts can be skipped
    hgnc_id_transcripts = adapter.id_transcripts_by_gene(build=build)

    if isinstance(exon_lines, DataFrame):
        exons = parse_ensembl_exon_request(exon_lines)
        nr_exons = exon_lines.shape[0]
    else:
        exons = parse_ensembl_exons(exon_lines)
        # Stream length is unknown here; this large placeholder only sizes the
        # progressbar, so the "Number of exons" log below is then approximate
        nr_exons = 1000000

    start_insertion = datetime.now()
    loaded_exons = 0
    LOG.info("Loading exons...")
    with progressbar(exons, label="Loading exons", length=nr_exons) as bar:
        for exon in bar:
            ensg_id = exon['gene']
            enst_id = exon['transcript']
            gene_obj = ensembl_genes.get(ensg_id)
            # Skip exons on genes that are unknown to the database
            if not gene_obj:
                continue
            hgnc_id = gene_obj['hgnc_id']
            # Skip exons on transcripts that are not registered for this gene
            if not enst_id in hgnc_id_transcripts[hgnc_id]:
                continue
            exon['hgnc_id'] = hgnc_id
            exon_obj = build_exon(exon, build)
            adapter.load_exon(exon_obj)
            loaded_exons += 1

    LOG.info('Number of exons in build {0}: {1}'.format(build, nr_exons))
    LOG.info('Number loaded: {0}'.format(loaded_exons))
    LOG.info('Time to load exons: {0}'.format(datetime.now() - start_insertion))
"resource": ""
} |
def compounds(context, case_id):
    """
    Update all compounds for a case
    """
    adapter = context.obj['adapter']
    LOG.info("Running scout update compounds")

    # The case has to exist before its compounds can be recalculated
    case_obj = adapter.case(case_id)
    if not case_obj:
        LOG.warning("Case %s could not be found", case_id)
        context.abort()

    try:
        adapter.update_case_compounds(case_obj)
    except Exception as err:
        # Surface the failure in the log and stop the CLI run
        LOG.warning(err)
        context.abort()
"resource": ""
} |
def add_gene_links(gene_obj, build=37):
    """Update a gene object with links

    Args:
        gene_obj(dict)
        build(int): genome build, 37 or 38

    Returns:
        gene_obj(dict): gene_obj updated with many links
    """
    try:
        build = int(build)
    except ValueError:
        # Fall back to build 37 for unparseable build values
        build = 37
    # Add links that use the hgnc_id
    hgnc_id = gene_obj['hgnc_id']
    gene_obj['hgnc_link'] = genenames(hgnc_id)
    # NOTE: the omim link is built from the omim_id further down; the old
    # `omim(hgnc_id)` assignment here was always overwritten and was removed.

    # Add links that use ensembl_id
    if not 'ensembl_id' in gene_obj:
        ensembl_id = gene_obj.get('common', {}).get('ensembl_id')
    else:
        ensembl_id = gene_obj['ensembl_id']

    ensembl_37_link = ensembl(ensembl_id, build=37)
    ensembl_38_link = ensembl(ensembl_id, build=38)
    gene_obj['ensembl_37_link'] = ensembl_37_link
    gene_obj['ensembl_38_link'] = ensembl_38_link
    # The default ensembl link follows the requested build
    gene_obj['ensembl_link'] = ensembl_37_link
    if build == 38:
        gene_obj['ensembl_link'] = ensembl_38_link
    gene_obj['hpa_link'] = hpa(ensembl_id)
    gene_obj['string_link'] = string(ensembl_id)
    gene_obj['reactome_link'] = reactome(ensembl_id)
    gene_obj['clingen_link'] = clingen(hgnc_id)
    gene_obj['expression_atlas_link'] = expression_atlas(ensembl_id)
    gene_obj['exac_link'] = exac(ensembl_id)
    # Add links that use entrez_id
    gene_obj['entrez_link'] = entrez(gene_obj.get('entrez_id'))
    # Add links that use omim id
    gene_obj['omim_link'] = omim(gene_obj.get('omim_id'))
    # Add links that use hgnc_symbol
    gene_obj['ppaint_link'] = ppaint(gene_obj['hgnc_symbol'])
    # Add links that use vega_id
    gene_obj['vega_link'] = vega(gene_obj.get('vega_id'))
    # Add links that use ucsc link
    gene_obj['ucsc_link'] = ucsc(gene_obj.get('ucsc_id'))
    # Return the mutated object so the documented return value holds
    return gene_obj
"resource": ""
} |
def hgnc(ctx, hgnc_symbol, hgnc_id, build):
    """
    Query the hgnc aliases
    """
    adapter = ctx.obj['adapter']
    # Either a symbol or an id is required for the lookup
    if not (hgnc_symbol or hgnc_id):
        log.warning("Please provide a hgnc symbol or hgnc id")
        ctx.abort()

    if hgnc_id:
        # Resolve the id to its official symbol first
        result = adapter.hgnc_gene(hgnc_id, build=build)
        if not result:
            log.warning("Gene with id %s could not be found", hgnc_id)
            ctx.abort()
        hgnc_symbol = result['hgnc_symbol']

    result = adapter.hgnc_genes(hgnc_symbol, build=build)
    if result.count() == 0:
        log.info("No results found")
    else:
        # One tab separated row per matching gene
        click.echo("#hgnc_id\thgnc_symbol\taliases\ttranscripts")
        for gene in result:
            click.echo("{0}\t{1}\t{2}\t{3}".format(
                gene['hgnc_id'],
                gene['hgnc_symbol'],
                ', '.join(gene['aliases']),
                ', '.join(tx['ensembl_transcript_id'] for tx in gene['transcripts']),
            ))
"resource": ""
} |
def parse_hgnc_line(line, header):
    """Parse one tab separated line from the HGNC dump.

    Args:
        line(str): A raw line with hgnc gene info
        header(list): Column names matching the fields of the line

    Returns:
        hgnc_gene(dict): The relevant gene info; empty for withdrawn genes
    """
    hgnc_gene = {}
    raw_info = dict(zip(header, line.rstrip().split('\t')))
    # Genes flagged as withdrawn by HGNC are skipped entirely
    if 'Withdrawn' in raw_info['status']:
        return hgnc_gene

    hgnc_symbol = raw_info['symbol']
    hgnc_gene['hgnc_symbol'] = hgnc_symbol
    hgnc_gene['hgnc_id'] = int(raw_info['hgnc_id'].split(':')[-1])
    hgnc_gene['description'] = raw_info['name']

    # The current symbol (and its upper case form) always counts as an alias,
    # together with any previous symbols and alias symbols
    aliases = {hgnc_symbol, hgnc_symbol.upper()}
    for alias_field in ('prev_symbol', 'alias_symbol'):
        raw_aliases = raw_info[alias_field]
        if raw_aliases:
            aliases.update(raw_aliases.strip('"').split('|'))
    hgnc_gene['previous_symbols'] = list(aliases)

    # The ensembl_gene_id links the gene with ensembl
    hgnc_gene['ensembl_gene_id'] = raw_info.get('ensembl_gene_id')

    omim_id = raw_info.get('omim_id')
    hgnc_gene['omim_id'] = int(omim_id.strip('"').split('|')[0]) if omim_id else None

    entrez_id = raw_info.get('entrez_id')
    hgnc_gene['entrez_id'] = int(entrez_id) if entrez_id else None

    # Primary transcripts according to HGNC
    ref_seq = raw_info.get('refseq_accession')
    hgnc_gene['ref_seq'] = ref_seq.strip('"').split('|') if ref_seq else []

    uniprot_ids = raw_info.get('uniprot_ids')
    hgnc_gene['uniprot_ids'] = uniprot_ids.strip('"').split('|') if uniprot_ids else []

    hgnc_gene['ucsc_id'] = raw_info.get('ucsc_id') or None
    hgnc_gene['vega_id'] = raw_info.get('vega_id') or None

    return hgnc_gene
"resource": ""
} |
def parse_hgnc_genes(lines):
    """Parse lines with hgnc formated genes

    This is designed to take a dump with genes from HGNC.
    This is downloaded from:
    ftp://ftp.ebi.ac.uk/pub/databases/genenames/new/tsv/hgnc_complete_set.txt

    Args:
        lines(iterable(str)): An iterable with HGNC formated genes

    Yields:
        hgnc_gene(dict): A dictionary with the relevant information
    """
    header = []
    logger.info("Parsing hgnc genes...")
    for index, line in enumerate(lines):
        if index == 0:
            # BUGFIX: strip the trailing newline before splitting. When the
            # lines come straight from a file handle, the last column name
            # (e.g. 'vega_id\n') otherwise never matches any lookup.
            header = line.rstrip().split('\t')
        elif len(line) > 1:
            hgnc_gene = parse_hgnc_line(line=line, header=header)
            if hgnc_gene:
                yield hgnc_gene
"resource": ""
} |
def get_open_clinvar_submission(self, user_id, institute_id):
    """Retrieve the open clinvar submission for a user and institute,
    creating a fresh one first when none exists.

    Args:
        user_id(str): a user ID
        institute_id(str): an institute ID

    Returns:
        submission(obj): an open clinvar submission object
    """
    LOG.info("Retrieving an open clinvar submission for user '%s' and institute %s", user_id, institute_id)
    query = dict(user_id=user_id, institute_id=institute_id, status='open')
    submission = self.clinvar_submission_collection.find_one(query)
    if submission is not None:
        return submission
    # No open submission for this user/institute yet: start a new one
    new_submission_id = self.create_submission(user_id, institute_id)
    return self.clinvar_submission_collection.find_one({'_id': new_submission_id})
"resource": ""
} |
def update_clinvar_id(self, clinvar_id, submission_id):
    """Save an official clinvar submission ID in a clinvar submission object

    Args:
        clinvar_id(str): a string with a format: SUB[0-9]. It is obtained from clinvar portal when starting a new submission
        submission_id(str): id of the submission to be updated

    Returns:
        updated_submission(obj): a clinvar submission object, updated
    """
    new_values = {'clinvar_subm_id': clinvar_id, 'updated_at': datetime.now()}
    updated_submission = self.clinvar_submission_collection.find_one_and_update(
        {'_id': ObjectId(submission_id)},
        {'$set': new_values},
        upsert=True,
        return_document=pymongo.ReturnDocument.AFTER,
    )
    return updated_submission
"resource": ""
} |
def get_clinvar_id(self, submission_id):
    """Return the official Clinvar submission ID for a submission object

    Args:
        submission_id(str): id of the submission

    Returns:
        clinvar_subm_id(str): a string with a format: SUB[0-9] obtained from
            the clinvar portal, or None when it was never provided
    """
    submission_obj = self.clinvar_submission_collection.find_one({'_id': ObjectId(submission_id)})
    # The key is absent when the user has not registered an official ID yet
    return submission_obj.get('clinvar_subm_id')
"resource": ""
} |
def add_to_submission(self, submission_id, submission_objects):
    """Adds submission_objects to clinvar collection and update the coresponding submission object with their id

    Duplicate variant or casedata objects are logged and skipped; the
    submission's `updated_at` timestamp is refreshed in all cases.

    Args:
        submission_id(str) : id of the submission to be updated
        submission_objects(tuple): a tuple of 2 elements coresponding to a list of variants and a list of case data objects to add to submission

    Returns:
        updated_submission(obj): an open clinvar submission object, updated
    """
    LOG.info("Adding new variants and case data to clinvar submission '%s'", submission_id)

    # Insert variant submission_objects into clinvar collection and register
    # each inserted id on the submission's 'variant_data' list
    for var_obj in submission_objects[0]:
        try:
            result = self.clinvar_collection.insert_one(var_obj)
            self.clinvar_submission_collection.update_one({'_id':submission_id}, {'$push': { 'variant_data' : str(result.inserted_id) }}, upsert=True)
        except pymongo.errors.DuplicateKeyError:
            # A duplicate variant is not fatal; log and continue with the rest
            LOG.error("Attepted to insert a clinvar variant which is already in DB!")

    # Insert casedata submission_objects into clinvar collection and register
    # them on the submission's 'case_data' list (casedata is optional)
    if submission_objects[1]:
        for case_obj in submission_objects[1]:
            try:
                result = self.clinvar_collection.insert_one(case_obj)
                self.clinvar_submission_collection.update_one({'_id':submission_id}, {'$push': { 'case_data': str(result.inserted_id)}}, upsert=True)
            except pymongo.errors.DuplicateKeyError:
                LOG.error("One or more casedata object is already present in clinvar collection!")

    # Refresh the submission timestamp and return the updated document
    updated_submission = self.clinvar_submission_collection.find_one_and_update( {'_id':submission_id}, { '$set' : {'updated_at': datetime.now()} }, return_document=pymongo.ReturnDocument.AFTER )
    return updated_submission
"resource": ""
} |
def update_clinvar_submission_status(self, user_id, submission_id, status):
    """Set the status of a clinvar submission to 'open' or 'closed'.

    When a submission is opened, every other submission owned by the user is
    closed first, so that at most one submission per user stays open.

    Args:
        user_id(str): the ID of the user owning the submission
        submission_id(str): the ID of the clinvar submission to update
        status(str): the new status, 'open' or 'closed'

    Return
        updated_submission(obj): the submission object with the new status
    """
    LOG.info('closing clinvar submission "%s"', submission_id)

    if status == 'open':  # opening a submission affects the user's other submissions
        # Close all other submissions for this user and then open the desired one
        self.clinvar_submission_collection.update_many(
            {'user_id' : user_id},
            {'$set' :
                {'status' : 'closed', 'updated_at' : datetime.now()}
            }
        )

    # Apply the requested status to the target submission and return it
    updated_submission = self.clinvar_submission_collection.find_one_and_update(
        {'_id' : ObjectId(submission_id)},
        {'$set' :
            {'status' : status, 'updated_at' : datetime.now()}
        },
        return_document=pymongo.ReturnDocument.AFTER
    )
    return updated_submission
"resource": ""
} |
def clinvar_submissions(self, user_id, institute_id):
    """Collect all open and closed clinvar submissions created by a user for an institute

    Args:
        user_id(str): a user ID
        institute_id(str): an institute ID

    Returns:
        submissions(list): a list of clinvar submission objects
    """
    LOG.info("Retrieving all clinvar submissions for user '%s', institute '%s'", user_id, institute_id)
    query = dict(user_id=user_id, institute_id=institute_id)
    submissions = []
    for result in self.clinvar_submission_collection.find(query):
        # Copy over the plain fields of the submission document
        submission = {key: result.get(key) for key in
                      ('_id', 'status', 'user_id', 'institute_id', 'created_at', 'updated_at')}
        # Official clinvar id is only present when the user has registered one
        if 'clinvar_subm_id' in result:
            submission['clinvar_subm_id'] = result['clinvar_subm_id']
        # Resolve referenced variant and casedata documents when present
        if result.get('variant_data'):
            submission['variant_data'] = self.clinvar_collection.find(
                {'_id': {"$in": result['variant_data']}})
        if result.get('case_data'):
            submission['case_data'] = self.clinvar_collection.find(
                {'_id': {"$in": result['case_data']}})
        submissions.append(submission)
    return submissions
"resource": ""
} |
def delete_clinvar_object(self, object_id, object_type, submission_id):
    """Remove a variant object from clinvar database and update the relative submission object

    Removing a variant also removes all of its casedata objects (they share
    the same `linking_id`); removing only casedata keeps the variant in the
    submission.

    Args:
        object_id(str) : the id of an object to remove from clinvar_collection database collection (a variant of a case)
        object_type(str) : either 'variant_data' or 'case_data'. It's a key in the clinvar_submission object.
        submission_id(str): the _id key of a clinvar submission

    Returns:
        updated_submission(obj): an updated clinvar submission
    """
    LOG.info("Deleting clinvar object %s (%s)", object_id, object_type)
    result = ''
    if object_type == 'variant_data':
        # pull out the variant reference from the submission object
        self.clinvar_submission_collection.find_one_and_update( {'_id': ObjectId(submission_id)}, {'$pull': {'variant_data': object_id} })
        variant_object = self.clinvar_collection.find_one( {'_id': object_id} )
        # `linking_id` is the original scout variant id; it ties the clinvar
        # variant and its casedata objects together
        linking_id = variant_object.get("linking_id")
        # remove every object sharing that linking_id (variant AND casedata)
        result = self.clinvar_collection.delete_many( {'linking_id': linking_id } )
    else: # remove case_data but keep variant in submission
        # delete only this casedata object from clinvar_collection
        result = self.clinvar_collection.delete_one( {'_id': object_id } )
    # in both cases remove the reference from the submission's 'case_data' list
    self.clinvar_submission_collection.find_one_and_update( {'_id': ObjectId(submission_id)}, {'$pull': {'case_data': object_id} })

    # refresh the submission timestamp and return the updated document
    updated_submission = self.clinvar_submission_collection.find_one_and_update( {'_id':submission_id}, { '$set' : {'updated_at': datetime.now()} }, return_document=pymongo.ReturnDocument.AFTER )
    return updated_submission
"resource": ""
} |
def case_to_clinVars(self, case_id):
    """Get all variants included in clinvar submissions for a case

    Args:
        case_id(str): a case _id

    Returns:
        submitted_vars(dict): keys are variant local ids and values are the
            variant submission objects
    """
    query = dict(case_id=case_id, csv_type='variant')
    submitted_vars = {}
    for clinvar_obj in self.clinvar_collection.find(query):
        submitted_vars[clinvar_obj.get('local_id')] = clinvar_obj
    return submitted_vars
"resource": ""
} |
def parse_hpo_obo(hpo_lines):
    """Parse .obo formatted hpo lines.

    Yields one dict per [Term] stanza with keys hpo_id, description and,
    when present, aliases and ancestors.
    """
    term = {}
    for raw_line in hpo_lines:
        # NOTE: the emptiness test happens before stripping the newline,
        # mirroring how stanzas are separated in the raw file
        if len(raw_line) == 0:
            continue
        line = raw_line.rstrip()
        if line == '[Term]':
            # A new stanza starts: emit the previous term, if any
            if term:
                yield term
            term = {}
        elif line.startswith('id'):
            term['hpo_id'] = line[4:]
        elif line.startswith('name'):
            term['description'] = line[6:]
        elif line.startswith('alt_id'):
            term.setdefault('aliases', []).append(line[8:])
        elif line.startswith('is_a'):
            # Only the HP:XXXXXXX part of the is_a entry is kept
            term.setdefault('ancestors', []).append(line[6:16])
    # Emit the final term of the file
    if term:
        yield term
"resource": ""
} |
def genes():
    """Render search box for genes."""
    query = request.args.get('query', '')
    # Selected autocomplete entries look like "<hgnc_id> | <symbol> ..."
    if '|' not in query:
        return dict(genes=store.all_genes().limit(20))
    hgnc_id = int(query.split(' | ', 1)[0])
    return redirect(url_for('.gene', hgnc_id=hgnc_id))
"resource": ""
} |
def gene(hgnc_id=None, hgnc_symbol=None):
    """Render information about a gene."""
    if hgnc_symbol:
        query = store.hgnc_genes(hgnc_symbol)
        if query.count() != 1:
            # Ambiguous or unknown symbol: fall back to the gene search page
            return redirect(url_for('.genes', query=hgnc_symbol))
        hgnc_id = query.first()['hgnc_id']
    try:
        return controllers.gene(store, hgnc_id)
    except ValueError:
        # Unknown hgnc id
        return abort(404)
"resource": ""
} |
def api_genes():
    """Return JSON data about genes."""
    search_term = request.args.get('query')
    return jsonify(controllers.genes_to_json(store, search_term))
"resource": ""
} |
def check_panels(adapter, panels, default_panels=None):
    """Make sure that the gene panels exist in the database

    Also check if the default panels are defined in gene panels

    Args:
        adapter(MongoAdapter)
        panels(list(str)): A list with panel names
        default_panels(list(str)): Panel names that must also appear in panels

    Returns:
        panels_exist(bool): True only when every check passed
    """
    panels_exist = True
    # Default panels must be a subset of the configured panels
    for default_panel in (default_panels or []):
        if default_panel not in panels:
            log.warning("Default panels have to be defined in panels")
            panels_exist = False
    # Every panel must already be loaded in the database
    for panel_name in panels:
        if not adapter.gene_panel(panel_name):
            log.warning("Panel {} does not exist in database".format(panel_name))
            panels_exist = False
    return panels_exist
"resource": ""
} |
def load_region(adapter, case_id, hgnc_id=None, chrom=None, start=None, end=None):
    """Load all variants in a region defined by a HGNC id or coordinates

    Args:
        adapter (MongoAdapter)
        case_id (str): Case id
        hgnc_id (int): If all variants from a gene should be uploaded
        chrom (str): If variants from coordinates should be uploaded
        start (int): Start position for region
        end (int): Stop position for region

    Raises:
        ValueError: if the gene or the case does not exist in the database
    """
    if hgnc_id:
        gene_obj = adapter.hgnc_gene(hgnc_id)
        if not gene_obj:
            # BUGFIX: the ValueError was previously created but never raised,
            # letting the load continue with chrom/start/end unset
            raise ValueError("Gene {} does not exist in database".format(hgnc_id))
        chrom = gene_obj['chromosome']
        start = gene_obj['start']
        end = gene_obj['end']

    case_obj = adapter.case(case_id=case_id)
    if not case_obj:
        raise ValueError("Case {} does not exist in database".format(case_id))

    log.info("Load clinical SNV variants for case: {0} region: chr {1}, start"
             " {2}, end {3}".format(case_obj['_id'], chrom, start, end))
    adapter.load_variants(case_obj=case_obj, variant_type='clinical',
                          category='snv', chrom=chrom, start=start, end=end)

    # SV/STR uploads only happen when the respective VCF file is configured
    vcf_sv_file = case_obj['vcf_files'].get('vcf_sv')
    if vcf_sv_file:
        log.info("Load clinical SV variants for case: {0} region: chr {1}, "
                 "start {2}, end {3}".format(case_obj['_id'], chrom, start, end))
        adapter.load_variants(case_obj=case_obj, variant_type='clinical',
                              category='sv', chrom=chrom, start=start, end=end)

    vcf_str_file = case_obj['vcf_files'].get('vcf_str')
    if vcf_str_file:
        log.info("Load clinical STR variants for case: {0} region: chr {1}, "
                 "start {2}, end {3}".format(case_obj['_id'], chrom, start, end))
        adapter.load_variants(case_obj=case_obj, variant_type='clinical',
                              category='str', chrom=chrom, start=start, end=end)

    # Research variants are only uploaded for cases opened for research
    if case_obj['is_research']:
        log.info("Load research SNV variants for case: {0} region: chr {1}, "
                 "start {2}, end {3}".format(case_obj['_id'], chrom, start, end))
        adapter.load_variants(case_obj=case_obj, variant_type='research',
                              category='snv', chrom=chrom, start=start, end=end)

        vcf_sv_research = case_obj['vcf_files'].get('vcf_sv_research')
        if vcf_sv_research:
            log.info("Load research SV variants for case: {0} region: chr {1},"
                     " start {2}, end {3}".format(case_obj['_id'], chrom, start, end))
            adapter.load_variants(case_obj=case_obj, variant_type='research',
                                  category='sv', chrom=chrom, start=start, end=end)
"resource": ""
} |
def load_scout(adapter, config, ped=None, update=False):
    """Load a new case from a Scout config.

    Args:
        adapter(MongoAdapter)
        config(dict): loading info
        ped(Iterable(str)): Pedigree information
        update(bool): If existing case should be updated

    Returns:
        case_obj(dict): the loaded case

    Raises:
        ConfigError: if one of the configured gene panels is missing
    """
    log.info("Check that the panels exists")
    panels_ok = check_panels(adapter, config.get('gene_panels', []),
                             config.get('default_gene_panels'))
    if not panels_ok:
        raise ConfigError("Some panel(s) does not exist in the database")

    return adapter.load_case(config, update=update)
"resource": ""
} |
def templated(template=None):
    """Template decorator.

    Ref: http://flask.pocoo.org/docs/patterns/viewdecorators/
    """
    def decorator(view_func):
        @wraps(view_func)
        def wrapper(*args, **kwargs):
            # Default template mirrors the endpoint: 'cases.index' -> 'cases/index.html'
            name = template
            if name is None:
                name = request.endpoint.replace('.', '/') + '.html'
            context = view_func(*args, **kwargs)
            if context is None:
                context = {}
            elif not isinstance(context, dict):
                # Pass full responses (redirects, strings, ...) straight through
                return context
            return render_template(name, **context)
        return wrapper
    return decorator
"resource": ""
} |
def institute_and_case(store, institute_id, case_name=None):
    """Fetch institute and (optionally) case objects.

    Args:
        store: adapter exposing ``institute`` and ``case``
        institute_id(str): institute id
        case_name(str): optional case display name

    Returns:
        institute dict, or ``(institute, case)`` when ``case_name`` is given.
        Aborts with 404 when objects are missing and 403 when the current
        user lacks access.
    """
    institute_obj = store.institute(institute_id)
    if institute_obj is None and institute_id != 'favicon.ico':
        flash("Can't find institute: {}".format(institute_id), 'warning')
        return abort(404)

    # Fix: the original had a redundant, duplicated nested `if case_name:`
    case_obj = None
    if case_name:
        case_obj = store.case(institute_id=institute_id, display_name=case_name)
        if case_obj is None:
            return abort(404)

    # validate that user has access to the institute
    if not current_user.is_admin:
        if institute_id not in current_user.institutes:
            # Collaborator institutes on the case may still grant access
            if not case_name or not any(inst_id in case_obj['collaborators'] for inst_id in
                                        current_user.institutes):
                # you don't have access!!
                flash("You don't have access to: {}".format(institute_id), 'danger')
                return abort(403)

    # you have access!
    if case_name:
        return institute_obj, case_obj
    return institute_obj
"resource": ""
} |
def user_institutes(store, login_user):
    """Return the institutes a user may access.

    Admins see every institute in the database; other users only the
    institutes listed on their user record.
    """
    if login_user.is_admin:
        return store.institutes()
    return [store.institute(inst_id) for inst_id in login_user.institutes]
"resource": ""
} |
def get_hgnc_id(gene_info, adapter):
    """Get the hgnc id for a gene.

    The priority order is:

    1. if there is a hgnc id this one will be chosen
    2. if the hgnc symbol matches a gene's proper hgnc symbol
    3. if the symbol only matches aliases on several genes, the first
       match will be chosen

    Args:
        gene_info(dict)
        adapter

    Returns:
        true_id(int)

    Raises:
        Exception: if no gene matches the given symbol
    """
    hgnc_id = gene_info.get('hgnc_id')
    if hgnc_id:
        return int(hgnc_id)

    hgnc_symbol = gene_info.get('hgnc_symbol')
    gene_result = adapter.hgnc_genes(hgnc_symbol)
    if gene_result.count() == 0:
        raise Exception("No gene could be found for {}".format(hgnc_symbol))

    true_id = None
    for gene in gene_result:
        # An exact match on the official symbol takes precedence.
        # Fix: the original overwrote an exact match with every
        # subsequent alias match, returning the last gene instead.
        if hgnc_symbol.upper() == gene.hgnc_symbol.upper():
            return gene.hgnc_id
        # Otherwise remember the first alias match as fallback
        if true_id is None:
            true_id = gene.hgnc_id
    return true_id
"resource": ""
} |
def panel(context, panel, version, update_date, update_version):
    """
    Update a panel in the database
    """
    adapter = context.obj['adapter']

    # The panel must already exist before it can be updated
    existing_panel = adapter.gene_panel(panel, version=version)
    if not existing_panel:
        LOG.warning("Panel %s (version %s) could not be found" % (panel, version))
        context.abort()

    parsed_date = None
    if update_date:
        try:
            parsed_date = get_date(update_date)
        except Exception as err:
            LOG.warning(err)
            context.abort()

    update_panel(
        adapter,
        panel,
        panel_version=existing_panel['version'],
        new_version=update_version,
        new_date=parsed_date,
    )
"resource": ""
} |
def diseases(context, api_key):
    """
    Update disease terms in mongo database.
    """
    adapter = context.obj['adapter']

    # An OMIM api key is required to download the genemap2 file
    omim_key = api_key or context.obj.get('omim_api_key')
    if not omim_key:
        LOG.warning("Please provide a omim api key to load the omim gene panel")
        context.abort()

    try:
        mim_files = fetch_mim_files(omim_key, genemap2=True)
    except Exception as err:
        LOG.warning(err)
        context.abort()

    # Replace the whole collection with the freshly downloaded terms
    LOG.info("Dropping DiseaseTerms")
    adapter.disease_term_collection.drop()
    LOG.debug("DiseaseTerms dropped")

    load_disease_terms(adapter=adapter, genemap_lines=mim_files['genemap2'])
    LOG.info("Successfully loaded all disease terms")
"resource": ""
} |
def load_disease_terms(adapter, genemap_lines, genes=None, hpo_disease_lines=None):
    """Load the omim phenotypes into the database.

    Parse the phenotypes from genemap2.txt and find the associated hpo terms
    from ALL_SOURCES_ALL_FREQUENCIES_diseases_to_genes_to_phenotypes.txt.

    Args:
        adapter(MongoAdapter)
        genemap_lines(iterable(str))
        genes(dict): Dictionary with all genes found in database
        hpo_disease_lines(iterable(str))
    """
    # Get a map with hgnc symbols to hgnc ids from scout
    if not genes:
        genes = adapter.genes_by_alias()

    # Fetch the disease terms from omim
    disease_terms = get_mim_phenotypes(genemap_lines=genemap_lines)

    if not hpo_disease_lines:
        hpo_disease_lines = fetch_hpo_phenotype_to_terms()
    hpo_diseases = parse_hpo_diseases(hpo_disease_lines)

    start_time = datetime.now()
    # Fix: start the counter at 0 and enumerate from 1 so the final log
    # reports the true number loaded (the original logged count-1, and
    # `None` for empty input).
    nr_diseases = 0
    LOG.info("Loading the hpo disease...")
    for nr_diseases, disease_number in enumerate(disease_terms, 1):
        disease_info = disease_terms[disease_number]
        disease_id = "OMIM:{0}".format(disease_number)

        # Attach HPO terms when this OMIM disease is known to HPO
        if disease_id in hpo_diseases:
            hpo_terms = hpo_diseases[disease_id]['hpo_terms']
            if hpo_terms:
                disease_info['hpo_terms'] = hpo_terms

        disease_obj = build_disease_term(disease_info, genes)
        adapter.load_disease_term(disease_obj)

    LOG.info("Loading done. Nr of diseases loaded {0}".format(nr_diseases))
    LOG.info("Time to load diseases: {0}".format(datetime.now() - start_time))
"resource": ""
} |
def parse_frequencies(variant, transcripts):
    """Add the frequencies to a variant.

    Frequencies are parsed either directly from keys in the INFO field or
    from the transcripts if they are annotated there.

    Args:
        variant(cyvcf2.Variant): A parsed vcf variant
        transcripts(iterable(dict)): Parsed transcripts

    Returns:
        frequencies(dict): A dictionary with the relevant frequencies
    """
    frequencies = {}

    # Map each result key to its candidate INFO keys, in priority order.
    # These lists could be extended...
    vcf_key_map = [
        ('thousand_g', ['1000GAF']),
        ('thousand_g_max', ['1000G_MAX_AF']),
        ('exac', ['EXACAF']),
        ('exac_max', ['ExAC_MAX_AF', 'EXAC_MAX_AF']),
        ('gnomad', ['GNOMADAF', 'GNOMAD_AF']),
        ('gnomad_max', ['GNOMADAF_POPMAX', 'GNOMADAF_MAX']),
    ]
    for freq_name, info_keys in vcf_key_map:
        for info_key in info_keys:
            value = parse_frequency(variant, info_key)
            if value:
                frequencies[freq_name] = value
                break

    # Fall back to transcript annotations only when nothing was found in
    # the VCF INFO field
    if not frequencies:
        transcript_key_map = [
            ('exac', 'exac_maf'),
            ('exac_max', 'exac_max'),
            ('thousand_g', 'thousand_g_maf'),
            ('thousand_g_max', 'thousandg_max'),
            ('gnomad', 'gnomad_maf'),
            ('gnomad_max', 'gnomad_max'),
        ]
        for transcript in transcripts:
            for freq_name, transcript_key in transcript_key_map:
                value = transcript.get(transcript_key)
                if value:
                    frequencies[freq_name] = value

    # These are SV-specific frequencies
    for freq_name, info_key in [('thousand_g_left', 'left_1000GAF'),
                                ('thousand_g_right', 'right_1000GAF')]:
        value = parse_frequency(variant, info_key)
        if value:
            frequencies[freq_name] = value

    return frequencies
"resource": ""
} |
def parse_frequency(variant, info_key):
    """Parse any frequency from the info dict.

    Args:
        variant(cyvcf2.Variant)
        info_key(str)

    Returns:
        frequency(float): or None if frequency does not exist
    """
    raw_value = variant.INFO.get(info_key)
    # '.' is VCF notation for a missing value; empty/None is also missing
    if raw_value == '.' or not raw_value:
        return None
    return float(raw_value)
"resource": ""
} |
def parse_sv_frequencies(variant):
    """Parsing of some custom sv frequencies.

    These are very specific at the moment, this will hopefully get better
    over time when the field of structural variants is more developed.

    Args:
        variant(cyvcf2.Variant)

    Returns:
        sv_frequencies(dict)
    """
    frequency_keys = [
        'clingen_cgh_benignAF',
        'clingen_cgh_benign',
        'clingen_cgh_pathogenicAF',
        'clingen_cgh_pathogenic',
        'clingen_ngi',
        'clingen_ngiAF',
        'swegen',
        'swegenAF',
        'decipherAF',
        'decipher',
    ]
    sv_frequencies = {}
    for frequency_key in frequency_keys:
        raw_value = variant.INFO.get(frequency_key, 0)
        # Keys containing 'AF' hold allele frequencies, the rest are counts
        cast = float if 'AF' in frequency_key else int
        value = cast(raw_value)
        if value > 0:
            sv_frequencies[frequency_key] = value
    return sv_frequencies
"resource": ""
} |
def users(context):
    """Show all users in the database"""
    LOG.info("Running scout view users")
    adapter = context.obj['adapter']

    user_objs = adapter.users()
    if user_objs.count() == 0:
        LOG.info("No users found")
        context.abort()

    # Tab-separated listing, one row per user
    click.echo("#name\temail\troles\tinstitutes")
    for user_obj in user_objs:
        row = "{0}\t{1}\t{2}\t{3}\t".format(
            user_obj['name'],
            user_obj.get('mail', user_obj['_id']),
            ', '.join(user_obj.get('roles', [])),
            ', '.join(user_obj.get('institutes', [])),
        )
        click.echo(row)
"resource": ""
} |
def build_hgnc_gene(gene_info, build='37'):
    """Build a hgnc_gene object.

    Args:
        gene_info(dict): Gene information
        build(str): genome build, '37' or '38'

    Returns:
        gene_obj(dict)

        {
            '_id': ObjectId(),
            # This is the hgnc id, required:
            'hgnc_id': int,
            # The primary symbol, required
            'hgnc_symbol': str,
            'ensembl_id': str, # required
            'build': str, # '37' or '38', defaults to '37', required
            'chromosome': str, # required
            'start': int, # required
            'end': int, # required
            'description': str, # Gene description
            'aliases': list(), # Gene symbol aliases, includes hgnc_symbol, str
            'entrez_id': int,
            'omim_id': int,
            'pli_score': float,
            'primary_transcripts': list(), # List of refseq transcripts (str)
            'ucsc_id': str,
            'uniprot_ids': list(), # List of str
            'vega_id': str,
            'transcripts': list(), # List of hgnc_transcript
            # Inheritance information
            'inheritance_models': list(), # List of model names
            'incomplete_penetrance': bool, # Acquired from HPO
            # Phenotype information
            'phenotypes': list(), # List of dictionaries with phenotype information
        }

    Raises:
        KeyError: if a mandatory field is missing
        ValueError/TypeError: if a numeric field cannot be converted
    """
    def _required(key, message):
        # Fetch a mandatory field, translating a missing key into a
        # readable error message
        try:
            return gene_info[key]
        except KeyError:
            raise KeyError(message)

    try:
        hgnc_id = int(_required('hgnc_id', "Gene has to have a hgnc_id"))
    except ValueError:
        raise ValueError("hgnc_id has to be integer")

    hgnc_symbol = _required('hgnc_symbol', "Gene has to have a hgnc_symbol")
    ensembl_id = _required('ensembl_gene_id', "Gene has to have a ensembl_id")
    chromosome = _required('chromosome', "Gene has to have a chromosome")

    # Fix: the original only caught TypeError here, so a non-numeric
    # string escaped with an unhelpful raw int() ValueError
    try:
        start = int(_required('start', "Gene has to have a start position"))
    except TypeError:
        raise TypeError("Gene start has to be a integer")
    except ValueError:
        raise ValueError("Gene start has to be a integer")

    try:
        end = int(_required('end', "Gene has to have a end position"))
    except TypeError:
        raise TypeError("Gene end has to be a integer")
    except ValueError:
        raise ValueError("Gene end has to be a integer")

    gene_obj = HgncGene(
        hgnc_id=hgnc_id,
        hgnc_symbol=hgnc_symbol,
        ensembl_id=ensembl_id,
        chrom=chromosome,
        start=start,
        end=end,
        build=build,
    )

    # Optional fields: copy (and cast) only when present and truthy
    if gene_info.get('description'):
        gene_obj['description'] = gene_info['description']
    if gene_info.get('previous_symbols'):
        gene_obj['aliases'] = gene_info['previous_symbols']
    if gene_info.get('entrez_id'):
        gene_obj['entrez_id'] = int(gene_info['entrez_id'])
    if gene_info.get('omim_id'):
        gene_obj['omim_id'] = int(gene_info['omim_id'])
    if gene_info.get('pli_score'):
        gene_obj['pli_score'] = float(gene_info['pli_score'])
    if gene_info.get('ref_seq'):
        gene_obj['primary_transcripts'] = gene_info['ref_seq']
    if gene_info.get('ucsc_id'):
        gene_obj['ucsc_id'] = gene_info['ucsc_id']
    if gene_info.get('uniprot_ids'):
        gene_obj['uniprot_ids'] = gene_info['uniprot_ids']
    if gene_info.get('vega_id'):
        gene_obj['vega_id'] = gene_info['vega_id']
    if gene_info.get('incomplete_penetrance'):
        gene_obj['incomplete_penetrance'] = True
    if gene_info.get('inheritance_models'):
        gene_obj['inheritance_models'] = gene_info['inheritance_models']

    phenotype_objs = [build_phenotype(phenotype_info)
                      for phenotype_info in gene_info.get('phenotypes', [])]
    if phenotype_objs:
        gene_obj['phenotypes'] = phenotype_objs

    # Strip keys that were never populated
    for key in list(gene_obj):
        if gene_obj[key] is None:
            gene_obj.pop(key)

    return gene_obj
"resource": ""
} |
def load_panel(self, parsed_panel):
    """Build and store a gene panel.

    A panel object is built from the parsed information (integrity checks
    are made while building) and then loaded into the database.

    Args:
        parsed_panel(dict): {
            'file': <path to panel file>(str),
            'institute': <institute>(str),
            'type': <panel type>(str),
            'date': date,
            'version': version,
            'panel_name': panel_id,
            'full_name': name,
        }
    """
    self.add_gene_panel(build_panel(parsed_panel, self))
"resource": ""
} |
def load_omim_panel(self, api_key, institute=None):
    """Create and load the OMIM-AUTO panel.

    Downloads the OMIM source files, builds a panel from them and stores a
    new panel version unless OMIM has not published a new release.

    Args:
        api_key(str): OMIM API key used to fetch genemap2/mim2genes
        institute(str): owning institute, defaults to 'cust002'
    """
    # A missing panel means this is the first load
    existing_panel = self.gene_panel(panel_id='OMIM-AUTO')
    if not existing_panel:
        LOG.warning("OMIM-AUTO does not exists in database")
        LOG.info('Creating a first version')
    version = 1.0
    if existing_panel:
        # Bump to the next whole version number
        version = float(math.floor(existing_panel['version']) + 1)
    LOG.info("Setting version to %s", version)
    try:
        mim_files = fetch_mim_files(api_key=api_key, genemap2=True, mim2genes=True)
    except Exception as err:
        raise err
    date_string = None
    # Get the correct date when omim files where released
    for line in mim_files['genemap2']:
        if 'Generated' in line:
            date_string = line.split(':')[-1].lstrip().rstrip()
    # NOTE(review): if no 'Generated' header is found, date_string stays
    # None here — presumably get_date then falls back to today; confirm.
    date_obj = get_date(date_string)
    if existing_panel:
        # Identical release date: OMIM has not published anything new
        if existing_panel['date'] == date_obj:
            LOG.warning("There is no new version of OMIM")
            return
    panel_data = {}
    panel_data['path'] = None
    panel_data['type'] = 'clinical'
    panel_data['date'] = date_obj
    panel_data['panel_id'] = 'OMIM-AUTO'
    panel_data['institute'] = institute or 'cust002'
    panel_data['version'] = version
    panel_data['display_name'] = 'OMIM-AUTO'
    panel_data['genes'] = []
    # Alias map lets OMIM symbols be resolved to genes in the database
    alias_genes = self.genes_by_alias()
    genes = get_omim_panel_genes(
        genemap2_lines = mim_files['genemap2'],
        mim2gene_lines = mim_files['mim2genes'],
        alias_genes = alias_genes,
    )
    for gene in genes:
        panel_data['genes'].append(gene)
    panel_obj = build_panel(panel_data, self)
    if existing_panel:
        # Tag the genes that are new compared to the previous release;
        # if nothing changed, skip storing a new version entirely
        new_genes = self.compare_mim_panels(existing_panel, panel_obj)
        if new_genes:
            self.update_mim_version(new_genes, panel_obj, old_version=existing_panel['version'])
        else:
            LOG.info("The new version of omim does not differ from the old one")
            LOG.info("No update is added")
            return
    self.add_gene_panel(panel_obj)
"resource": ""
} |
def compare_mim_panels(self, existing_panel, new_panel):
    """Check if the latest version of OMIM differs from the most recent in database.

    Return all genes that were not in the previous version.

    Args:
        existing_panel(dict)
        new_panel(dict)

    Returns:
        new_genes(set(str))
    """
    previous_ids = {gene['hgnc_id'] for gene in existing_panel['genes']}
    current_ids = {gene['hgnc_id'] for gene in new_panel['genes']}
    return current_ids - previous_ids
"resource": ""
} |
def update_mim_version(self, new_genes, new_panel, old_version):
    """Set the correct version for each gene.

    Genes introduced in this OMIM release get the new panel version as
    their 'database_entry_version'; all other genes keep the previous one.

    Args:
        new_genes(set(str)): Set with the new gene symbols
        new_panel(dict)
        old_version(float): version of the previous panel
    """
    LOG.info('Updating versions for new genes')
    current_version = new_panel['version']
    for gene in new_panel['genes']:
        is_new = gene['hgnc_id'] in new_genes
        gene['database_entry_version'] = current_version if is_new else old_version
"resource": ""
} |
def add_gene_panel(self, panel_obj):
    """Add a gene panel to the database.

    Args:
        panel_obj(dict)

    Returns:
        inserted_id: the '_id' of the stored panel

    Raises:
        IntegrityError: if the panel name/version combination already exists
    """
    panel_name = panel_obj['panel_name']
    panel_version = panel_obj['version']
    display_name = panel_obj.get('display_name', panel_name)

    if self.gene_panel(panel_name, panel_version):
        raise IntegrityError("Panel {0} with version {1} already"
                             " exist in database".format(panel_name, panel_version))

    LOG.info("loading panel {0}, version {1} to database".format(
        display_name, panel_version
    ))
    inserted = self.panel_collection.insert_one(panel_obj)
    LOG.debug("Panel saved")
    return inserted.inserted_id
"resource": ""
} |
def panel(self, panel_id):
    """Fetch a gene panel by '_id'.

    Args:
        panel_id (str, ObjectId): str or ObjectId of document ObjectId

    Returns:
        dict: panel object or `None` if panel not found
    """
    # Accept both raw strings and ready-made ObjectIds
    query_id = panel_id if isinstance(panel_id, ObjectId) else ObjectId(panel_id)
    return self.panel_collection.find_one({'_id': query_id})
"resource": ""
} |
def delete_panel(self, panel_obj):
    """Delete a panel by '_id'.

    Args:
        panel_obj(dict)

    Returns:
        res(pymongo.DeleteResult)
    """
    LOG.warning("Deleting panel %s, version %s" % (panel_obj['panel_name'], panel_obj['version']))
    return self.panel_collection.delete_one({'_id': panel_obj['_id']})
"resource": ""
} |
def gene_panel(self, panel_id, version=None):
    """Fetch a gene panel.

    Args:
        panel_id (str): unique id for the panel
        version (str): version of the panel. If 'None' latest version will be returned

    Returns:
        gene_panel: gene panel object, or None when nothing matches
    """
    query = {'panel_name': panel_id}
    if version:
        LOG.info("Fetch gene panel {0}, version {1} from database".format(
            panel_id, version
        ))
        query['version'] = version
        return self.panel_collection.find_one(query)

    # No version requested: take the highest version available
    LOG.info("Fetching gene panels %s from database", panel_id)
    matches = self.panel_collection.find(query).sort('version', -1)
    if matches.count() == 0:
        LOG.info("No gene panel found")
        return None
    return matches[0]
"resource": ""
} |
def gene_panels(self, panel_id=None, institute_id=None, version=None):
    """Return all gene panels.

    If panel_id return all versions of panels by that panel name.

    Args:
        panel_id(str)
        institute_id(str)
        version(float)

    Returns:
        cursor(pymongo.cursor)
    """
    query = {}
    if panel_id:
        query['panel_name'] = panel_id
        # A version filter only makes sense together with a panel name
        if version:
            query['version'] = version
    if institute_id:
        query['institute'] = institute_id
    return self.panel_collection.find(query)
"resource": ""
} |
def gene_to_panels(self, case_obj):
    """Fetch all gene panels and group them by gene.

    Args:
        case_obj(scout.models.Case)

    Returns:
        gene_dict(dict): A dictionary with gene as keys and a set of
            panel names as value
    """
    LOG.info("Building gene to panels")
    gene_dict = {}

    for panel_info in case_obj.get('panels', []):
        panel_name = panel_info['panel_name']
        panel_version = panel_info['version']
        panel_obj = self.gene_panel(panel_name, version=panel_version)
        if not panel_obj:
            LOG.warning("Panel: {0}, version {1} does not exist in database".format(
                panel_name, panel_version))
            # Fix: skip missing panels instead of crashing on
            # panel_obj['genes'] with a TypeError
            continue

        for gene in panel_obj['genes']:
            hgnc_id = gene['hgnc_id']
            gene_dict.setdefault(hgnc_id, set()).add(panel_name)

    LOG.info("Gene to panels done")
    return gene_dict
"resource": ""
} |
def update_panel(self, panel_obj, version=None, date_obj=None):
    """Replace a existing gene panel with a new one.

    Keeps the object id.

    Args:
        panel_obj(dict)
        version(float): new version number; when bumping the version the
            panel date is only changed if ``date_obj`` is supplied
        date_obj(datetime.datetime): explicit date to stamp on the panel;
            without a version bump it defaults to now

    Returns:
        updated_panel(dict)
    """
    LOG.info("Updating panel %s", panel_obj['panel_name'])
    # update date of panel to "today"
    date = panel_obj['date']
    if version:
        LOG.info("Updating version from {0} to version {1}".format(
            panel_obj['version'], version))
        panel_obj['version'] = version
        # Updating version should not update date
        if date_obj:
            date = date_obj
    else:
        # No version bump: stamp the given date, or now
        date = date_obj or dt.datetime.now()
    panel_obj['date'] = date
    # Replace the whole document in place, keeping the same '_id'
    updated_panel = self.panel_collection.find_one_and_replace(
        {'_id': panel_obj['_id']},
        panel_obj,
        return_document=pymongo.ReturnDocument.AFTER
    )
    return updated_panel
"resource": ""
} |
q273385 | PanelHandler.add_pending | test | def add_pending(self, panel_obj, hgnc_gene, action, info=None):
"""Add a pending action to a gene panel
Store the pending actions in panel.pending
Args:
panel_obj(dict): The panel that is about to be updated
hgnc_gene(dict)
action(str): choices=['add','delete','edit']
info(dict): additional gene info (disease_associated_transcripts,
reduced_penetrance, mosaicism, database_entry_version ,
inheritance_models, comment)
Returns:
updated_panel(dict):
"""
valid_actions = ['add', 'delete', 'edit']
if action not in valid_actions:
raise ValueError("Invalid action {0}".format(action))
info = info or {}
pending_action = {
'hgnc_id': hgnc_gene['hgnc_id'],
'action': action,
'info': info,
'symbol': hgnc_gene['hgnc_symbol'],
}
updated_panel = self.panel_collection.find_one_and_update(
{'_id': panel_obj['_id']},
{
'$addToSet': {
'pending': pending_action
}
},
return_document=pymongo.ReturnDocument.AFTER
)
return updated_panel | python | {
"resource": ""
} |
def apply_pending(self, panel_obj, version):
    """Apply the pending changes to an existing gene panel or create a new version of the same panel.

    When ``version`` equals the current panel version the panel document is
    replaced in place; otherwise the old panel is archived and a new
    document with the new version is inserted.

    Args:
        panel_obj(dict): panel in database to update
        version(double): panel version to update

    Returns:
        inserted_id(str): id of updated panel or the new one
    """
    updates = {}
    # Work on a copy; the stored document is only touched via the adapter
    new_panel = deepcopy(panel_obj)
    new_panel['pending'] = []
    new_panel['date'] = dt.datetime.now()
    # Gene-level fields that a pending 'add'/'edit' action may carry
    info_fields = ['disease_associated_transcripts', 'inheritance_models', 'reduced_penetrance',
                   'mosaicism', 'database_entry_version', 'comment']
    new_genes = []
    for update in panel_obj.get('pending', []):
        hgnc_id = update['hgnc_id']
        # If action is add we create a new gene object
        if update['action'] != 'add':
            # 'delete'/'edit' are applied while looping the existing genes
            updates[hgnc_id] = update
            continue
        info = update.get('info', {})
        gene_obj = {
            'hgnc_id': hgnc_id,
            'symbol': update['symbol']
        }
        for field in info_fields:
            if field in info:
                gene_obj[field] = info[field]
        new_genes.append(gene_obj)
    for gene in panel_obj['genes']:
        hgnc_id = gene['hgnc_id']
        if hgnc_id not in updates:
            # No pending action for this gene: keep it unchanged
            new_genes.append(gene)
            continue
        current_update = updates[hgnc_id]
        action = current_update['action']
        info = current_update['info']
        # If action is delete we do not add the gene to new genes
        if action == 'delete':
            continue
        elif action == 'edit':
            for field in info_fields:
                if field in info:
                    gene[field] = info[field]
            new_genes.append(gene)
    new_panel['genes'] = new_genes
    new_panel['version'] = float(version)
    inserted_id = None
    # if the same version of the panel should be updated
    if new_panel['version'] == panel_obj['version']:
        # replace panel_obj with new_panel
        result = self.panel_collection.find_one_and_replace(
            {'_id': panel_obj['_id']},
            new_panel,
            return_document=pymongo.ReturnDocument.AFTER
        )
        inserted_id = result['_id']
    else:  # create a new version of the same panel
        new_panel.pop('_id')
        # archive the old panel
        panel_obj['is_archived'] = True
        self.update_panel(panel_obj=panel_obj, date_obj=panel_obj['date'])
        # insert the new panel
        inserted_id = self.panel_collection.insert_one(new_panel).inserted_id
    return inserted_id
"resource": ""
} |
def clinical_symbols(self, case_obj):
    """Return all the clinical gene symbols for a case."""
    panel_ids = [panel['panel_id'] for panel in case_obj['panels']]
    # Aggregate the distinct gene symbols over all the case's panels
    pipeline = [
        {'$match': {'_id': {'$in': panel_ids}}},
        {'$unwind': '$genes'},
        {'$group': {'_id': '$genes.symbol'}},
    ]
    results = self.panel_collection.aggregate(pipeline)
    return {item['_id'] for item in results}
"resource": ""
} |
def cases(context, case_id, institute, reruns, finished, causatives, research_requested,
          is_research, status, json):
    """Interact with cases existing in the database."""
    adapter = context.obj['adapter']

    models = []
    if case_id:
        case_obj = adapter.case(case_id=case_id)
        if case_obj:
            models.append(case_obj)
        else:
            LOG.info("No case with id {}".format(case_id))
    else:
        models = adapter.cases(collaborator=institute, reruns=reruns,
                               finished=finished, has_causatives=causatives,
                               research_requested=research_requested,
                               is_research=is_research, status=status)

    # Materialize the cursor so it can be counted and dumped
    models = list(models)
    if not models:
        LOG.info("No cases could be found")

    if json:
        click.echo(dumps(models))
        return

    for model in models:
        pp(model)
"resource": ""
} |
def emit(self, record):
    """Emit a record.

    Format the record and send it to the specified addressees.

    Same flow as logging.handlers.SMTPHandler.emit, but upgrades the
    connection with STARTTLS before authenticating so login works against
    servers that require TLS.
    """
    try:
        import smtplib
        try:
            from email.utils import formatdate
        except ImportError:
            # Fall back to the handler's own date helper when
            # email.utils is unavailable
            formatdate = self.date_time
        port = self.mailport
        if not port:
            port = smtplib.SMTP_PORT
        smtp = smtplib.SMTP(self.mailhost, port)
        msg = self.format(record)
        # Build the raw RFC 2822 message by hand
        msg = "From: %s\r\nTo: %s\r\nSubject: %s\r\nDate: %s\r\n\r\n%s" % (
            self.fromaddr,
            ','.join(self.toaddrs),
            self.getSubject(record),
            formatdate(), msg
        )
        if self.username:
            smtp.ehlo()  # For 'tls', add this line
            smtp.starttls()  # For 'tls', add this line
            smtp.ehlo()  # For 'tls', add this line
            smtp.login(self.username, self.password)
        smtp.sendmail(self.fromaddr, self.toaddrs, msg)
        smtp.quit()
    except (KeyboardInterrupt, SystemExit):
        # Never swallow interpreter-shutdown signals
        raise
    except:
        # Delegate everything else to logging's standard error handling
        # (matches the stdlib Handler.emit convention)
        self.handleError(record)
"resource": ""
} |
def load_indexes(self):
    """Add the proper indexes to the scout instance.

    All indexes are specified in scout/constants/indexes.py
    If this method is utilised when new indexes are defined those should be added
    """
    for collection_name, index_models in INDEXES.items():
        current = self.indexes(collection_name)
        # drop any index we are about to recreate
        for index_model in index_models:
            name = index_model.document.get('name')
            if name in current:
                LOG.info("Deleting old index: %s" % name)
                self.db[collection_name].drop_index(name)
        names = ', '.join(model.document.get('name') for model in index_models)
        LOG.info("creating indexes for {0} collection: {1}".format(
            collection_name, names))
        self.db[collection_name].create_indexes(index_models)
"resource": ""
} |
def update_indexes(self):
    """Update the indexes

    If there are any indexes that are not added to the database, add those.
    """
    LOG.info("Updating indexes...")
    nr_updated = 0
    for collection_name in INDEXES:
        existing_indexes = self.indexes(collection_name)
        indexes = INDEXES[collection_name]
        # collect only the indexes that are not yet in the database
        missing = [index for index in indexes
                   if index.document.get('name') not in existing_indexes]
        for index in missing:
            LOG.info("Adding index : %s" % index.document.get('name'))
        if missing:
            nr_updated += len(missing)
            # BUG FIX: the original passed the FULL index list to
            # create_indexes once per missing index; create only the
            # missing ones, in a single call per collection
            self.db[collection_name].create_indexes(missing)
    if nr_updated == 0:
        LOG.info("All indexes in place")
"resource": ""
} |
def drop_indexes(self):
    """Delete all indexes for the database."""
    # fixed typo in the original log message ("indexe" -> "indexes")
    LOG.warning("Dropping all indexes")
    for collection_name in INDEXES:
        LOG.warning("Dropping all indexes for collection name %s", collection_name)
        self.db[collection_name].drop_indexes()
"resource": ""
} |
def build_variant_query(self, query=None, category='snv', variant_type=('clinical',)):
    """Build a mongo query across multiple cases.

    Translate query options from a form into a complete mongo query dictionary.

    Beware that unindexed queries against a large variant collection will
    be extremely slow.

    Currently indexed query options:
        hgnc_symbols
        rank_score
        variant_type
        category

    Args:
        query(dict): A query dictionary for the database, from a query form.
        category(str): 'snv', 'sv', 'str' or 'cancer'
        variant_type(sequence(str)): any of 'clinical' and 'research'
            (was a mutable list default; an immutable tuple avoids the
            shared-default pitfall and pymongo encodes both as arrays)

    Returns:
        mongo_variant_query : A dictionary in the mongo query format.
    """
    query = query or {}
    mongo_variant_query = {}

    LOG.debug("Building a mongo query for %s" % query)

    if query.get('hgnc_symbols'):
        mongo_variant_query['hgnc_symbols'] = {'$in': query['hgnc_symbols']}

    mongo_variant_query['variant_type'] = {'$in': list(variant_type)}

    mongo_variant_query['category'] = category

    # default threshold when the form does not supply one
    rank_score = query.get('rank_score') or 15
    mongo_variant_query['rank_score'] = {'$gte': rank_score}

    LOG.debug("Querying %s" % mongo_variant_query)

    return mongo_variant_query
"resource": ""
} |
def build_query(self, case_id, query=None, variant_ids=None, category='snv'):
    """Build a mongo query for one case.

    These are the different query options:
        {
            'genetic_models': list(str),
            'chrom': str,
            'thousand_genomes_frequency': float,
            'exac_frequency': float,
            'clingen_ngi': int,
            'cadd_score': float,
            'cadd_inclusive': boolean,
            'hgnc_symbols': list,
            'region_annotations': list,
            'functional_annotations': list,
            'clinsig': list,
            'clinsig_confident_always_returned': boolean,
            'variant_type': str(('research', 'clinical')),
            'start': int,
            'end': int,
            'svtype': list,
            'size': int,
            'size_shorter': boolean,
            'gene_panels': list(str),
            'mvl_tag': boolean,
            'decipher': boolean,
        }

    Arguments:
        case_id(str)
        query(dict): a dictionary of query filters specified by the users
        variant_ids(list(str)): A list of md5 variant ids
        category(str): 'snv', 'sv', 'str' or 'cancer'

    Returns:
        mongo_query : A dictionary in the mongo query format
    """
    query = query or {}
    mongo_query = {}
    gene_query = None

    ##### Base query params
    # set up the fundamental query params: case_id, category, type and
    # restrict to list of variants (if var list is provided)
    for criterion in FUNDAMENTAL_CRITERIA:
        if criterion == 'case_id':
            LOG.debug("Building a mongo query for %s" % case_id)
            mongo_query['case_id'] = case_id
        elif criterion == 'variant_ids' and variant_ids:
            LOG.debug("Adding variant_ids %s to query" % ', '.join(variant_ids))
            mongo_query['variant_id'] = {'$in': variant_ids}
        elif criterion == 'category':
            LOG.debug("Querying category %s" % category)
            mongo_query['category'] = category
        elif criterion == 'variant_type':
            mongo_query['variant_type'] = query.get('variant_type', 'clinical')
            LOG.debug("Set variant type to %s", mongo_query['variant_type'])
        # Requests to filter based on gene panels, hgnc_symbols or
        # coordinate ranges must always be honored. They are always added to
        # query as top level, implicit '$and'. When both hgnc_symbols and a
        # panel is used, addition of this is delayed until after the rest of
        # the query content is clear.
        elif criterion in ['hgnc_symbols', 'gene_panels'] and gene_query is None:
            gene_query = self.gene_filter(query, mongo_query)
        elif criterion == 'chrom' and query.get('chrom'):  # filter by coordinates
            self.coordinate_filter(query, mongo_query)
        # NOTE: a duplicated, unreachable 'variant_ids' elif branch from the
        # original implementation was removed here

    ##### end of fundamental query params

    ##### start of the custom query params
    # there is only 'clinsig' criterion among the primary terms right now
    primary_terms = any(query.get(term) for term in PRIMARY_CRITERIA)
    # gnomad_frequency, local_obs, clingen_ngi, swegen, spidex_human, cadd_score,
    # genetic_models, mvl_tag, functional_annotations, region_annotations, size,
    # svtype, decipher, depth, alt_count, control_frequency
    secondary_terms = any(query.get(term) for term in SECONDARY_CRITERIA)

    if primary_terms is True:
        clinsign_filter = self.clinsig_query(query, mongo_query)

    # Secondary, excluding filter criteria will hide variants in general,
    # but can be overridden by an including, major filter criteria
    # such as a Pathogenic ClinSig.
    if secondary_terms is True:
        secondary_filter = self.secondary_query(query, mongo_query)
        # If there are no primary criteria given, all secondary criteria are added as a
        # top level '$and' to the query.
        if primary_terms is False:
            if gene_query:
                mongo_query['$and'] = [ {'$or': gene_query}, {'$and': secondary_filter}]
            else:
                mongo_query['$and'] = secondary_filter

        # If there is only one primary criterion given without any secondary, it will also be
        # added as a top level '$and'.
        # Otherwise, primary criteria are added as a high level '$or' and all secondary criteria
        # are joined together with them as a single lower level '$and'.
        if primary_terms is True:  # clinsig is specified
            # Given a request to always return confident clinical variants,
            # add the clnsig query as a major criteria, but only
            # trust clnsig entries with trusted revstat levels.
            if query.get('clinsig_confident_always_returned') == True:
                if gene_query:
                    mongo_query['$and'] = [
                        {'$or': gene_query},
                        {
                            '$or': [
                                {'$and': secondary_filter}, clinsign_filter
                            ]
                        }
                    ]
                else:
                    mongo_query['$or'] = [ {'$and': secondary_filter}, clinsign_filter ]
            else:  # clinsig terms are provided but no need for trusted revstat levels
                secondary_filter.append(clinsign_filter)
                if gene_query:
                    mongo_query['$and'] = [ {'$or': gene_query}, {'$and': secondary_filter}]
                else:
                    mongo_query['$and'] = secondary_filter

    elif primary_terms is True:  # clinsig is provided without secondary terms query
        # use implicit and
        mongo_query['clnsig'] = clinsign_filter['clnsig']
        if gene_query:
            mongo_query['$and'] = [{ '$or': gene_query }]

    elif gene_query:  # no primary or secondary filters provided
        mongo_query['$and'] = [{ '$or': gene_query }]

    LOG.info("mongo query: %s", mongo_query)

    return mongo_query
"resource": ""
} |
def clinsig_query(self, query, mongo_query):
    """Add clinsig filter values to the mongo query object.

    Builds an ``$elemMatch`` over the ``clnsig`` array that matches either
    the numeric rank values or their human-readable equivalents (newer
    cases store strings). When 'clinsig_confident_always_returned' is set,
    the match additionally requires a trusted revision status.

    Args:
        query(dict): a dictionary of query filters specified by the users
        mongo_query(dict): the query that is going to be submitted to the database

    Returns:
        clnsig_query(dict): a dictionary with clinsig key-values
    """
    LOG.debug('clinsig is a query parameter')
    trusted_revision_level = ['mult', 'single', 'exp', 'guideline']
    rank = []
    str_rank = []
    clnsig_query = {}

    for item in query['clinsig']:
        rank.append(int(item))
        # search for human readable clinsig values in newer cases
        rank.append(CLINSIG_MAP[int(item)])
        str_rank.append(CLINSIG_MAP[int(item)])

    if query.get('clinsig_confident_always_returned') == True:
        LOG.debug("add CLINSIG filter with trusted_revision_level")
        clnsig_query = { "clnsig":
            {
                '$elemMatch': {
                    '$or' : [
                        {
                            '$and' : [
                                {'value' : { '$in': rank }},
                                {'revstat': { '$in': trusted_revision_level }}
                            ]
                        },
                        {
                            '$and': [
                                {'value' : re.compile('|'.join(str_rank))},
                                {'revstat' : re.compile('|'.join(trusted_revision_level))}
                            ]
                        }
                    ]
                }
            }
        }
    else:
        # BUG FIX: the original joined the characters of str(list) --
        # join the string form of each requested rank instead
        LOG.debug("add CLINSIG filter for rank: %s" %
                  ', '.join(str(item) for item in query['clinsig']))

        clnsig_query = {
            "clnsig":
            {
                '$elemMatch': {
                    '$or' : [
                        { 'value' : { '$in': rank }},
                        { 'value' : re.compile('|'.join(str_rank)) }
                    ]
                }
            }
        }

    return clnsig_query
"resource": ""
} |
def coordinate_filter(self, query, mongo_query):
    """Add genomic coordinate filters to the query object.

    Args:
        query(dict): a dictionary of query filters specified by the users
        mongo_query(dict): the query that is going to be submitted to the database

    Returns:
        mongo_query(dict): returned object contains coordinate filters
    """
    LOG.debug('Adding genomic coordinates to the query')
    mongo_query['chromosome'] = query['chrom']
    start = query.get('start')
    end = query.get('end')
    if start and end:
        # keep variants that overlap the requested [start, end] interval
        mongo_query['position'] = {'$lte': int(end)}
        mongo_query['end'] = {'$gte': int(start)}
    return mongo_query
"resource": ""
} |
def gene_filter(self, query, mongo_query):
    """Add gene- and panel-related filters to the query object.

    Args:
        query(dict): a dictionary of query filters specified by the users
        mongo_query(dict): the query that is going to be submitted to the database

    Returns:
        gene_query(list): '$or' clauses when both symbols and panels are
            requested; otherwise an empty list (filters are written
            directly into mongo_query)
    """
    LOG.debug('Adding panel and genes-related parameters to the query')
    symbols = query.get('hgnc_symbols')
    panels = query.get('gene_panels')
    gene_query = []

    if symbols and panels:
        # both criteria requested: a variant may match either one
        gene_query = [
            {'hgnc_symbols': {'$in': symbols}},
            {'panels': {'$in': panels}},
        ]
        mongo_query['$or'] = gene_query
    else:
        if symbols:
            mongo_query['hgnc_symbols'] = {'$in': symbols}
            LOG.debug("Adding hgnc_symbols: %s to query" %
                      ', '.join(symbols))
        if panels:
            mongo_query['panels'] = {'$in': panels}

    return gene_query
"resource": ""
} |
def wipe(ctx):
    """Drop the mongo database given."""
    LOG.info("Running scout wipe")
    database_name = ctx.obj['mongodb']
    mongo_client = ctx.obj['client']
    LOG.info("Dropping database %s", database_name)
    try:
        mongo_client.drop_database(database_name)
    except Exception as err:
        # abort the CLI command on any driver error
        LOG.warning(err)
        ctx.abort()
    LOG.info("Dropped whole database")
"resource": ""
} |
def parse_panel(csv_stream):
    """Parse a user submitted gene panel.

    Reads a semicolon-separated stream and returns one dict per row that
    carries a numeric HGNC id; rows without one (e.g. headers repeated in
    the body, malformed lines) are skipped.
    """
    rows = csv.DictReader(csv_stream, delimiter=';', quoting=csv.QUOTE_NONE)
    parsed_genes = []
    for row in rows:
        hgnc_raw = row['HGNC_IDnumber'].strip()
        if not hgnc_raw.isdigit():
            continue

        raw_transcripts = row.get('Disease_associated_transcript')
        transcripts = []
        if raw_transcripts:
            # entries may look like "NM_...:tx"; keep the part after the first ':'
            transcripts = [part.split(':', 1)[-1].strip()
                           for part in raw_transcripts.split(',')]

        raw_models = row.get('Genetic_disease_model')
        models = [entry.strip() for entry in raw_models.split(',')] if raw_models else []

        raw_symbol = row.get('HGNC_symbol')
        parsed_genes.append({
            'symbol': raw_symbol.strip() if raw_symbol else None,
            'hgnc_id': int(hgnc_raw),
            'disease_associated_transcripts': transcripts,
            'reduced_penetrance': True if row.get('Reduced_penetrance') else None,
            'mosaicism': True if row.get('Mosaicism') else None,
            'inheritance_models': models,
            'database_entry_version': row.get('Database_entry_version'),
        })
    return parsed_genes
"resource": ""
} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.