idx
int64
0
251k
question
stringlengths
53
3.53k
target
stringlengths
5
1.23k
len_question
int64
20
893
len_target
int64
3
238
250,200
def parse ( self , limit : Optional [ int ] = None ) : if limit is not None : LOG . info ( "Only parsing first %d rows" , limit ) LOG . info ( "Parsing files..." ) file_path = '/' . join ( ( self . rawdir , self . files [ 'developmental_disorders' ] [ 'file' ] ) ) with gzip . open ( file_path , 'rt' ) as csvfile : reader = csv . reader ( csvfile ) next ( reader ) # header for row in reader : if limit is None or reader . line_num <= ( limit + 1 ) : self . _add_gene_disease ( row ) else : break LOG . info ( "Done parsing." )
Here we parse each row of the gene to phenotype file
165
11
250,201
def _add_gene_disease ( self , row ) : # ::List getting syntax error here col = self . files [ 'developmental_disorders' ] [ 'columns' ] if len ( row ) != len ( col ) : raise ValueError ( "Unexpected number of fields for row {}" . format ( row ) ) variant_label = "variant of {}" . format ( row [ col . index ( 'gene_symbol' ) ] ) disease_omim_id = row [ col . index ( 'disease_omim_id' ) ] if disease_omim_id == 'No disease mim' : # check if we've manually curated disease_label = row [ col . index ( 'disease_label' ) ] if disease_label in self . mondo_map : disease_id = self . mondo_map [ disease_label ] else : return # sorry for this else : disease_id = 'OMIM:' + disease_omim_id hgnc_curie = 'HGNC:' + row [ col . index ( 'hgnc_id' ) ] relation_curie = self . resolve ( row [ col . index ( 'g2p_relation_label' ) ] ) mutation_consequence = row [ col . index ( 'mutation_consequence' ) ] if mutation_consequence not in ( 'uncertain' , '' ) : consequence_relation = self . resolve ( self . _get_consequence_predicate ( mutation_consequence ) ) consequence_curie = self . resolve ( mutation_consequence ) variant_label = "{} {}" . format ( mutation_consequence , variant_label ) else : consequence_relation = None consequence_curie = None allelic_requirement = row [ col . index ( 'allelic_requirement' ) ] if allelic_requirement != '' : requirement_curie = self . resolve ( allelic_requirement ) else : requirement_curie = None pmids = row [ col . index ( 'pmids' ) ] if pmids != '' : pmid_list = [ 'PMID:' + pmid for pmid in pmids . split ( ';' ) ] else : pmid_list = [ ] # build the model # Should we build a reusable object and/or tuple that # could be passed to a more general model builder for # this and orphanet (and maybe clinvar) self . _build_gene_disease_model ( hgnc_curie , relation_curie , disease_id , variant_label , consequence_relation , consequence_curie , requirement_curie , pmid_list )
Parse and add gene variant disease model Model building happens in _build_gene_disease_model
583
23
250,202
def _build_gene_disease_model ( self , gene_id , relation_id , disease_id , variant_label , consequence_predicate = None , consequence_id = None , allelic_requirement = None , pmids = None ) : model = Model ( self . graph ) geno = Genotype ( self . graph ) pmids = [ ] if pmids is None else pmids is_variant = False variant_or_gene = gene_id variant_id_string = variant_label variant_bnode = self . make_id ( variant_id_string , "_" ) if consequence_predicate is not None and consequence_id is not None : is_variant = True model . addTriple ( variant_bnode , consequence_predicate , consequence_id ) # Hack to add labels to terms that # don't exist in an ontology if consequence_id . startswith ( ':' ) : model . addLabel ( consequence_id , consequence_id . strip ( ':' ) . replace ( '_' , ' ' ) ) if is_variant : variant_or_gene = variant_bnode # Typically we would type the variant using the # molecular consequence, but these are not specific # enough for us to make mappings (see translation table) model . addIndividualToGraph ( variant_bnode , variant_label , self . globaltt [ 'variant_locus' ] ) geno . addAffectedLocus ( variant_bnode , gene_id ) model . addBlankNodeAnnotation ( variant_bnode ) assoc = G2PAssoc ( self . graph , self . name , variant_or_gene , disease_id , relation_id ) assoc . source = pmids assoc . add_association_to_graph ( ) if allelic_requirement is not None and is_variant is False : model . addTriple ( assoc . assoc_id , self . globaltt [ 'has_allelic_requirement' ] , allelic_requirement ) if allelic_requirement . startswith ( ':' ) : model . addLabel ( allelic_requirement , allelic_requirement . strip ( ':' ) . replace ( '_' , ' ' ) )
Builds gene variant disease model
498
6
250,203
def _get_identifiers ( self , limit ) : LOG . info ( "getting identifier mapping" ) line_counter = 0 f = '/' . join ( ( self . rawdir , self . files [ 'identifiers' ] [ 'file' ] ) ) myzip = ZipFile ( f , 'r' ) # assume that the first entry is the item fname = myzip . namelist ( ) [ 0 ] foundheader = False # TODO align this species filter with the one above # speciesfilters = 'Homo sapiens,Mus musculus,Drosophila melanogaster, # Danio rerio, Caenorhabditis elegans,Xenopus laevis'.split(',') speciesfilters = 'Homo sapiens,Mus musculus' . split ( ',' ) with myzip . open ( fname , 'r' ) as csvfile : for line in csvfile : # skip header lines if not foundheader : if re . match ( r'BIOGRID_ID' , line . decode ( ) ) : foundheader = True continue line = line . decode ( ) . strip ( ) # BIOGRID_ID # IDENTIFIER_VALUE # IDENTIFIER_TYPE # ORGANISM_OFFICIAL_NAME # 1 814566 ENTREZ_GENE Arabidopsis thaliana ( biogrid_num , id_num , id_type , organism_label ) = line . split ( '\t' ) if self . test_mode : graph = self . testgraph # skip any genes that don't match our test set if int ( biogrid_num ) not in self . biogrid_ids : continue else : graph = self . graph model = Model ( graph ) # for each one of these, # create the node and add equivalent classes biogrid_id = 'BIOGRID:' + biogrid_num prefix = self . localtt [ id_type ] # TODO make these filters available as commandline options # geneidtypefilters='NCBIGene,OMIM,MGI,FlyBase,ZFIN,MGI,HGNC, # WormBase,XenBase,ENSEMBL,miRBase'.split(',') geneidtypefilters = 'NCBIGene,MGI,ENSEMBL,ZFIN,HGNC' . split ( ',' ) # proteinidtypefilters='HPRD,Swiss-Prot,NCBIProtein' if ( speciesfilters is not None ) and ( organism_label . strip ( ) in speciesfilters ) : line_counter += 1 if ( geneidtypefilters is not None ) and ( prefix in geneidtypefilters ) : mapped_id = ':' . join ( ( prefix , id_num ) ) model . addEquivalentClass ( biogrid_id , mapped_id ) # this symbol will only get attached to the biogrid class elif id_type == 'OFFICIAL_SYMBOL' : model . addClassToGraph ( biogrid_id , id_num ) # elif (id_type == 'SYNONYM'): # FIXME - i am not sure these are synonyms, altids? # gu.addSynonym(g,biogrid_id,id_num) if not self . test_mode and limit is not None and line_counter > limit : break myzip . close ( ) return
This will process the id mapping file provided by Biogrid . The file has a very large header which we scan past then pull the identifiers and make equivalence axioms
742
35
250,204
def add_supporting_evidence ( self , evidence_line , evidence_type = None , label = None ) : self . graph . addTriple ( self . association , self . globaltt [ 'has_supporting_evidence_line' ] , evidence_line ) if evidence_type is not None : self . model . addIndividualToGraph ( evidence_line , label , evidence_type ) return
Add supporting line of evidence node to association id
86
9
250,205
def add_association_to_graph ( self ) : Assoc . add_association_to_graph ( self ) # make a blank stage if self . start_stage_id or self . end_stage_id is not None : stage_process_id = '-' . join ( ( str ( self . start_stage_id ) , str ( self . end_stage_id ) ) ) stage_process_id = '_:' + re . sub ( r':' , '' , stage_process_id ) self . model . addIndividualToGraph ( stage_process_id , None , self . globaltt [ 'developmental_process' ] ) self . graph . addTriple ( stage_process_id , self . globaltt [ 'starts during' ] , self . start_stage_id ) self . graph . addTriple ( stage_process_id , self . globaltt [ 'ends during' ] , self . end_stage_id ) self . stage_process_id = stage_process_id self . graph . addTriple ( self . assoc_id , self . globaltt [ 'has_qualifier' ] , self . stage_process_id ) if self . environment_id is not None : self . graph . addTriple ( self . assoc_id , self . globaltt [ 'has_qualifier' ] , self . environment_id ) return
Overrides Association by including bnode support
304
9
250,206
def parse ( self , limit = None ) : if limit is not None : LOG . info ( "Only parsing first %s rows fo each file" , str ( limit ) ) LOG . info ( "Parsing files..." ) self . _process_straininfo ( limit ) # the following will provide us the hash-lookups # These must be processed in a specific order # mapping between assays and ontology terms self . _process_ontology_mappings_file ( limit ) # this is the metadata about the measurements self . _process_measurements_file ( limit ) # get all the measurements per strain self . _process_strainmeans_file ( limit ) # The following will use the hash populated above # to lookup the ids when filling in the graph self . _fill_provenance_graph ( limit ) LOG . info ( "Finished parsing." ) return
MPD data is delivered in four separate csv files and one xml file which we process iteratively and write out as one large graph .
188
28
250,207
def _add_g2p_assoc ( self , graph , strain_id , sex , assay_id , phenotypes , comment ) : geno = Genotype ( graph ) model = Model ( graph ) eco_id = self . globaltt [ 'experimental phenotypic evidence' ] strain_label = self . idlabel_hash . get ( strain_id ) # strain genotype genotype_id = '_:' + '-' . join ( ( re . sub ( r':' , '' , strain_id ) , 'genotype' ) ) genotype_label = '[' + strain_label + ']' sex_specific_genotype_id = '_:' + '-' . join ( ( re . sub ( r':' , '' , strain_id ) , sex , 'genotype' ) ) if strain_label is not None : sex_specific_genotype_label = strain_label + ' (' + sex + ')' else : sex_specific_genotype_label = strain_id + '(' + sex + ')' genotype_type = self . globaltt [ 'sex_qualified_genotype' ] if sex == 'm' : genotype_type = self . globaltt [ 'male_genotype' ] elif sex == 'f' : genotype_type = self . globaltt [ 'female_genotype' ] # add the genotype to strain connection geno . addGenotype ( genotype_id , genotype_label , self . globaltt [ 'genomic_background' ] ) graph . addTriple ( strain_id , self . globaltt [ 'has_genotype' ] , genotype_id ) geno . addGenotype ( sex_specific_genotype_id , sex_specific_genotype_label , genotype_type ) # add the strain as the background for the genotype graph . addTriple ( sex_specific_genotype_id , self . globaltt [ 'has_sex_agnostic_part' ] , genotype_id ) # ############# BUILD THE G2P ASSOC ############# # TODO add more provenance info when that model is completed if phenotypes is not None : for phenotype_id in phenotypes : assoc = G2PAssoc ( graph , self . name , sex_specific_genotype_id , phenotype_id ) assoc . add_evidence ( assay_id ) assoc . add_evidence ( eco_id ) assoc . add_association_to_graph ( ) assoc_id = assoc . get_association_id ( ) model . addComment ( assoc_id , comment ) model . _addSexSpecificity ( assoc_id , self . resolve ( sex ) ) return
Create an association between a sex - specific strain id and each of the phenotypes . Here we create a genotype from the strain and a sex - specific genotype . Each of those genotypes are created as anonymous nodes .
593
45
250,208
def parse ( self , limit = None ) : if limit is not None : LOG . info ( "Only parsing first %s rows fo each file" , str ( limit ) ) LOG . info ( "Parsing files..." ) if self . test_only : self . test_mode = True # for f in ['impc', 'euro', 'mgd', '3i']: for f in [ 'all' ] : file = '/' . join ( ( self . rawdir , self . files [ f ] [ 'file' ] ) ) self . _process_data ( file , limit ) LOG . info ( "Finished parsing" ) return
IMPC data is delivered in three separate csv files OR in one integrated file each with the same file format .
139
23
250,209
def addGeneToPathway ( self , gene_id , pathway_id ) : gene_product = '_:' + re . sub ( r':' , '' , gene_id ) + 'product' self . model . addIndividualToGraph ( gene_product , None , self . globaltt [ 'gene_product' ] ) self . graph . addTriple ( gene_id , self . globaltt [ 'has gene product' ] , gene_product ) self . addComponentToPathway ( gene_product , pathway_id ) return
When adding a gene to a pathway we create an intermediate gene product that is involved in the pathway through a blank node .
117
24
250,210
def addComponentToPathway ( self , component_id , pathway_id ) : self . graph . addTriple ( component_id , self . globaltt [ 'involved in' ] , pathway_id ) return
This can be used directly when the component is directly involved in the pathway . If a transforming event is performed on the component first then the addGeneToPathway should be used instead .
46
37
250,211
def write ( self , fmt = 'turtle' , stream = None ) : fmt_ext = { 'rdfxml' : 'xml' , 'turtle' : 'ttl' , 'nt' : 'nt' , # ntriples 'nquads' : 'nq' , 'n3' : 'n3' # notation3 } # make the regular graph output file dest = None if self . name is not None : dest = '/' . join ( ( self . outdir , self . name ) ) if fmt in fmt_ext : dest = '.' . join ( ( dest , fmt_ext . get ( fmt ) ) ) else : dest = '.' . join ( ( dest , fmt ) ) LOG . info ( "Setting outfile to %s" , dest ) # make the dataset_file name, always format as turtle self . datasetfile = '/' . join ( ( self . outdir , self . name + '_dataset.ttl' ) ) LOG . info ( "Setting dataset file to %s" , self . datasetfile ) if self . dataset is not None and self . dataset . version is None : self . dataset . set_version_by_date ( ) LOG . info ( "No version for %s setting to date issued." , self . name ) else : LOG . warning ( "No output file set. Using stdout" ) stream = 'stdout' gu = GraphUtils ( None ) # the _dataset description is always turtle gu . write ( self . dataset . getGraph ( ) , 'turtle' , filename = self . datasetfile ) if self . test_mode : # unless we stop hardcoding, the test dataset is always turtle LOG . info ( "Setting testfile to %s" , self . testfile ) gu . write ( self . testgraph , 'turtle' , filename = self . testfile ) # print graph out if stream is None : outfile = dest elif stream . lower ( ) . strip ( ) == 'stdout' : outfile = None else : LOG . error ( "I don't understand our stream." ) return gu . write ( self . graph , fmt , filename = outfile )
This convenience method will write out all of the graphs associated with the source . Right now these are hardcoded to be a single graph and a src_dataset . ttl and a src_test . ttl If you do not supply stream = stdout it will default write these to files .
472
61
250,212
def declareAsOntology ( self , graph ) : # <http://data.monarchinitiative.org/ttl/biogrid.ttl> a owl:Ontology ; # owl:versionInfo # <https://archive.monarchinitiative.org/YYYYMM/ttl/biogrid.ttl> model = Model ( graph ) # is self.outfile suffix set yet??? ontology_file_id = 'MonarchData:' + self . name + ".ttl" model . addOntologyDeclaration ( ontology_file_id ) # add timestamp as version info cur_time = datetime . now ( ) t_string = cur_time . strftime ( "%Y-%m-%d" ) ontology_version = t_string # TEC this means the MonarchArchive IRI needs the release updated # maybe extract the version info from there # should not hardcode the suffix as it may change archive_url = 'MonarchArchive:' + 'ttl/' + self . name + '.ttl' model . addOWLVersionIRI ( ontology_file_id , archive_url ) model . addOWLVersionInfo ( ontology_file_id , ontology_version )
The file we output needs to be declared as an ontology including it s version information .
270
18
250,213
def remove_backslash_r ( filename , encoding ) : with open ( filename , 'r' , encoding = encoding , newline = r'\n' ) as filereader : contents = filereader . read ( ) contents = re . sub ( r'\r' , '' , contents ) with open ( filename , "w" ) as filewriter : filewriter . truncate ( ) filewriter . write ( contents )
A helpful utility to remove Carriage Return from any file . This will read a file into memory and overwrite the contents of the original file .
91
28
250,214
def load_local_translationtable ( self , name ) : localtt_file = 'translationtable/' + name + '.yaml' try : with open ( localtt_file ) : pass except IOError : # write a stub file as a place holder if none exists with open ( localtt_file , 'w' ) as write_yaml : yaml . dump ( { name : name } , write_yaml ) finally : with open ( localtt_file , 'r' ) as read_yaml : localtt = yaml . safe_load ( read_yaml ) # inverse local translation. # note: keeping this invertable will be work. # Useful to not litter an ingest with external syntax self . localtcid = { v : k for k , v in localtt . items ( ) } return localtt
Load ingest specific translation from whatever they called something to the ontology label we need to map it to . To facilitate seeing more ontology lables in dipper ingests a reverse mapping from ontology lables to external strings is also generated and available as a dict localtcid
179
56
250,215
def addGene ( self , gene_id , gene_label , gene_type = None , gene_description = None ) : if gene_type is None : gene_type = self . globaltt [ 'gene' ] self . model . addClassToGraph ( gene_id , gene_label , gene_type , gene_description ) return
genes are classes
74
4
250,216
def get_ncbi_taxon_num_by_label ( label ) : req = { 'db' : 'taxonomy' , 'retmode' : 'json' , 'term' : label } req . update ( EREQ ) request = SESSION . get ( ESEARCH , params = req ) LOG . info ( 'fetching: %s' , request . url ) request . raise_for_status ( ) result = request . json ( ) [ 'esearchresult' ] # Occasionally eutils returns the json blob # {'ERROR': 'Invalid db name specified: taxonomy'} if 'ERROR' in result : request = SESSION . get ( ESEARCH , params = req ) LOG . info ( 'fetching: %s' , request . url ) request . raise_for_status ( ) result = request . json ( ) [ 'esearchresult' ] tax_num = None if 'count' in result and str ( result [ 'count' ] ) == '1' : tax_num = result [ 'idlist' ] [ 0 ] else : # TODO throw errors LOG . warning ( 'ESEARCH for taxon label "%s" returns %s' , label , str ( result ) ) return tax_num
Here we want to look up the NCBI Taxon id using some kind of label . It will only return a result if there is a unique hit .
271
31
250,217
def set_association_id ( self , assoc_id = None ) : if assoc_id is None : self . assoc_id = self . make_association_id ( self . definedby , self . sub , self . rel , self . obj ) else : self . assoc_id = assoc_id return self . assoc_id
This will set the association ID based on the internal parts of the association . To be used in cases where an external association identifier should be used .
79
29
250,218
def make_association_id ( definedby , sub , pred , obj , attributes = None ) : items_to_hash = [ definedby , sub , pred , obj ] if attributes is not None and len ( attributes ) > 0 : items_to_hash += attributes items_to_hash = [ x for x in items_to_hash if x is not None ] assoc_id = ':' . join ( ( 'MONARCH' , GraphUtils . digest_id ( '+' . join ( items_to_hash ) ) ) ) assert assoc_id is not None return assoc_id
A method to create unique identifiers for OBAN - style associations based on all the parts of the association If any of the items is empty or None it will convert it to blank . It effectively digests the string of concatonated values . Subclasses of Assoc can submit an additional array of attributes that will be appeded to the ID .
132
69
250,219
def toRoman ( num ) : if not 0 < num < 5000 : raise ValueError ( "number %n out of range (must be 1..4999)" , num ) if int ( num ) != num : raise TypeError ( "decimals %n can not be converted" , num ) result = "" for numeral , integer in romanNumeralMap : while num >= integer : result += numeral num -= integer return result
convert integer to Roman numeral
92
7
250,220
def fromRoman ( strng ) : if not strng : raise TypeError ( 'Input can not be blank' ) if not romanNumeralPattern . search ( strng ) : raise ValueError ( 'Invalid Roman numeral: %s' , strng ) result = 0 index = 0 for numeral , integer in romanNumeralMap : while strng [ index : index + len ( numeral ) ] == numeral : result += integer index += len ( numeral ) return result
convert Roman numeral to integer
104
7
250,221
def _process_genotype_backgrounds ( self , limit = None ) : if self . test_mode : graph = self . testgraph else : graph = self . graph model = Model ( graph ) LOG . info ( "Processing genotype backgrounds" ) line_counter = 0 raw = '/' . join ( ( self . rawdir , self . files [ 'backgrounds' ] [ 'file' ] ) ) geno = Genotype ( graph ) # Add the taxon as a class taxon_id = self . globaltt [ 'Danio rerio' ] model . addClassToGraph ( taxon_id , None ) with open ( raw , 'r' , encoding = "iso-8859-1" ) as csvfile : filereader = csv . reader ( csvfile , delimiter = '\t' , quotechar = '\"' ) for row in filereader : line_counter += 1 # Genotype_ID Genotype_Name Background Background_Name ( genotype_id , genotype_name , background_id , unused ) = row if self . test_mode and genotype_id not in self . test_ids [ 'genotype' ] : continue genotype_id = 'ZFIN:' + genotype_id . strip ( ) background_id = 'ZFIN:' + background_id . strip ( ) # store this in the hash for later lookup # when building fish genotypes self . genotype_backgrounds [ genotype_id ] = background_id # add the background into the graph, # in case we haven't seen it before geno . addGenomicBackground ( background_id , None ) # hang the taxon from the background geno . addTaxon ( taxon_id , background_id ) # add the intrinsic genotype to the graph # we DO NOT ADD THE LABEL here # as it doesn't include the background geno . addGenotype ( genotype_id , None , self . globaltt [ 'intrinsic_genotype' ] ) # Add background to the intrinsic genotype geno . addGenomicBackgroundToGenotype ( background_id , genotype_id ) if not self . test_mode and limit is not None and line_counter > limit : break LOG . info ( "Done with genotype backgrounds" ) return
This table provides a mapping of genotypes to background genotypes Note that the background_id is also a genotype_id .
496
26
250,222
def _process_stages ( self , limit = None ) : if self . test_mode : graph = self . testgraph else : graph = self . graph model = Model ( graph ) LOG . info ( "Processing stages" ) line_counter = 0 raw = '/' . join ( ( self . rawdir , self . files [ 'stage' ] [ 'file' ] ) ) with open ( raw , 'r' , encoding = "iso-8859-1" ) as csvfile : filereader = csv . reader ( csvfile , delimiter = '\t' , quotechar = '\"' ) for row in filereader : line_counter += 1 ( stage_id , stage_obo_id , stage_name , begin_hours , end_hours # ,empty # till next time ) = row # Add the stage as a class, and it's obo equivalent stage_id = 'ZFIN:' + stage_id . strip ( ) model . addClassToGraph ( stage_id , stage_name ) model . addEquivalentClass ( stage_id , stage_obo_id ) if not self . test_mode and limit is not None and line_counter > limit : break LOG . info ( "Done with stages" ) return
This table provides mappings between ZFIN stage IDs and ZFS terms and includes the starting and ending hours for the developmental stage . Currently only processing the mapping from the ZFIN stage ID to the ZFS ID .
271
43
250,223
def _process_genes ( self , limit = None ) : LOG . info ( "Processing genes" ) if self . test_mode : graph = self . testgraph else : graph = self . graph model = Model ( graph ) line_counter = 0 raw = '/' . join ( ( self . rawdir , self . files [ 'gene' ] [ 'file' ] ) ) geno = Genotype ( graph ) with open ( raw , 'r' , encoding = "iso-8859-1" ) as csvfile : filereader = csv . reader ( csvfile , delimiter = '\t' , quotechar = '\"' ) for row in filereader : line_counter += 1 ( gene_id , gene_so_id , gene_symbol , ncbi_gene_id # , empty # till next time ) = row if self . test_mode and gene_id not in self . test_ids [ 'gene' ] : continue gene_id = 'ZFIN:' + gene_id . strip ( ) ncbi_gene_id = 'NCBIGene:' + ncbi_gene_id . strip ( ) self . id_label_map [ gene_id ] = gene_symbol if not self . test_mode and limit is not None and line_counter > limit : pass else : geno . addGene ( gene_id , gene_symbol ) model . addEquivalentClass ( gene_id , ncbi_gene_id ) LOG . info ( "Done with genes" ) return
This table provides the ZFIN gene id the SO type of the gene the gene symbol and the NCBI Gene ID .
342
24
250,224
def _process_features ( self , limit = None ) : if self . test_mode : graph = self . testgraph else : graph = self . graph model = Model ( graph ) LOG . info ( "Processing features" ) line_counter = 0 geno = Genotype ( graph ) raw = '/' . join ( ( self . rawdir , self . files [ 'features' ] [ 'file' ] ) ) with open ( raw , 'r' , encoding = "iso-8859-1" ) as csvfile : filereader = csv . reader ( csvfile , delimiter = '\t' , quotechar = '\"' ) for row in filereader : line_counter += 1 ( genomic_feature_id , feature_so_id , genomic_feature_abbreviation , genomic_feature_name , genomic_feature_type , mutagen , mutagee , construct_id , construct_name , construct_so_id , talen_crispr_id , talen_crispr_nam # , empty ) = row if self . test_mode and ( genomic_feature_id not in self . test_ids [ 'allele' ] ) : continue genomic_feature_id = 'ZFIN:' + genomic_feature_id . strip ( ) model . addIndividualToGraph ( genomic_feature_id , genomic_feature_name , feature_so_id ) model . addSynonym ( genomic_feature_id , genomic_feature_abbreviation ) if construct_id is not None and construct_id != '' : construct_id = 'ZFIN:' + construct_id . strip ( ) geno . addConstruct ( construct_id , construct_name , construct_so_id ) geno . addSequenceDerivesFrom ( genomic_feature_id , construct_id ) # Note, we don't really care about how the variant was derived. # so we skip that. # add to the id-label map self . id_label_map [ genomic_feature_id ] = genomic_feature_abbreviation self . id_label_map [ construct_id ] = construct_name if not self . test_mode and limit is not None and line_counter > limit : break LOG . info ( "Done with features" ) return
This module provides information for the intrinsic and extrinsic genotype features of zebrafish . All items here are alterations and are therefore instances .
492
29
250,225
def _process_pubinfo ( self , limit = None ) : line_counter = 0 if self . test_mode : graph = self . testgraph else : graph = self . graph model = Model ( graph ) raw = '/' . join ( ( self . rawdir , self . files [ 'pubs' ] [ 'file' ] ) ) with open ( raw , 'r' , encoding = "latin-1" ) as csvfile : filereader = csv . reader ( csvfile , delimiter = '\t' , quotechar = '\"' ) for row in filereader : line_counter += 1 try : ( pub_id , pubmed_id , authors , title , journal , year , vol , pages ) = row except ValueError : try : ( pub_id , pubmed_id , authors , title , journal , year , vol , pages # , empty ) = row except ValueError : LOG . warning ( "Error parsing row %s: " , row ) if self . test_mode and ( 'ZFIN:' + pub_id not in self . test_ids [ 'pub' ] and 'PMID:' + pubmed_id not in self . test_ids [ 'pub' ] ) : continue pub_id = 'ZFIN:' + pub_id . strip ( ) # trim the author list for ease of reading alist = re . split ( r',' , authors ) if len ( alist ) > 1 : astring = ' ' . join ( ( alist [ 0 ] . strip ( ) , 'et al' ) ) else : astring = authors pub_label = '; ' . join ( ( astring , title , journal , year , vol , pages ) ) ref = Reference ( graph , pub_id ) ref . setShortCitation ( pub_label ) ref . setYear ( year ) ref . setTitle ( title ) if pubmed_id is not None and pubmed_id != '' : # let's make an assumption that if there's a pubmed id, # that it is a journal article ref . setType ( self . globaltt [ 'journal article' ] ) pubmed_id = 'PMID:' + pubmed_id . strip ( ) rpm = Reference ( graph , pubmed_id , self . globaltt [ 'journal article' ] ) rpm . addRefToGraph ( ) model . addSameIndividual ( pub_id , pubmed_id ) model . makeLeader ( pubmed_id ) ref . addRefToGraph ( ) if not self . test_mode and limit is not None and line_counter > limit : break return
This will pull the zfin internal publication information and map them to their equivalent pmid and make labels .
559
21
250,226
def _process_pub2pubmed ( self , limit = None ) : line_counter = 0 if self . test_mode : graph = self . testgraph else : graph = self . graph model = Model ( graph ) raw = '/' . join ( ( self . rawdir , self . files [ 'pub2pubmed' ] [ 'file' ] ) ) with open ( raw , 'r' , encoding = "latin-1" ) as csvfile : filereader = csv . reader ( csvfile , delimiter = '\t' , quotechar = '\"' ) for row in filereader : line_counter += 1 ( pub_id , pubmed_id # , empty ) = row if self . test_mode and ( 'ZFIN:' + pub_id not in self . test_ids [ 'pub' ] and 'PMID:' + pubmed_id not in self . test_ids [ 'pub' ] ) : continue pub_id = 'ZFIN:' + pub_id . strip ( ) rtype = None if pubmed_id != '' and pubmed_id is not None : pubmed_id = 'PMID:' + pubmed_id . strip ( ) rtype = self . globaltt [ 'journal article' ] rpm = Reference ( graph , pubmed_id , rtype ) rpm . addRefToGraph ( ) model . addSameIndividual ( pub_id , pubmed_id ) ref = Reference ( graph , pub_id , rtype ) ref . addRefToGraph ( ) if not self . test_mode and limit is not None and line_counter > limit : break return
This will pull the zfin internal publication to pubmed mappings . Somewhat redundant with the process_pubinfo method but this includes additional mappings .
353
31
250,227
def _process_targeting_reagents ( self , reagent_type , limit = None ) : LOG . info ( "Processing Gene Targeting Reagents" ) if self . test_mode : graph = self . testgraph else : graph = self . graph line_counter = 0 model = Model ( graph ) geno = Genotype ( graph ) if reagent_type not in [ 'morph' , 'talen' , 'crispr' ] : LOG . error ( "You didn't specify the right kind of file type." ) return raw = '/' . join ( ( self . rawdir , self . files [ reagent_type ] [ 'file' ] ) ) with open ( raw , 'r' , encoding = "iso-8859-1" ) as csvfile : filereader = csv . reader ( csvfile , delimiter = '\t' , quotechar = '\"' ) for row in filereader : line_counter += 1 if reagent_type in [ 'morph' , 'crispr' ] : try : ( gene_num , gene_so_id , gene_symbol , reagent_num , reagent_so_id , reagent_symbol , reagent_sequence , publication , note ) = row except ValueError : # Catch lines without publication or note ( gene_num , gene_so_id , gene_symbol , reagent_num , reagent_so_id , reagent_symbol , reagent_sequence , publication ) = row elif reagent_type == 'talen' : ( gene_num , gene_so_id , gene_symbol , reagent_num , reagent_so_id , reagent_symbol , reagent_sequence , reagent_sequence2 , publication , note ) = row else : # should not get here return reagent_id = 'ZFIN:' + reagent_num . strip ( ) gene_id = 'ZFIN:' + gene_num . strip ( ) self . id_label_map [ reagent_id ] = reagent_symbol if self . test_mode and ( reagent_num not in self . test_ids [ 'morpholino' ] and gene_num not in self . test_ids [ 'gene' ] ) : continue geno . addGeneTargetingReagent ( reagent_id , reagent_symbol , reagent_so_id , gene_id ) # The reagent targeted gene is added # in the pheno_environment processing function. # Add publication # note that the publications can be comma-delimited, # like: ZDB-PUB-100719-4,ZDB-PUB-130703-22 if publication != '' : pubs = re . split ( r',' , publication . strip ( ) ) for pub in pubs : pub_id = 'ZFIN:' + pub . strip ( ) ref = Reference ( graph , pub_id ) ref . addRefToGraph ( ) graph . addTriple ( pub_id , self . globaltt [ 'mentions' ] , reagent_id ) # Add comment? if note != '' : model . addComment ( reagent_id , note ) # use the variant hash for reagents to list the affected genes if reagent_id not in self . variant_loci_genes : self . variant_loci_genes [ reagent_id ] = [ gene_id ] else : if gene_id not in self . variant_loci_genes [ reagent_id ] : self . variant_loci_genes [ reagent_id ] += [ gene_id ] if not self . test_mode and limit is not None and line_counter > limit : break LOG . info ( "Done with Reagent type %s" , reagent_type ) return
This method processes the gene targeting knockdown reagents such as morpholinos talens and crisprs . We create triples for the reagents and pass the data into a hash map for use in the pheno_enviro method .
828
49
250,228
def _process_uniprot_ids ( self , limit = None ) : LOG . info ( "Processing UniProt IDs" ) if self . test_mode : graph = self . testgraph else : graph = self . graph line_counter = 0 model = Model ( graph ) geno = Genotype ( graph ) raw = '/' . join ( ( self . rawdir , self . files [ 'uniprot' ] [ 'file' ] ) ) with open ( raw , 'r' , encoding = "iso-8859-1" ) as csvfile : filereader = csv . reader ( csvfile , delimiter = '\t' , quotechar = '\"' ) for row in filereader : line_counter += 1 ( gene_id , gene_so_id , gene_symbol , uniprot_id # , empty ) = row if self . test_mode and gene_id not in self . test_ids [ 'gene' ] : continue gene_id = 'ZFIN:' + gene_id . strip ( ) uniprot_id = 'UniProtKB:' + uniprot_id . strip ( ) geno . addGene ( gene_id , gene_symbol ) # TODO: Abstract to one of the model utilities model . addIndividualToGraph ( uniprot_id , None , self . globaltt [ 'polypeptide' ] ) graph . addTriple ( gene_id , self . globaltt [ 'has gene product' ] , uniprot_id ) if not self . test_mode and limit is not None and line_counter > limit : break LOG . info ( "Done with UniProt IDs" ) return
This method processes the mappings from ZFIN gene IDs to UniProtKB IDs .
366
17
250,229
def get_orthology_evidence_code ( self , abbrev ) : # AA Amino acid sequence comparison. # CE Coincident expression. # CL Conserved genome location (synteny). # FC Functional complementation. # FH Formation of functional heteropolymers. # IX Immunological cross-reaction. # NS Not specified. # NT Nucleotide sequence comparison. # SI Similar response to inhibitors. # SL Similar subcellular location. # SS Similar substrate specificity. # SU Similar subunit structure. # XH Cross-hybridization to same molecular probe. # PT Phylogenetic Tree. # OT Other eco_abbrev_map = { 'AA' : 'ECO:0000031' , # BLAST protein sequence similarity evidence 'CE' : 'ECO:0000008' , # expression evidence 'CL' : 'ECO:0000044' , # sequence similarity FIXME 'FC' : 'ECO:0000012' , # functional complementation # functional complementation in a heterologous system 'FH' : 'ECO:0000064' , 'IX' : 'ECO:0000040' , # immunological assay evidence 'NS' : None , 'NT' : 'ECO:0000032' , # nucleotide blast 'SI' : 'ECO:0000094' , # biological assay evidence FIXME 'SL' : 'ECO:0000122' , # protein localization evidence FIXME 'SS' : 'ECO:0000024' , # protein binding evidence FIXME 'SU' : 'ECO:0000027' , # structural similarity evidence 'XH' : 'ECO:0000002' , # direct assay evidence FIXME 'PT' : 'ECO:0000080' , # phylogenetic evidence 'OT' : None , } if abbrev not in eco_abbrev_map : LOG . warning ( "Evidence code for orthology (%s) not mapped" , str ( abbrev ) ) return eco_abbrev_map . get ( abbrev )
move to localtt & globltt
431
8
250,230
def _process_diseases ( self , limit = None ) : LOG . info ( "Processing diseases" ) if self . test_mode : graph = self . testgraph else : graph = self . graph line_counter = 0 model = Model ( graph ) raw = '/' . join ( ( self . rawdir , self . files [ 'disease' ] [ 'file' ] ) ) with open ( raw , 'r' , encoding = "iso-8859-1" ) as csvfile : filereader = csv . reader ( csvfile , delimiter = '\t' , quotechar = '\"' ) for row in filereader : line_counter += 1 ( disease_id , disease_name ) = row disease_id = 'KEGG-' + disease_id . strip ( ) if disease_id not in self . label_hash : self . label_hash [ disease_id ] = disease_name if self . test_mode and disease_id not in self . test_ids [ 'disease' ] : continue # Add the disease as a class. # we don't get all of these from MONDO yet see: # https://github.com/monarch-initiative/human-disease-ontology/issues/3 model . addClassToGraph ( disease_id , disease_name ) # not typing the diseases as DOID:4 yet because # I don't want to bulk up the graph unnecessarily if not self . test_mode and ( limit is not None and line_counter > limit ) : break LOG . info ( "Done with diseases" ) return
This method processes the KEGG disease IDs .
348
10
250,231
def _process_genes ( self , limit = None ) : LOG . info ( "Processing genes" ) if self . test_mode : graph = self . testgraph else : graph = self . graph model = Model ( graph ) line_counter = 0 family = Family ( graph ) geno = Genotype ( graph ) raw = '/' . join ( ( self . rawdir , self . files [ 'hsa_genes' ] [ 'file' ] ) ) with open ( raw , 'r' , encoding = "iso-8859-1" ) as csvfile : filereader = csv . reader ( csvfile , delimiter = '\t' , quotechar = '\"' ) for row in filereader : line_counter += 1 ( gene_id , gene_name ) = row gene_id = 'KEGG-' + gene_id . strip ( ) # the gene listing has a bunch of labels # that are delimited, as: # DST, BP240, BPA, BPAG1, CATX-15, CATX15, D6S1101, DMH, DT, # EBSB2, HSAN6, MACF2; dystonin; K10382 dystonin # it looks like the list is semicolon delimited # (symbol, name, gene_class) # where the symbol is a comma-delimited list # here, we split them up. # we will take the first abbreviation and make it the symbol # then take the rest as synonyms gene_stuff = re . split ( 'r;' , gene_name ) symbollist = re . split ( r',' , gene_stuff [ 0 ] ) first_symbol = symbollist [ 0 ] . strip ( ) if gene_id not in self . label_hash : self . label_hash [ gene_id ] = first_symbol if self . test_mode and gene_id not in self . test_ids [ 'genes' ] : continue # Add the gene as a class. geno . addGene ( gene_id , first_symbol ) # add the long name as the description if len ( gene_stuff ) > 1 : description = gene_stuff [ 1 ] . strip ( ) model . addDefinition ( gene_id , description ) # add the rest of the symbols as synonyms for i in enumerate ( symbollist , start = 1 ) : model . addSynonym ( gene_id , i [ 1 ] . strip ( ) ) if len ( gene_stuff ) > 2 : ko_part = gene_stuff [ 2 ] ko_match = re . search ( r'K\d+' , ko_part ) if ko_match is not None and len ( ko_match . groups ( ) ) == 1 : ko = 'KEGG-ko:' + ko_match . group ( 1 ) family . addMemberOf ( gene_id , ko ) if not self . test_mode and limit is not None and line_counter > limit : break LOG . info ( "Done with genes" ) return
This method processes the KEGG gene IDs . The label for the gene is pulled as the first symbol in the list of gene symbols ; the rest are added as synonyms . The long - form of the gene name is added as a definition . This is hardcoded to just processes human genes .
664
60
250,232
def _process_ortholog_classes ( self , limit = None ) : LOG . info ( "Processing ortholog classes" ) if self . test_mode : graph = self . testgraph else : graph = self . graph model = Model ( graph ) line_counter = 0 raw = '/' . join ( ( self . rawdir , self . files [ 'ortholog_classes' ] [ 'file' ] ) ) with open ( raw , 'r' , encoding = "iso-8859-1" ) as csvfile : filereader = csv . reader ( csvfile , delimiter = '\t' , quotechar = '\"' ) for row in filereader : line_counter += 1 ( orthology_class_id , orthology_class_name ) = row if self . test_mode and orthology_class_id not in self . test_ids [ 'orthology_classes' ] : continue # The orthology class is essentially a KEGG gene ID # that is species agnostic. # Add the ID and label as a gene family class other_labels = re . split ( r'[;,]' , orthology_class_name ) # the first one is the label we'll use orthology_label = other_labels [ 0 ] orthology_class_id = 'KEGG-' + orthology_class_id . strip ( ) orthology_type = self . globaltt [ 'gene_family' ] model . addClassToGraph ( orthology_class_id , orthology_label , orthology_type ) if len ( other_labels ) > 1 : # add the rest as synonyms # todo skip the first for s in other_labels : model . addSynonym ( orthology_class_id , s . strip ( ) ) # add the last one as the description d = other_labels [ len ( other_labels ) - 1 ] model . addDescription ( orthology_class_id , d ) # add the enzyme commission number (EC:1.2.99.5)as an xref # sometimes there's two, like [EC:1.3.5.1 1.3.5.4] # can also have a dash, like EC:1.10.3.- ec_matches = re . findall ( r'((?:\d+|\.|-){5,7})' , d ) if ec_matches is not None : for ecm in ec_matches : model . addXref ( orthology_class_id , 'EC:' + ecm ) if not self . test_mode and limit is not None and line_counter > limit : break LOG . info ( "Done with ortholog classes" ) return
This method add the KEGG orthology classes to the graph .
593
14
250,233
def _process_orthologs ( self , raw , limit = None ) : LOG . info ( "Processing orthologs" ) if self . test_mode : graph = self . testgraph else : graph = self . graph model = Model ( graph ) line_counter = 0 with open ( raw , 'r' , encoding = "iso-8859-1" ) as csvfile : filereader = csv . reader ( csvfile , delimiter = '\t' , quotechar = '\"' ) for row in filereader : line_counter += 1 ( gene_id , orthology_class_id ) = row orthology_class_id = 'KEGG:' + orthology_class_id . strip ( ) gene_id = 'KEGG:' + gene_id . strip ( ) # note that the panther_id references a group of orthologs, # and is not 1:1 with the rest # add the KO id as a gene-family grouping class OrthologyAssoc ( graph , self . name , gene_id , None ) . add_gene_family_to_graph ( orthology_class_id ) # add gene and orthology class to graph; # assume labels will be taken care of elsewhere model . addClassToGraph ( gene_id , None ) model . addClassToGraph ( orthology_class_id , None ) if not self . test_mode and limit is not None and line_counter > limit : break LOG . info ( "Done with orthologs" ) return
This method maps orthologs for a species to the KEGG orthology classes .
332
18
250,234
def _process_kegg_disease2gene ( self , limit = None ) : LOG . info ( "Processing KEGG disease to gene" ) if self . test_mode : graph = self . testgraph else : graph = self . graph model = Model ( graph ) line_counter = 0 geno = Genotype ( graph ) rel = self . globaltt [ 'is marker for' ] noomimset = set ( ) raw = '/' . join ( ( self . rawdir , self . files [ 'disease_gene' ] [ 'file' ] ) ) with open ( raw , 'r' , encoding = "iso-8859-1" ) as csvfile : filereader = csv . reader ( csvfile , delimiter = '\t' , quotechar = '\"' ) for row in filereader : line_counter += 1 ( gene_id , disease_id ) = row if self . test_mode and gene_id not in self . test_ids [ 'genes' ] : continue gene_id = 'KEGG-' + gene_id . strip ( ) disease_id = 'KEGG-' + disease_id . strip ( ) # only add diseases for which # there is no omim id and not a grouping class if disease_id not in self . kegg_disease_hash : # add as a class disease_label = None if disease_id in self . label_hash : disease_label = self . label_hash [ disease_id ] if re . search ( r'includ' , str ( disease_label ) ) : # they use 'including' when it's a grouping class LOG . info ( "Skipping this association because " + "it's a grouping class: %s" , disease_label ) continue # type this disease_id as a disease model . addClassToGraph ( disease_id , disease_label ) # , class_type=self.globaltt['disease']) noomimset . add ( disease_id ) alt_locus_id = self . _make_variant_locus_id ( gene_id , disease_id ) alt_label = self . label_hash [ alt_locus_id ] model . addIndividualToGraph ( alt_locus_id , alt_label , self . globaltt [ 'variant_locus' ] ) geno . addAffectedLocus ( alt_locus_id , gene_id ) model . addBlankNodeAnnotation ( alt_locus_id ) # Add the disease to gene relationship. assoc = G2PAssoc ( graph , self . name , alt_locus_id , disease_id , rel ) assoc . add_association_to_graph ( ) if not self . test_mode and ( limit is not None and line_counter > limit ) : break LOG . info ( "Done with KEGG disease to gene" ) LOG . info ( "Found %d diseases with no omim id" , len ( noomimset ) ) return
This method creates an association between diseases and their associated genes . We are being conservative here and only processing those diseases for which there is no mapping to OMIM .
668
32
250,235
def _process_omim2gene ( self , limit = None ) : LOG . info ( "Processing OMIM to KEGG gene" ) if self . test_mode : graph = self . testgraph else : graph = self . graph model = Model ( graph ) line_counter = 0 geno = Genotype ( graph ) raw = '/' . join ( ( self . rawdir , self . files [ 'omim2gene' ] [ 'file' ] ) ) with open ( raw , 'r' , encoding = "iso-8859-1" ) as csvfile : filereader = csv . reader ( csvfile , delimiter = '\t' , quotechar = '\"' ) for row in filereader : line_counter += 1 ( kegg_gene_id , omim_id , link_type ) = row if self . test_mode and kegg_gene_id not in self . test_ids [ 'genes' ] : continue kegg_gene_id = 'KEGG-' + kegg_gene_id . strip ( ) omim_id = re . sub ( r'omim' , 'OMIM' , omim_id ) if link_type == 'equivalent' : # these are genes! # so add them as a class then make equivalence model . addClassToGraph ( omim_id , None ) geno . addGene ( kegg_gene_id , None ) if not DipperUtil . is_omim_disease ( omim_id ) : model . addEquivalentClass ( kegg_gene_id , omim_id ) elif link_type == 'reverse' : # make an association between an OMIM ID & the KEGG gene ID # we do this with omim ids because # they are more atomic than KEGG ids alt_locus_id = self . _make_variant_locus_id ( kegg_gene_id , omim_id ) alt_label = self . label_hash [ alt_locus_id ] model . addIndividualToGraph ( alt_locus_id , alt_label , self . globaltt [ 'variant_locus' ] ) geno . addAffectedLocus ( alt_locus_id , kegg_gene_id ) model . addBlankNodeAnnotation ( alt_locus_id ) # Add the disease to gene relationship. rel = self . globaltt [ 'is marker for' ] assoc = G2PAssoc ( graph , self . name , alt_locus_id , omim_id , rel ) assoc . add_association_to_graph ( ) elif link_type == 'original' : # these are sometimes a gene, and sometimes a disease LOG . info ( 'Unable to handle original link for %s-%s' , kegg_gene_id , omim_id ) else : # don't know what these are LOG . warning ( 'Unhandled link type for %s-%s: %s' , kegg_gene_id , omim_id , link_type ) if ( not self . test_mode ) and ( limit is not None and line_counter > limit ) : break LOG . info ( "Done with OMIM to KEGG gene" ) return
This method maps the OMIM IDs and KEGG gene ID . Currently split based on the link_type field . Equivalent link types are mapped as gene XRefs . Reverse link types are mapped as disease to gene associations . Original link types are currently skipped .
737
54
250,236
def _process_genes_kegg2ncbi ( self , limit = None ) : LOG . info ( "Processing KEGG gene IDs to NCBI gene IDs" ) if self . test_mode : graph = self . testgraph else : graph = self . graph model = Model ( graph ) line_counter = 0 raw = '/' . join ( ( self . rawdir , self . files [ 'ncbi' ] [ 'file' ] ) ) with open ( raw , 'r' , encoding = "iso-8859-1" ) as csvfile : filereader = csv . reader ( csvfile , delimiter = '\t' , quotechar = '\"' ) for row in filereader : line_counter += 1 ( kegg_gene_id , ncbi_gene_id , link_type ) = row if self . test_mode and kegg_gene_id not in self . test_ids [ 'genes' ] : continue # Adjust the NCBI gene ID prefix. ncbi_gene_id = re . sub ( r'ncbi-geneid' , 'NCBIGene' , ncbi_gene_id ) kegg_gene_id = 'KEGG-' + kegg_gene_id # Adding the KEGG gene ID to the graph here is redundant, # unless there happens to be additional gene IDs in this table # not present in the genes table. model . addClassToGraph ( kegg_gene_id , None ) model . addClassToGraph ( ncbi_gene_id , None ) model . addEquivalentClass ( kegg_gene_id , ncbi_gene_id ) if not self . test_mode and ( limit is not None and line_counter > limit ) : break LOG . info ( "Done with KEGG gene IDs to NCBI gene IDs" ) return
This method maps the KEGG human gene IDs to the corresponding NCBI Gene IDs .
419
18
250,237
def _process_pathway_disease ( self , limit ) : LOG . info ( "Processing KEGG pathways to disease ids" ) if self . test_mode : graph = self . testgraph else : graph = self . graph line_counter = 0 raw = '/' . join ( ( self . rawdir , self . files [ 'pathway_disease' ] [ 'file' ] ) ) with open ( raw , 'r' , encoding = "iso-8859-1" ) as csvfile : filereader = csv . reader ( csvfile , delimiter = '\t' , quotechar = '\"' ) for row in filereader : line_counter += 1 ( disease_id , kegg_pathway_num ) = row if self . test_mode and kegg_pathway_num not in self . test_ids [ 'pathway' ] : continue disease_id = 'KEGG-' + disease_id # will look like KEGG-path:map04130 or KEGG-path:hsa04130 pathway_id = 'KEGG-' + kegg_pathway_num graph . addTriple ( pathway_id , self . globaltt [ 'causally upstream of or within' ] , disease_id ) if not self . test_mode and limit is not None and line_counter > limit : break return
We make a link between the pathway identifiers and any diseases associated with them . Since we model diseases as processes we make a triple saying that the pathway may be causally upstream of or within the disease process .
303
41
250,238
def _make_variant_locus_id ( self , gene_id , disease_id ) : alt_locus_id = '_:' + re . sub ( r':' , '' , gene_id ) + '-' + re . sub ( r':' , '' , disease_id ) + 'VL' alt_label = self . label_hash . get ( gene_id ) disease_label = self . label_hash . get ( disease_id ) if alt_label is not None and alt_label != '' : alt_label = 'some variant of ' + str ( alt_label ) if disease_label is not None and disease_label != '' : alt_label += ' that is associated with ' + str ( disease_label ) else : alt_label = None self . label_hash [ alt_locus_id ] = alt_label return alt_locus_id
We actually want the association between the gene and the disease to be via an alternate locus not the wildtype gene itself . so we make an anonymous alternate locus and put that in the association We also make the label for the anonymous class and add it to the label hash
194
55
250,239
def _fetch_disambiguating_assoc ( self ) : disambig_file = '/' . join ( ( self . rawdir , self . static_files [ 'publications' ] [ 'file' ] ) ) assoc_file = '/' . join ( ( self . rawdir , self . files [ 'chemical_disease_interactions' ] [ 'file' ] ) ) # check if there is a local association file, # and download if it's dated later than the original intxn file if os . path . exists ( disambig_file ) : dfile_dt = os . stat ( disambig_file ) afile_dt = os . stat ( assoc_file ) if dfile_dt < afile_dt : LOG . info ( "Local file date before chem-disease assoc file. " " Downloading..." ) else : LOG . info ( "Local file date after chem-disease assoc file. " " Skipping download." ) return all_pubs = set ( ) dual_evidence = re . compile ( r'^marker\/mechanism\|therapeutic$' ) # first get all the unique publications with gzip . open ( assoc_file , 'rt' ) as tsvfile : reader = csv . reader ( tsvfile , delimiter = "\t" ) for row in reader : if re . match ( r'^#' , ' ' . join ( row ) ) : continue self . _check_list_len ( row , 10 ) ( chem_name , chem_id , cas_rn , disease_name , disease_id , direct_evidence , inferred_gene_symbol , inference_score , omim_ids , pubmed_ids ) = row if direct_evidence == '' or not re . match ( dual_evidence , direct_evidence ) : continue if pubmed_ids is not None and pubmed_ids != '' : all_pubs . update ( set ( re . split ( r'\|' , pubmed_ids ) ) ) sorted_pubs = sorted ( list ( all_pubs ) ) # now in batches of 4000, we fetch the chemical-disease associations batch_size = 4000 params = { 'inputType' : 'reference' , 'report' : 'diseases_curated' , 'format' : 'tsv' , 'action' : 'Download' } url = 'http://ctdbase.org/tools/batchQuery.go?q' start = 0 end = min ( ( batch_size , len ( all_pubs ) ) ) # get them in batches of 4000 with open ( disambig_file , 'wb' ) as dmbf : while start < len ( sorted_pubs ) : params [ 'inputTerms' ] = '|' . join ( sorted_pubs [ start : end ] ) # fetch the data from url LOG . info ( 'fetching %d (%d-%d) refs: %s' , len ( re . split ( r'\|' , params [ 'inputTerms' ] ) ) , start , end , params [ 'inputTerms' ] ) data = urllib . parse . urlencode ( params ) encoding = 'utf-8' binary_data = data . encode ( encoding ) req = urllib . request . Request ( url , binary_data ) resp = urllib . request . urlopen ( req ) dmbf . write ( resp . read ( ) ) start = end end = min ( ( start + batch_size , len ( sorted_pubs ) ) ) return
For any of the items in the chemical - disease association file that have ambiguous association types we fetch the disambiguated associations using the batch query API and store these in a file . Elsewhere we can loop through the file and create the appropriate associations .
791
51
250,240
def _make_association ( self , subject_id , object_id , rel_id , pubmed_ids ) : # TODO pass in the relevant Assoc class rather than relying on G2P assoc = G2PAssoc ( self . graph , self . name , subject_id , object_id , rel_id ) if pubmed_ids is not None and len ( pubmed_ids ) > 0 : for pmid in pubmed_ids : ref = Reference ( self . graph , pmid , self . globaltt [ 'journal article' ] ) ref . addRefToGraph ( ) assoc . add_source ( pmid ) assoc . add_evidence ( self . globaltt [ 'traceable author statement' ] ) assoc . add_association_to_graph ( ) return
Make a reified association given an array of pubmed identifiers .
175
13
250,241
def checkIfRemoteIsNewer ( self , localfile , remote_size , remote_modify ) : is_remote_newer = False status = os . stat ( localfile ) LOG . info ( "\nLocal file size: %i" "\nLocal Timestamp: %s" , status [ ST_SIZE ] , datetime . fromtimestamp ( status . st_mtime ) ) remote_dt = Bgee . _convert_ftp_time_to_iso ( remote_modify ) if remote_dt != datetime . fromtimestamp ( status . st_mtime ) or status [ ST_SIZE ] != int ( remote_size ) : is_remote_newer = True LOG . info ( "Object on server is has different size %i and/or date %s" , remote_size , remote_dt ) return is_remote_newer
Overrides checkIfRemoteIsNewer in Source class
191
12
250,242
def _convert_ftp_time_to_iso ( ftp_time ) : date_time = datetime ( int ( ftp_time [ : 4 ] ) , int ( ftp_time [ 4 : 6 ] ) , int ( ftp_time [ 6 : 8 ] ) , int ( ftp_time [ 8 : 10 ] ) , int ( ftp_time [ 10 : 12 ] ) , int ( ftp_time [ 12 : 14 ] ) ) return date_time
Convert datetime in the format 20160705042714 to a datetime object
108
17
250,243
def fetch ( self , is_dl_forced = False ) : cxn = { } cxn [ 'host' ] = 'nif-db.crbs.ucsd.edu' cxn [ 'database' ] = 'disco_crawler' cxn [ 'port' ] = '5432' cxn [ 'user' ] = config . get_config ( ) [ 'user' ] [ 'disco' ] cxn [ 'password' ] = config . get_config ( ) [ 'keys' ] [ cxn [ 'user' ] ] self . dataset . setFileAccessUrl ( 'jdbc:postgresql://' + cxn [ 'host' ] + ':' + cxn [ 'port' ] + '/' + cxn [ 'database' ] , is_object_literal = True ) # process the tables # self.fetch_from_pgdb(self.tables,cxn,100) #for testing self . fetch_from_pgdb ( self . tables , cxn ) self . get_files ( is_dl_forced ) # FIXME: Everything needed for data provenance? fstat = os . stat ( '/' . join ( ( self . rawdir , 'dvp.pr_nlx_157874_1' ) ) ) filedate = datetime . utcfromtimestamp ( fstat [ ST_CTIME ] ) . strftime ( "%Y-%m-%d" ) self . dataset . setVersion ( filedate ) return
connection details for DISCO
332
5
250,244
def parse ( self , limit = None ) : if limit is not None : LOG . info ( "Only parsing first %s rows of each file" , limit ) if self . test_only : self . test_mode = True LOG . info ( "Parsing files..." ) self . _process_nlx_157874_1_view ( '/' . join ( ( self . rawdir , 'dvp.pr_nlx_157874_1' ) ) , limit ) self . _map_eom_terms ( '/' . join ( ( self . rawdir , self . files [ 'map' ] [ 'file' ] ) ) , limit ) LOG . info ( "Finished parsing." ) # since it's so small, # we default to copying the entire graph to the test set self . testgraph = self . graph return
Over ride Source . parse inherited via PostgreSQLSource
183
13
250,245
def _process_gxd_genotype_view ( self , limit = None ) : line_counter = 0 if self . test_mode : graph = self . testgraph else : graph = self . graph geno = Genotype ( graph ) model = Model ( graph ) raw = '/' . join ( ( self . rawdir , 'gxd_genotype_view' ) ) LOG . info ( "getting genotypes and their backgrounds" ) with open ( raw , 'r' ) as f1 : f1 . readline ( ) # read the header row; skip for line in f1 : line = line . rstrip ( "\n" ) line_counter += 1 ( genotype_key , strain_key , strain , mgiid ) = line . split ( '\t' ) if self . test_mode is True : if int ( genotype_key ) not in self . test_keys . get ( 'genotype' ) : continue if self . idhash [ 'genotype' ] . get ( genotype_key ) is None : # just in case we haven't seen it before, # catch and add the id mapping here self . idhash [ 'genotype' ] [ genotype_key ] = mgiid geno . addGenotype ( mgiid , None ) # the label is elsewhere... # need to add the MGI label as a synonym # if it's in the hash, # assume that the individual was created elsewhere strain_id = self . idhash [ 'strain' ] . get ( strain_key ) background_type = self . globaltt [ 'genomic_background' ] if strain_id is None or int ( strain_key ) < 0 : if strain_id is None : # some of the strains don't have public identifiers! # so we make one up, and add it to the hash strain_id = self . _makeInternalIdentifier ( 'strain' , strain_key ) self . idhash [ 'strain' ] . update ( { strain_key : strain_id } ) model . addComment ( strain_id , "strain_key:" + strain_key ) elif int ( strain_key ) < 0 : # these are ones that are unidentified/unknown. # so add instances of each. strain_id = self . _makeInternalIdentifier ( 'strain' , re . sub ( r':' , '' , str ( strain_id ) ) ) strain_id += re . sub ( r':' , '' , str ( mgiid ) ) strain_id = re . sub ( r'^_' , '_:' , strain_id ) strain_id = re . sub ( r'::' , ':' , strain_id ) model . addDescription ( strain_id , "This genomic background is unknown. " + "This is a placeholder background for " + mgiid + "." ) background_type = self . globaltt [ 'unspecified_genomic_background' ] # add it back to the idhash LOG . info ( "adding background as internal id: %s %s: %s" , strain_key , strain , strain_id ) geno . addGenomicBackgroundToGenotype ( strain_id , mgiid , background_type ) self . label_hash [ strain_id ] = strain # add BG to a hash so we can build the genotype label later self . geno_bkgd [ mgiid ] = strain_id if not self . test_mode and limit is not None and line_counter > limit : break return
This table indicates the relationship between a genotype and it s background strain . It leverages the Genotype class methods to do this .
765
27
250,246
def _process_gxd_genotype_summary_view ( self , limit = None ) : if self . test_mode : graph = self . testgraph else : graph = self . graph model = Model ( graph ) line_counter = 0 geno_hash = { } raw = '/' . join ( ( self . rawdir , 'gxd_genotype_summary_view' ) ) LOG . info ( "building labels for genotypes" ) with open ( raw , 'r' ) as f : f . readline ( ) # read the header row; skip for line in f : line = line . rstrip ( "\n" ) line_counter += 1 ( object_key , preferred , mgiid , subtype , short_description ) = line . split ( '\t' ) if self . test_mode is True : if int ( object_key ) not in self . test_keys . get ( 'genotype' ) : continue # add the internal genotype to mgi mapping self . idhash [ 'genotype' ] [ object_key ] = mgiid if preferred == '1' : d = re . sub ( r'\,' , '/' , short_description . strip ( ) ) if mgiid not in geno_hash : geno_hash [ mgiid ] = { 'vslcs' : [ d ] , 'subtype' : subtype , 'key' : object_key } else : vslcs = geno_hash [ mgiid ] . get ( 'vslcs' ) vslcs . append ( d ) else : pass # TODO what to do with != preferred if not self . test_mode and limit is not None and line_counter > limit : break # now, loop through the hash and add the genotypes as individuals # we add the mgi genotype as a synonym # (we generate our own label later) geno = Genotype ( graph ) for gt in geno_hash : genotype = geno_hash . get ( gt ) gvc = sorted ( genotype . get ( 'vslcs' ) ) label = '; ' . join ( gvc ) + ' [' + genotype . get ( 'subtype' ) + ']' geno . addGenotype ( gt , None ) model . addComment ( gt , self . _makeInternalIdentifier ( 'genotype' , genotype . get ( 'key' ) ) ) model . addSynonym ( gt , label . strip ( ) ) return
Add the genotype internal id to mgiid mapping to the idhashmap . Also add them as individuals to the graph . We re - format the label to put the background strain in brackets after the gvc .
544
44
250,247
def process_mgi_relationship_transgene_genes ( self , limit = None ) : if self . test_mode : graph = self . testgraph else : graph = self . graph LOG . info ( "getting transgene genes" ) raw = '/' . join ( ( self . rawdir , 'mgi_relationship_transgene_genes' ) ) geno = Genotype ( graph ) col = [ 'rel_key' , 'allele_key' , 'allele_id' , 'allele_label' , 'category_key' , 'category_name' , 'property_key' , 'property_name' , 'gene_num' ] with open ( raw , 'r' , encoding = "utf8" ) as csvfile : filereader = csv . reader ( csvfile , delimiter = '\t' , quotechar = '\"' ) header = next ( filereader ) if header != col : LOG . error ( 'expected columns: %s\n\tBut got:\n%s' , col , header ) for row in filereader : # rel_key, allele_key = int ( row [ col . index ( 'allele_key' ) ] ) allele_id = row [ col . index ( 'allele_id' ) ] # allele_label, # category_key, # category_name, # property_key, # property_name, gene_num = int ( row [ col . index ( 'gene_num' ) ] ) if self . test_mode and allele_key not in self . test_keys . get ( 'allele' ) and gene_num not in self . test_ids : continue gene_id = 'NCBIGene:' + str ( gene_num ) # geno.addParts(gene_id, allele_id, self.globaltt['has_variant_part']) seqalt_id = self . idhash [ 'seqalt' ] . get ( allele_key ) if seqalt_id is None : seqalt_id = allele_id geno . addSequenceDerivesFrom ( seqalt_id , gene_id ) if not self . test_mode and limit is not None and filereader . line_num > limit : break return
Here we have the relationship between MGI transgene alleles and the non - mouse gene ids that are part of them . We augment the allele with the transgene parts .
499
38
250,248
def _getnode ( self , curie ) : # convention is lowercase names node = None if curie [ 0 ] == '_' : if self . are_bnodes_skized is True : node = self . skolemizeBlankNode ( curie ) else : # delete the leading underscore to make it cleaner node = BNode ( re . sub ( r'^_:|^_' , '' , curie , 1 ) ) # Check if curie string is actually an IRI elif curie [ : 4 ] == 'http' or curie [ : 3 ] == 'ftp' : node = URIRef ( curie ) else : iri = RDFGraph . curie_util . get_uri ( curie ) if iri is not None : node = URIRef ( RDFGraph . curie_util . get_uri ( curie ) ) # Bind prefix map to graph prefix = curie . split ( ':' ) [ 0 ] if prefix not in self . namespace_manager . namespaces ( ) : mapped_iri = self . curie_map [ prefix ] self . bind ( prefix , Namespace ( mapped_iri ) ) else : LOG . error ( "couldn't make URI for %s" , curie ) return node
This is a wrapper for creating a URIRef or Bnode object with a given a curie or iri as a string .
276
27
250,249
def add_association_to_graph ( self ) : # add the basic association nodes # if rel == self.globaltt[['has disposition']: Assoc . add_association_to_graph ( self ) # anticipating trouble with onsets ranges that look like curies if self . onset is not None and self . onset != '' : self . graph . addTriple ( self . assoc_id , self . globaltt [ 'onset' ] , self . onset ) if self . frequency is not None and self . frequency != '' : self . graph . addTriple ( self . assoc_id , self . globaltt [ 'frequency' ] , self . frequency ) return
The reified relationship between a disease and a phenotype is decorated with some provenance information . This makes the assumption that both the disease and phenotype are classes .
147
31
250,250
def make_parent_bands ( self , band , child_bands ) : m = re . match ( r'([pq][A-H\d]+(?:\.\d+)?)' , band ) if len ( band ) > 0 : if m : p = str ( band [ 0 : len ( band ) - 1 ] ) p = re . sub ( r'\.$' , '' , p ) if p is not None : child_bands . add ( p ) self . make_parent_bands ( p , child_bands ) else : child_bands = set ( ) return child_bands
this will determine the grouping bands that it belongs to recursively 13q21 . 31 == > 13 13q 13q2 13q21 13q21 . 3 13q21 . 31
130
39
250,251
def get_curie ( self , uri ) : prefix = self . get_curie_prefix ( uri ) if prefix is not None : key = self . curie_map [ prefix ] return '%s:%s' % ( prefix , uri [ len ( key ) : len ( uri ) ] ) return None
Get a CURIE from a URI
72
8
250,252
def get_uri ( self , curie ) : if curie is None : return None parts = curie . split ( ':' ) if len ( parts ) == 1 : if curie != '' : LOG . error ( "Not a properly formed curie: \"%s\"" , curie ) return None prefix = parts [ 0 ] if prefix in self . curie_map : return '%s%s' % ( self . curie_map . get ( prefix ) , curie [ ( curie . index ( ':' ) + 1 ) : ] ) LOG . error ( "Curie prefix not defined for %s" , curie ) return None
Get a URI from a CURIE
141
8
250,253
def fetch ( self , is_dl_forced = False ) : host = config . get_config ( ) [ 'dbauth' ] [ 'coriell' ] [ 'host' ] key = config . get_config ( ) [ 'dbauth' ] [ 'coriell' ] [ 'private_key' ] user = config . get_config ( ) [ 'user' ] [ 'coriell' ] passwd = config . get_config ( ) [ 'keys' ] [ user ] with pysftp . Connection ( host , username = user , password = passwd , private_key = key ) as sftp : # check to make sure each file is in there # get the remote files remote_files = sftp . listdir_attr ( ) files_by_repo = { } for attr in remote_files : # for each catalog, get the most-recent filename mch = re . match ( '(NIGMS|NIA|NHGRI|NINDS)' , attr . filename ) if mch is not None and len ( mch . groups ( ) ) > 0 : # there should just be one now files_by_repo [ mch . group ( 1 ) ] = attr # sort each array in hash, # & get the name and time of the most-recent file for each catalog for rmt in self . files : LOG . info ( "Checking on %s catalog file" , rmt ) fname = self . files [ rmt ] [ 'file' ] remotef = files_by_repo [ rmt ] target_name = '/' . join ( ( self . rawdir , fname ) ) # check if the local file is out of date, if so, download. # otherwise, skip. # we rename (for simplicity) the original file fstat = None if os . path . exists ( target_name ) : fstat = os . stat ( target_name ) LOG . info ( "Local file date: %s" , datetime . utcfromtimestamp ( fstat [ stat . ST_CTIME ] ) ) if fstat is None or remotef . st_mtime > fstat [ stat . ST_CTIME ] : if fstat is None : LOG . info ( "File does not exist locally; downloading..." ) else : LOG . info ( "New version of %s catalog available; downloading..." , rmt ) sftp . get ( remotef . filename , target_name ) LOG . info ( "Fetched remote %s -> %s" , remotef . filename , target_name ) fstat = os . stat ( target_name ) filedate = datetime . utcfromtimestamp ( remotef . st_mtime ) . strftime ( "%Y-%m-%d" ) LOG . info ( "New file date: %s" , datetime . utcfromtimestamp ( fstat [ stat . ST_CTIME ] ) ) else : LOG . info ( "File %s exists; using local copy" , fname ) filedate = datetime . utcfromtimestamp ( fstat [ stat . ST_CTIME ] ) . strftime ( "%Y-%m-%d" ) self . dataset . setFileAccessUrl ( remotef . filename , True ) self . dataset . setVersion ( filedate ) return
Here we connect to the coriell sftp server using private connection details . They dump bi - weekly files with a timestamp in the filename . For each catalog we ping the remote site and pull the most - recently updated file renaming it to our local latest . csv .
733
57
250,254
def _process_collection ( self , collection_id , label , page ) : # ############# BUILD THE CELL LINE REPOSITORY ############# for graph in [ self . graph , self . testgraph ] : # TODO: How to devise a label for each repository? model = Model ( graph ) reference = Reference ( graph ) repo_id = 'CoriellCollection:' + collection_id repo_label = label repo_page = page model . addIndividualToGraph ( repo_id , repo_label , self . globaltt [ 'collection' ] ) reference . addPage ( repo_id , repo_page ) return
This function will process the data supplied internally about the repository from Coriell .
136
16
250,255
def _process_genotypes ( self , limit ) : if self . test_mode : graph = self . testgraph else : graph = self . graph model = Model ( graph ) line_counter = 0 raw = '/' . join ( ( self . rawdir , 'genotype' ) ) LOG . info ( "building labels for genotypes" ) geno = Genotype ( graph ) fly_tax = self . globaltt [ 'Drosophila melanogaster' ] with open ( raw , 'r' ) as f : f . readline ( ) # read the header row; skip filereader = csv . reader ( f , delimiter = '\t' , quotechar = '\"' ) for line in filereader : line_counter += 1 ( genotype_num , uniquename , description , name ) = line # if self.test_mode is True: # if int(object_key) not in self.test_keys.get('genotype'): # continue # add the internal genotype to pub mapping genotype_id = 'MONARCH:FBgeno' + str ( genotype_num ) self . idhash [ 'genotype' ] [ genotype_num ] = genotype_id if description == '' : description = None if not self . test_mode and limit is not None and line_counter > limit : pass else : if self . test_mode and int ( genotype_num ) not in self . test_keys [ 'genotype' ] : continue model . addIndividualToGraph ( genotype_id , uniquename , self . globaltt [ 'intrinsic_genotype' ] , description ) # we know all genotypes are in flies # FIXME we assume here they are in melanogaster, # but that isn't necessarily true!!! # TODO should the taxon be == genomic background? geno . addTaxon ( fly_tax , genotype_id ) genotype_iid = self . _makeInternalIdentifier ( 'genotype' , genotype_num ) model . addComment ( genotype_id , genotype_iid ) if name . strip ( ) != '' : model . addSynonym ( genotype_id , name ) return
Add the genotype internal id to flybase mapping to the idhashmap . Also add them as individuals to the graph .
477
25
250,256
def _process_stocks ( self , limit ) : if self . test_mode : graph = self . testgraph else : graph = self . graph model = Model ( graph ) line_counter = 0 raw = '/' . join ( ( self . rawdir , 'stock' ) ) LOG . info ( "building labels for stocks" ) with open ( raw , 'r' ) as f : f . readline ( ) # read the header row; skip filereader = csv . reader ( f , delimiter = '\t' , quotechar = '\"' ) for line in filereader : line_counter += 1 ( stock_id , dbxref_id , organism_id , name , uniquename , description , type_id , is_obsolete ) = line # 2 12153979 1 2 FBst0000002 w[*]; betaTub60D[2] Kr[If-1]/CyO 10670 stock_num = stock_id stock_id = 'FlyBase:' + uniquename self . idhash [ 'stock' ] [ stock_num ] = stock_id stock_label = description organism_key = organism_id taxon = self . idhash [ 'organism' ] [ organism_key ] # from what i can tell, the dbxrefs are just more FBst, # so no added information vs uniquename if not self . test_mode and limit is not None and line_counter > limit : pass else : if self . test_mode and int ( stock_num ) not in self . test_keys [ 'strain' ] : continue # tax_label = self.label_hash[taxon] # unused # add the tax in case it hasn't been already model . addClassToGraph ( taxon ) model . addIndividualToGraph ( stock_id , stock_label , taxon ) if is_obsolete == 't' : model . addDeprecatedIndividual ( stock_id ) return
Stock definitions . Here we instantiate them as instances of the given taxon .
422
16
250,257
def _process_pubs ( self , limit ) : if self . test_mode : graph = self . testgraph else : graph = self . graph model = Model ( graph ) line_counter = 0 raw = '/' . join ( ( self . rawdir , 'pub' ) ) LOG . info ( "building labels for pubs" ) with open ( raw , 'r' ) as f : f . readline ( ) # read the header row; skip filereader = csv . reader ( f , delimiter = '\t' , quotechar = '\"' ) for line in filereader : ( pub_id , title , volumetitle , volume , series_name , issue , pyear , pages , miniref , type_id , is_obsolete , publisher , pubplace , uniquename ) = line # 2 12153979 1 2 FBst0000002 w[*]; betaTub60D[2] Kr[If-1]/CyO 10670 # if self.test_mode is True: # if int(object_key) not in self.test_keys.get('genotype'): # continue pub_num = pub_id pub_id = 'FlyBase:' + uniquename . strip ( ) self . idhash [ 'publication' ] [ pub_num ] = pub_id # TODO figure out the type of pub by type_id if not re . match ( r'(FBrf|multi)' , uniquename ) : continue line_counter += 1 reference = Reference ( graph , pub_id ) if title != '' : reference . setTitle ( title ) if pyear != '' : reference . setYear ( str ( pyear ) ) if miniref != '' : reference . setShortCitation ( miniref ) if not self . test_mode and limit is not None and line_counter > limit : pass else : if self . test_mode and int ( pub_num ) not in self . test_keys [ 'pub' ] : continue if is_obsolete == 't' : model . addDeprecatedIndividual ( pub_id ) else : reference . addRefToGraph ( ) return
Flybase publications .
466
4
250,258
def _process_environments ( self ) : if self . test_mode : graph = self . testgraph else : graph = self . graph raw = '/' . join ( ( self . rawdir , 'environment' ) ) LOG . info ( "building labels for environment" ) env_parts = { } label_map = { } env = Environment ( graph ) with open ( raw , 'r' ) as f : filereader = csv . reader ( f , delimiter = '\t' , quotechar = '\"' ) f . readline ( ) # read the header row; skip for line in filereader : ( environment_id , uniquename , description ) = line # 22 heat sensitive | tetracycline conditional environment_num = environment_id environment_internal_id = self . _makeInternalIdentifier ( 'environment' , environment_num ) if environment_num not in self . idhash [ 'environment' ] : self . idhash [ 'environment' ] [ environment_num ] = environment_internal_id environment_id = self . idhash [ 'environment' ] [ environment_num ] environment_label = uniquename if environment_label == 'unspecified' : environment_label += ' environment' env . addEnvironment ( environment_id , environment_label ) self . label_hash [ environment_id ] = environment_label # split up the environment into parts # if there's parts, then add them to the hash; # we'll match the components in a second pass components = re . split ( r'\|' , uniquename ) if len ( components ) > 1 : env_parts [ environment_id ] = components else : label_map [ environment_label ] = environment_id # ### end loop through file # build the environmental components for eid in env_parts : eid = eid . strip ( ) for e in env_parts [ eid ] : # search for the environmental component by label env_id = label_map . get ( e . strip ( ) ) env . addComponentToEnvironment ( eid , env_id ) return
There s only about 30 environments in which the phenotypes are recorded . There are no externally accessible identifiers for environments so we make anonymous nodes for now . Some of the environments are comprised of > 1 of the other environments ; we do some simple parsing to match the strings of the environmental labels to the other atomic components .
447
63
250,259
def _process_stock_genotype ( self , limit ) : if self . test_mode : graph = self . testgraph else : graph = self . graph raw = '/' . join ( ( self . rawdir , 'stock_genotype' ) ) LOG . info ( "processing stock genotype" ) line_counter = 0 with open ( raw , 'r' ) as f : filereader = csv . reader ( f , delimiter = '\t' , quotechar = '\"' ) f . readline ( ) # read the header row; skip for line in filereader : ( stock_genotype_id , stock_id , genotype_id ) = line stock_key = stock_id stock_id = self . idhash [ 'stock' ] [ stock_key ] genotype_key = genotype_id genotype_id = self . idhash [ 'genotype' ] [ genotype_key ] if self . test_mode and int ( genotype_key ) not in self . test_keys [ 'genotype' ] : continue graph . addTriple ( stock_id , self . globaltt [ 'has_genotype' ] , genotype_id ) line_counter += 1 if not self . test_mode and limit is not None and line_counter > limit : break return
The genotypes of the stocks .
284
7
250,260
def _process_dbxref ( self ) : raw = '/' . join ( ( self . rawdir , 'dbxref' ) ) LOG . info ( "processing dbxrefs" ) line_counter = 0 with open ( raw , 'r' ) as f : filereader = csv . reader ( f , delimiter = '\t' , quotechar = '\"' ) f . readline ( ) # read the header row; skip for line in filereader : ( dbxref_id , db_id , accession , version , description , url ) = line # dbxref_id db_id accession version description url # 1 2 SO:0000000 "" accession = accession . strip ( ) db_id = db_id . strip ( ) if accession != '' and db_id in self . localtt : # scrub some identifiers here mch = re . match ( r'(doi|SO|GO|FBcv|FBbt_root|FBdv|FBgn|FBdv_root|FlyBase|FBbt):' , accession ) if mch : accession = re . sub ( mch . group ( 1 ) + r'\:' , '' , accession ) elif re . match ( r'(FlyBase miscellaneous CV|cell_lineprop|relationship type|FBgn$)' , accession ) : continue elif re . match ( r'\:' , accession ) : # starts with a colon accession = re . sub ( r'\:' , '' , accession ) elif re . search ( r'\s' , accession ) : # skip anything with a space # LOG.debug( # 'dbxref %s accession has a space: %s', dbxref_id, accession) continue if re . match ( r'http' , accession ) : did = accession else : prefix = self . localtt [ db_id ] did = ':' . join ( ( prefix , accession ) ) if re . search ( r'\:' , accession ) and prefix != 'DOI' : LOG . warning ( 'id %s may be malformed; skipping' , did ) self . dbxrefs [ dbxref_id ] = { db_id : did } elif url != '' : self . dbxrefs [ dbxref_id ] = { db_id : url . strip ( ) } else : continue # the following are some special cases that we scrub if int ( db_id ) == 2 and accession . strip ( ) == 'transgenic_transposon' : # transgenic_transposable_element self . dbxrefs [ dbxref_id ] = { db_id : self . globaltt [ 'transgenic_transposable_element' ] } line_counter += 1 return
We bring in the dbxref identifiers and store them in a hashmap for lookup in other functions . Note that some dbxrefs aren t mapped to identifiers . For example 5004018 is mapped to a string endosome & imaginal disc epithelial cell | somatic clone ... In those cases there just isn t a dbxref that s used when referencing with a cvterm ; it ll just use the internal key .
614
89
250,261
def _process_phenotype ( self , limit ) : if self . test_mode : graph = self . testgraph else : graph = self . graph model = Model ( graph ) raw = '/' . join ( ( self . rawdir , 'phenotype' ) ) LOG . info ( "processing phenotype" ) line_counter = 0 with open ( raw , 'r' ) as f : filereader = csv . reader ( f , delimiter = '\t' , quotechar = '\"' ) f . readline ( ) # read the header row; skip for line in filereader : ( phenotype_id , uniquename , observable_id , attr_id , value , cvalue_id , assay_id ) = line # 8505 unspecified # 20142 mesothoracic leg disc | somatic clone 87719 60468 60468 60468 # 8507 sex comb | ectopic 88877 60468 60468 60468 # 8508 tarsal segment 83664 60468 60468 60468 # 18404 oocyte | oogenesis stage S9 86769 60468 60468 60468 # for now make these as phenotypic classes # will need to dbxref at some point phenotype_key = phenotype_id phenotype_id = None phenotype_internal_id = self . _makeInternalIdentifier ( 'phenotype' , phenotype_key ) phenotype_label = None self . label_hash [ phenotype_internal_id ] = uniquename cvterm_id = None if observable_id != '' and int ( observable_id ) == 60468 : # undefined - typically these are already phenotypes if cvalue_id in self . idhash [ 'cvterm' ] : cvterm_id = self . idhash [ 'cvterm' ] [ cvalue_id ] phenotype_id = self . idhash [ 'cvterm' ] [ cvalue_id ] elif observable_id in self . idhash [ 'cvterm' ] : # observations to anatomical classes cvterm_id = self . idhash [ 'cvterm' ] [ observable_id ] phenotype_id = self . idhash [ 'cvterm' ] [ observable_id ] + 'PHENOTYPE' if cvterm_id is not None and cvterm_id in self . label_hash : phenotype_label = self . label_hash [ cvterm_id ] phenotype_label += ' phenotype' self . label_hash [ phenotype_id ] = phenotype_label else : LOG . info ( 'cvtermid=%s not in label_hash' , cvterm_id ) else : LOG . info ( "No observable id or label for %s: %s" , phenotype_key , uniquename ) # TODO store this composite phenotype in some way # as a proper class definition? self . idhash [ 'phenotype' ] [ phenotype_key ] = phenotype_id # assay_id is currently only "undefined" key=60468 if not self . test_mode and limit is not None and line_counter > limit : pass else : if phenotype_id is not None : # assume that these fit into the phenotypic uberpheno # elsewhere model . addClassToGraph ( phenotype_id , phenotype_label ) line_counter += 1 return
Get the phenotypes and declare the classes . If the observable is unspecified then we assign the phenotype to the cvalue id ; otherwise we convert the phenotype into a uberpheno - style identifier simply based on the anatomical part that s affected ... that is listed as the observable_id concatenated with the literal PHENOTYPE
719
67
250,262
def _process_cvterm ( self ) : line_counter = 0 raw = '/' . join ( ( self . rawdir , 'cvterm' ) ) LOG . info ( "processing cvterms" ) with open ( raw , 'r' ) as f : f . readline ( ) # read the header row; skip filereader = csv . reader ( f , delimiter = '\t' , quotechar = '\"' ) for line in filereader : line_counter += 1 ( cvterm_id , cv_id , definition , dbxref_id , is_obsolete , is_relationshiptype , name ) = line # 316 6 1665919 0 0 rRNA_cleavage_snoRNA_primary_transcript # 28 5 1663309 0 0 synonym # 455 6 1665920 0 0 tmRNA # not sure the following is necessary # cv_prefixes = { # 6 : 'SO', # 20: 'FBcv', # 28: 'GO', # 29: 'GO', # 30: 'GO', # 31: 'FBcv', # not actually FBcv - I think FBbt. # 32: 'FBdv', # 37: 'GO', # these are relationships # 73: 'DOID' # } # if int(cv_id) not in cv_prefixes: # continue cvterm_key = cvterm_id cvterm_id = self . _makeInternalIdentifier ( 'cvterm' , cvterm_key ) self . label_hash [ cvterm_id ] = name self . idhash [ 'cvterm' ] [ cvterm_key ] = cvterm_id # look up the dbxref_id for the cvterm # hopefully it's one-to-one dbxrefs = self . dbxrefs . get ( dbxref_id ) if dbxrefs is not None : if len ( dbxrefs ) > 1 : LOG . info ( ">1 dbxref for this cvterm (%s: %s): %s" , str ( cvterm_id ) , name , dbxrefs . values ( ) ) elif len ( dbxrefs ) == 1 : # replace the cvterm with # the dbxref (external) identifier did = dbxrefs . popitem ( ) [ 1 ] # get the value self . idhash [ 'cvterm' ] [ cvterm_key ] = did # also add the label to the dbxref self . label_hash [ did ] = name return
CVterms are the internal identifiers for any controlled vocab or ontology term . Many are xrefd to actual ontologies . The actual external id is stored in the dbxref table which we place into the internal hashmap for lookup with the cvterm id . The name of the external term is stored in the name element of this table and we add that to the label hashmap for lookup elsewhere
561
82
250,263
def _process_organisms ( self , limit ) : if self . test_mode : graph = self . testgraph else : graph = self . graph model = Model ( graph ) raw = '/' . join ( ( self . rawdir , 'organism' ) ) LOG . info ( "processing organisms" ) line_counter = 0 with open ( raw , 'r' ) as f : filereader = csv . reader ( f , delimiter = '\t' , quotechar = '\"' ) f . readline ( ) # read the header row; skip for line in filereader : ( organism_id , abbreviation , genus , species , common_name , comment ) = line # 1 Dmel Drosophila melanogaster fruit fly # 2 Comp Computational result line_counter += 1 tax_internal_id = self . _makeInternalIdentifier ( 'organism' , organism_id ) tax_label = ' ' . join ( ( genus , species ) ) tax_id = tax_internal_id self . idhash [ 'organism' ] [ organism_id ] = tax_id self . label_hash [ tax_id ] = tax_label # we won't actually add the organism to the graph, # unless we actually use it therefore it is added outside of # this function if self . test_mode and int ( organism_id ) not in self . test_keys [ 'organism' ] : continue if not self . test_mode and limit is not None and line_counter > limit : pass else : model . addClassToGraph ( tax_id ) for s in [ common_name , abbreviation ] : if s is not None and s . strip ( ) != '' : model . addSynonym ( tax_id , s ) model . addComment ( tax_id , tax_internal_id ) return
The internal identifiers for the organisms in flybase
392
9
250,264
def _add_gene_equivalencies ( self , xrefs , gene_id , taxon ) : clique_map = self . open_and_parse_yaml ( self . resources [ 'clique_leader' ] ) if self . test_mode : graph = self . testgraph else : graph = self . graph model = Model ( graph ) filter_out = [ 'Vega' , 'IMGT/GENE-DB' , 'Araport' ] # deal with the dbxrefs # MIM:614444|HGNC:HGNC:16851|Ensembl:ENSG00000136828|HPRD:11479|Vega:OTTHUMG00000020696 for dbxref in xrefs . strip ( ) . split ( '|' ) : prefix = ':' . join ( dbxref . split ( ':' ) [ : - 1 ] ) . strip ( ) if prefix in self . localtt : prefix = self . localtt [ prefix ] dbxref_curie = ':' . join ( ( prefix , dbxref . split ( ':' ) [ - 1 ] ) ) if dbxref_curie is not None and prefix != '' : if prefix == 'HPRD' : # proteins are not == genes. model . addTriple ( gene_id , self . globaltt [ 'has gene product' ] , dbxref_curie ) continue # skip some of these for now based on curie prefix if prefix in filter_out : continue if prefix == 'ENSEMBL' : model . addXref ( gene_id , dbxref_curie ) if prefix == 'OMIM' : if DipperUtil . is_omim_disease ( dbxref_curie ) : continue try : if self . class_or_indiv . get ( gene_id ) == 'C' : model . addEquivalentClass ( gene_id , dbxref_curie ) if taxon in clique_map : if clique_map [ taxon ] == prefix : model . makeLeader ( dbxref_curie ) elif clique_map [ taxon ] == gene_id . split ( ':' ) [ 0 ] : model . makeLeader ( gene_id ) else : model . addSameIndividual ( gene_id , dbxref_curie ) except AssertionError as err : LOG . warning ( "Error parsing %s: %s" , gene_id , err ) return
Add equivalentClass and sameAs relationships
546
7
250,265
def _get_gene2pubmed ( self , limit ) : src_key = 'gene2pubmed' if self . test_mode : graph = self . testgraph else : graph = self . graph model = Model ( graph ) LOG . info ( "Processing Gene records" ) line_counter = 0 myfile = '/' . join ( ( self . rawdir , self . files [ src_key ] [ 'file' ] ) ) LOG . info ( "FILE: %s" , myfile ) assoc_counter = 0 col = self . files [ src_key ] [ 'columns' ] with gzip . open ( myfile , 'rb' ) as tsv : row = tsv . readline ( ) . decode ( ) . strip ( ) . split ( '\t' ) row [ 0 ] = row [ 0 ] [ 1 : ] # strip comment if col != row : LOG . info ( '%s\nExpected Headers:\t%s\nRecived Headers:\t %s\n' , src_key , col , row ) for line in tsv : line_counter += 1 # skip comments row = line . decode ( ) . strip ( ) . split ( '\t' ) if row [ 0 ] [ 0 ] == '#' : continue # (tax_num, gene_num, pubmed_num) = line.split('\t') # ## set id_filter=None in init if you don't want to have a filter # if self.id_filter is not None: # if ((self.id_filter == 'taxids' and \ # (int(tax_num) not in self.tax_ids)) # or (self.id_filter == 'geneids' and \ # (int(gene_num) not in self.gene_ids))): # continue # #### end filter gene_num = row [ col . index ( 'GeneID' ) ] . strip ( ) if self . test_mode and int ( gene_num ) not in self . gene_ids : continue tax_num = row [ col . index ( 'tax_id' ) ] . strip ( ) if not self . test_mode and tax_num not in self . tax_ids : continue pubmed_num = row [ col . index ( 'PubMed_ID' ) ] . strip ( ) if gene_num == '-' or pubmed_num == '-' : continue gene_id = ':' . join ( ( 'NCBIGene' , gene_num ) ) pubmed_id = ':' . join ( ( 'PMID' , pubmed_num ) ) if self . class_or_indiv . get ( gene_id ) == 'C' : model . addClassToGraph ( gene_id , None ) else : model . addIndividualToGraph ( gene_id , None ) # add the publication as a NamedIndividual # add type publication model . addIndividualToGraph ( pubmed_id , None , None ) reference = Reference ( graph , pubmed_id , self . globaltt [ 'journal article' ] ) reference . addRefToGraph ( ) graph . addTriple ( pubmed_id , self . globaltt [ 'is_about' ] , gene_id ) assoc_counter += 1 if not self . test_mode and limit is not None and line_counter > limit : break LOG . info ( "Processed %d pub-gene associations" , assoc_counter ) return
Loops through the gene2pubmed file and adds a simple triple to say that a given publication is_about a gene . Publications are added as NamedIndividuals .
756
34
250,266
def _process_all ( self , limit ) : omimids = self . _get_omim_ids ( ) LOG . info ( 'Have %i omim numbers to fetch records from their API' , len ( omimids ) ) LOG . info ( 'Have %i omim types ' , len ( self . omim_type ) ) if self . test_mode : graph = self . testgraph else : graph = self . graph geno = Genotype ( graph ) model = Model ( graph ) tax_label = 'Homo sapiens' tax_id = self . globaltt [ tax_label ] # add genome and taxon geno . addGenome ( tax_id , tax_label ) # tax label can get added elsewhere model . addClassToGraph ( tax_id , None ) # label added elsewhere includes = set ( ) includes . add ( 'all' ) self . process_entries ( omimids , self . _transform_entry , includes , graph , limit , self . globaltt )
This takes the list of omim identifiers from the omim . txt . Z file and iteratively queries the omim api for the json - formatted data . This will create OMIM classes with the label definition and some synonyms . If an entry is removed it is added as a deprecated class . If an entry is moved it is deprecated and consider annotations are added .
219
75
250,267
def update ( self , key : bytes , value : bytes , node_updates : Sequence [ Hash32 ] ) : validate_is_bytes ( key ) validate_length ( key , self . _key_size ) # Path diff is the logical XOR of the updated key and this account path_diff = ( to_int ( self . key ) ^ to_int ( key ) ) # Same key (diff of 0), update the tracked value if path_diff == 0 : self . _value = value # No need to update branch else : # Find the first mismatched bit between keypaths. This is # where the branch point occurs, and we should update the # sibling node in the source branch at the branch point. # NOTE: Keys are in MSB->LSB (root->leaf) order. # Node lists are in root->leaf order. # Be sure to convert between them effectively. for bit in reversed ( range ( self . _branch_size ) ) : if path_diff & ( 1 << bit ) > 0 : branch_point = ( self . _branch_size - 1 ) - bit break # NOTE: node_updates only has to be as long as necessary # to obtain the update. This allows an optimization # of pruning updates to the maximum possible depth # that would be required to update, which may be # significantly smaller than the tree depth. if len ( node_updates ) <= branch_point : raise ValidationError ( "Updated node list is not deep enough" ) # Update sibling node in the branch where our key differs from the update self . _branch [ branch_point ] = node_updates [ branch_point ]
Merge an update for another key with the one we are tracking internally .
354
15
250,268
def _get ( self , key : bytes ) -> Tuple [ bytes , Tuple [ Hash32 ] ] : validate_is_bytes ( key ) validate_length ( key , self . _key_size ) branch = [ ] target_bit = 1 << ( self . depth - 1 ) path = to_int ( key ) node_hash = self . root_hash # Append the sibling node to the branch # Iterate on the parent for _ in range ( self . depth ) : node = self . db [ node_hash ] left , right = node [ : 32 ] , node [ 32 : ] if path & target_bit : branch . append ( left ) node_hash = right else : branch . append ( right ) node_hash = left target_bit >>= 1 # Value is the last hash in the chain # NOTE: Didn't do exception here for testing purposes return self . db [ node_hash ] , tuple ( branch )
Returns db value and branch in root - > leaf order
200
11
250,269
def set ( self , key : bytes , value : bytes ) -> Tuple [ Hash32 ] : validate_is_bytes ( key ) validate_length ( key , self . _key_size ) validate_is_bytes ( value ) path = to_int ( key ) node = value _ , branch = self . _get ( key ) proof_update = [ ] # Keep track of proof updates target_bit = 1 # branch is in root->leaf order, so flip for sibling_node in reversed ( branch ) : # Set node_hash = keccak ( node ) proof_update . append ( node_hash ) self . db [ node_hash ] = node # Update if ( path & target_bit ) : node = sibling_node + node_hash else : node = node_hash + sibling_node target_bit <<= 1 # Finally, update root hash self . root_hash = keccak ( node ) self . db [ self . root_hash ] = node # updates need to be in root->leaf order, so flip back return tuple ( reversed ( proof_update ) )
Returns all updated hashes in root - > leaf order
233
10
250,270
def delete ( self , key : bytes ) -> Tuple [ Hash32 ] : validate_is_bytes ( key ) validate_length ( key , self . _key_size ) return self . set ( key , self . _default )
Equals to setting the value to None Returns all updated hashes in root - > leaf order
50
18
250,271
def next_batch ( self , n = 1 ) : if len ( self . queue ) == 0 : return [ ] batch = list ( reversed ( ( self . queue [ - n : ] ) ) ) self . queue = self . queue [ : - n ] return batch
Return the next requests that should be dispatched .
57
9
250,272
def schedule ( self , node_key , parent , depth , leaf_callback , is_raw = False ) : if node_key in self . _existing_nodes : self . logger . debug ( "Node %s already exists in db" % encode_hex ( node_key ) ) return if node_key in self . db : self . _existing_nodes . add ( node_key ) self . logger . debug ( "Node %s already exists in db" % encode_hex ( node_key ) ) return if parent is not None : parent . dependencies += 1 existing = self . requests . get ( node_key ) if existing is not None : self . logger . debug ( "Already requesting %s, will just update parents list" % node_key ) existing . parents . append ( parent ) return request = SyncRequest ( node_key , parent , depth , leaf_callback , is_raw ) # Requests get added to both self.queue and self.requests; the former is used to keep # track which requests should be sent next, and the latter is used to avoid scheduling a # request for a given node multiple times. self . logger . debug ( "Scheduling retrieval of %s" % encode_hex ( request . node_key ) ) self . requests [ request . node_key ] = request bisect . insort ( self . queue , request )
Schedule a request for the node with the given key .
294
12
250,273
def get_children ( self , request ) : node = decode_node ( request . data ) return _get_children ( node , request . depth )
Return all children of the node retrieved by the given request .
32
12
250,274
def process ( self , results ) : for node_key , data in results : request = self . requests . get ( node_key ) if request is None : # This may happen if we resend a request for a node after waiting too long, # and then eventually get two responses with it. self . logger . info ( "No SyncRequest found for %s, maybe we got more than one response for it" % encode_hex ( node_key ) ) return if request . data is not None : raise SyncRequestAlreadyProcessed ( "%s has been processed already" % request ) request . data = data if request . is_raw : self . commit ( request ) continue references , leaves = self . get_children ( request ) for depth , ref in references : self . schedule ( ref , request , depth , request . leaf_callback ) if request . leaf_callback is not None : for leaf in leaves : request . leaf_callback ( leaf , request ) if request . dependencies == 0 : self . commit ( request )
Process request results .
215
4
250,275
def check_if_branch_exist ( db , root_hash , key_prefix ) : validate_is_bytes ( key_prefix ) return _check_if_branch_exist ( db , root_hash , encode_to_bin ( key_prefix ) )
Given a key prefix return whether this prefix is the prefix of an existing key in the trie .
59
20
250,276
def get_branch ( db , root_hash , key ) : validate_is_bytes ( key ) return tuple ( _get_branch ( db , root_hash , encode_to_bin ( key ) ) )
Get a long - format Merkle branch
48
9
250,277
def get_witness_for_key_prefix ( db , node_hash , key ) : validate_is_bytes ( key ) return tuple ( _get_witness_for_key_prefix ( db , node_hash , encode_to_bin ( key ) ) )
Get all witness given a keypath prefix . Include
60
10
250,278
def encode_branch_node ( left_child_node_hash , right_child_node_hash ) : validate_is_bytes ( left_child_node_hash ) validate_length ( left_child_node_hash , 32 ) validate_is_bytes ( right_child_node_hash ) validate_length ( right_child_node_hash , 32 ) return BRANCH_TYPE_PREFIX + left_child_node_hash + right_child_node_hash
Serializes a branch node
107
5
250,279
def encode_leaf_node ( value ) : validate_is_bytes ( value ) if value is None or value == b'' : raise ValidationError ( "Value of leaf node can not be empty" ) return LEAF_TYPE_PREFIX + value
Serializes a leaf node
55
5
250,280
def batch_commit ( self , * , do_deletes = False ) : try : yield except Exception as exc : raise exc else : for key , value in self . cache . items ( ) : if value is not DELETED : self . wrapped_db [ key ] = value elif do_deletes : self . wrapped_db . pop ( key , None ) # if do_deletes is False, ignore deletes to underlying db finally : self . cache = { }
Batch and commit and end of context
102
8
250,281
def _prune_node ( self , node ) : if self . is_pruning : # node is mutable, so capture the key for later pruning now prune_key , node_body = self . _node_to_db_mapping ( node ) should_prune = ( node_body is not None ) else : should_prune = False yield # Prune only if no exception is raised if should_prune : del self . db [ prune_key ]
Prune the given node if context exits cleanly .
105
11
250,282
def _normalize_branch_node ( self , node ) : iter_node = iter ( node ) if any ( iter_node ) and any ( iter_node ) : return node if node [ 16 ] : return [ compute_leaf_key ( [ ] ) , node [ 16 ] ] sub_node_idx , sub_node_hash = next ( ( idx , v ) for idx , v in enumerate ( node [ : 16 ] ) if v ) sub_node = self . get_node ( sub_node_hash ) sub_node_type = get_node_type ( sub_node ) if sub_node_type in { NODE_TYPE_LEAF , NODE_TYPE_EXTENSION } : with self . _prune_node ( sub_node ) : new_subnode_key = encode_nibbles ( tuple ( itertools . chain ( [ sub_node_idx ] , decode_nibbles ( sub_node [ 0 ] ) , ) ) ) return [ new_subnode_key , sub_node [ 1 ] ] elif sub_node_type == NODE_TYPE_BRANCH : subnode_hash = self . _persist_node ( sub_node ) return [ encode_nibbles ( [ sub_node_idx ] ) , subnode_hash ] else : raise Exception ( "Invariant: this code block should be unreachable" )
A branch node which is left with only a single non - blank item should be turned into either a leaf or extension node .
313
25
250,283
def _delete_branch_node ( self , node , trie_key ) : if not trie_key : node [ - 1 ] = BLANK_NODE return self . _normalize_branch_node ( node ) node_to_delete = self . get_node ( node [ trie_key [ 0 ] ] ) sub_node = self . _delete ( node_to_delete , trie_key [ 1 : ] ) encoded_sub_node = self . _persist_node ( sub_node ) if encoded_sub_node == node [ trie_key [ 0 ] ] : return node node [ trie_key [ 0 ] ] = encoded_sub_node if encoded_sub_node == BLANK_NODE : return self . _normalize_branch_node ( node ) return node
Delete a key from inside or underneath a branch node
181
10
250,284
def get ( self , key ) : validate_is_bytes ( key ) return self . _get ( self . root_hash , encode_to_bin ( key ) )
Fetches the value with a given keypath from the given node .
37
15
250,285
def set ( self , key , value ) : validate_is_bytes ( key ) validate_is_bytes ( value ) self . root_hash = self . _set ( self . root_hash , encode_to_bin ( key ) , value )
Sets the value at the given keypath from the given node
54
13
250,286
def _set ( self , node_hash , keypath , value , if_delete_subtrie = False ) : # Empty trie if node_hash == BLANK_HASH : if value : return self . _hash_and_save ( encode_kv_node ( keypath , self . _hash_and_save ( encode_leaf_node ( value ) ) ) ) else : return BLANK_HASH nodetype , left_child , right_child = parse_node ( self . db [ node_hash ] ) # Node is a leaf node if nodetype == LEAF_TYPE : # Keypath must match, there should be no remaining keypath if keypath : raise NodeOverrideError ( "Fail to set the value because the prefix of it's key" " is the same as existing key" ) if if_delete_subtrie : return BLANK_HASH return self . _hash_and_save ( encode_leaf_node ( value ) ) if value else BLANK_HASH # node is a key-value node elif nodetype == KV_TYPE : # Keypath too short if not keypath : if if_delete_subtrie : return BLANK_HASH else : raise NodeOverrideError ( "Fail to set the value because it's key" " is the prefix of other existing key" ) return self . _set_kv_node ( keypath , node_hash , nodetype , left_child , right_child , value , if_delete_subtrie ) # node is a branch node elif nodetype == BRANCH_TYPE : # Keypath too short if not keypath : if if_delete_subtrie : return BLANK_HASH else : raise NodeOverrideError ( "Fail to set the value because it's key" " is the prefix of other existing key" ) return self . _set_branch_node ( keypath , nodetype , left_child , right_child , value , if_delete_subtrie ) raise Exception ( "Invariant: This shouldn't ever happen" )
If if_delete_subtrie is set to True what it will do is that it take in a keypath and traverse til the end of keypath then delete the whole subtrie of that node .
454
42
250,287
def delete ( self , key ) : validate_is_bytes ( key ) self . root_hash = self . _set ( self . root_hash , encode_to_bin ( key ) , b'' )
Equals to setting the value to None
45
8
250,288
def delete_subtrie ( self , key ) : validate_is_bytes ( key ) self . root_hash = self . _set ( self . root_hash , encode_to_bin ( key ) , value = b'' , if_delete_subtrie = True , )
Given a key prefix delete the whole subtrie that starts with the key prefix .
62
16
250,289
def _hash_and_save ( self , node ) : validate_is_bin_node ( node ) node_hash = keccak ( node ) self . db [ node_hash ] = node return node_hash
Saves a node into the database and returns its hash
47
11
250,290
def decode_from_bin ( input_bin ) : for chunk in partition_all ( 8 , input_bin ) : yield sum ( 2 ** exp * bit for exp , bit in enumerate ( reversed ( chunk ) ) )
0100000101010111010000110100100101001001 - > ASCII
48
18
250,291
def encode_to_bin ( value ) : for char in value : for exp in EXP : if char & exp : yield True else : yield False
ASCII - > 0100000101010111010000110100100101001001
31
19
250,292
def encode_from_bin_keypath ( input_bin ) : padded_bin = bytes ( ( 4 - len ( input_bin ) ) % 4 ) + input_bin prefix = TWO_BITS [ len ( input_bin ) % 4 ] if len ( padded_bin ) % 8 == 4 : return decode_from_bin ( PREFIX_00 + prefix + padded_bin ) else : return decode_from_bin ( PREFIX_100000 + prefix + padded_bin )
Encodes a sequence of 0s and 1s into tightly packed bytes Used in encoding key path of a KV - NODE
105
26
250,293
def decode_to_bin_keypath ( path ) : path = encode_to_bin ( path ) if path [ 0 ] == 1 : path = path [ 4 : ] assert path [ 0 : 2 ] == PREFIX_00 padded_len = TWO_BITS . index ( path [ 2 : 4 ] ) return path [ 4 + ( ( 4 - padded_len ) % 4 ) : ]
Decodes bytes into a sequence of 0s and 1s Used in decoding key path of a KV - NODE
86
24
250,294
def encode_nibbles ( nibbles ) : if is_nibbles_terminated ( nibbles ) : flag = HP_FLAG_2 else : flag = HP_FLAG_0 raw_nibbles = remove_nibbles_terminator ( nibbles ) is_odd = len ( raw_nibbles ) % 2 if is_odd : flagged_nibbles = tuple ( itertools . chain ( ( flag + 1 , ) , raw_nibbles , ) ) else : flagged_nibbles = tuple ( itertools . chain ( ( flag , 0 ) , raw_nibbles , ) ) prefixed_value = nibbles_to_bytes ( flagged_nibbles ) return prefixed_value
The Hex Prefix function
160
5
250,295
def decode_nibbles ( value ) : nibbles_with_flag = bytes_to_nibbles ( value ) flag = nibbles_with_flag [ 0 ] needs_terminator = flag in { HP_FLAG_2 , HP_FLAG_2 + 1 } is_odd_length = flag in { HP_FLAG_0 + 1 , HP_FLAG_2 + 1 } if is_odd_length : raw_nibbles = nibbles_with_flag [ 1 : ] else : raw_nibbles = nibbles_with_flag [ 2 : ] if needs_terminator : nibbles = add_nibbles_terminator ( raw_nibbles ) else : nibbles = raw_nibbles return nibbles
The inverse of the Hex Prefix function
163
8
250,296
def get_local_file ( file ) : try : with open ( file . path ) : yield file . path except NotImplementedError : _ , ext = os . path . splitext ( file . name ) with NamedTemporaryFile ( prefix = 'wagtailvideo-' , suffix = ext ) as tmp : try : file . open ( 'rb' ) for chunk in file . chunks ( ) : tmp . write ( chunk ) finally : file . close ( ) tmp . flush ( ) yield tmp . name
Get a local version of the file downloading it from the remote storage if required . The returned value should be used as a context manager to ensure any temporary files are cleaned up afterwards .
110
36
250,297
def rustcall ( func , * args ) : lib . semaphore_err_clear ( ) rv = func ( * args ) err = lib . semaphore_err_get_last_code ( ) if not err : return rv msg = lib . semaphore_err_get_last_message ( ) cls = exceptions_by_code . get ( err , SemaphoreError ) exc = cls ( decode_str ( msg ) ) backtrace = decode_str ( lib . semaphore_err_get_backtrace ( ) ) if backtrace : exc . rust_info = backtrace raise exc
Calls rust method and does some error handling .
136
10
250,298
def decode_str ( s , free = False ) : try : if s . len == 0 : return u"" return ffi . unpack ( s . data , s . len ) . decode ( "utf-8" , "replace" ) finally : if free : lib . semaphore_str_free ( ffi . addressof ( s ) )
Decodes a SymbolicStr
77
6
250,299
def encode_str ( s , mutable = False ) : rv = ffi . new ( "SemaphoreStr *" ) if isinstance ( s , text_type ) : s = s . encode ( "utf-8" ) if mutable : s = bytearray ( s ) rv . data = ffi . from_buffer ( s ) rv . len = len ( s ) # we have to hold a weak reference here to ensure our string does not # get collected before the string is used. attached_refs [ rv ] = s return rv
Encodes a SemaphoreStr
125
7