query
stringlengths
5
1.23k
positive
stringlengths
53
15.2k
id_
int64
0
252k
task_name
stringlengths
87
242
negative
listlengths
20
553
Given a consequence label that describes a variation type, create an anonymous variant of the specified gene as an instance of that consequence type.
def make_allele_by_consequence(self, consequence, gene_id, gene_symbol):
    """
    Given a consequence label that describes a variation type, create an
    anonymous (blank-node) variant of the specified gene typed as an
    instance of that consequence class.

    Consequence categories seen in the source include: loss of function,
    all missense/in frame, dominant negative, activating, increased gene
    dosage, cis-regulatory or promoter mutation, and uncertain.

    :param consequence: str, source consequence label
    :param gene_id: str, CURIE of the gene
    :param gene_symbol: str, symbol used to build the allele label
    :return: str, the blank-node allele identifier
    """
    # Map the consequence label to an ontology class; when resolve()
    # echoes the input back, no mapping exists and we fall back to the
    # generic sequence_variant class.
    consequence_class = self.resolve(consequence, mandatory=False)
    if consequence_class == consequence:
        LOG.warning("Consequence type unmapped: %s", str(consequence))
        consequence_class = self.globaltt['sequence_variant']

    # Build a BNode id from gene id + type, stripping colons so the
    # concatenation is a single valid local identifier.
    allele_bnode = '_:' + (gene_id + consequence_class).replace(':', '')
    allele_label = ' '.join((consequence, 'allele in', gene_symbol))

    self.model.addIndividualToGraph(allele_bnode, allele_label, consequence_class)
    self.geno.addAlleleOfGene(allele_bnode, gene_id)
    return allele_bnode
251,200
https://github.com/monarch-initiative/dipper/blob/24cc80db355bbe15776edc5c7b41e0886959ba41/dipper/sources/Decipher.py#L228-L277
[ "def", "remove_system", "(", "self", ",", "system", ")", ":", "if", "system", "in", "self", ".", "_systems", ":", "self", ".", "_systems", ".", "remove", "(", "system", ")", "else", ":", "raise", "UnmanagedSystemError", "(", "system", ")" ]
Here we parse each row of the gene to phenotype file
def parse(self, limit: Optional[int] = None):
    """
    Parse each row of the (gzipped) gene-to-phenotype file, delegating
    per-row work to _add_gene_disease.

    :param limit: optional int, process at most this many data rows
    :return: None
    """
    if limit is not None:
        LOG.info("Only parsing first %d rows", limit)
    LOG.info("Parsing files...")

    file_path = '/'.join(
        (self.rawdir, self.files['developmental_disorders']['file']))

    with gzip.open(file_path, 'rt') as csvfile:
        reader = csv.reader(csvfile)
        next(reader)  # discard the header row
        for row in reader:
            # line_num includes the header, hence limit + 1
            if limit is not None and reader.line_num > (limit + 1):
                break
            self._add_gene_disease(row)

    LOG.info("Done parsing.")
251,201
https://github.com/monarch-initiative/dipper/blob/24cc80db355bbe15776edc5c7b41e0886959ba41/dipper/sources/EBIGene2Phen.py#L104-L147
[ "def", "distance_calc", "(", "s1", ",", "s2", ")", ":", "if", "len", "(", "s1", ")", ">", "len", "(", "s2", ")", ":", "s1", ",", "s2", "=", "s2", ",", "s1", "distances", "=", "range", "(", "len", "(", "s1", ")", "+", "1", ")", "for", "i2", ...
Parse and add gene variant disease model Model building happens in _build_gene_disease_model
def _add_gene_disease(self, row):  # ::List getting syntax error here
    """
    Parse one row of the gene-to-phenotype file and add a
    gene-variant-disease model. Model building happens in
    _build_gene_disease_model.

    :param row: list of field values ordered per the
        'developmental_disorders' columns declared in self.files
    :raises ValueError: if the row width does not match the declaration
    :return: None (returns early when no disease id can be determined)
    """
    col = self.files['developmental_disorders']['columns']
    if len(row) != len(col):
        raise ValueError("Unexpected number of fields for row {}".format(row))

    variant_label = "variant of {}".format(row[col.index('gene_symbol')])
    disease_omim_id = row[col.index('disease_omim_id')]
    if disease_omim_id == 'No disease mim':
        # check if we've manually curated a MONDO id for this label
        disease_label = row[col.index('disease_label')]
        if disease_label in self.mondo_map:
            disease_id = self.mondo_map[disease_label]
        else:
            return  # sorry for this
    else:
        disease_id = 'OMIM:' + disease_omim_id

    hgnc_curie = 'HGNC:' + row[col.index('hgnc_id')]
    relation_curie = self.resolve(row[col.index('g2p_relation_label')])
    mutation_consequence = row[col.index('mutation_consequence')]
    # 'uncertain' or blank consequences carry no usable variant typing
    if mutation_consequence not in ('uncertain', ''):
        consequence_relation = self.resolve(
            self._get_consequence_predicate(mutation_consequence))
        consequence_curie = self.resolve(mutation_consequence)
        variant_label = "{} {}".format(mutation_consequence, variant_label)
    else:
        consequence_relation = None
        consequence_curie = None

    allelic_requirement = row[col.index('allelic_requirement')]
    if allelic_requirement != '':
        requirement_curie = self.resolve(allelic_requirement)
    else:
        requirement_curie = None

    pmids = row[col.index('pmids')]
    if pmids != '':
        pmid_list = ['PMID:' + pmid for pmid in pmids.split(';')]
    else:
        pmid_list = []

    # build the model
    # Should we build a reusable object and/or tuple that
    # could be passed to a more general model builder for
    # this and orphanet (and maybe clinvar)
    self._build_gene_disease_model(
        hgnc_curie,
        relation_curie,
        disease_id,
        variant_label,
        consequence_relation,
        consequence_curie,
        requirement_curie,
        pmid_list)
251,202
https://github.com/monarch-initiative/dipper/blob/24cc80db355bbe15776edc5c7b41e0886959ba41/dipper/sources/EBIGene2Phen.py#L149-L211
[ "def", "search", "(", "query", ",", "team", "=", "None", ")", ":", "if", "team", "is", "None", ":", "team", "=", "_find_logged_in_team", "(", ")", "if", "team", "is", "not", "None", ":", "session", "=", "_get_session", "(", "team", ")", "response", "...
Builds gene variant disease model
def _build_gene_disease_model(
        self,
        gene_id,
        relation_id,
        disease_id,
        variant_label,
        consequence_predicate=None,
        consequence_id=None,
        allelic_requirement=None,
        pmids=None):
    """
    Build the gene-variant-disease model in the graph.

    When both consequence_predicate and consequence_id are given, an
    anonymous variant locus (BNode) is created and used as the subject
    of the disease association; otherwise the gene itself is the subject.

    :param gene_id: str, gene CURIE
    :param relation_id: str, predicate CURIE for the G2P association
    :param disease_id: str, disease CURIE
    :param variant_label: str, human-readable label for the variant
    :param consequence_predicate: optional str, predicate linking the
        variant to its molecular consequence
    :param consequence_id: optional str, consequence class CURIE
    :param allelic_requirement: optional str, requirement class CURIE
    :param pmids: optional list of PMID CURIEs used as sources
    :return: None
    """
    model = Model(self.graph)
    geno = Genotype(self.graph)
    pmids = [] if pmids is None else pmids
    is_variant = False
    variant_or_gene = gene_id

    # deterministic BNode id derived from the variant label
    variant_id_string = variant_label
    variant_bnode = self.make_id(variant_id_string, "_")

    if consequence_predicate is not None and consequence_id is not None:
        is_variant = True
        model.addTriple(variant_bnode, consequence_predicate, consequence_id)
        # Hack to add labels to terms that
        # don't exist in an ontology
        if consequence_id.startswith(':'):
            model.addLabel(
                consequence_id,
                consequence_id.strip(':').replace('_', ' '))

    if is_variant:
        variant_or_gene = variant_bnode
        # Typically we would type the variant using the
        # molecular consequence, but these are not specific
        # enough for us to make mappings (see translation table)
        model.addIndividualToGraph(
            variant_bnode, variant_label, self.globaltt['variant_locus'])
        geno.addAffectedLocus(variant_bnode, gene_id)
        model.addBlankNodeAnnotation(variant_bnode)

    assoc = G2PAssoc(
        self.graph, self.name, variant_or_gene, disease_id, relation_id)
    assoc.source = pmids
    assoc.add_association_to_graph()

    # allelic requirement is only attached for gene-level associations
    if allelic_requirement is not None and is_variant is False:
        model.addTriple(
            assoc.assoc_id,
            self.globaltt['has_allelic_requirement'],
            allelic_requirement)
        # same hack as above for terms lacking ontology labels
        if allelic_requirement.startswith(':'):
            model.addLabel(
                allelic_requirement,
                allelic_requirement.strip(':').replace('_', ' '))
251,203
https://github.com/monarch-initiative/dipper/blob/24cc80db355bbe15776edc5c7b41e0886959ba41/dipper/sources/EBIGene2Phen.py#L213-L274
[ "def", "list_sessions", "(", "logged_in_users_only", "=", "False", ")", ":", "ret", "=", "list", "(", ")", "server", "=", "win32ts", ".", "WTS_CURRENT_SERVER_HANDLE", "protocols", "=", "{", "win32ts", ".", "WTS_PROTOCOL_TYPE_CONSOLE", ":", "'console'", ",", "win...
This will process the id mapping file provided by Biogrid . The file has a very large header which we scan past then pull the identifiers and make equivalence axioms
def _get_identifiers(self, limit):
    """
    Process the id-mapping file provided by BioGrid. The file has a very
    large header which we scan past, then pull the identifiers and make
    equivalence axioms between BioGrid ids and other gene id namespaces.

    :param limit: optional int, stop after this many matching lines
        (ignored in test mode)
    :return: None
    """
    LOG.info("getting identifier mapping")
    line_counter = 0
    f = '/'.join((self.rawdir, self.files['identifiers']['file']))
    myzip = ZipFile(f, 'r')
    # assume that the first entry is the item
    fname = myzip.namelist()[0]
    foundheader = False

    # TODO align this species filter with the one above
    # speciesfilters = 'Homo sapiens,Mus musculus,Drosophila melanogaster,
    # Danio rerio, Caenorhabditis elegans,Xenopus laevis'.split(',')
    speciesfilters = 'Homo sapiens,Mus musculus'.split(',')
    with myzip.open(fname, 'r') as csvfile:
        for line in csvfile:
            # skip header lines until the BIOGRID_ID column header is seen
            if not foundheader:
                if re.match(r'BIOGRID_ID', line.decode()):
                    foundheader = True
                continue

            line = line.decode().strip()
            # BIOGRID_ID
            # IDENTIFIER_VALUE
            # IDENTIFIER_TYPE
            # ORGANISM_OFFICIAL_NAME
            # 1	814566	ENTREZ_GENE	Arabidopsis thaliana
            (biogrid_num, id_num, id_type, organism_label) = line.split('\t')

            if self.test_mode:
                graph = self.testgraph
                # skip any genes that don't match our test set
                if int(biogrid_num) not in self.biogrid_ids:
                    continue
            else:
                graph = self.graph

            model = Model(graph)

            # for each one of these,
            # create the node and add equivalent classes
            biogrid_id = 'BIOGRID:' + biogrid_num
            prefix = self.localtt[id_type]

            # TODO make these filters available as commandline options
            # geneidtypefilters='NCBIGene,OMIM,MGI,FlyBase,ZFIN,MGI,HGNC,
            # WormBase,XenBase,ENSEMBL,miRBase'.split(',')
            geneidtypefilters = 'NCBIGene,MGI,ENSEMBL,ZFIN,HGNC'.split(',')
            # proteinidtypefilters='HPRD,Swiss-Prot,NCBIProtein'
            if (speciesfilters is not None) \
                    and (organism_label.strip() in speciesfilters):
                line_counter += 1
                if (geneidtypefilters is not None) \
                        and (prefix in geneidtypefilters):
                    mapped_id = ':'.join((prefix, id_num))
                    model.addEquivalentClass(biogrid_id, mapped_id)
                # this symbol will only get attached to the biogrid class
                elif id_type == 'OFFICIAL_SYMBOL':
                    model.addClassToGraph(biogrid_id, id_num)
                # elif (id_type == 'SYNONYM'):
                #   FIXME - i am not sure these are synonyms, altids?
                #   gu.addSynonym(g,biogrid_id,id_num)

            if not self.test_mode and limit is not None and line_counter > limit:
                break

    myzip.close()
    return
251,204
https://github.com/monarch-initiative/dipper/blob/24cc80db355bbe15776edc5c7b41e0886959ba41/dipper/sources/BioGrid.py#L201-L281
[ "def", "do_not_disturb", "(", "self", ")", ":", "return", "bool", "(", "strtobool", "(", "str", "(", "self", ".", "_settings_json", ".", "get", "(", "CONST", ".", "SETTINGS_DO_NOT_DISTURB", ")", ")", ")", ")" ]
Add supporting line of evidence node to association id
def add_supporting_evidence(self, evidence_line, evidence_type=None, label=None):
    """
    Attach a supporting line of evidence to this association.

    :param evidence_line: str, id of the evidence-line node
    :param evidence_type: optional str, class used to type the evidence
        line as an individual (when given)
    :param label: optional str, label for the evidence individual
    :return: None
    """
    self.graph.addTriple(
        self.association,
        self.globaltt['has_supporting_evidence_line'],
        evidence_line)
    if evidence_type is None:
        return
    self.model.addIndividualToGraph(evidence_line, label, evidence_type)
251,205
https://github.com/monarch-initiative/dipper/blob/24cc80db355bbe15776edc5c7b41e0886959ba41/dipper/models/Evidence.py#L34-L47
[ "def", "render_all", "(", "self", ",", "op", ",", "exclude", "=", "[", "]", ",", "opt", "=", "None", ")", ":", "opt", "=", "self", ".", "default", "(", ")", "if", "opt", "==", "None", "else", "opt", "if", "not", "isinstance", "(", "op", ",", "O...
Overrides Association by including bnode support
def add_association_to_graph(self):
    """
    Overrides Association by including bnode support for stage and
    environment qualifiers.

    FIX: the stage guard was `if self.start_stage_id or
    self.end_stage_id is not None:`, which parses as
    `self.start_stage_id or (self.end_stage_id is not None)` — a
    truthiness test on the first operand rather than a None test on
    both. Rewritten as explicit None checks on each stage id.

    :return: None
    """
    Assoc.add_association_to_graph(self)

    # make a blank node representing the developmental "stage process"
    # spanning the start and end stages, when either stage is given
    if self.start_stage_id is not None or self.end_stage_id is not None:
        stage_process_id = '-'.join(
            (str(self.start_stage_id), str(self.end_stage_id)))
        stage_process_id = '_:' + re.sub(r':', '', stage_process_id)
        self.model.addIndividualToGraph(
            stage_process_id, None, self.globaltt['developmental_process'])
        # NOTE(review): when only one stage is given, the other triple is
        # added with a None object — presumably the graph impl drops it;
        # confirm against Graph.addTriple.
        self.graph.addTriple(
            stage_process_id, self.globaltt['starts during'],
            self.start_stage_id)
        self.graph.addTriple(
            stage_process_id, self.globaltt['ends during'],
            self.end_stage_id)
        self.stage_process_id = stage_process_id
        self.graph.addTriple(
            self.assoc_id, self.globaltt['has_qualifier'],
            self.stage_process_id)

    if self.environment_id is not None:
        self.graph.addTriple(
            self.assoc_id, self.globaltt['has_qualifier'],
            self.environment_id)
    return
251,206
https://github.com/monarch-initiative/dipper/blob/24cc80db355bbe15776edc5c7b41e0886959ba41/dipper/models/assoc/G2PAssoc.py#L66-L103
[ "def", "_openResources", "(", "self", ")", ":", "try", ":", "rate", ",", "data", "=", "scipy", ".", "io", ".", "wavfile", ".", "read", "(", "self", ".", "_fileName", ",", "mmap", "=", "True", ")", "except", "Exception", "as", "ex", ":", "logger", "...
MPD data is delivered in four separate csv files and one xml file which we process iteratively and write out as one large graph .
def parse(self, limit=None):
    """
    MPD data is delivered in four separate csv files and one xml file,
    which we process iteratively and write out as one large graph.

    FIX: log message read "rows fo each file" — typo corrected.

    :param limit: optional int, passed through to each processor
    :return: None
    """
    if limit is not None:
        LOG.info("Only parsing first %s rows for each file", str(limit))

    LOG.info("Parsing files...")

    self._process_straininfo(limit)
    # the following will provide us the hash-lookups
    # These must be processed in a specific order

    # mapping between assays and ontology terms
    self._process_ontology_mappings_file(limit)
    # this is the metadata about the measurements
    self._process_measurements_file(limit)
    # get all the measurements per strain
    self._process_strainmeans_file(limit)

    # The following will use the hash populated above
    # to lookup the ids when filling in the graph
    self._fill_provenance_graph(limit)

    LOG.info("Finished parsing.")
    return
251,207
https://github.com/monarch-initiative/dipper/blob/24cc80db355bbe15776edc5c7b41e0886959ba41/dipper/sources/MPD.py#L112-L142
[ "def", "on_remove", "(", "self", ",", "callable_", ")", ":", "self", ".", "model", ".", "add_observer", "(", "callable_", ",", "self", ".", "entity_type", ",", "'remove'", ",", "self", ".", "entity_id", ")" ]
Create an association between a sex-specific strain id and each of the phenotypes. Here we create a genotype from the strain, and a sex-specific genotype. Each of those genotypes is created as an anonymous node.
def _add_g2p_assoc(self, graph, strain_id, sex, assay_id, phenotypes, comment):
    """
    Create an association between a sex-specific strain id and each of
    the phenotypes. A genotype is created from the strain, plus a
    sex-specific genotype; both are anonymous (BNode) genotypes.

    FIX: `idlabel_hash.get(strain_id)` may return None, but the original
    built `'[' + strain_label + ']'` unconditionally and crashed with a
    TypeError before reaching its own None check. Both labels now fall
    back to the strain id when no label is known.

    :param graph: the graph to write into
    :param strain_id: str, strain CURIE
    :param sex: str, 'm' or 'f' (anything else gets the generic
        sex_qualified_genotype type)
    :param assay_id: str, evidence assay id
    :param phenotypes: iterable of phenotype CURIEs, or None
    :param comment: str, comment attached to each association
    :return: None
    """
    geno = Genotype(graph)
    model = Model(graph)
    eco_id = self.globaltt['experimental phenotypic evidence']
    strain_label = self.idlabel_hash.get(strain_id)

    # strain genotype (BNode ids derived from the strain id)
    genotype_id = '_:' + '-'.join((re.sub(r':', '', strain_id), 'genotype'))
    sex_specific_genotype_id = '_:' + '-'.join(
        (re.sub(r':', '', strain_id), sex, 'genotype'))

    if strain_label is not None:
        genotype_label = '[' + strain_label + ']'
        sex_specific_genotype_label = strain_label + ' (' + sex + ')'
    else:
        genotype_label = '[' + strain_id + ']'
        sex_specific_genotype_label = strain_id + '(' + sex + ')'

    genotype_type = self.globaltt['sex_qualified_genotype']
    if sex == 'm':
        genotype_type = self.globaltt['male_genotype']
    elif sex == 'f':
        genotype_type = self.globaltt['female_genotype']

    # add the genotype to strain connection
    geno.addGenotype(
        genotype_id, genotype_label, self.globaltt['genomic_background'])
    graph.addTriple(strain_id, self.globaltt['has_genotype'], genotype_id)

    geno.addGenotype(
        sex_specific_genotype_id, sex_specific_genotype_label, genotype_type)

    # add the strain as the background for the genotype
    graph.addTriple(
        sex_specific_genotype_id,
        self.globaltt['has_sex_agnostic_part'],
        genotype_id)

    # ############# BUILD THE G2P ASSOC #############
    # TODO add more provenance info when that model is completed
    if phenotypes is not None:
        for phenotype_id in phenotypes:
            assoc = G2PAssoc(
                graph, self.name, sex_specific_genotype_id, phenotype_id)
            assoc.add_evidence(assay_id)
            assoc.add_evidence(eco_id)
            assoc.add_association_to_graph()
            assoc_id = assoc.get_association_id()
            model.addComment(assoc_id, comment)
            model._addSexSpecificity(assoc_id, self.resolve(sex))
    return
251,208
https://github.com/monarch-initiative/dipper/blob/24cc80db355bbe15776edc5c7b41e0886959ba41/dipper/sources/MPD.py#L385-L457
[ "def", "mirror", "(", "self", ",", "tables", ",", "dest_url", ")", ":", "from", "mirror", "import", "mirror", "return", "mirror", "(", "self", ",", "tables", ",", "dest_url", ")" ]
IMPC data is delivered in three separate csv files OR in one integrated file each with the same file format .
def parse(self, limit=None):
    """
    IMPC data is delivered in three separate csv files OR in one
    integrated file, each with the same file format.

    FIX: log message read "rows fo each file" — typo corrected; local
    `file` renamed to avoid shadowing the (py2) builtin name.

    :param limit: optional int, passed through to _process_data
    :return: None
    """
    if limit is not None:
        LOG.info("Only parsing first %s rows for each file", str(limit))
    LOG.info("Parsing files...")

    if self.test_only:
        self.test_mode = True

    # for f in ['impc', 'euro', 'mgd', '3i']:
    for src_key in ['all']:
        raw_file = '/'.join((self.rawdir, self.files[src_key]['file']))
        self._process_data(raw_file, limit)

    LOG.info("Finished parsing")
    return
251,209
https://github.com/monarch-initiative/dipper/blob/24cc80db355bbe15776edc5c7b41e0886959ba41/dipper/sources/IMPC.py#L119-L143
[ "def", "_bind", "(", "self", ")", ":", "self", ".", "log", ".", "debug", "(", "\"CloudWatch: Attempting to connect to CloudWatch at Region: %s\"", ",", "self", ".", "region", ")", "try", ":", "self", ".", "connection", "=", "boto", ".", "ec2", ".", "cloudwatch...
When adding a gene to a pathway we create an intermediate gene product that is involved in the pathway through a blank node .
def addGeneToPathway(self, gene_id, pathway_id):
    """
    When adding a gene to a pathway, create an intermediate gene-product
    blank node, and assert that product is involved in the pathway.

    :param gene_id: str, gene CURIE
    :param pathway_id: str, pathway CURIE
    :return: None
    """
    product_bnode = '_:{}product'.format(gene_id.replace(':', ''))
    self.model.addIndividualToGraph(
        product_bnode, None, self.globaltt['gene_product'])
    self.graph.addTriple(
        gene_id, self.globaltt['has gene product'], product_bnode)
    self.addComponentToPathway(product_bnode, pathway_id)
251,210
https://github.com/monarch-initiative/dipper/blob/24cc80db355bbe15776edc5c7b41e0886959ba41/dipper/models/Pathway.py#L50-L71
[ "def", "_check_registry_type", "(", "folder", "=", "None", ")", ":", "folder", "=", "_registry_folder", "(", "folder", ")", "default_file", "=", "os", ".", "path", ".", "join", "(", "folder", ",", "'registry_type.txt'", ")", "try", ":", "with", "open", "("...
This can be used directly when the component is directly involved in the pathway . If a transforming event is performed on the component first then the addGeneToPathway should be used instead .
def addComponentToPathway(self, component_id, pathway_id):
    """
    Assert that a component is directly involved in a pathway.

    Use addGeneToPathway instead when a transforming event is performed
    on the component first.

    :param component_id: str, component CURIE
    :param pathway_id: str, pathway CURIE
    :return: None
    """
    predicate = self.globaltt['involved in']
    self.graph.addTriple(component_id, predicate, pathway_id)
251,211
https://github.com/monarch-initiative/dipper/blob/24cc80db355bbe15776edc5c7b41e0886959ba41/dipper/models/Pathway.py#L73-L85
[ "def", "getreadergroups", "(", "self", ")", ":", "innerreadergroups", ".", "getreadergroups", "(", "self", ")", "hresult", ",", "hcontext", "=", "SCardEstablishContext", "(", "SCARD_SCOPE_USER", ")", "if", "hresult", "!=", "0", ":", "raise", "EstablishContextExcep...
This convenience method will write out all of the graphs associated with the source . Right now these are hardcoded to be a single graph and a src_dataset . ttl and a src_test . ttl If you do not supply stream = stdout it will default write these to files .
def write(self, fmt='turtle', stream=None):
    """
    Write out all graphs associated with the source. Right now these are
    hardcoded to be a single graph plus a src_dataset.ttl, and a
    src_test.ttl in test mode. If stream is not 'stdout', output goes to
    files derived from self.name and the requested format.

    :param fmt: str, serialization format key ('turtle', 'rdfxml', ...)
    :param stream: None to write files; 'stdout' to write the main graph
        to standard output; anything else is an error
    :return: None
    """
    fmt_ext = {
        'rdfxml': 'xml',
        'turtle': 'ttl',
        'nt': 'nt',        # ntriples
        'nquads': 'nq',
        'n3': 'n3'         # notation3
    }

    # make the regular graph output file
    dest = None
    if self.name is not None:
        dest = '/'.join((self.outdir, self.name))
        # unknown formats keep the format string itself as the suffix
        if fmt in fmt_ext:
            dest = '.'.join((dest, fmt_ext.get(fmt)))
        else:
            dest = '.'.join((dest, fmt))
        LOG.info("Setting outfile to %s", dest)

        # make the dataset_file name, always format as turtle
        self.datasetfile = '/'.join(
            (self.outdir, self.name + '_dataset.ttl'))
        LOG.info("Setting dataset file to %s", self.datasetfile)

        if self.dataset is not None and self.dataset.version is None:
            self.dataset.set_version_by_date()
            LOG.info("No version for %s setting to date issued.", self.name)
    else:
        LOG.warning("No output file set. Using stdout")
        stream = 'stdout'

    gu = GraphUtils(None)

    # the _dataset description is always turtle
    gu.write(self.dataset.getGraph(), 'turtle', filename=self.datasetfile)

    if self.test_mode:
        # unless we stop hardcoding, the test dataset is always turtle
        LOG.info("Setting testfile to %s", self.testfile)
        gu.write(self.testgraph, 'turtle', filename=self.testfile)

    # print graph out
    if stream is None:
        outfile = dest
    elif stream.lower().strip() == 'stdout':
        outfile = None
    else:
        LOG.error("I don't understand our stream.")
        return

    gu.write(self.graph, fmt, filename=outfile)
251,212
https://github.com/monarch-initiative/dipper/blob/24cc80db355bbe15776edc5c7b41e0886959ba41/dipper/sources/Source.py#L161-L223
[ "def", "groups_unarchive", "(", "self", ",", "room_id", ",", "*", "*", "kwargs", ")", ":", "return", "self", ".", "__call_api_post", "(", "'groups.unarchive'", ",", "roomId", "=", "room_id", ",", "kwargs", "=", "kwargs", ")" ]
The file we output needs to be declared as an ontology, including its version information.
def declareAsOntology(self, graph):
    """
    Declare the output file as an owl:Ontology, with version info.

    e.g. <http://data.monarchinitiative.org/ttl/biogrid.ttl> a owl:Ontology ;
         owl:versionInfo
         <https://archive.monarchinitiative.org/YYYYMM/ttl/biogrid.ttl>

    :param graph: the graph receiving the declaration
    :return: None
    """
    model = Model(graph)

    # is self.outfile suffix set yet???
    ontology_file_id = 'MonarchData:' + self.name + ".ttl"
    model.addOntologyDeclaration(ontology_file_id)

    # version info is today's date, formatted YYYY-MM-DD
    ontology_version = datetime.now().strftime("%Y-%m-%d")

    # TEC this means the MonarchArchive IRI needs the release updated
    # maybe extract the version info from there
    # should not hardcode the suffix as it may change
    archive_url = 'MonarchArchive:' + 'ttl/' + self.name + '.ttl'
    model.addOWLVersionIRI(ontology_file_id, archive_url)
    model.addOWLVersionInfo(ontology_file_id, ontology_version)
251,213
https://github.com/monarch-initiative/dipper/blob/24cc80db355bbe15776edc5c7b41e0886959ba41/dipper/sources/Source.py#L614-L660
[ "def", "get", "(", "self", ",", "request", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "private_file", "=", "self", ".", "get_private_file", "(", ")", "if", "not", "self", ".", "can_access_file", "(", "private_file", ")", ":", "return", "HttpR...
A helpful utility to remove Carriage Return from any file . This will read a file into memory and overwrite the contents of the original file .
def remove_backslash_r(filename, encoding):
    """
    A helpful utility to remove carriage returns from any file. Reads
    the whole file into memory and overwrites the original file.

    FIX: the reader was opened with `newline=r'\n'` — a raw string
    (backslash + 'n'), which is not one of the legal newline values and
    makes open() raise ValueError on every call. `newline=''` is the
    correct way to disable newline translation so '\r' characters
    survive to be stripped. The writer now also uses the same encoding
    as the reader, and the redundant truncate() (implied by mode 'w')
    is dropped.

    :param filename: str, path of the file to rewrite in place
    :param encoding: str, text encoding used for both read and write
    :return: None
    """
    # newline='' disables universal-newline translation so that any
    # '\r' bytes are visible to the regex below
    with open(filename, 'r', encoding=encoding, newline='') as filereader:
        contents = filereader.read()

    contents = re.sub(r'\r', '', contents)

    with open(filename, 'w', encoding=encoding) as filewriter:
        filewriter.write(contents)
251,214
https://github.com/monarch-initiative/dipper/blob/24cc80db355bbe15776edc5c7b41e0886959ba41/dipper/sources/Source.py#L664-L683
[ "def", "_WaitForStatusNotRunning", "(", "self", ")", ":", "# We wait slightly longer than the status check sleep time.", "time", ".", "sleep", "(", "2.0", ")", "time_slept", "=", "2.0", "while", "self", ".", "_status_is_running", ":", "time", ".", "sleep", "(", "0.5...
Load an ingest-specific translation table, mapping whatever the source called something to the ontology label we need to map it to. To facilitate seeing more ontology labels in dipper ingests, a reverse mapping from ontology labels to external strings is also generated and available as the dict localtcid.
def load_local_translationtable(self, name):
    """
    Load an ingest-specific translation table mapping source strings to
    the ontology labels we need. A reverse mapping (ontology label ->
    external string) is also generated and stored as self.localtcid.

    FIX: replaced the probe-open/except/finally dance (open once just
    to test existence, then reopen in `finally`) with a single EAFP
    read; the stub file is only written when the first read fails.

    :param name: str, ingest name; the table lives at
        translationtable/<name>.yaml
    :return: dict, the loaded translation table
    """
    localtt_file = 'translationtable/' + name + '.yaml'

    try:
        with open(localtt_file, 'r') as read_yaml:
            localtt = yaml.safe_load(read_yaml)
    except IOError:
        # write a stub file as a place holder if none exists
        with open(localtt_file, 'w') as write_yaml:
            yaml.dump({name: name}, write_yaml)
        with open(localtt_file, 'r') as read_yaml:
            localtt = yaml.safe_load(read_yaml)

    # inverse local translation.
    # note: keeping this invertable will be work.
    # Useful to not litter an ingest with external syntax
    self.localtcid = {v: k for k, v in localtt.items()}

    return localtt
251,215
https://github.com/monarch-initiative/dipper/blob/24cc80db355bbe15776edc5c7b41e0886959ba41/dipper/sources/Source.py#L739-L767
[ "def", "resume", "(", "self", ")", ":", "with", "self", ".", "_wake", ":", "self", ".", "_paused", "=", "False", "self", ".", "_wake", ".", "notifyAll", "(", ")" ]
genes are classes
def addGene(self, gene_id, gene_label, gene_type=None, gene_description=None):
    """
    Add a gene to the graph; genes are modeled as classes.

    :param gene_id: str, gene CURIE
    :param gene_label: str, gene label
    :param gene_type: optional str, defaults to the global 'gene' class
    :param gene_description: optional str
    :return: None
    """
    resolved_type = self.globaltt['gene'] if gene_type is None else gene_type
    self.model.addClassToGraph(
        gene_id, gene_label, resolved_type, gene_description)
251,216
https://github.com/monarch-initiative/dipper/blob/24cc80db355bbe15776edc5c7b41e0886959ba41/dipper/models/Genotype.py#L79-L87
[ "def", "show_analyzer_status", "(", ")", ":", "ecode", "=", "0", "try", ":", "image", "=", "contexts", "[", "'anchore_allimages'", "]", "[", "imagelist", "[", "0", "]", "]", "analyzer_status", "=", "contexts", "[", "'anchore_db'", "]", ".", "load_analyzer_ma...
Here we want to look up the NCBI Taxon id using some kind of label . It will only return a result if there is a unique hit .
def get_ncbi_taxon_num_by_label(label):
    """
    Look up the NCBI Taxon number for a label via eutils ESEARCH.
    Only returns a result when there is a unique hit.

    FIX: the retry for eutils' occasional
    {'ERROR': 'Invalid db name specified: taxonomy'} blob was a
    copy-pasted duplicate of the fetch code; deduplicated into a
    bounded retry loop with identical behavior (at most one retry,
    and the second result is used whether or not it still errors).

    :param label: str, taxon label to search for
    :return: str taxon number, or None when the hit is not unique
    """
    req = {'db': 'taxonomy', 'retmode': 'json', 'term': label}
    req.update(EREQ)

    result = None
    for _ in range(2):  # initial attempt plus at most one retry
        request = SESSION.get(ESEARCH, params=req)
        LOG.info('fetching: %s', request.url)
        request.raise_for_status()
        result = request.json()['esearchresult']
        # Occasionally eutils returns the json blob
        # {'ERROR': 'Invalid db name specified: taxonomy'}
        if 'ERROR' not in result:
            break

    tax_num = None
    if 'count' in result and str(result['count']) == '1':
        tax_num = result['idlist'][0]
    else:
        # TODO throw errors
        LOG.warning(
            'ESEARCH for taxon label "%s" returns %s', label, str(result))

    return tax_num
251,217
https://github.com/monarch-initiative/dipper/blob/24cc80db355bbe15776edc5c7b41e0886959ba41/dipper/utils/DipperUtil.py#L47-L78
[ "def", "start", "(", "self", ",", "*", "*", "kwargs", ")", ":", "if", "not", "self", ".", "is_running", "(", ")", ":", "self", ".", "websock_url", "=", "self", ".", "chrome", ".", "start", "(", "*", "*", "kwargs", ")", "self", ".", "websock", "="...
This will set the association ID based on the internal parts of the association . To be used in cases where an external association identifier should be used .
def set_association_id(self, assoc_id=None):
    """
    Set this association's id. With no argument, the id is derived from
    the internal parts of the association; pass assoc_id to use an
    external association identifier instead.

    :param assoc_id: optional str, externally supplied association id
    :return: str, the association id that was set
    """
    if assoc_id is not None:
        self.assoc_id = assoc_id
    else:
        self.assoc_id = self.make_association_id(
            self.definedby, self.sub, self.rel, self.obj)
    return self.assoc_id
251,218
https://github.com/monarch-initiative/dipper/blob/24cc80db355bbe15776edc5c7b41e0886959ba41/dipper/models/assoc/Association.py#L166-L184
[ "def", "console_wait_for_keypress", "(", "flush", ":", "bool", ")", "->", "Key", ":", "key", "=", "Key", "(", ")", "lib", ".", "TCOD_console_wait_for_keypress_wrapper", "(", "key", ".", "key_p", ",", "flush", ")", "return", "key" ]
A method to create unique identifiers for OBAN-style associations, based on all the parts of the association. If any of the items is empty or None, it will convert it to blank. It effectively digests the string of concatenated values. Subclasses of Assoc can submit an additional array of attributes that will be appended to the ID.
def make_association_id(definedby, sub, pred, obj, attributes=None):
    """
    Create a unique identifier for an OBAN-style association from all of
    its parts: the string of concatenated (non-None) values is digested.
    Subclasses of Assoc can submit an additional array of attributes to
    be appended to the id.

    :param definedby: str, source that defines the association
    :param sub: str, subject CURIE
    :param pred: str, predicate CURIE
    :param obj: str, object CURIE
    :param attributes: optional list of extra strings folded into the id
    :return: str, 'MONARCH:<digest>'
    """
    parts = [definedby, sub, pred, obj]
    if attributes:
        parts += attributes
    parts = [part for part in parts if part is not None]

    digest = GraphUtils.digest_id('+'.join(parts))
    assoc_id = ':'.join(('MONARCH', digest))
    assert assoc_id is not None
    return assoc_id
251,219
https://github.com/monarch-initiative/dipper/blob/24cc80db355bbe15776edc5c7b41e0886959ba41/dipper/models/assoc/Association.py#L250-L279
[ "def", "command_max_run_time", "(", "self", ",", "event", "=", "None", ")", ":", "try", ":", "max_run_time", "=", "self", ".", "max_run_time_var", ".", "get", "(", ")", "except", "ValueError", ":", "max_run_time", "=", "self", ".", "runtime_cfg", ".", "max...
convert integer to Roman numeral
def toRoman(num):
    """
    Convert an integer (1..4999) to a Roman numeral string.

    FIX: both exceptions were raised with logging-style argument tuples
    (`raise ValueError("... %n ...", num)`), so the number was never
    interpolated and `%n` is not even a valid format specifier. The
    messages are now properly %-formatted. Check order is unchanged.

    :param num: int (an integral float is rejected with TypeError)
    :raises ValueError: if num is outside 1..4999
    :raises TypeError: if num is not an integral value
    :return: str, the Roman numeral
    """
    if not 0 < num < 5000:
        raise ValueError("number %s out of range (must be 1..4999)" % num)
    if int(num) != num:
        raise TypeError("decimals %s can not be converted" % num)

    result = ""
    # greedily subtract the largest numeral values first
    for numeral, integer in romanNumeralMap:
        while num >= integer:
            result += numeral
            num -= integer
    return result
251,220
https://github.com/monarch-initiative/dipper/blob/24cc80db355bbe15776edc5c7b41e0886959ba41/dipper/utils/romanplus.py#L38-L50
[ "def", "memsize", "(", "self", ")", ":", "return", "self", ".", "size", "+", "1", "+", "TYPE", ".", "size", "(", "gl", ".", "BOUND_TYPE", ")", "*", "len", "(", "self", ".", "bounds", ")" ]
convert Roman numeral to integer
def fromRoman(strng):
    """
    Convert a Roman numeral string to an integer.

    FIX: the ValueError was raised with a logging-style argument tuple
    (`ValueError('Invalid Roman numeral: %s', strng)`), so the input was
    never interpolated into the message; now properly %-formatted.

    :param strng: str, Roman numeral
    :raises TypeError: if the input is empty/falsy
    :raises ValueError: if the input is not a valid Roman numeral
    :return: int
    """
    if not strng:
        raise TypeError('Input can not be blank')
    if not romanNumeralPattern.search(strng):
        raise ValueError('Invalid Roman numeral: %s' % strng)

    result = 0
    index = 0
    # consume numerals from largest to smallest, left to right
    for numeral, integer in romanNumeralMap:
        while strng[index:index + len(numeral)] == numeral:
            result += integer
            index += len(numeral)
    return result
251,221
https://github.com/monarch-initiative/dipper/blob/24cc80db355bbe15776edc5c7b41e0886959ba41/dipper/utils/romanplus.py#L70-L83
[ "def", "_visit_te_shape", "(", "self", ",", "shape", ":", "ShExJ", ".", "shapeExpr", ",", "visit_center", ":", "_VisitorCenter", ")", "->", "None", ":", "if", "isinstance", "(", "shape", ",", "ShExJ", ".", "Shape", ")", "and", "shape", ".", "expression", ...
This table provides a mapping of genotypes to background genotypes Note that the background_id is also a genotype_id .
def _process_genotype_backgrounds(self, limit=None):
    """
    This table provides a mapping of genotypes to background genotypes.
    Note that the background_id is also a genotype_id.

    The mapping is stored in self.genotype_backgrounds for later lookup
    when building fish genotypes, and the background/genotype structure
    is added to the graph.

    :param limit: optional int, stop after this many rows
        (ignored in test mode)
    :return: None
    """
    if self.test_mode:
        graph = self.testgraph
    else:
        graph = self.graph
    model = Model(graph)
    LOG.info("Processing genotype backgrounds")
    line_counter = 0
    raw = '/'.join((self.rawdir, self.files['backgrounds']['file']))
    geno = Genotype(graph)

    # Add the taxon as a class
    taxon_id = self.globaltt['Danio rerio']
    model.addClassToGraph(taxon_id, None)

    with open(raw, 'r', encoding="iso-8859-1") as csvfile:
        filereader = csv.reader(csvfile, delimiter='\t', quotechar='\"')
        for row in filereader:
            line_counter += 1
            # Genotype_ID	Genotype_Name	Background	Background_Name
            (genotype_id, genotype_name, background_id, unused) = row

            if self.test_mode and genotype_id not in self.test_ids['genotype']:
                continue

            genotype_id = 'ZFIN:' + genotype_id.strip()
            background_id = 'ZFIN:' + background_id.strip()

            # store this in the hash for later lookup
            # when building fish genotypes
            self.genotype_backgrounds[genotype_id] = background_id

            # add the background into the graph,
            # in case we haven't seen it before
            geno.addGenomicBackground(background_id, None)

            # hang the taxon from the background
            geno.addTaxon(taxon_id, background_id)

            # add the intrinsic genotype to the graph
            # we DO NOT ADD THE LABEL here
            # as it doesn't include the background
            geno.addGenotype(
                genotype_id, None, self.globaltt['intrinsic_genotype'])

            # Add background to the intrinsic genotype
            geno.addGenomicBackgroundToGenotype(background_id, genotype_id)

            if not self.test_mode and limit is not None and line_counter > limit:
                break

    LOG.info("Done with genotype backgrounds")
    return
251,222
https://github.com/monarch-initiative/dipper/blob/24cc80db355bbe15776edc5c7b41e0886959ba41/dipper/sources/ZFIN.py#L1048-L1113
[ "def", "set_user", "(", "self", ",", "user", ")", ":", "super", "(", "Segment", ",", "self", ")", ".", "_check_ended", "(", ")", "self", ".", "user", "=", "user" ]
This table provides mappings between ZFIN stage IDs and ZFS terms and includes the starting and ending hours for the developmental stage . Currently only processing the mapping from the ZFIN stage ID to the ZFS ID .
def _process_stages ( self , limit = None ) : if self . test_mode : graph = self . testgraph else : graph = self . graph model = Model ( graph ) LOG . info ( "Processing stages" ) line_counter = 0 raw = '/' . join ( ( self . rawdir , self . files [ 'stage' ] [ 'file' ] ) ) with open ( raw , 'r' , encoding = "iso-8859-1" ) as csvfile : filereader = csv . reader ( csvfile , delimiter = '\t' , quotechar = '\"' ) for row in filereader : line_counter += 1 ( stage_id , stage_obo_id , stage_name , begin_hours , end_hours # ,empty # till next time ) = row # Add the stage as a class, and it's obo equivalent stage_id = 'ZFIN:' + stage_id . strip ( ) model . addClassToGraph ( stage_id , stage_name ) model . addEquivalentClass ( stage_id , stage_obo_id ) if not self . test_mode and limit is not None and line_counter > limit : break LOG . info ( "Done with stages" ) return
251,223
https://github.com/monarch-initiative/dipper/blob/24cc80db355bbe15776edc5c7b41e0886959ba41/dipper/sources/ZFIN.py#L1174-L1211
[ "def", "remove_all_observers", "(", "self", ")", ":", "for", "weak_observer", "in", "self", ".", "_weak_observers", ":", "observer", "=", "weak_observer", "(", ")", "if", "observer", ":", "self", ".", "remove_observer", "(", "observer", ")" ]
This table provides the ZFIN gene id the SO type of the gene the gene symbol and the NCBI Gene ID .
def _process_genes ( self , limit = None ) : LOG . info ( "Processing genes" ) if self . test_mode : graph = self . testgraph else : graph = self . graph model = Model ( graph ) line_counter = 0 raw = '/' . join ( ( self . rawdir , self . files [ 'gene' ] [ 'file' ] ) ) geno = Genotype ( graph ) with open ( raw , 'r' , encoding = "iso-8859-1" ) as csvfile : filereader = csv . reader ( csvfile , delimiter = '\t' , quotechar = '\"' ) for row in filereader : line_counter += 1 ( gene_id , gene_so_id , gene_symbol , ncbi_gene_id # , empty # till next time ) = row if self . test_mode and gene_id not in self . test_ids [ 'gene' ] : continue gene_id = 'ZFIN:' + gene_id . strip ( ) ncbi_gene_id = 'NCBIGene:' + ncbi_gene_id . strip ( ) self . id_label_map [ gene_id ] = gene_symbol if not self . test_mode and limit is not None and line_counter > limit : pass else : geno . addGene ( gene_id , gene_symbol ) model . addEquivalentClass ( gene_id , ncbi_gene_id ) LOG . info ( "Done with genes" ) return
251,224
https://github.com/monarch-initiative/dipper/blob/24cc80db355bbe15776edc5c7b41e0886959ba41/dipper/sources/ZFIN.py#L1390-L1437
[ "def", "to_array", "(", "self", ")", ":", "array", "=", "super", "(", "ResponseParameters", ",", "self", ")", ".", "to_array", "(", ")", "if", "self", ".", "migrate_to_chat_id", "is", "not", "None", ":", "array", "[", "'migrate_to_chat_id'", "]", "=", "i...
This module provides information for the intrinsic and extrinsic genotype features of zebrafish . All items here are alterations and are therefore instances .
def _process_features ( self , limit = None ) : if self . test_mode : graph = self . testgraph else : graph = self . graph model = Model ( graph ) LOG . info ( "Processing features" ) line_counter = 0 geno = Genotype ( graph ) raw = '/' . join ( ( self . rawdir , self . files [ 'features' ] [ 'file' ] ) ) with open ( raw , 'r' , encoding = "iso-8859-1" ) as csvfile : filereader = csv . reader ( csvfile , delimiter = '\t' , quotechar = '\"' ) for row in filereader : line_counter += 1 ( genomic_feature_id , feature_so_id , genomic_feature_abbreviation , genomic_feature_name , genomic_feature_type , mutagen , mutagee , construct_id , construct_name , construct_so_id , talen_crispr_id , talen_crispr_nam # , empty ) = row if self . test_mode and ( genomic_feature_id not in self . test_ids [ 'allele' ] ) : continue genomic_feature_id = 'ZFIN:' + genomic_feature_id . strip ( ) model . addIndividualToGraph ( genomic_feature_id , genomic_feature_name , feature_so_id ) model . addSynonym ( genomic_feature_id , genomic_feature_abbreviation ) if construct_id is not None and construct_id != '' : construct_id = 'ZFIN:' + construct_id . strip ( ) geno . addConstruct ( construct_id , construct_name , construct_so_id ) geno . addSequenceDerivesFrom ( genomic_feature_id , construct_id ) # Note, we don't really care about how the variant was derived. # so we skip that. # add to the id-label map self . id_label_map [ genomic_feature_id ] = genomic_feature_abbreviation self . id_label_map [ construct_id ] = construct_name if not self . test_mode and limit is not None and line_counter > limit : break LOG . info ( "Done with features" ) return
251,225
https://github.com/monarch-initiative/dipper/blob/24cc80db355bbe15776edc5c7b41e0886959ba41/dipper/sources/ZFIN.py#L1439-L1505
[ "def", "get_failed_requests", "(", "self", ",", "results", ")", ":", "data", "=", "{", "member", "[", "'guid'", "]", ":", "member", "for", "member", "in", "results", "}", "for", "request", "in", "self", ".", "requests", ":", "if", "request", "[", "'gui...
This will pull the zfin internal publication information and map them to their equivalent pmid and make labels .
def _process_pubinfo ( self , limit = None ) : line_counter = 0 if self . test_mode : graph = self . testgraph else : graph = self . graph model = Model ( graph ) raw = '/' . join ( ( self . rawdir , self . files [ 'pubs' ] [ 'file' ] ) ) with open ( raw , 'r' , encoding = "latin-1" ) as csvfile : filereader = csv . reader ( csvfile , delimiter = '\t' , quotechar = '\"' ) for row in filereader : line_counter += 1 try : ( pub_id , pubmed_id , authors , title , journal , year , vol , pages ) = row except ValueError : try : ( pub_id , pubmed_id , authors , title , journal , year , vol , pages # , empty ) = row except ValueError : LOG . warning ( "Error parsing row %s: " , row ) if self . test_mode and ( 'ZFIN:' + pub_id not in self . test_ids [ 'pub' ] and 'PMID:' + pubmed_id not in self . test_ids [ 'pub' ] ) : continue pub_id = 'ZFIN:' + pub_id . strip ( ) # trim the author list for ease of reading alist = re . split ( r',' , authors ) if len ( alist ) > 1 : astring = ' ' . join ( ( alist [ 0 ] . strip ( ) , 'et al' ) ) else : astring = authors pub_label = '; ' . join ( ( astring , title , journal , year , vol , pages ) ) ref = Reference ( graph , pub_id ) ref . setShortCitation ( pub_label ) ref . setYear ( year ) ref . setTitle ( title ) if pubmed_id is not None and pubmed_id != '' : # let's make an assumption that if there's a pubmed id, # that it is a journal article ref . setType ( self . globaltt [ 'journal article' ] ) pubmed_id = 'PMID:' + pubmed_id . strip ( ) rpm = Reference ( graph , pubmed_id , self . globaltt [ 'journal article' ] ) rpm . addRefToGraph ( ) model . addSameIndividual ( pub_id , pubmed_id ) model . makeLeader ( pubmed_id ) ref . addRefToGraph ( ) if not self . test_mode and limit is not None and line_counter > limit : break return
251,226
https://github.com/monarch-initiative/dipper/blob/24cc80db355bbe15776edc5c7b41e0886959ba41/dipper/sources/ZFIN.py#L1775-L1851
[ "def", "override_env_variables", "(", ")", ":", "env_vars", "=", "(", "\"LOGNAME\"", ",", "\"USER\"", ",", "\"LNAME\"", ",", "\"USERNAME\"", ")", "old", "=", "[", "os", ".", "environ", "[", "v", "]", "if", "v", "in", "os", ".", "environ", "else", "None...
This will pull the zfin internal publication to pubmed mappings . Somewhat redundant with the process_pubinfo method but this includes additional mappings .
def _process_pub2pubmed ( self , limit = None ) : line_counter = 0 if self . test_mode : graph = self . testgraph else : graph = self . graph model = Model ( graph ) raw = '/' . join ( ( self . rawdir , self . files [ 'pub2pubmed' ] [ 'file' ] ) ) with open ( raw , 'r' , encoding = "latin-1" ) as csvfile : filereader = csv . reader ( csvfile , delimiter = '\t' , quotechar = '\"' ) for row in filereader : line_counter += 1 ( pub_id , pubmed_id # , empty ) = row if self . test_mode and ( 'ZFIN:' + pub_id not in self . test_ids [ 'pub' ] and 'PMID:' + pubmed_id not in self . test_ids [ 'pub' ] ) : continue pub_id = 'ZFIN:' + pub_id . strip ( ) rtype = None if pubmed_id != '' and pubmed_id is not None : pubmed_id = 'PMID:' + pubmed_id . strip ( ) rtype = self . globaltt [ 'journal article' ] rpm = Reference ( graph , pubmed_id , rtype ) rpm . addRefToGraph ( ) model . addSameIndividual ( pub_id , pubmed_id ) ref = Reference ( graph , pub_id , rtype ) ref . addRefToGraph ( ) if not self . test_mode and limit is not None and line_counter > limit : break return
251,227
https://github.com/monarch-initiative/dipper/blob/24cc80db355bbe15776edc5c7b41e0886959ba41/dipper/sources/ZFIN.py#L1853-L1901
[ "def", "deleteProfile", "(", "self", ",", "profile", ")", ":", "profGroupName", "=", "self", ".", "profileGroupName", "(", "profile", ")", "logger", ".", "debug", "(", "\"Resetting profile settings: {}\"", ".", "format", "(", "profGroupName", ")", ")", "settings...
This method processes the gene targeting knockdown reagents such as morpholinos talens and crisprs . We create triples for the reagents and pass the data into a hash map for use in the pheno_enviro method .
def _process_targeting_reagents ( self , reagent_type , limit = None ) : LOG . info ( "Processing Gene Targeting Reagents" ) if self . test_mode : graph = self . testgraph else : graph = self . graph line_counter = 0 model = Model ( graph ) geno = Genotype ( graph ) if reagent_type not in [ 'morph' , 'talen' , 'crispr' ] : LOG . error ( "You didn't specify the right kind of file type." ) return raw = '/' . join ( ( self . rawdir , self . files [ reagent_type ] [ 'file' ] ) ) with open ( raw , 'r' , encoding = "iso-8859-1" ) as csvfile : filereader = csv . reader ( csvfile , delimiter = '\t' , quotechar = '\"' ) for row in filereader : line_counter += 1 if reagent_type in [ 'morph' , 'crispr' ] : try : ( gene_num , gene_so_id , gene_symbol , reagent_num , reagent_so_id , reagent_symbol , reagent_sequence , publication , note ) = row except ValueError : # Catch lines without publication or note ( gene_num , gene_so_id , gene_symbol , reagent_num , reagent_so_id , reagent_symbol , reagent_sequence , publication ) = row elif reagent_type == 'talen' : ( gene_num , gene_so_id , gene_symbol , reagent_num , reagent_so_id , reagent_symbol , reagent_sequence , reagent_sequence2 , publication , note ) = row else : # should not get here return reagent_id = 'ZFIN:' + reagent_num . strip ( ) gene_id = 'ZFIN:' + gene_num . strip ( ) self . id_label_map [ reagent_id ] = reagent_symbol if self . test_mode and ( reagent_num not in self . test_ids [ 'morpholino' ] and gene_num not in self . test_ids [ 'gene' ] ) : continue geno . addGeneTargetingReagent ( reagent_id , reagent_symbol , reagent_so_id , gene_id ) # The reagent targeted gene is added # in the pheno_environment processing function. # Add publication # note that the publications can be comma-delimited, # like: ZDB-PUB-100719-4,ZDB-PUB-130703-22 if publication != '' : pubs = re . split ( r',' , publication . strip ( ) ) for pub in pubs : pub_id = 'ZFIN:' + pub . strip ( ) ref = Reference ( graph , pub_id ) ref . 
addRefToGraph ( ) graph . addTriple ( pub_id , self . globaltt [ 'mentions' ] , reagent_id ) # Add comment? if note != '' : model . addComment ( reagent_id , note ) # use the variant hash for reagents to list the affected genes if reagent_id not in self . variant_loci_genes : self . variant_loci_genes [ reagent_id ] = [ gene_id ] else : if gene_id not in self . variant_loci_genes [ reagent_id ] : self . variant_loci_genes [ reagent_id ] += [ gene_id ] if not self . test_mode and limit is not None and line_counter > limit : break LOG . info ( "Done with Reagent type %s" , reagent_type ) return
251,228
https://github.com/monarch-initiative/dipper/blob/24cc80db355bbe15776edc5c7b41e0886959ba41/dipper/sources/ZFIN.py#L1903-L2014
[ "def", "seconds_left", "(", "self", ")", ":", "return", "int", "(", "(", "self", ".", "_ENDDATE", ".", "datetime", "-", "Date", "(", "self", ")", ".", "datetime", ")", ".", "total_seconds", "(", ")", ")" ]
This method processes the mappings from ZFIN gene IDs to UniProtKB IDs .
def _process_uniprot_ids ( self , limit = None ) : LOG . info ( "Processing UniProt IDs" ) if self . test_mode : graph = self . testgraph else : graph = self . graph line_counter = 0 model = Model ( graph ) geno = Genotype ( graph ) raw = '/' . join ( ( self . rawdir , self . files [ 'uniprot' ] [ 'file' ] ) ) with open ( raw , 'r' , encoding = "iso-8859-1" ) as csvfile : filereader = csv . reader ( csvfile , delimiter = '\t' , quotechar = '\"' ) for row in filereader : line_counter += 1 ( gene_id , gene_so_id , gene_symbol , uniprot_id # , empty ) = row if self . test_mode and gene_id not in self . test_ids [ 'gene' ] : continue gene_id = 'ZFIN:' + gene_id . strip ( ) uniprot_id = 'UniProtKB:' + uniprot_id . strip ( ) geno . addGene ( gene_id , gene_symbol ) # TODO: Abstract to one of the model utilities model . addIndividualToGraph ( uniprot_id , None , self . globaltt [ 'polypeptide' ] ) graph . addTriple ( gene_id , self . globaltt [ 'has gene product' ] , uniprot_id ) if not self . test_mode and limit is not None and line_counter > limit : break LOG . info ( "Done with UniProt IDs" ) return
251,229
https://github.com/monarch-initiative/dipper/blob/24cc80db355bbe15776edc5c7b41e0886959ba41/dipper/sources/ZFIN.py#L2235-L2287
[ "def", "convert_to_experiment_list", "(", "experiments", ")", ":", "exp_list", "=", "experiments", "# Transform list if necessary", "if", "experiments", "is", "None", ":", "exp_list", "=", "[", "]", "elif", "isinstance", "(", "experiments", ",", "Experiment", ")", ...
move to localtt & globltt
def get_orthology_evidence_code ( self , abbrev ) : # AA Amino acid sequence comparison. # CE Coincident expression. # CL Conserved genome location (synteny). # FC Functional complementation. # FH Formation of functional heteropolymers. # IX Immunological cross-reaction. # NS Not specified. # NT Nucleotide sequence comparison. # SI Similar response to inhibitors. # SL Similar subcellular location. # SS Similar substrate specificity. # SU Similar subunit structure. # XH Cross-hybridization to same molecular probe. # PT Phylogenetic Tree. # OT Other eco_abbrev_map = { 'AA' : 'ECO:0000031' , # BLAST protein sequence similarity evidence 'CE' : 'ECO:0000008' , # expression evidence 'CL' : 'ECO:0000044' , # sequence similarity FIXME 'FC' : 'ECO:0000012' , # functional complementation # functional complementation in a heterologous system 'FH' : 'ECO:0000064' , 'IX' : 'ECO:0000040' , # immunological assay evidence 'NS' : None , 'NT' : 'ECO:0000032' , # nucleotide blast 'SI' : 'ECO:0000094' , # biological assay evidence FIXME 'SL' : 'ECO:0000122' , # protein localization evidence FIXME 'SS' : 'ECO:0000024' , # protein binding evidence FIXME 'SU' : 'ECO:0000027' , # structural similarity evidence 'XH' : 'ECO:0000002' , # direct assay evidence FIXME 'PT' : 'ECO:0000080' , # phylogenetic evidence 'OT' : None , } if abbrev not in eco_abbrev_map : LOG . warning ( "Evidence code for orthology (%s) not mapped" , str ( abbrev ) ) return eco_abbrev_map . get ( abbrev )
251,230
https://github.com/monarch-initiative/dipper/blob/24cc80db355bbe15776edc5c7b41e0886959ba41/dipper/sources/ZFIN.py#L2798-L2840
[ "def", "handle_api_exception", "(", "f", ")", ":", "def", "wraps", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "try", ":", "return", "f", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", "except", "SupersetSecurity...
This method processes the KEGG disease IDs .
def _process_diseases ( self , limit = None ) : LOG . info ( "Processing diseases" ) if self . test_mode : graph = self . testgraph else : graph = self . graph line_counter = 0 model = Model ( graph ) raw = '/' . join ( ( self . rawdir , self . files [ 'disease' ] [ 'file' ] ) ) with open ( raw , 'r' , encoding = "iso-8859-1" ) as csvfile : filereader = csv . reader ( csvfile , delimiter = '\t' , quotechar = '\"' ) for row in filereader : line_counter += 1 ( disease_id , disease_name ) = row disease_id = 'KEGG-' + disease_id . strip ( ) if disease_id not in self . label_hash : self . label_hash [ disease_id ] = disease_name if self . test_mode and disease_id not in self . test_ids [ 'disease' ] : continue # Add the disease as a class. # we don't get all of these from MONDO yet see: # https://github.com/monarch-initiative/human-disease-ontology/issues/3 model . addClassToGraph ( disease_id , disease_name ) # not typing the diseases as DOID:4 yet because # I don't want to bulk up the graph unnecessarily if not self . test_mode and ( limit is not None and line_counter > limit ) : break LOG . info ( "Done with diseases" ) return
251,231
https://github.com/monarch-initiative/dipper/blob/24cc80db355bbe15776edc5c7b41e0886959ba41/dipper/sources/KEGG.py#L224-L269
[ "async", "def", "renew", "(", "self", ",", "session", ",", "*", ",", "dc", "=", "None", ")", ":", "session_id", "=", "extract_attr", "(", "session", ",", "keys", "=", "[", "\"ID\"", "]", ")", "response", "=", "await", "self", ".", "_api", ".", "put...
This method processes the KEGG gene IDs . The label for the gene is pulled as the first symbol in the list of gene symbols ; the rest are added as synonyms . The long - form of the gene name is added as a definition . This is hardcoded to just processes human genes .
def _process_genes ( self , limit = None ) : LOG . info ( "Processing genes" ) if self . test_mode : graph = self . testgraph else : graph = self . graph model = Model ( graph ) line_counter = 0 family = Family ( graph ) geno = Genotype ( graph ) raw = '/' . join ( ( self . rawdir , self . files [ 'hsa_genes' ] [ 'file' ] ) ) with open ( raw , 'r' , encoding = "iso-8859-1" ) as csvfile : filereader = csv . reader ( csvfile , delimiter = '\t' , quotechar = '\"' ) for row in filereader : line_counter += 1 ( gene_id , gene_name ) = row gene_id = 'KEGG-' + gene_id . strip ( ) # the gene listing has a bunch of labels # that are delimited, as: # DST, BP240, BPA, BPAG1, CATX-15, CATX15, D6S1101, DMH, DT, # EBSB2, HSAN6, MACF2; dystonin; K10382 dystonin # it looks like the list is semicolon delimited # (symbol, name, gene_class) # where the symbol is a comma-delimited list # here, we split them up. # we will take the first abbreviation and make it the symbol # then take the rest as synonyms gene_stuff = re . split ( 'r;' , gene_name ) symbollist = re . split ( r',' , gene_stuff [ 0 ] ) first_symbol = symbollist [ 0 ] . strip ( ) if gene_id not in self . label_hash : self . label_hash [ gene_id ] = first_symbol if self . test_mode and gene_id not in self . test_ids [ 'genes' ] : continue # Add the gene as a class. geno . addGene ( gene_id , first_symbol ) # add the long name as the description if len ( gene_stuff ) > 1 : description = gene_stuff [ 1 ] . strip ( ) model . addDefinition ( gene_id , description ) # add the rest of the symbols as synonyms for i in enumerate ( symbollist , start = 1 ) : model . addSynonym ( gene_id , i [ 1 ] . strip ( ) ) if len ( gene_stuff ) > 2 : ko_part = gene_stuff [ 2 ] ko_match = re . search ( r'K\d+' , ko_part ) if ko_match is not None and len ( ko_match . groups ( ) ) == 1 : ko = 'KEGG-ko:' + ko_match . group ( 1 ) family . addMemberOf ( gene_id , ko ) if not self . test_mode and limit is not None and line_counter > limit : break LOG . 
info ( "Done with genes" ) return
251,232
https://github.com/monarch-initiative/dipper/blob/24cc80db355bbe15776edc5c7b41e0886959ba41/dipper/sources/KEGG.py#L271-L352
[ "def", "_load_state", "(", "self", ",", "context", ")", ":", "try", ":", "state", "=", "cookie_to_state", "(", "context", ".", "cookie", ",", "self", ".", "config", "[", "\"COOKIE_STATE_NAME\"", "]", ",", "self", ".", "config", "[", "\"STATE_ENCRYPTION_KEY\"...
This method add the KEGG orthology classes to the graph .
def _process_ortholog_classes ( self , limit = None ) : LOG . info ( "Processing ortholog classes" ) if self . test_mode : graph = self . testgraph else : graph = self . graph model = Model ( graph ) line_counter = 0 raw = '/' . join ( ( self . rawdir , self . files [ 'ortholog_classes' ] [ 'file' ] ) ) with open ( raw , 'r' , encoding = "iso-8859-1" ) as csvfile : filereader = csv . reader ( csvfile , delimiter = '\t' , quotechar = '\"' ) for row in filereader : line_counter += 1 ( orthology_class_id , orthology_class_name ) = row if self . test_mode and orthology_class_id not in self . test_ids [ 'orthology_classes' ] : continue # The orthology class is essentially a KEGG gene ID # that is species agnostic. # Add the ID and label as a gene family class other_labels = re . split ( r'[;,]' , orthology_class_name ) # the first one is the label we'll use orthology_label = other_labels [ 0 ] orthology_class_id = 'KEGG-' + orthology_class_id . strip ( ) orthology_type = self . globaltt [ 'gene_family' ] model . addClassToGraph ( orthology_class_id , orthology_label , orthology_type ) if len ( other_labels ) > 1 : # add the rest as synonyms # todo skip the first for s in other_labels : model . addSynonym ( orthology_class_id , s . strip ( ) ) # add the last one as the description d = other_labels [ len ( other_labels ) - 1 ] model . addDescription ( orthology_class_id , d ) # add the enzyme commission number (EC:1.2.99.5)as an xref # sometimes there's two, like [EC:1.3.5.1 1.3.5.4] # can also have a dash, like EC:1.10.3.- ec_matches = re . findall ( r'((?:\d+|\.|-){5,7})' , d ) if ec_matches is not None : for ecm in ec_matches : model . addXref ( orthology_class_id , 'EC:' + ecm ) if not self . test_mode and limit is not None and line_counter > limit : break LOG . info ( "Done with ortholog classes" ) return
251,233
https://github.com/monarch-initiative/dipper/blob/24cc80db355bbe15776edc5c7b41e0886959ba41/dipper/sources/KEGG.py#L354-L423
[ "def", "from_session", "(", "cls", ",", "session", ")", ":", "session", ".", "error_wrapper", "=", "lambda", "e", ":", "NvimError", "(", "e", "[", "1", "]", ")", "channel_id", ",", "metadata", "=", "session", ".", "request", "(", "b'vim_get_api_info'", "...
This method maps orthologs for a species to the KEGG orthology classes .
def _process_orthologs ( self , raw , limit = None ) : LOG . info ( "Processing orthologs" ) if self . test_mode : graph = self . testgraph else : graph = self . graph model = Model ( graph ) line_counter = 0 with open ( raw , 'r' , encoding = "iso-8859-1" ) as csvfile : filereader = csv . reader ( csvfile , delimiter = '\t' , quotechar = '\"' ) for row in filereader : line_counter += 1 ( gene_id , orthology_class_id ) = row orthology_class_id = 'KEGG:' + orthology_class_id . strip ( ) gene_id = 'KEGG:' + gene_id . strip ( ) # note that the panther_id references a group of orthologs, # and is not 1:1 with the rest # add the KO id as a gene-family grouping class OrthologyAssoc ( graph , self . name , gene_id , None ) . add_gene_family_to_graph ( orthology_class_id ) # add gene and orthology class to graph; # assume labels will be taken care of elsewhere model . addClassToGraph ( gene_id , None ) model . addClassToGraph ( orthology_class_id , None ) if not self . test_mode and limit is not None and line_counter > limit : break LOG . info ( "Done with orthologs" ) return
251,234
https://github.com/monarch-initiative/dipper/blob/24cc80db355bbe15776edc5c7b41e0886959ba41/dipper/sources/KEGG.py#L425-L473
[ "def", "run", "(", "self", ")", ":", "elapsed", "=", "0", "run_time", "=", "self", ".", "config", "[", "'run_time'", "]", "start_time", "=", "time", ".", "time", "(", ")", "t", "=", "time", ".", "time", "self", ".", "turrets_manager", ".", "start", ...
This method creates an association between diseases and their associated genes . We are being conservative here and only processing those diseases for which there is no mapping to OMIM .
def _process_kegg_disease2gene ( self , limit = None ) : LOG . info ( "Processing KEGG disease to gene" ) if self . test_mode : graph = self . testgraph else : graph = self . graph model = Model ( graph ) line_counter = 0 geno = Genotype ( graph ) rel = self . globaltt [ 'is marker for' ] noomimset = set ( ) raw = '/' . join ( ( self . rawdir , self . files [ 'disease_gene' ] [ 'file' ] ) ) with open ( raw , 'r' , encoding = "iso-8859-1" ) as csvfile : filereader = csv . reader ( csvfile , delimiter = '\t' , quotechar = '\"' ) for row in filereader : line_counter += 1 ( gene_id , disease_id ) = row if self . test_mode and gene_id not in self . test_ids [ 'genes' ] : continue gene_id = 'KEGG-' + gene_id . strip ( ) disease_id = 'KEGG-' + disease_id . strip ( ) # only add diseases for which # there is no omim id and not a grouping class if disease_id not in self . kegg_disease_hash : # add as a class disease_label = None if disease_id in self . label_hash : disease_label = self . label_hash [ disease_id ] if re . search ( r'includ' , str ( disease_label ) ) : # they use 'including' when it's a grouping class LOG . info ( "Skipping this association because " + "it's a grouping class: %s" , disease_label ) continue # type this disease_id as a disease model . addClassToGraph ( disease_id , disease_label ) # , class_type=self.globaltt['disease']) noomimset . add ( disease_id ) alt_locus_id = self . _make_variant_locus_id ( gene_id , disease_id ) alt_label = self . label_hash [ alt_locus_id ] model . addIndividualToGraph ( alt_locus_id , alt_label , self . globaltt [ 'variant_locus' ] ) geno . addAffectedLocus ( alt_locus_id , gene_id ) model . addBlankNodeAnnotation ( alt_locus_id ) # Add the disease to gene relationship. assoc = G2PAssoc ( graph , self . name , alt_locus_id , disease_id , rel ) assoc . add_association_to_graph ( ) if not self . test_mode and ( limit is not None and line_counter > limit ) : break LOG . info ( "Done with KEGG disease to gene" ) LOG . 
info ( "Found %d diseases with no omim id" , len ( noomimset ) ) return
251,235
https://github.com/monarch-initiative/dipper/blob/24cc80db355bbe15776edc5c7b41e0886959ba41/dipper/sources/KEGG.py#L475-L551
[ "def", "request_videos", "(", "blink", ",", "time", "=", "None", ",", "page", "=", "0", ")", ":", "timestamp", "=", "get_time", "(", "time", ")", "url", "=", "\"{}/api/v2/videos/changed?since={}&page={}\"", ".", "format", "(", "blink", ".", "urls", ".", "b...
This method maps the OMIM IDs and KEGG gene ID . Currently split based on the link_type field . Equivalent link types are mapped as gene XRefs . Reverse link types are mapped as disease to gene associations . Original link types are currently skipped .
def _process_omim2gene ( self , limit = None ) : LOG . info ( "Processing OMIM to KEGG gene" ) if self . test_mode : graph = self . testgraph else : graph = self . graph model = Model ( graph ) line_counter = 0 geno = Genotype ( graph ) raw = '/' . join ( ( self . rawdir , self . files [ 'omim2gene' ] [ 'file' ] ) ) with open ( raw , 'r' , encoding = "iso-8859-1" ) as csvfile : filereader = csv . reader ( csvfile , delimiter = '\t' , quotechar = '\"' ) for row in filereader : line_counter += 1 ( kegg_gene_id , omim_id , link_type ) = row if self . test_mode and kegg_gene_id not in self . test_ids [ 'genes' ] : continue kegg_gene_id = 'KEGG-' + kegg_gene_id . strip ( ) omim_id = re . sub ( r'omim' , 'OMIM' , omim_id ) if link_type == 'equivalent' : # these are genes! # so add them as a class then make equivalence model . addClassToGraph ( omim_id , None ) geno . addGene ( kegg_gene_id , None ) if not DipperUtil . is_omim_disease ( omim_id ) : model . addEquivalentClass ( kegg_gene_id , omim_id ) elif link_type == 'reverse' : # make an association between an OMIM ID & the KEGG gene ID # we do this with omim ids because # they are more atomic than KEGG ids alt_locus_id = self . _make_variant_locus_id ( kegg_gene_id , omim_id ) alt_label = self . label_hash [ alt_locus_id ] model . addIndividualToGraph ( alt_locus_id , alt_label , self . globaltt [ 'variant_locus' ] ) geno . addAffectedLocus ( alt_locus_id , kegg_gene_id ) model . addBlankNodeAnnotation ( alt_locus_id ) # Add the disease to gene relationship. rel = self . globaltt [ 'is marker for' ] assoc = G2PAssoc ( graph , self . name , alt_locus_id , omim_id , rel ) assoc . add_association_to_graph ( ) elif link_type == 'original' : # these are sometimes a gene, and sometimes a disease LOG . info ( 'Unable to handle original link for %s-%s' , kegg_gene_id , omim_id ) else : # don't know what these are LOG . warning ( 'Unhandled link type for %s-%s: %s' , kegg_gene_id , omim_id , link_type ) if ( not self . 
test_mode ) and ( limit is not None and line_counter > limit ) : break LOG . info ( "Done with OMIM to KEGG gene" ) return
251,236
https://github.com/monarch-initiative/dipper/blob/24cc80db355bbe15776edc5c7b41e0886959ba41/dipper/sources/KEGG.py#L553-L634
[ "def", "await_metadata_by_name", "(", "self", ",", "name", ",", "metadata_key", ",", "timeout", ",", "caster", "=", "None", ")", ":", "file_path", "=", "self", ".", "_metadata_file_path", "(", "name", ",", "metadata_key", ")", "self", ".", "_wait_for_file", ...
This method maps the KEGG human gene IDs to the corresponding NCBI Gene IDs .
def _process_genes_kegg2ncbi ( self , limit = None ) : LOG . info ( "Processing KEGG gene IDs to NCBI gene IDs" ) if self . test_mode : graph = self . testgraph else : graph = self . graph model = Model ( graph ) line_counter = 0 raw = '/' . join ( ( self . rawdir , self . files [ 'ncbi' ] [ 'file' ] ) ) with open ( raw , 'r' , encoding = "iso-8859-1" ) as csvfile : filereader = csv . reader ( csvfile , delimiter = '\t' , quotechar = '\"' ) for row in filereader : line_counter += 1 ( kegg_gene_id , ncbi_gene_id , link_type ) = row if self . test_mode and kegg_gene_id not in self . test_ids [ 'genes' ] : continue # Adjust the NCBI gene ID prefix. ncbi_gene_id = re . sub ( r'ncbi-geneid' , 'NCBIGene' , ncbi_gene_id ) kegg_gene_id = 'KEGG-' + kegg_gene_id # Adding the KEGG gene ID to the graph here is redundant, # unless there happens to be additional gene IDs in this table # not present in the genes table. model . addClassToGraph ( kegg_gene_id , None ) model . addClassToGraph ( ncbi_gene_id , None ) model . addEquivalentClass ( kegg_gene_id , ncbi_gene_id ) if not self . test_mode and ( limit is not None and line_counter > limit ) : break LOG . info ( "Done with KEGG gene IDs to NCBI gene IDs" ) return
251,237
https://github.com/monarch-initiative/dipper/blob/24cc80db355bbe15776edc5c7b41e0886959ba41/dipper/sources/KEGG.py#L706-L754
[ "def", "parse_rules", "(", "self", ")", ":", "# Load patterns: an app is removed when has no defined patterns.\r", "try", ":", "rule_options", "=", "self", ".", "config", ".", "items", "(", "'rules'", ")", "except", "configparser", ".", "NoSectionError", ":", "raise",...
We make a link between the pathway identifiers and any diseases associated with them . Since we model diseases as processes we make a triple saying that the pathway may be causally upstream of or within the disease process .
def _process_pathway_disease ( self , limit ) : LOG . info ( "Processing KEGG pathways to disease ids" ) if self . test_mode : graph = self . testgraph else : graph = self . graph line_counter = 0 raw = '/' . join ( ( self . rawdir , self . files [ 'pathway_disease' ] [ 'file' ] ) ) with open ( raw , 'r' , encoding = "iso-8859-1" ) as csvfile : filereader = csv . reader ( csvfile , delimiter = '\t' , quotechar = '\"' ) for row in filereader : line_counter += 1 ( disease_id , kegg_pathway_num ) = row if self . test_mode and kegg_pathway_num not in self . test_ids [ 'pathway' ] : continue disease_id = 'KEGG-' + disease_id # will look like KEGG-path:map04130 or KEGG-path:hsa04130 pathway_id = 'KEGG-' + kegg_pathway_num graph . addTriple ( pathway_id , self . globaltt [ 'causally upstream of or within' ] , disease_id ) if not self . test_mode and limit is not None and line_counter > limit : break return
251,238
https://github.com/monarch-initiative/dipper/blob/24cc80db355bbe15776edc5c7b41e0886959ba41/dipper/sources/KEGG.py#L792-L832
[ "def", "remove_experiment", "(", "self", ",", "id", ")", ":", "if", "id", "in", "self", ".", "experiments", ":", "self", ".", "experiments", ".", "pop", "(", "id", ")", "self", ".", "write_file", "(", ")" ]
We actually want the association between the gene and the disease to be via an alternate locus not the wildtype gene itself . so we make an anonymous alternate locus and put that in the association We also make the label for the anonymous class and add it to the label hash
def _make_variant_locus_id ( self , gene_id , disease_id ) : alt_locus_id = '_:' + re . sub ( r':' , '' , gene_id ) + '-' + re . sub ( r':' , '' , disease_id ) + 'VL' alt_label = self . label_hash . get ( gene_id ) disease_label = self . label_hash . get ( disease_id ) if alt_label is not None and alt_label != '' : alt_label = 'some variant of ' + str ( alt_label ) if disease_label is not None and disease_label != '' : alt_label += ' that is associated with ' + str ( disease_label ) else : alt_label = None self . label_hash [ alt_locus_id ] = alt_label return alt_locus_id
251,239
https://github.com/monarch-initiative/dipper/blob/24cc80db355bbe15776edc5c7b41e0886959ba41/dipper/sources/KEGG.py#L906-L933
[ "def", "evolve", "(", "self", ",", "rho", ":", "Density", ")", "->", "Density", ":", "qubits", "=", "rho", ".", "qubits", "results", "=", "[", "op", ".", "evolve", "(", "rho", ")", "for", "op", "in", "self", ".", "operators", "]", "tensors", "=", ...
For any of the items in the chemical - disease association file that have ambiguous association types we fetch the disambiguated associations using the batch query API and store these in a file . Elsewhere we can loop through the file and create the appropriate associations .
def _fetch_disambiguating_assoc ( self ) : disambig_file = '/' . join ( ( self . rawdir , self . static_files [ 'publications' ] [ 'file' ] ) ) assoc_file = '/' . join ( ( self . rawdir , self . files [ 'chemical_disease_interactions' ] [ 'file' ] ) ) # check if there is a local association file, # and download if it's dated later than the original intxn file if os . path . exists ( disambig_file ) : dfile_dt = os . stat ( disambig_file ) afile_dt = os . stat ( assoc_file ) if dfile_dt < afile_dt : LOG . info ( "Local file date before chem-disease assoc file. " " Downloading..." ) else : LOG . info ( "Local file date after chem-disease assoc file. " " Skipping download." ) return all_pubs = set ( ) dual_evidence = re . compile ( r'^marker\/mechanism\|therapeutic$' ) # first get all the unique publications with gzip . open ( assoc_file , 'rt' ) as tsvfile : reader = csv . reader ( tsvfile , delimiter = "\t" ) for row in reader : if re . match ( r'^#' , ' ' . join ( row ) ) : continue self . _check_list_len ( row , 10 ) ( chem_name , chem_id , cas_rn , disease_name , disease_id , direct_evidence , inferred_gene_symbol , inference_score , omim_ids , pubmed_ids ) = row if direct_evidence == '' or not re . match ( dual_evidence , direct_evidence ) : continue if pubmed_ids is not None and pubmed_ids != '' : all_pubs . update ( set ( re . split ( r'\|' , pubmed_ids ) ) ) sorted_pubs = sorted ( list ( all_pubs ) ) # now in batches of 4000, we fetch the chemical-disease associations batch_size = 4000 params = { 'inputType' : 'reference' , 'report' : 'diseases_curated' , 'format' : 'tsv' , 'action' : 'Download' } url = 'http://ctdbase.org/tools/batchQuery.go?q' start = 0 end = min ( ( batch_size , len ( all_pubs ) ) ) # get them in batches of 4000 with open ( disambig_file , 'wb' ) as dmbf : while start < len ( sorted_pubs ) : params [ 'inputTerms' ] = '|' . join ( sorted_pubs [ start : end ] ) # fetch the data from url LOG . 
info ( 'fetching %d (%d-%d) refs: %s' , len ( re . split ( r'\|' , params [ 'inputTerms' ] ) ) , start , end , params [ 'inputTerms' ] ) data = urllib . parse . urlencode ( params ) encoding = 'utf-8' binary_data = data . encode ( encoding ) req = urllib . request . Request ( url , binary_data ) resp = urllib . request . urlopen ( req ) dmbf . write ( resp . read ( ) ) start = end end = min ( ( start + batch_size , len ( sorted_pubs ) ) ) return
251,240
https://github.com/monarch-initiative/dipper/blob/24cc80db355bbe15776edc5c7b41e0886959ba41/dipper/sources/CTD.py#L243-L323
[ "def", "extend_right_to", "(", "self", ",", "window", ",", "max_size", ")", ":", "self", ".", "size", "=", "min", "(", "self", ".", "size", "+", "(", "window", ".", "ofs", "-", "self", ".", "ofs_end", "(", ")", ")", ",", "max_size", ")" ]
Make a reified association given an array of pubmed identifiers .
def _make_association ( self , subject_id , object_id , rel_id , pubmed_ids ) : # TODO pass in the relevant Assoc class rather than relying on G2P assoc = G2PAssoc ( self . graph , self . name , subject_id , object_id , rel_id ) if pubmed_ids is not None and len ( pubmed_ids ) > 0 : for pmid in pubmed_ids : ref = Reference ( self . graph , pmid , self . globaltt [ 'journal article' ] ) ref . addRefToGraph ( ) assoc . add_source ( pmid ) assoc . add_evidence ( self . globaltt [ 'traceable author statement' ] ) assoc . add_association_to_graph ( ) return
251,241
https://github.com/monarch-initiative/dipper/blob/24cc80db355bbe15776edc5c7b41e0886959ba41/dipper/sources/CTD.py#L485-L510
[ "def", "init", "(", "path", "=", "None", ")", ":", "default", "=", "get_default", "(", ")", "if", "default", "is", "not", "None", "and", "not", "isinstance", "(", "default", ",", "VoidLogKeeper", ")", ":", "return", "default", "tee", "=", "LogTee", "("...
Overrides checkIfRemoteIsNewer in Source class
def checkIfRemoteIsNewer ( self , localfile , remote_size , remote_modify ) : is_remote_newer = False status = os . stat ( localfile ) LOG . info ( "\nLocal file size: %i" "\nLocal Timestamp: %s" , status [ ST_SIZE ] , datetime . fromtimestamp ( status . st_mtime ) ) remote_dt = Bgee . _convert_ftp_time_to_iso ( remote_modify ) if remote_dt != datetime . fromtimestamp ( status . st_mtime ) or status [ ST_SIZE ] != int ( remote_size ) : is_remote_newer = True LOG . info ( "Object on server is has different size %i and/or date %s" , remote_size , remote_dt ) return is_remote_newer
251,242
https://github.com/monarch-initiative/dipper/blob/24cc80db355bbe15776edc5c7b41e0886959ba41/dipper/sources/Bgee.py#L232-L256
[ "async", "def", "_unsubscribe", "(", "self", ",", "channels", ",", "is_mask", ")", ":", "vanished", "=", "[", "]", "if", "channels", ":", "for", "channel", "in", "channels", ":", "key", "=", "channel", ",", "is_mask", "self", ".", "_channels", ".", "re...
Convert datetime in the format 20160705042714 to a datetime object
def _convert_ftp_time_to_iso ( ftp_time ) : date_time = datetime ( int ( ftp_time [ : 4 ] ) , int ( ftp_time [ 4 : 6 ] ) , int ( ftp_time [ 6 : 8 ] ) , int ( ftp_time [ 8 : 10 ] ) , int ( ftp_time [ 10 : 12 ] ) , int ( ftp_time [ 12 : 14 ] ) ) return date_time
251,243
https://github.com/monarch-initiative/dipper/blob/24cc80db355bbe15776edc5c7b41e0886959ba41/dipper/sources/Bgee.py#L259-L268
[ "def", "remove_volume", "(", "self", ",", "volume_name", ")", ":", "logger", ".", "info", "(", "\"removing volume '%s'\"", ",", "volume_name", ")", "try", ":", "self", ".", "d", ".", "remove_volume", "(", "volume_name", ")", "except", "APIError", "as", "ex",...
connection details for DISCO
def fetch ( self , is_dl_forced = False ) : cxn = { } cxn [ 'host' ] = 'nif-db.crbs.ucsd.edu' cxn [ 'database' ] = 'disco_crawler' cxn [ 'port' ] = '5432' cxn [ 'user' ] = config . get_config ( ) [ 'user' ] [ 'disco' ] cxn [ 'password' ] = config . get_config ( ) [ 'keys' ] [ cxn [ 'user' ] ] self . dataset . setFileAccessUrl ( 'jdbc:postgresql://' + cxn [ 'host' ] + ':' + cxn [ 'port' ] + '/' + cxn [ 'database' ] , is_object_literal = True ) # process the tables # self.fetch_from_pgdb(self.tables,cxn,100) #for testing self . fetch_from_pgdb ( self . tables , cxn ) self . get_files ( is_dl_forced ) # FIXME: Everything needed for data provenance? fstat = os . stat ( '/' . join ( ( self . rawdir , 'dvp.pr_nlx_157874_1' ) ) ) filedate = datetime . utcfromtimestamp ( fstat [ ST_CTIME ] ) . strftime ( "%Y-%m-%d" ) self . dataset . setVersion ( filedate ) return
251,244
https://github.com/monarch-initiative/dipper/blob/24cc80db355bbe15776edc5c7b41e0886959ba41/dipper/sources/EOM.py#L63-L87
[ "def", "namer", "(", "cls", ",", "imageUrl", ",", "pageUrl", ")", ":", "index", "=", "int", "(", "compile", "(", "r'id=(\\d+)'", ")", ".", "search", "(", "pageUrl", ")", ".", "group", "(", "1", ")", ")", "ext", "=", "imageUrl", ".", "rsplit", "(", ...
Over ride Source . parse inherited via PostgreSQLSource
def parse ( self , limit = None ) : if limit is not None : LOG . info ( "Only parsing first %s rows of each file" , limit ) if self . test_only : self . test_mode = True LOG . info ( "Parsing files..." ) self . _process_nlx_157874_1_view ( '/' . join ( ( self . rawdir , 'dvp.pr_nlx_157874_1' ) ) , limit ) self . _map_eom_terms ( '/' . join ( ( self . rawdir , self . files [ 'map' ] [ 'file' ] ) ) , limit ) LOG . info ( "Finished parsing." ) # since it's so small, # we default to copying the entire graph to the test set self . testgraph = self . graph return
251,245
https://github.com/monarch-initiative/dipper/blob/24cc80db355bbe15776edc5c7b41e0886959ba41/dipper/sources/EOM.py#L89-L113
[ "def", "assert_coordinate_consistent", "(", "obj", ",", "coords", ")", ":", "for", "k", "in", "obj", ".", "dims", ":", "# make sure there are no conflict in dimension coordinates", "if", "k", "in", "coords", "and", "k", "in", "obj", ".", "coords", ":", "if", "...
This table indicates the relationship between a genotype and it s background strain . It leverages the Genotype class methods to do this .
def _process_gxd_genotype_view ( self , limit = None ) : line_counter = 0 if self . test_mode : graph = self . testgraph else : graph = self . graph geno = Genotype ( graph ) model = Model ( graph ) raw = '/' . join ( ( self . rawdir , 'gxd_genotype_view' ) ) LOG . info ( "getting genotypes and their backgrounds" ) with open ( raw , 'r' ) as f1 : f1 . readline ( ) # read the header row; skip for line in f1 : line = line . rstrip ( "\n" ) line_counter += 1 ( genotype_key , strain_key , strain , mgiid ) = line . split ( '\t' ) if self . test_mode is True : if int ( genotype_key ) not in self . test_keys . get ( 'genotype' ) : continue if self . idhash [ 'genotype' ] . get ( genotype_key ) is None : # just in case we haven't seen it before, # catch and add the id mapping here self . idhash [ 'genotype' ] [ genotype_key ] = mgiid geno . addGenotype ( mgiid , None ) # the label is elsewhere... # need to add the MGI label as a synonym # if it's in the hash, # assume that the individual was created elsewhere strain_id = self . idhash [ 'strain' ] . get ( strain_key ) background_type = self . globaltt [ 'genomic_background' ] if strain_id is None or int ( strain_key ) < 0 : if strain_id is None : # some of the strains don't have public identifiers! # so we make one up, and add it to the hash strain_id = self . _makeInternalIdentifier ( 'strain' , strain_key ) self . idhash [ 'strain' ] . update ( { strain_key : strain_id } ) model . addComment ( strain_id , "strain_key:" + strain_key ) elif int ( strain_key ) < 0 : # these are ones that are unidentified/unknown. # so add instances of each. strain_id = self . _makeInternalIdentifier ( 'strain' , re . sub ( r':' , '' , str ( strain_id ) ) ) strain_id += re . sub ( r':' , '' , str ( mgiid ) ) strain_id = re . sub ( r'^_' , '_:' , strain_id ) strain_id = re . sub ( r'::' , ':' , strain_id ) model . addDescription ( strain_id , "This genomic background is unknown. " + "This is a placeholder background for " + mgiid + "." 
) background_type = self . globaltt [ 'unspecified_genomic_background' ] # add it back to the idhash LOG . info ( "adding background as internal id: %s %s: %s" , strain_key , strain , strain_id ) geno . addGenomicBackgroundToGenotype ( strain_id , mgiid , background_type ) self . label_hash [ strain_id ] = strain # add BG to a hash so we can build the genotype label later self . geno_bkgd [ mgiid ] = strain_id if not self . test_mode and limit is not None and line_counter > limit : break return
251,246
https://github.com/monarch-initiative/dipper/blob/24cc80db355bbe15776edc5c7b41e0886959ba41/dipper/sources/MGI.py#L335-L430
[ "def", "add_items", "(", "self", ",", "items", ")", ":", "_items", "=", "[", "self", ".", "_listitemify", "(", "item", ")", "for", "item", "in", "items", "]", "tuples", "=", "[", "item", ".", "as_tuple", "(", ")", "for", "item", "in", "_items", "]"...
Add the genotype internal id to mgiid mapping to the idhashmap . Also add them as individuals to the graph . We re - format the label to put the background strain in brackets after the gvc .
def _process_gxd_genotype_summary_view ( self , limit = None ) : if self . test_mode : graph = self . testgraph else : graph = self . graph model = Model ( graph ) line_counter = 0 geno_hash = { } raw = '/' . join ( ( self . rawdir , 'gxd_genotype_summary_view' ) ) LOG . info ( "building labels for genotypes" ) with open ( raw , 'r' ) as f : f . readline ( ) # read the header row; skip for line in f : line = line . rstrip ( "\n" ) line_counter += 1 ( object_key , preferred , mgiid , subtype , short_description ) = line . split ( '\t' ) if self . test_mode is True : if int ( object_key ) not in self . test_keys . get ( 'genotype' ) : continue # add the internal genotype to mgi mapping self . idhash [ 'genotype' ] [ object_key ] = mgiid if preferred == '1' : d = re . sub ( r'\,' , '/' , short_description . strip ( ) ) if mgiid not in geno_hash : geno_hash [ mgiid ] = { 'vslcs' : [ d ] , 'subtype' : subtype , 'key' : object_key } else : vslcs = geno_hash [ mgiid ] . get ( 'vslcs' ) vslcs . append ( d ) else : pass # TODO what to do with != preferred if not self . test_mode and limit is not None and line_counter > limit : break # now, loop through the hash and add the genotypes as individuals # we add the mgi genotype as a synonym # (we generate our own label later) geno = Genotype ( graph ) for gt in geno_hash : genotype = geno_hash . get ( gt ) gvc = sorted ( genotype . get ( 'vslcs' ) ) label = '; ' . join ( gvc ) + ' [' + genotype . get ( 'subtype' ) + ']' geno . addGenotype ( gt , None ) model . addComment ( gt , self . _makeInternalIdentifier ( 'genotype' , genotype . get ( 'key' ) ) ) model . addSynonym ( gt , label . strip ( ) ) return
251,247
https://github.com/monarch-initiative/dipper/blob/24cc80db355bbe15776edc5c7b41e0886959ba41/dipper/sources/MGI.py#L432-L503
[ "def", "on_exception", "(", "self", ",", "exception", ")", ":", "logger", ".", "error", "(", "'Exception from stream!'", ",", "exc_info", "=", "True", ")", "self", ".", "streaming_exception", "=", "exception" ]
Here we have the relationship between MGI transgene alleles and the non - mouse gene ids that are part of them . We augment the allele with the transgene parts .
def process_mgi_relationship_transgene_genes ( self , limit = None ) : if self . test_mode : graph = self . testgraph else : graph = self . graph LOG . info ( "getting transgene genes" ) raw = '/' . join ( ( self . rawdir , 'mgi_relationship_transgene_genes' ) ) geno = Genotype ( graph ) col = [ 'rel_key' , 'allele_key' , 'allele_id' , 'allele_label' , 'category_key' , 'category_name' , 'property_key' , 'property_name' , 'gene_num' ] with open ( raw , 'r' , encoding = "utf8" ) as csvfile : filereader = csv . reader ( csvfile , delimiter = '\t' , quotechar = '\"' ) header = next ( filereader ) if header != col : LOG . error ( 'expected columns: %s\n\tBut got:\n%s' , col , header ) for row in filereader : # rel_key, allele_key = int ( row [ col . index ( 'allele_key' ) ] ) allele_id = row [ col . index ( 'allele_id' ) ] # allele_label, # category_key, # category_name, # property_key, # property_name, gene_num = int ( row [ col . index ( 'gene_num' ) ] ) if self . test_mode and allele_key not in self . test_keys . get ( 'allele' ) and gene_num not in self . test_ids : continue gene_id = 'NCBIGene:' + str ( gene_num ) # geno.addParts(gene_id, allele_id, self.globaltt['has_variant_part']) seqalt_id = self . idhash [ 'seqalt' ] . get ( allele_key ) if seqalt_id is None : seqalt_id = allele_id geno . addSequenceDerivesFrom ( seqalt_id , gene_id ) if not self . test_mode and limit is not None and filereader . line_num > limit : break return
251,248
https://github.com/monarch-initiative/dipper/blob/24cc80db355bbe15776edc5c7b41e0886959ba41/dipper/sources/MGI.py#L1891-L1944
[ "def", "setup_ufw_rules", "(", ")", ":", "#current rules", "current_rules", "=", "server_state", "(", "'ufw_rules'", ")", "if", "current_rules", ":", "current_rules", "=", "set", "(", "current_rules", ")", "else", ":", "current_rules", "=", "set", "(", "[", "]...
This is a wrapper for creating a URIRef or Bnode object with a given a curie or iri as a string .
def _getnode ( self , curie ) : # convention is lowercase names node = None if curie [ 0 ] == '_' : if self . are_bnodes_skized is True : node = self . skolemizeBlankNode ( curie ) else : # delete the leading underscore to make it cleaner node = BNode ( re . sub ( r'^_:|^_' , '' , curie , 1 ) ) # Check if curie string is actually an IRI elif curie [ : 4 ] == 'http' or curie [ : 3 ] == 'ftp' : node = URIRef ( curie ) else : iri = RDFGraph . curie_util . get_uri ( curie ) if iri is not None : node = URIRef ( RDFGraph . curie_util . get_uri ( curie ) ) # Bind prefix map to graph prefix = curie . split ( ':' ) [ 0 ] if prefix not in self . namespace_manager . namespaces ( ) : mapped_iri = self . curie_map [ prefix ] self . bind ( prefix , Namespace ( mapped_iri ) ) else : LOG . error ( "couldn't make URI for %s" , curie ) return node
251,249
https://github.com/monarch-initiative/dipper/blob/24cc80db355bbe15776edc5c7b41e0886959ba41/dipper/graph/RDFGraph.py#L92-L126
[ "def", "get_experiment_time", "(", "port", ")", ":", "response", "=", "rest_get", "(", "experiment_url", "(", "port", ")", ",", "REST_TIME_OUT", ")", "if", "response", "and", "check_response", "(", "response", ")", ":", "content", "=", "convert_time_stamp_to_dat...
The reified relationship between a disease and a phenotype is decorated with some provenance information . This makes the assumption that both the disease and phenotype are classes .
def add_association_to_graph ( self ) : # add the basic association nodes # if rel == self.globaltt[['has disposition']: Assoc . add_association_to_graph ( self ) # anticipating trouble with onsets ranges that look like curies if self . onset is not None and self . onset != '' : self . graph . addTriple ( self . assoc_id , self . globaltt [ 'onset' ] , self . onset ) if self . frequency is not None and self . frequency != '' : self . graph . addTriple ( self . assoc_id , self . globaltt [ 'frequency' ] , self . frequency ) return
251,250
https://github.com/monarch-initiative/dipper/blob/24cc80db355bbe15776edc5c7b41e0886959ba41/dipper/models/assoc/D2PAssoc.py#L50-L75
[ "def", "remove_experiment", "(", "self", ",", "id", ")", ":", "if", "id", "in", "self", ".", "experiments", ":", "self", ".", "experiments", ".", "pop", "(", "id", ")", "self", ".", "write_file", "(", ")" ]
this will determine the grouping bands that it belongs to recursively 13q21 . 31 == > 13 13q 13q2 13q21 13q21 . 3 13q21 . 31
def make_parent_bands ( self , band , child_bands ) : m = re . match ( r'([pq][A-H\d]+(?:\.\d+)?)' , band ) if len ( band ) > 0 : if m : p = str ( band [ 0 : len ( band ) - 1 ] ) p = re . sub ( r'\.$' , '' , p ) if p is not None : child_bands . add ( p ) self . make_parent_bands ( p , child_bands ) else : child_bands = set ( ) return child_bands
251,251
https://github.com/monarch-initiative/dipper/blob/24cc80db355bbe15776edc5c7b41e0886959ba41/dipper/sources/Monochrom.py#L334-L354
[ "def", "update", "(", "self", ",", "portfolio", ",", "date", ",", "perfs", "=", "None", ")", ":", "# Make the manager aware of current simulation", "self", ".", "portfolio", "=", "portfolio", "self", ".", "perfs", "=", "perfs", "self", ".", "date", "=", "dat...
Get a CURIE from a URI
def get_curie ( self , uri ) : prefix = self . get_curie_prefix ( uri ) if prefix is not None : key = self . curie_map [ prefix ] return '%s:%s' % ( prefix , uri [ len ( key ) : len ( uri ) ] ) return None
251,252
https://github.com/monarch-initiative/dipper/blob/24cc80db355bbe15776edc5c7b41e0886959ba41/dipper/utils/CurieUtil.py#L31-L37
[ "def", "RepackAllTemplates", "(", "self", ",", "upload", "=", "False", ",", "token", "=", "None", ")", ":", "for", "template", "in", "os", ".", "listdir", "(", "config", ".", "CONFIG", "[", "\"ClientBuilder.template_dir\"", "]", ")", ":", "template_path", ...
Get a URI from a CURIE
def get_uri ( self , curie ) : if curie is None : return None parts = curie . split ( ':' ) if len ( parts ) == 1 : if curie != '' : LOG . error ( "Not a properly formed curie: \"%s\"" , curie ) return None prefix = parts [ 0 ] if prefix in self . curie_map : return '%s%s' % ( self . curie_map . get ( prefix ) , curie [ ( curie . index ( ':' ) + 1 ) : ] ) LOG . error ( "Curie prefix not defined for %s" , curie ) return None
251,253
https://github.com/monarch-initiative/dipper/blob/24cc80db355bbe15776edc5c7b41e0886959ba41/dipper/utils/CurieUtil.py#L46-L60
[ "def", "RepackAllTemplates", "(", "self", ",", "upload", "=", "False", ",", "token", "=", "None", ")", ":", "for", "template", "in", "os", ".", "listdir", "(", "config", ".", "CONFIG", "[", "\"ClientBuilder.template_dir\"", "]", ")", ":", "template_path", ...
Here we connect to the coriell sftp server using private connection details . They dump bi - weekly files with a timestamp in the filename . For each catalog we ping the remote site and pull the most - recently updated file renaming it to our local latest . csv .
def fetch ( self , is_dl_forced = False ) : host = config . get_config ( ) [ 'dbauth' ] [ 'coriell' ] [ 'host' ] key = config . get_config ( ) [ 'dbauth' ] [ 'coriell' ] [ 'private_key' ] user = config . get_config ( ) [ 'user' ] [ 'coriell' ] passwd = config . get_config ( ) [ 'keys' ] [ user ] with pysftp . Connection ( host , username = user , password = passwd , private_key = key ) as sftp : # check to make sure each file is in there # get the remote files remote_files = sftp . listdir_attr ( ) files_by_repo = { } for attr in remote_files : # for each catalog, get the most-recent filename mch = re . match ( '(NIGMS|NIA|NHGRI|NINDS)' , attr . filename ) if mch is not None and len ( mch . groups ( ) ) > 0 : # there should just be one now files_by_repo [ mch . group ( 1 ) ] = attr # sort each array in hash, # & get the name and time of the most-recent file for each catalog for rmt in self . files : LOG . info ( "Checking on %s catalog file" , rmt ) fname = self . files [ rmt ] [ 'file' ] remotef = files_by_repo [ rmt ] target_name = '/' . join ( ( self . rawdir , fname ) ) # check if the local file is out of date, if so, download. # otherwise, skip. # we rename (for simplicity) the original file fstat = None if os . path . exists ( target_name ) : fstat = os . stat ( target_name ) LOG . info ( "Local file date: %s" , datetime . utcfromtimestamp ( fstat [ stat . ST_CTIME ] ) ) if fstat is None or remotef . st_mtime > fstat [ stat . ST_CTIME ] : if fstat is None : LOG . info ( "File does not exist locally; downloading..." ) else : LOG . info ( "New version of %s catalog available; downloading..." , rmt ) sftp . get ( remotef . filename , target_name ) LOG . info ( "Fetched remote %s -> %s" , remotef . filename , target_name ) fstat = os . stat ( target_name ) filedate = datetime . utcfromtimestamp ( remotef . st_mtime ) . strftime ( "%Y-%m-%d" ) LOG . info ( "New file date: %s" , datetime . utcfromtimestamp ( fstat [ stat . ST_CTIME ] ) ) else : LOG . 
info ( "File %s exists; using local copy" , fname ) filedate = datetime . utcfromtimestamp ( fstat [ stat . ST_CTIME ] ) . strftime ( "%Y-%m-%d" ) self . dataset . setFileAccessUrl ( remotef . filename , True ) self . dataset . setVersion ( filedate ) return
251,254
https://github.com/monarch-initiative/dipper/blob/24cc80db355bbe15776edc5c7b41e0886959ba41/dipper/sources/Coriell.py#L150-L224
[ "def", "is_unstructured", "(", "self", ")", ":", "return", "[", "arr", ".", "psy", ".", "decoder", ".", "is_unstructured", "(", "arr", ")", "if", "not", "isinstance", "(", "arr", ",", "ArrayList", ")", "else", "arr", ".", "is_unstructured", "for", "arr",...
This function will process the data supplied internally about the repository from Coriell .
def _process_collection ( self , collection_id , label , page ) : # ############# BUILD THE CELL LINE REPOSITORY ############# for graph in [ self . graph , self . testgraph ] : # TODO: How to devise a label for each repository? model = Model ( graph ) reference = Reference ( graph ) repo_id = 'CoriellCollection:' + collection_id repo_label = label repo_page = page model . addIndividualToGraph ( repo_id , repo_label , self . globaltt [ 'collection' ] ) reference . addPage ( repo_id , repo_page ) return
251,255
https://github.com/monarch-initiative/dipper/blob/24cc80db355bbe15776edc5c7b41e0886959ba41/dipper/sources/Coriell.py#L760-L788
[ "def", "flash", "(", "self", ",", "duration", "=", "0.0", ")", ":", "for", "_", "in", "range", "(", "2", ")", ":", "self", ".", "on", "=", "not", "self", ".", "on", "time", ".", "sleep", "(", "duration", ")" ]
Add the genotype internal id to flybase mapping to the idhashmap . Also add them as individuals to the graph .
def _process_genotypes ( self , limit ) : if self . test_mode : graph = self . testgraph else : graph = self . graph model = Model ( graph ) line_counter = 0 raw = '/' . join ( ( self . rawdir , 'genotype' ) ) LOG . info ( "building labels for genotypes" ) geno = Genotype ( graph ) fly_tax = self . globaltt [ 'Drosophila melanogaster' ] with open ( raw , 'r' ) as f : f . readline ( ) # read the header row; skip filereader = csv . reader ( f , delimiter = '\t' , quotechar = '\"' ) for line in filereader : line_counter += 1 ( genotype_num , uniquename , description , name ) = line # if self.test_mode is True: # if int(object_key) not in self.test_keys.get('genotype'): # continue # add the internal genotype to pub mapping genotype_id = 'MONARCH:FBgeno' + str ( genotype_num ) self . idhash [ 'genotype' ] [ genotype_num ] = genotype_id if description == '' : description = None if not self . test_mode and limit is not None and line_counter > limit : pass else : if self . test_mode and int ( genotype_num ) not in self . test_keys [ 'genotype' ] : continue model . addIndividualToGraph ( genotype_id , uniquename , self . globaltt [ 'intrinsic_genotype' ] , description ) # we know all genotypes are in flies # FIXME we assume here they are in melanogaster, # but that isn't necessarily true!!! # TODO should the taxon be == genomic background? geno . addTaxon ( fly_tax , genotype_id ) genotype_iid = self . _makeInternalIdentifier ( 'genotype' , genotype_num ) model . addComment ( genotype_id , genotype_iid ) if name . strip ( ) != '' : model . addSynonym ( genotype_id , name ) return
251,256
https://github.com/monarch-initiative/dipper/blob/24cc80db355bbe15776edc5c7b41e0886959ba41/dipper/sources/FlyBase.py#L358-L423
[ "def", "_timestamp_regulator", "(", "self", ")", ":", "unified_timestamps", "=", "_PrettyDefaultDict", "(", "list", ")", "staged_files", "=", "self", ".", "_list_audio_files", "(", "sub_dir", "=", "\"staging\"", ")", "for", "timestamp_basename", "in", "self", ".",...
Stock definitions . Here we instantiate them as instances of the given taxon .
def _process_stocks ( self , limit ) : if self . test_mode : graph = self . testgraph else : graph = self . graph model = Model ( graph ) line_counter = 0 raw = '/' . join ( ( self . rawdir , 'stock' ) ) LOG . info ( "building labels for stocks" ) with open ( raw , 'r' ) as f : f . readline ( ) # read the header row; skip filereader = csv . reader ( f , delimiter = '\t' , quotechar = '\"' ) for line in filereader : line_counter += 1 ( stock_id , dbxref_id , organism_id , name , uniquename , description , type_id , is_obsolete ) = line # 2 12153979 1 2 FBst0000002 w[*]; betaTub60D[2] Kr[If-1]/CyO 10670 stock_num = stock_id stock_id = 'FlyBase:' + uniquename self . idhash [ 'stock' ] [ stock_num ] = stock_id stock_label = description organism_key = organism_id taxon = self . idhash [ 'organism' ] [ organism_key ] # from what i can tell, the dbxrefs are just more FBst, # so no added information vs uniquename if not self . test_mode and limit is not None and line_counter > limit : pass else : if self . test_mode and int ( stock_num ) not in self . test_keys [ 'strain' ] : continue # tax_label = self.label_hash[taxon] # unused # add the tax in case it hasn't been already model . addClassToGraph ( taxon ) model . addIndividualToGraph ( stock_id , stock_label , taxon ) if is_obsolete == 't' : model . addDeprecatedIndividual ( stock_id ) return
251,257
https://github.com/monarch-initiative/dipper/blob/24cc80db355bbe15776edc5c7b41e0886959ba41/dipper/sources/FlyBase.py#L426-L480
[ "def", "update_site", "(", "self", ",", "client_secret_expires_at", "=", "None", ")", ":", "params", "=", "{", "\"oxd_id\"", ":", "self", ".", "oxd_id", ",", "\"authorization_redirect_uri\"", ":", "self", ".", "authorization_redirect_uri", "}", "if", "client_secre...
Flybase publications .
def _process_pubs ( self , limit ) : if self . test_mode : graph = self . testgraph else : graph = self . graph model = Model ( graph ) line_counter = 0 raw = '/' . join ( ( self . rawdir , 'pub' ) ) LOG . info ( "building labels for pubs" ) with open ( raw , 'r' ) as f : f . readline ( ) # read the header row; skip filereader = csv . reader ( f , delimiter = '\t' , quotechar = '\"' ) for line in filereader : ( pub_id , title , volumetitle , volume , series_name , issue , pyear , pages , miniref , type_id , is_obsolete , publisher , pubplace , uniquename ) = line # 2 12153979 1 2 FBst0000002 w[*]; betaTub60D[2] Kr[If-1]/CyO 10670 # if self.test_mode is True: # if int(object_key) not in self.test_keys.get('genotype'): # continue pub_num = pub_id pub_id = 'FlyBase:' + uniquename . strip ( ) self . idhash [ 'publication' ] [ pub_num ] = pub_id # TODO figure out the type of pub by type_id if not re . match ( r'(FBrf|multi)' , uniquename ) : continue line_counter += 1 reference = Reference ( graph , pub_id ) if title != '' : reference . setTitle ( title ) if pyear != '' : reference . setYear ( str ( pyear ) ) if miniref != '' : reference . setShortCitation ( miniref ) if not self . test_mode and limit is not None and line_counter > limit : pass else : if self . test_mode and int ( pub_num ) not in self . test_keys [ 'pub' ] : continue if is_obsolete == 't' : model . addDeprecatedIndividual ( pub_id ) else : reference . addRefToGraph ( ) return
251,258
https://github.com/monarch-initiative/dipper/blob/24cc80db355bbe15776edc5c7b41e0886959ba41/dipper/sources/FlyBase.py#L483-L539
[ "def", "_createLink", "(", "self", ",", "linkResult", ",", "replaceParamFile", ")", ":", "link", "=", "None", "# Cases", "if", "linkResult", "[", "'type'", "]", "==", "'XSEC'", ":", "# Cross section link handler", "link", "=", "self", ".", "_createCrossSection",...
There s only about 30 environments in which the phenotypes are recorded . There are no externally accessible identifiers for environments so we make anonymous nodes for now . Some of the environments are comprised of > 1 of the other environments ; we do some simple parsing to match the strings of the environmental labels to the other atomic components .
def _process_environments ( self ) : if self . test_mode : graph = self . testgraph else : graph = self . graph raw = '/' . join ( ( self . rawdir , 'environment' ) ) LOG . info ( "building labels for environment" ) env_parts = { } label_map = { } env = Environment ( graph ) with open ( raw , 'r' ) as f : filereader = csv . reader ( f , delimiter = '\t' , quotechar = '\"' ) f . readline ( ) # read the header row; skip for line in filereader : ( environment_id , uniquename , description ) = line # 22 heat sensitive | tetracycline conditional environment_num = environment_id environment_internal_id = self . _makeInternalIdentifier ( 'environment' , environment_num ) if environment_num not in self . idhash [ 'environment' ] : self . idhash [ 'environment' ] [ environment_num ] = environment_internal_id environment_id = self . idhash [ 'environment' ] [ environment_num ] environment_label = uniquename if environment_label == 'unspecified' : environment_label += ' environment' env . addEnvironment ( environment_id , environment_label ) self . label_hash [ environment_id ] = environment_label # split up the environment into parts # if there's parts, then add them to the hash; # we'll match the components in a second pass components = re . split ( r'\|' , uniquename ) if len ( components ) > 1 : env_parts [ environment_id ] = components else : label_map [ environment_label ] = environment_id # ### end loop through file # build the environmental components for eid in env_parts : eid = eid . strip ( ) for e in env_parts [ eid ] : # search for the environmental component by label env_id = label_map . get ( e . strip ( ) ) env . addComponentToEnvironment ( eid , env_id ) return
251,259
https://github.com/monarch-initiative/dipper/blob/24cc80db355bbe15776edc5c7b41e0886959ba41/dipper/sources/FlyBase.py#L542-L604
[ "def", "driverDebugRequest", "(", "self", ",", "unDeviceIndex", ",", "pchRequest", ",", "pchResponseBuffer", ",", "unResponseBufferSize", ")", ":", "fn", "=", "self", ".", "function_table", ".", "driverDebugRequest", "result", "=", "fn", "(", "unDeviceIndex", ",",...
The genotypes of the stocks .
def _process_stock_genotype ( self , limit ) : if self . test_mode : graph = self . testgraph else : graph = self . graph raw = '/' . join ( ( self . rawdir , 'stock_genotype' ) ) LOG . info ( "processing stock genotype" ) line_counter = 0 with open ( raw , 'r' ) as f : filereader = csv . reader ( f , delimiter = '\t' , quotechar = '\"' ) f . readline ( ) # read the header row; skip for line in filereader : ( stock_genotype_id , stock_id , genotype_id ) = line stock_key = stock_id stock_id = self . idhash [ 'stock' ] [ stock_key ] genotype_key = genotype_id genotype_id = self . idhash [ 'genotype' ] [ genotype_key ] if self . test_mode and int ( genotype_key ) not in self . test_keys [ 'genotype' ] : continue graph . addTriple ( stock_id , self . globaltt [ 'has_genotype' ] , genotype_id ) line_counter += 1 if not self . test_mode and limit is not None and line_counter > limit : break return
251,260
https://github.com/monarch-initiative/dipper/blob/24cc80db355bbe15776edc5c7b41e0886959ba41/dipper/sources/FlyBase.py#L926-L965
[ "def", "process_config", "(", "config", ",", "config_data", ")", ":", "if", "'components'", "in", "config_data", ":", "process_components_config_section", "(", "config", ",", "config_data", "[", "'components'", "]", ")", "if", "'data'", "in", "config_data", ":", ...
We bring in the dbxref identifiers and store them in a hashmap for lookup in other functions . Note that some dbxrefs aren t mapped to identifiers . For example 5004018 is mapped to a string endosome & imaginal disc epithelial cell | somatic clone ... In those cases there just isn t a dbxref that s used when referencing with a cvterm ; it ll just use the internal key .
def _process_dbxref ( self ) : raw = '/' . join ( ( self . rawdir , 'dbxref' ) ) LOG . info ( "processing dbxrefs" ) line_counter = 0 with open ( raw , 'r' ) as f : filereader = csv . reader ( f , delimiter = '\t' , quotechar = '\"' ) f . readline ( ) # read the header row; skip for line in filereader : ( dbxref_id , db_id , accession , version , description , url ) = line # dbxref_id db_id accession version description url # 1 2 SO:0000000 "" accession = accession . strip ( ) db_id = db_id . strip ( ) if accession != '' and db_id in self . localtt : # scrub some identifiers here mch = re . match ( r'(doi|SO|GO|FBcv|FBbt_root|FBdv|FBgn|FBdv_root|FlyBase|FBbt):' , accession ) if mch : accession = re . sub ( mch . group ( 1 ) + r'\:' , '' , accession ) elif re . match ( r'(FlyBase miscellaneous CV|cell_lineprop|relationship type|FBgn$)' , accession ) : continue elif re . match ( r'\:' , accession ) : # starts with a colon accession = re . sub ( r'\:' , '' , accession ) elif re . search ( r'\s' , accession ) : # skip anything with a space # LOG.debug( # 'dbxref %s accession has a space: %s', dbxref_id, accession) continue if re . match ( r'http' , accession ) : did = accession else : prefix = self . localtt [ db_id ] did = ':' . join ( ( prefix , accession ) ) if re . search ( r'\:' , accession ) and prefix != 'DOI' : LOG . warning ( 'id %s may be malformed; skipping' , did ) self . dbxrefs [ dbxref_id ] = { db_id : did } elif url != '' : self . dbxrefs [ dbxref_id ] = { db_id : url . strip ( ) } else : continue # the following are some special cases that we scrub if int ( db_id ) == 2 and accession . strip ( ) == 'transgenic_transposon' : # transgenic_transposable_element self . dbxrefs [ dbxref_id ] = { db_id : self . globaltt [ 'transgenic_transposable_element' ] } line_counter += 1 return
251,261
https://github.com/monarch-initiative/dipper/blob/24cc80db355bbe15776edc5c7b41e0886959ba41/dipper/sources/FlyBase.py#L1033-L1103
[ "def", "perturbParams", "(", "self", ",", "pertSize", "=", "1e-3", ")", ":", "params", "=", "self", ".", "getParams", "(", ")", "self", ".", "setParams", "(", "params", "+", "pertSize", "*", "sp", ".", "randn", "(", "params", ".", "shape", "[", "0", ...
Get the phenotypes and declare the classes . If the observable is unspecified then we assign the phenotype to the cvalue id ; otherwise we convert the phenotype into a uberpheno - style identifier simply based on the anatomical part that s affected ... that is listed as the observable_id concatenated with the literal PHENOTYPE
def _process_phenotype ( self , limit ) : if self . test_mode : graph = self . testgraph else : graph = self . graph model = Model ( graph ) raw = '/' . join ( ( self . rawdir , 'phenotype' ) ) LOG . info ( "processing phenotype" ) line_counter = 0 with open ( raw , 'r' ) as f : filereader = csv . reader ( f , delimiter = '\t' , quotechar = '\"' ) f . readline ( ) # read the header row; skip for line in filereader : ( phenotype_id , uniquename , observable_id , attr_id , value , cvalue_id , assay_id ) = line # 8505 unspecified # 20142 mesothoracic leg disc | somatic clone 87719 60468 60468 60468 # 8507 sex comb | ectopic 88877 60468 60468 60468 # 8508 tarsal segment 83664 60468 60468 60468 # 18404 oocyte | oogenesis stage S9 86769 60468 60468 60468 # for now make these as phenotypic classes # will need to dbxref at some point phenotype_key = phenotype_id phenotype_id = None phenotype_internal_id = self . _makeInternalIdentifier ( 'phenotype' , phenotype_key ) phenotype_label = None self . label_hash [ phenotype_internal_id ] = uniquename cvterm_id = None if observable_id != '' and int ( observable_id ) == 60468 : # undefined - typically these are already phenotypes if cvalue_id in self . idhash [ 'cvterm' ] : cvterm_id = self . idhash [ 'cvterm' ] [ cvalue_id ] phenotype_id = self . idhash [ 'cvterm' ] [ cvalue_id ] elif observable_id in self . idhash [ 'cvterm' ] : # observations to anatomical classes cvterm_id = self . idhash [ 'cvterm' ] [ observable_id ] phenotype_id = self . idhash [ 'cvterm' ] [ observable_id ] + 'PHENOTYPE' if cvterm_id is not None and cvterm_id in self . label_hash : phenotype_label = self . label_hash [ cvterm_id ] phenotype_label += ' phenotype' self . label_hash [ phenotype_id ] = phenotype_label else : LOG . info ( 'cvtermid=%s not in label_hash' , cvterm_id ) else : LOG . info ( "No observable id or label for %s: %s" , phenotype_key , uniquename ) # TODO store this composite phenotype in some way # as a proper class definition? self . 
idhash [ 'phenotype' ] [ phenotype_key ] = phenotype_id # assay_id is currently only "undefined" key=60468 if not self . test_mode and limit is not None and line_counter > limit : pass else : if phenotype_id is not None : # assume that these fit into the phenotypic uberpheno # elsewhere model . addClassToGraph ( phenotype_id , phenotype_label ) line_counter += 1 return
251,262
https://github.com/monarch-initiative/dipper/blob/24cc80db355bbe15776edc5c7b41e0886959ba41/dipper/sources/FlyBase.py#L1105-L1195
[ "def", "read_file", "(", "self", ")", ":", "if", "os", ".", "path", ".", "exists", "(", "self", ".", "experiment_file", ")", ":", "try", ":", "with", "open", "(", "self", ".", "experiment_file", ",", "'r'", ")", "as", "file", ":", "return", "json", ...
CVterms are the internal identifiers for any controlled vocab or ontology term . Many are xrefd to actual ontologies . The actual external id is stored in the dbxref table which we place into the internal hashmap for lookup with the cvterm id . The name of the external term is stored in the name element of this table and we add that to the label hashmap for lookup elsewhere
def _process_cvterm ( self ) : line_counter = 0 raw = '/' . join ( ( self . rawdir , 'cvterm' ) ) LOG . info ( "processing cvterms" ) with open ( raw , 'r' ) as f : f . readline ( ) # read the header row; skip filereader = csv . reader ( f , delimiter = '\t' , quotechar = '\"' ) for line in filereader : line_counter += 1 ( cvterm_id , cv_id , definition , dbxref_id , is_obsolete , is_relationshiptype , name ) = line # 316 6 1665919 0 0 rRNA_cleavage_snoRNA_primary_transcript # 28 5 1663309 0 0 synonym # 455 6 1665920 0 0 tmRNA # not sure the following is necessary # cv_prefixes = { # 6 : 'SO', # 20: 'FBcv', # 28: 'GO', # 29: 'GO', # 30: 'GO', # 31: 'FBcv', # not actually FBcv - I think FBbt. # 32: 'FBdv', # 37: 'GO', # these are relationships # 73: 'DOID' # } # if int(cv_id) not in cv_prefixes: # continue cvterm_key = cvterm_id cvterm_id = self . _makeInternalIdentifier ( 'cvterm' , cvterm_key ) self . label_hash [ cvterm_id ] = name self . idhash [ 'cvterm' ] [ cvterm_key ] = cvterm_id # look up the dbxref_id for the cvterm # hopefully it's one-to-one dbxrefs = self . dbxrefs . get ( dbxref_id ) if dbxrefs is not None : if len ( dbxrefs ) > 1 : LOG . info ( ">1 dbxref for this cvterm (%s: %s): %s" , str ( cvterm_id ) , name , dbxrefs . values ( ) ) elif len ( dbxrefs ) == 1 : # replace the cvterm with # the dbxref (external) identifier did = dbxrefs . popitem ( ) [ 1 ] # get the value self . idhash [ 'cvterm' ] [ cvterm_key ] = did # also add the label to the dbxref self . label_hash [ did ] = name return
251,263
https://github.com/monarch-initiative/dipper/blob/24cc80db355bbe15776edc5c7b41e0886959ba41/dipper/sources/FlyBase.py#L1324-L1389
[ "def", "receive_data_chunk", "(", "self", ",", "raw_data", ",", "start", ")", ":", "self", ".", "file", ".", "write", "(", "raw_data", ")", "# CHANGED: This un-hangs us long enough to keep things rolling.", "eventlet", ".", "sleep", "(", "0", ")" ]
The internal identifiers for the organisms in flybase
def _process_organisms ( self , limit ) : if self . test_mode : graph = self . testgraph else : graph = self . graph model = Model ( graph ) raw = '/' . join ( ( self . rawdir , 'organism' ) ) LOG . info ( "processing organisms" ) line_counter = 0 with open ( raw , 'r' ) as f : filereader = csv . reader ( f , delimiter = '\t' , quotechar = '\"' ) f . readline ( ) # read the header row; skip for line in filereader : ( organism_id , abbreviation , genus , species , common_name , comment ) = line # 1 Dmel Drosophila melanogaster fruit fly # 2 Comp Computational result line_counter += 1 tax_internal_id = self . _makeInternalIdentifier ( 'organism' , organism_id ) tax_label = ' ' . join ( ( genus , species ) ) tax_id = tax_internal_id self . idhash [ 'organism' ] [ organism_id ] = tax_id self . label_hash [ tax_id ] = tax_label # we won't actually add the organism to the graph, # unless we actually use it therefore it is added outside of # this function if self . test_mode and int ( organism_id ) not in self . test_keys [ 'organism' ] : continue if not self . test_mode and limit is not None and line_counter > limit : pass else : model . addClassToGraph ( tax_id ) for s in [ common_name , abbreviation ] : if s is not None and s . strip ( ) != '' : model . addSynonym ( tax_id , s ) model . addComment ( tax_id , tax_internal_id ) return
251,264
https://github.com/monarch-initiative/dipper/blob/24cc80db355bbe15776edc5c7b41e0886959ba41/dipper/sources/FlyBase.py#L1792-L1844
[ "def", "request", "(", "self", ",", "message", ",", "timeout", "=", "False", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "if", "not", "self", ".", "connection_pool", ".", "full", "(", ")", ":", "self", ".", "connection_pool", ".", "put", "...
Add equivalentClass and sameAs relationships
def _add_gene_equivalencies ( self , xrefs , gene_id , taxon ) : clique_map = self . open_and_parse_yaml ( self . resources [ 'clique_leader' ] ) if self . test_mode : graph = self . testgraph else : graph = self . graph model = Model ( graph ) filter_out = [ 'Vega' , 'IMGT/GENE-DB' , 'Araport' ] # deal with the dbxrefs # MIM:614444|HGNC:HGNC:16851|Ensembl:ENSG00000136828|HPRD:11479|Vega:OTTHUMG00000020696 for dbxref in xrefs . strip ( ) . split ( '|' ) : prefix = ':' . join ( dbxref . split ( ':' ) [ : - 1 ] ) . strip ( ) if prefix in self . localtt : prefix = self . localtt [ prefix ] dbxref_curie = ':' . join ( ( prefix , dbxref . split ( ':' ) [ - 1 ] ) ) if dbxref_curie is not None and prefix != '' : if prefix == 'HPRD' : # proteins are not == genes. model . addTriple ( gene_id , self . globaltt [ 'has gene product' ] , dbxref_curie ) continue # skip some of these for now based on curie prefix if prefix in filter_out : continue if prefix == 'ENSEMBL' : model . addXref ( gene_id , dbxref_curie ) if prefix == 'OMIM' : if DipperUtil . is_omim_disease ( dbxref_curie ) : continue try : if self . class_or_indiv . get ( gene_id ) == 'C' : model . addEquivalentClass ( gene_id , dbxref_curie ) if taxon in clique_map : if clique_map [ taxon ] == prefix : model . makeLeader ( dbxref_curie ) elif clique_map [ taxon ] == gene_id . split ( ':' ) [ 0 ] : model . makeLeader ( gene_id ) else : model . addSameIndividual ( gene_id , dbxref_curie ) except AssertionError as err : LOG . warning ( "Error parsing %s: %s" , gene_id , err ) return
251,265
https://github.com/monarch-initiative/dipper/blob/24cc80db355bbe15776edc5c7b41e0886959ba41/dipper/sources/NCBIGene.py#L377-L430
[ "def", "mol_supplier", "(", "lines", ",", "no_halt", ",", "assign_descriptors", ")", ":", "def", "sdf_block", "(", "lns", ")", ":", "mol", "=", "[", "]", "opt", "=", "[", "]", "is_mol", "=", "True", "for", "line", "in", "lns", ":", "if", "line", "....
Loops through the gene2pubmed file and adds a simple triple to say that a given publication is_about a gene . Publications are added as NamedIndividuals .
def _get_gene2pubmed ( self , limit ) : src_key = 'gene2pubmed' if self . test_mode : graph = self . testgraph else : graph = self . graph model = Model ( graph ) LOG . info ( "Processing Gene records" ) line_counter = 0 myfile = '/' . join ( ( self . rawdir , self . files [ src_key ] [ 'file' ] ) ) LOG . info ( "FILE: %s" , myfile ) assoc_counter = 0 col = self . files [ src_key ] [ 'columns' ] with gzip . open ( myfile , 'rb' ) as tsv : row = tsv . readline ( ) . decode ( ) . strip ( ) . split ( '\t' ) row [ 0 ] = row [ 0 ] [ 1 : ] # strip comment if col != row : LOG . info ( '%s\nExpected Headers:\t%s\nRecived Headers:\t %s\n' , src_key , col , row ) for line in tsv : line_counter += 1 # skip comments row = line . decode ( ) . strip ( ) . split ( '\t' ) if row [ 0 ] [ 0 ] == '#' : continue # (tax_num, gene_num, pubmed_num) = line.split('\t') # ## set id_filter=None in init if you don't want to have a filter # if self.id_filter is not None: # if ((self.id_filter == 'taxids' and \ # (int(tax_num) not in self.tax_ids)) # or (self.id_filter == 'geneids' and \ # (int(gene_num) not in self.gene_ids))): # continue # #### end filter gene_num = row [ col . index ( 'GeneID' ) ] . strip ( ) if self . test_mode and int ( gene_num ) not in self . gene_ids : continue tax_num = row [ col . index ( 'tax_id' ) ] . strip ( ) if not self . test_mode and tax_num not in self . tax_ids : continue pubmed_num = row [ col . index ( 'PubMed_ID' ) ] . strip ( ) if gene_num == '-' or pubmed_num == '-' : continue gene_id = ':' . join ( ( 'NCBIGene' , gene_num ) ) pubmed_id = ':' . join ( ( 'PMID' , pubmed_num ) ) if self . class_or_indiv . get ( gene_id ) == 'C' : model . addClassToGraph ( gene_id , None ) else : model . addIndividualToGraph ( gene_id , None ) # add the publication as a NamedIndividual # add type publication model . addIndividualToGraph ( pubmed_id , None , None ) reference = Reference ( graph , pubmed_id , self . globaltt [ 'journal article' ] ) reference . 
addRefToGraph ( ) graph . addTriple ( pubmed_id , self . globaltt [ 'is_about' ] , gene_id ) assoc_counter += 1 if not self . test_mode and limit is not None and line_counter > limit : break LOG . info ( "Processed %d pub-gene associations" , assoc_counter ) return
251,266
https://github.com/monarch-initiative/dipper/blob/24cc80db355bbe15776edc5c7b41e0886959ba41/dipper/sources/NCBIGene.py#L516-L598
[ "def", "discard_config", "(", "self", ")", ":", "if", "self", ".", "config_session", "is", "not", "None", ":", "commands", "=", "[", "]", "commands", ".", "append", "(", "'configure session {}'", ".", "format", "(", "self", ".", "config_session", ")", ")",...
Given a list of omim ids this will use the omim API to fetch the entries according to the included_fields passed as a parameter . If a transformation function is supplied this will iterate over each entry and either add the results to the supplied graph or will return a set of processed entries that the calling function can further iterate .
def process_entries ( self , omimids , transform , included_fields = None , graph = None , limit = None , globaltt = None ) : omimparams = { } # add the included_fields as parameters if included_fields is not None and included_fields : omimparams [ 'include' ] = ',' . join ( included_fields ) processed_entries = list ( ) # scrub any omim prefixes from the omimids before processing # cleanomimids = set() # for omimid in omimids: # scrubbed = str(omimid).split(':')[-1] # if re.match(r'^\d+$', str(scrubbed)): # cleanomimids.update(scrubbed) # omimids = list(cleanomimids) cleanomimids = [ o . split ( ':' ) [ - 1 ] for o in omimids ] diff = set ( omimids ) - set ( cleanomimids ) if diff : LOG . warning ( 'OMIM has %i dirty bits see"\n %s' , len ( diff ) , str ( diff ) ) omimids = cleanomimids else : cleanomimids = list ( ) acc = 0 # for counting # note that you can only do request batches of 20 # see info about "Limits" at http://omim.org/help/api # TODO 2017 May seems a majority of many groups of 20 # are producing python None for RDF triple Objects groupsize = 20 if not self . test_mode and limit is not None : # just in case the limit is larger than the number of records, maxit = limit if limit > len ( omimids ) : maxit = len ( omimids ) else : maxit = len ( omimids ) while acc < maxit : end = min ( ( maxit , acc + groupsize ) ) # iterate through the omim ids list, # and fetch from the OMIM api in batches of 20 if self . test_mode : intersect = list ( set ( [ str ( i ) for i in self . test_ids ] ) & set ( omimids [ acc : end ] ) ) # some of the test ids are in the omimids if intersect : LOG . info ( "found test ids: %s" , intersect ) omimparams . update ( { 'mimNumber' : ',' . join ( intersect ) } ) else : acc += groupsize continue else : omimparams . update ( { 'mimNumber' : ',' . join ( omimids [ acc : end ] ) } ) url = OMIMAPI + urllib . parse . urlencode ( omimparams ) try : req = urllib . request . urlopen ( url ) except HTTPError as e : # URLError? LOG . 
warning ( 'fetching: %s' , url ) error_msg = e . read ( ) if re . search ( r'The API key: .* is invalid' , str ( error_msg ) ) : msg = "API Key not valid" raise HTTPError ( url , e . code , msg , e . hdrs , e . fp ) LOG . error ( "Failed with: %s" , str ( error_msg ) ) break resp = req . read ( ) . decode ( ) acc += groupsize myjson = json . loads ( resp ) # snag a copy with open ( './raw/omim/_' + str ( acc ) + '.json' , 'w' ) as fp : json . dump ( myjson , fp ) entries = myjson [ 'omim' ] [ 'entryList' ] for e in entries : # apply the data transformation, and save it to the graph processed_entry = transform ( e , graph , globaltt ) if processed_entry is not None : processed_entries . append ( processed_entry ) # ### end iterating over batch of entries return processed_entries
251,267
https://github.com/monarch-initiative/dipper/blob/24cc80db355bbe15776edc5c7b41e0886959ba41/dipper/sources/OMIM.py#L253-L367
[ "def", "unshare", "(", "flags", ")", ":", "res", "=", "lib", ".", "unshare", "(", "flags", ")", "if", "res", "!=", "0", ":", "_check_error", "(", "ffi", ".", "errno", ")" ]
This takes the list of omim identifiers from the omim . txt . Z file and iteratively queries the omim api for the json - formatted data . This will create OMIM classes with the label definition and some synonyms . If an entry is removed it is added as a deprecated class . If an entry is moved it is deprecated and consider annotations are added .
def _process_all ( self , limit ) : omimids = self . _get_omim_ids ( ) LOG . info ( 'Have %i omim numbers to fetch records from their API' , len ( omimids ) ) LOG . info ( 'Have %i omim types ' , len ( self . omim_type ) ) if self . test_mode : graph = self . testgraph else : graph = self . graph geno = Genotype ( graph ) model = Model ( graph ) tax_label = 'Homo sapiens' tax_id = self . globaltt [ tax_label ] # add genome and taxon geno . addGenome ( tax_id , tax_label ) # tax label can get added elsewhere model . addClassToGraph ( tax_id , None ) # label added elsewhere includes = set ( ) includes . add ( 'all' ) self . process_entries ( omimids , self . _transform_entry , includes , graph , limit , self . globaltt )
251,268
https://github.com/monarch-initiative/dipper/blob/24cc80db355bbe15776edc5c7b41e0886959ba41/dipper/sources/OMIM.py#L369-L412
[ "def", "wait", "(", "self", ")", ":", "self", ".", "_done_event", ".", "wait", "(", "MAXINT", ")", "return", "self", ".", "_status", ",", "self", ".", "_exception" ]
Merge an update for another key with the one we are tracking internally .
def update ( self , key : bytes , value : bytes , node_updates : Sequence [ Hash32 ] ) : validate_is_bytes ( key ) validate_length ( key , self . _key_size ) # Path diff is the logical XOR of the updated key and this account path_diff = ( to_int ( self . key ) ^ to_int ( key ) ) # Same key (diff of 0), update the tracked value if path_diff == 0 : self . _value = value # No need to update branch else : # Find the first mismatched bit between keypaths. This is # where the branch point occurs, and we should update the # sibling node in the source branch at the branch point. # NOTE: Keys are in MSB->LSB (root->leaf) order. # Node lists are in root->leaf order. # Be sure to convert between them effectively. for bit in reversed ( range ( self . _branch_size ) ) : if path_diff & ( 1 << bit ) > 0 : branch_point = ( self . _branch_size - 1 ) - bit break # NOTE: node_updates only has to be as long as necessary # to obtain the update. This allows an optimization # of pruning updates to the maximum possible depth # that would be required to update, which may be # significantly smaller than the tree depth. if len ( node_updates ) <= branch_point : raise ValidationError ( "Updated node list is not deep enough" ) # Update sibling node in the branch where our key differs from the update self . _branch [ branch_point ] = node_updates [ branch_point ]
251,269
https://github.com/ethereum/py-trie/blob/d33108d21b54d59ee311f61d978496c84a6f1f8b/trie/smt.py#L144-L186
[ "def", "load_suite_from_stdin", "(", "self", ")", ":", "suite", "=", "unittest", ".", "TestSuite", "(", ")", "rules", "=", "Rules", "(", "\"stream\"", ",", "suite", ")", "line_generator", "=", "self", ".", "_parser", ".", "parse_stdin", "(", ")", "return",...
Returns db value and branch in root - > leaf order
def _get ( self , key : bytes ) -> Tuple [ bytes , Tuple [ Hash32 ] ] : validate_is_bytes ( key ) validate_length ( key , self . _key_size ) branch = [ ] target_bit = 1 << ( self . depth - 1 ) path = to_int ( key ) node_hash = self . root_hash # Append the sibling node to the branch # Iterate on the parent for _ in range ( self . depth ) : node = self . db [ node_hash ] left , right = node [ : 32 ] , node [ 32 : ] if path & target_bit : branch . append ( left ) node_hash = right else : branch . append ( right ) node_hash = left target_bit >>= 1 # Value is the last hash in the chain # NOTE: Didn't do exception here for testing purposes return self . db [ node_hash ] , tuple ( branch )
251,270
https://github.com/ethereum/py-trie/blob/d33108d21b54d59ee311f61d978496c84a6f1f8b/trie/smt.py#L271-L297
[ "def", "get", "(", "self", ")", ":", "return", "{", "k", ":", "v", "for", "k", ",", "v", "in", "list", "(", "self", ".", "options", ".", "items", "(", ")", ")", "if", "k", "in", "self", ".", "_allowed_graphics", "}" ]
Returns all updated hashes in root - > leaf order
def set ( self , key : bytes , value : bytes ) -> Tuple [ Hash32 ] : validate_is_bytes ( key ) validate_length ( key , self . _key_size ) validate_is_bytes ( value ) path = to_int ( key ) node = value _ , branch = self . _get ( key ) proof_update = [ ] # Keep track of proof updates target_bit = 1 # branch is in root->leaf order, so flip for sibling_node in reversed ( branch ) : # Set node_hash = keccak ( node ) proof_update . append ( node_hash ) self . db [ node_hash ] = node # Update if ( path & target_bit ) : node = sibling_node + node_hash else : node = node_hash + sibling_node target_bit <<= 1 # Finally, update root hash self . root_hash = keccak ( node ) self . db [ self . root_hash ] = node # updates need to be in root->leaf order, so flip back return tuple ( reversed ( proof_update ) )
251,271
https://github.com/ethereum/py-trie/blob/d33108d21b54d59ee311f61d978496c84a6f1f8b/trie/smt.py#L299-L333
[ "def", "get", "(", "self", ")", ":", "return", "{", "k", ":", "v", "for", "k", ",", "v", "in", "list", "(", "self", ".", "options", ".", "items", "(", ")", ")", "if", "k", "in", "self", ".", "_allowed_graphics", "}" ]
Equals to setting the value to None Returns all updated hashes in root - > leaf order
def delete ( self , key : bytes ) -> Tuple [ Hash32 ] : validate_is_bytes ( key ) validate_length ( key , self . _key_size ) return self . set ( key , self . _default )
251,272
https://github.com/ethereum/py-trie/blob/d33108d21b54d59ee311f61d978496c84a6f1f8b/trie/smt.py#L345-L353
[ "def", "get", "(", "self", ")", ":", "return", "{", "k", ":", "v", "for", "k", ",", "v", "in", "list", "(", "self", ".", "options", ".", "items", "(", ")", ")", "if", "k", "in", "self", ".", "_allowed_graphics", "}" ]
Return the next requests that should be dispatched .
def next_batch ( self , n = 1 ) : if len ( self . queue ) == 0 : return [ ] batch = list ( reversed ( ( self . queue [ - n : ] ) ) ) self . queue = self . queue [ : - n ] return batch
251,273
https://github.com/ethereum/py-trie/blob/d33108d21b54d59ee311f61d978496c84a6f1f8b/trie/sync.py#L120-L126
[ "def", "hash_to_unsigned", "(", "data", ")", ":", "if", "isinstance", "(", "data", ",", "string_types", ")", ":", "# Return a CRC32 value identical across Python versions and platforms", "# by stripping the sign bit as on", "# http://docs.python.org/library/zlib.html.", "return", ...
Schedule a request for the node with the given key .
def schedule ( self , node_key , parent , depth , leaf_callback , is_raw = False ) : if node_key in self . _existing_nodes : self . logger . debug ( "Node %s already exists in db" % encode_hex ( node_key ) ) return if node_key in self . db : self . _existing_nodes . add ( node_key ) self . logger . debug ( "Node %s already exists in db" % encode_hex ( node_key ) ) return if parent is not None : parent . dependencies += 1 existing = self . requests . get ( node_key ) if existing is not None : self . logger . debug ( "Already requesting %s, will just update parents list" % node_key ) existing . parents . append ( parent ) return request = SyncRequest ( node_key , parent , depth , leaf_callback , is_raw ) # Requests get added to both self.queue and self.requests; the former is used to keep # track which requests should be sent next, and the latter is used to avoid scheduling a # request for a given node multiple times. self . logger . debug ( "Scheduling retrieval of %s" % encode_hex ( request . node_key ) ) self . requests [ request . node_key ] = request bisect . insort ( self . queue , request )
251,274
https://github.com/ethereum/py-trie/blob/d33108d21b54d59ee311f61d978496c84a6f1f8b/trie/sync.py#L128-L155
[ "def", "write", "(", "self", ",", "symbol", ",", "data", ")", ":", "# get the full set of date ranges that we have", "cursor", "=", "self", ".", "_collection", ".", "find", "(", ")", "for", "res", "in", "cursor", ":", "library", "=", "self", ".", "_arctic_li...
Return all children of the node retrieved by the given request .
def get_children ( self , request ) : node = decode_node ( request . data ) return _get_children ( node , request . depth )
251,275
https://github.com/ethereum/py-trie/blob/d33108d21b54d59ee311f61d978496c84a6f1f8b/trie/sync.py#L157-L164
[ "def", "save", "(", "self", ",", "ts", ")", ":", "with", "open", "(", "self", ",", "'w'", ")", "as", "f", ":", "Timestamp", ".", "wrap", "(", "ts", ")", ".", "dump", "(", "f", ")" ]
Process request results .
def process ( self , results ) : for node_key , data in results : request = self . requests . get ( node_key ) if request is None : # This may happen if we resend a request for a node after waiting too long, # and then eventually get two responses with it. self . logger . info ( "No SyncRequest found for %s, maybe we got more than one response for it" % encode_hex ( node_key ) ) return if request . data is not None : raise SyncRequestAlreadyProcessed ( "%s has been processed already" % request ) request . data = data if request . is_raw : self . commit ( request ) continue references , leaves = self . get_children ( request ) for depth , ref in references : self . schedule ( ref , request , depth , request . leaf_callback ) if request . leaf_callback is not None : for leaf in leaves : request . leaf_callback ( leaf , request ) if request . dependencies == 0 : self . commit ( request )
251,276
https://github.com/ethereum/py-trie/blob/d33108d21b54d59ee311f61d978496c84a6f1f8b/trie/sync.py#L166-L199
[ "def", "vn_release", "(", "call", "=", "None", ",", "kwargs", "=", "None", ")", ":", "if", "call", "!=", "'function'", ":", "raise", "SaltCloudSystemExit", "(", "'The vn_reserve function must be called with -f or --function.'", ")", "if", "kwargs", "is", "None", "...
Given a key prefix return whether this prefix is the prefix of an existing key in the trie .
def check_if_branch_exist ( db , root_hash , key_prefix ) : validate_is_bytes ( key_prefix ) return _check_if_branch_exist ( db , root_hash , encode_to_bin ( key_prefix ) )
251,277
https://github.com/ethereum/py-trie/blob/d33108d21b54d59ee311f61d978496c84a6f1f8b/trie/branches.py#L30-L37
[ "def", "buffer_read_into", "(", "self", ",", "buffer", ",", "dtype", ")", ":", "ctype", "=", "self", ".", "_check_dtype", "(", "dtype", ")", "cdata", ",", "frames", "=", "self", ".", "_check_buffer", "(", "buffer", ",", "ctype", ")", "frames", "=", "se...
Get a long - format Merkle branch
def get_branch ( db , root_hash , key ) : validate_is_bytes ( key ) return tuple ( _get_branch ( db , root_hash , encode_to_bin ( key ) ) )
251,278
https://github.com/ethereum/py-trie/blob/d33108d21b54d59ee311f61d978496c84a6f1f8b/trie/branches.py#L71-L77
[ "def", "return_port", "(", "port", ")", ":", "if", "port", "in", "_random_ports", ":", "_random_ports", ".", "remove", "(", "port", ")", "elif", "port", "in", "_owned_ports", ":", "_owned_ports", ".", "remove", "(", "port", ")", "_free_ports", ".", "add", ...
Get all witness given a keypath prefix . Include
def get_witness_for_key_prefix ( db , node_hash , key ) : validate_is_bytes ( key ) return tuple ( _get_witness_for_key_prefix ( db , node_hash , encode_to_bin ( key ) ) )
251,279
https://github.com/ethereum/py-trie/blob/d33108d21b54d59ee311f61d978496c84a6f1f8b/trie/branches.py#L155-L165
[ "def", "DeleteOldCronJobRuns", "(", "self", ",", "cutoff_timestamp", ",", "cursor", "=", "None", ")", ":", "query", "=", "\"DELETE FROM cron_job_runs WHERE write_time < FROM_UNIXTIME(%s)\"", "cursor", ".", "execute", "(", "query", ",", "[", "mysql_utils", ".", "RDFDat...
Serializes a branch node
def encode_branch_node ( left_child_node_hash , right_child_node_hash ) : validate_is_bytes ( left_child_node_hash ) validate_length ( left_child_node_hash , 32 ) validate_is_bytes ( right_child_node_hash ) validate_length ( right_child_node_hash , 32 ) return BRANCH_TYPE_PREFIX + left_child_node_hash + right_child_node_hash
251,280
https://github.com/ethereum/py-trie/blob/d33108d21b54d59ee311f61d978496c84a6f1f8b/trie/utils/nodes.py#L157-L165
[ "def", "obj_to_file", "(", "obj", ",", "filename", ",", "filetype", "=", "'auto'", ",", "ndarray_to_list", "=", "False", ",", "squeeze", "=", "True", ")", ":", "# import json", "# with open(filename, mode='w') as f:", "# json.dump(annotation,f)", "if", "ndarray_to_...
Serializes a leaf node
def encode_leaf_node ( value ) : validate_is_bytes ( value ) if value is None or value == b'' : raise ValidationError ( "Value of leaf node can not be empty" ) return LEAF_TYPE_PREFIX + value
251,281
https://github.com/ethereum/py-trie/blob/d33108d21b54d59ee311f61d978496c84a6f1f8b/trie/utils/nodes.py#L168-L175
[ "def", "generate_http_manifest", "(", "self", ")", ":", "base_path", "=", "os", ".", "path", ".", "dirname", "(", "self", ".", "translate_path", "(", "self", ".", "path", ")", ")", "self", ".", "dataset", "=", "dtoolcore", ".", "DataSet", ".", "from_uri"...
Batch and commit and end of context
def batch_commit ( self , * , do_deletes = False ) : try : yield except Exception as exc : raise exc else : for key , value in self . cache . items ( ) : if value is not DELETED : self . wrapped_db [ key ] = value elif do_deletes : self . wrapped_db . pop ( key , None ) # if do_deletes is False, ignore deletes to underlying db finally : self . cache = { }
251,282
https://github.com/ethereum/py-trie/blob/d33108d21b54d59ee311f61d978496c84a6f1f8b/trie/utils/db.py#L48-L64
[ "def", "forget", "(", "empowered", ",", "powerupClass", ",", "interface", ")", ":", "className", "=", "fullyQualifiedName", "(", "powerupClass", ")", "withThisName", "=", "_StoredByName", ".", "className", "==", "className", "items", "=", "empowered", ".", "stor...
Prune the given node if context exits cleanly .
def _prune_node ( self , node ) : if self . is_pruning : # node is mutable, so capture the key for later pruning now prune_key , node_body = self . _node_to_db_mapping ( node ) should_prune = ( node_body is not None ) else : should_prune = False yield # Prune only if no exception is raised if should_prune : del self . db [ prune_key ]
251,283
https://github.com/ethereum/py-trie/blob/d33108d21b54d59ee311f61d978496c84a6f1f8b/trie/hexary.py#L231-L246
[ "def", "scale_columns", "(", "A", ",", "v", ",", "copy", "=", "True", ")", ":", "v", "=", "np", ".", "ravel", "(", "v", ")", "M", ",", "N", "=", "A", ".", "shape", "if", "not", "isspmatrix", "(", "A", ")", ":", "raise", "ValueError", "(", "'s...
A branch node which is left with only a single non - blank item should be turned into either a leaf or extension node .
def _normalize_branch_node ( self , node ) : iter_node = iter ( node ) if any ( iter_node ) and any ( iter_node ) : return node if node [ 16 ] : return [ compute_leaf_key ( [ ] ) , node [ 16 ] ] sub_node_idx , sub_node_hash = next ( ( idx , v ) for idx , v in enumerate ( node [ : 16 ] ) if v ) sub_node = self . get_node ( sub_node_hash ) sub_node_type = get_node_type ( sub_node ) if sub_node_type in { NODE_TYPE_LEAF , NODE_TYPE_EXTENSION } : with self . _prune_node ( sub_node ) : new_subnode_key = encode_nibbles ( tuple ( itertools . chain ( [ sub_node_idx ] , decode_nibbles ( sub_node [ 0 ] ) , ) ) ) return [ new_subnode_key , sub_node [ 1 ] ] elif sub_node_type == NODE_TYPE_BRANCH : subnode_hash = self . _persist_node ( sub_node ) return [ encode_nibbles ( [ sub_node_idx ] ) , subnode_hash ] else : raise Exception ( "Invariant: this code block should be unreachable" )
251,284
https://github.com/ethereum/py-trie/blob/d33108d21b54d59ee311f61d978496c84a6f1f8b/trie/hexary.py#L324-L356
[ "def", "dicomdir_info", "(", "dirpath", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "dr", "=", "DicomReader", "(", "dirpath", "=", "dirpath", ",", "*", "args", ",", "*", "*", "kwargs", ")", "info", "=", "dr", ".", "dicomdirectory", ".", "g...
Delete a key from inside or underneath a branch node
def _delete_branch_node ( self , node , trie_key ) : if not trie_key : node [ - 1 ] = BLANK_NODE return self . _normalize_branch_node ( node ) node_to_delete = self . get_node ( node [ trie_key [ 0 ] ] ) sub_node = self . _delete ( node_to_delete , trie_key [ 1 : ] ) encoded_sub_node = self . _persist_node ( sub_node ) if encoded_sub_node == node [ trie_key [ 0 ] ] : return node node [ trie_key [ 0 ] ] = encoded_sub_node if encoded_sub_node == BLANK_NODE : return self . _normalize_branch_node ( node ) return node
251,285
https://github.com/ethereum/py-trie/blob/d33108d21b54d59ee311f61d978496c84a6f1f8b/trie/hexary.py#L361-L381
[ "def", "_write_packet", "(", "self", ",", "packet", ",", "sec", "=", "None", ",", "usec", "=", "None", ",", "caplen", "=", "None", ",", "wirelen", "=", "None", ")", ":", "if", "caplen", "is", "None", ":", "caplen", "=", "len", "(", "packet", ")", ...
Fetches the value with a given keypath from the given node .
def get ( self , key ) : validate_is_bytes ( key ) return self . _get ( self . root_hash , encode_to_bin ( key ) )
251,286
https://github.com/ethereum/py-trie/blob/d33108d21b54d59ee311f61d978496c84a6f1f8b/trie/binary.py#L38-L46
[ "def", "if_range", "(", "self", ")", "->", "Optional", "[", "datetime", ".", "datetime", "]", ":", "return", "self", ".", "_http_date", "(", "self", ".", "headers", ".", "get", "(", "hdrs", ".", "IF_RANGE", ")", ")" ]
Sets the value at the given keypath from the given node
def set ( self , key , value ) : validate_is_bytes ( key ) validate_is_bytes ( value ) self . root_hash = self . _set ( self . root_hash , encode_to_bin ( key ) , value )
251,287
https://github.com/ethereum/py-trie/blob/d33108d21b54d59ee311f61d978496c84a6f1f8b/trie/binary.py#L79-L88
[ "def", "open_required", "(", "func", ")", ":", "def", "wrapper", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "if", "self", ".", "_status", "==", "\"closed\"", ":", "raise", "aiohttp", ".", "web", ".", "HTTPForbidden", "(", "text...
If if_delete_subtrie is set to True what it will do is that it take in a keypath and traverse til the end of keypath then delete the whole subtrie of that node .
def _set ( self , node_hash , keypath , value , if_delete_subtrie = False ) : # Empty trie if node_hash == BLANK_HASH : if value : return self . _hash_and_save ( encode_kv_node ( keypath , self . _hash_and_save ( encode_leaf_node ( value ) ) ) ) else : return BLANK_HASH nodetype , left_child , right_child = parse_node ( self . db [ node_hash ] ) # Node is a leaf node if nodetype == LEAF_TYPE : # Keypath must match, there should be no remaining keypath if keypath : raise NodeOverrideError ( "Fail to set the value because the prefix of it's key" " is the same as existing key" ) if if_delete_subtrie : return BLANK_HASH return self . _hash_and_save ( encode_leaf_node ( value ) ) if value else BLANK_HASH # node is a key-value node elif nodetype == KV_TYPE : # Keypath too short if not keypath : if if_delete_subtrie : return BLANK_HASH else : raise NodeOverrideError ( "Fail to set the value because it's key" " is the prefix of other existing key" ) return self . _set_kv_node ( keypath , node_hash , nodetype , left_child , right_child , value , if_delete_subtrie ) # node is a branch node elif nodetype == BRANCH_TYPE : # Keypath too short if not keypath : if if_delete_subtrie : return BLANK_HASH else : raise NodeOverrideError ( "Fail to set the value because it's key" " is the prefix of other existing key" ) return self . _set_branch_node ( keypath , nodetype , left_child , right_child , value , if_delete_subtrie ) raise Exception ( "Invariant: This shouldn't ever happen" )
251,288
https://github.com/ethereum/py-trie/blob/d33108d21b54d59ee311f61d978496c84a6f1f8b/trie/binary.py#L90-L153
[ "def", "installStatsLoop", "(", "statsFile", ",", "statsDelay", ")", ":", "def", "dumpStats", "(", ")", ":", "\"\"\"Actual stats dump function.\"\"\"", "scales", ".", "dumpStatsTo", "(", "statsFile", ")", "reactor", ".", "callLater", "(", "statsDelay", ",", "dumpS...
Equals to setting the value to None
def delete ( self , key ) : validate_is_bytes ( key ) self . root_hash = self . _set ( self . root_hash , encode_to_bin ( key ) , b'' )
251,289
https://github.com/ethereum/py-trie/blob/d33108d21b54d59ee311f61d978496c84a6f1f8b/trie/binary.py#L297-L303
[ "def", "ekpsel", "(", "query", ",", "msglen", ",", "tablen", ",", "collen", ")", ":", "query", "=", "stypes", ".", "stringToCharP", "(", "query", ")", "msglen", "=", "ctypes", ".", "c_int", "(", "msglen", ")", "tablen", "=", "ctypes", ".", "c_int", "...
Given a key prefix delete the whole subtrie that starts with the key prefix .
def delete_subtrie ( self , key ) : validate_is_bytes ( key ) self . root_hash = self . _set ( self . root_hash , encode_to_bin ( key ) , value = b'' , if_delete_subtrie = True , )
251,290
https://github.com/ethereum/py-trie/blob/d33108d21b54d59ee311f61d978496c84a6f1f8b/trie/binary.py#L305-L320
[ "def", "writearff", "(", "data", ",", "filename", ",", "relation_name", "=", "None", ",", "index", "=", "True", ")", ":", "if", "isinstance", "(", "filename", ",", "str", ")", ":", "fp", "=", "open", "(", "filename", ",", "'w'", ")", "if", "relation_...
Saves a node into the database and returns its hash
def _hash_and_save ( self , node ) : validate_is_bin_node ( node ) node_hash = keccak ( node ) self . db [ node_hash ] = node return node_hash
251,291
https://github.com/ethereum/py-trie/blob/d33108d21b54d59ee311f61d978496c84a6f1f8b/trie/binary.py#L338-L346
[ "def", "handleServerEvents", "(", "self", ",", "msg", ")", ":", "self", ".", "log", ".", "debug", "(", "'MSG %s'", ",", "msg", ")", "self", ".", "handleConnectionState", "(", "msg", ")", "if", "msg", ".", "typeName", "==", "\"error\"", ":", "self", "."...
0100000101010111010000110100100101001001 - > ASCII
def decode_from_bin ( input_bin ) : for chunk in partition_all ( 8 , input_bin ) : yield sum ( 2 ** exp * bit for exp , bit in enumerate ( reversed ( chunk ) ) )
251,292
https://github.com/ethereum/py-trie/blob/d33108d21b54d59ee311f61d978496c84a6f1f8b/trie/utils/binaries.py#L18-L27
[ "async", "def", "with_exception", "(", "self", ",", "subprocess", ",", "*", "matchers", ")", ":", "def", "_callback", "(", "event", ",", "matcher", ")", ":", "raise", "RoutineException", "(", "matcher", ",", "event", ")", "return", "await", "self", ".", ...
ASCII - > 0100000101010111010000110100100101001001
def encode_to_bin ( value ) : for char in value : for exp in EXP : if char & exp : yield True else : yield False
251,293
https://github.com/ethereum/py-trie/blob/d33108d21b54d59ee311f61d978496c84a6f1f8b/trie/utils/binaries.py#L31-L40
[ "def", "_check_regr", "(", "self", ",", "regr", ",", "new_reg", ")", ":", "body", "=", "getattr", "(", "new_reg", ",", "'body'", ",", "new_reg", ")", "for", "k", ",", "v", "in", "body", ".", "items", "(", ")", ":", "if", "k", "==", "'resource'", ...
Encodes a sequence of 0s and 1s into tightly packed bytes Used in encoding key path of a KV - NODE
def encode_from_bin_keypath ( input_bin ) : padded_bin = bytes ( ( 4 - len ( input_bin ) ) % 4 ) + input_bin prefix = TWO_BITS [ len ( input_bin ) % 4 ] if len ( padded_bin ) % 8 == 4 : return decode_from_bin ( PREFIX_00 + prefix + padded_bin ) else : return decode_from_bin ( PREFIX_100000 + prefix + padded_bin )
251,294
https://github.com/ethereum/py-trie/blob/d33108d21b54d59ee311f61d978496c84a6f1f8b/trie/utils/binaries.py#L43-L53
[ "def", "emailUser", "(", "video", ",", "error", "=", "None", ")", ":", "html", "=", "render_to_string", "(", "'frog/video_email.html'", ",", "{", "'user'", ":", "video", ".", "author", ",", "'error'", ":", "error", ",", "'video'", ":", "video", ",", "'SI...
Decodes bytes into a sequence of 0s and 1s Used in decoding key path of a KV - NODE
def decode_to_bin_keypath ( path ) : path = encode_to_bin ( path ) if path [ 0 ] == 1 : path = path [ 4 : ] assert path [ 0 : 2 ] == PREFIX_00 padded_len = TWO_BITS . index ( path [ 2 : 4 ] ) return path [ 4 + ( ( 4 - padded_len ) % 4 ) : ]
251,295
https://github.com/ethereum/py-trie/blob/d33108d21b54d59ee311f61d978496c84a6f1f8b/trie/utils/binaries.py#L56-L66
[ "def", "check_origin", "(", "self", ",", "origin", ")", ":", "mod_opts", "=", "self", ".", "application", ".", "mod_opts", "if", "mod_opts", ".", "get", "(", "'cors_origin'", ")", ":", "return", "bool", "(", "_check_cors_origin", "(", "origin", ",", "mod_o...
The Hex Prefix function
def encode_nibbles ( nibbles ) : if is_nibbles_terminated ( nibbles ) : flag = HP_FLAG_2 else : flag = HP_FLAG_0 raw_nibbles = remove_nibbles_terminator ( nibbles ) is_odd = len ( raw_nibbles ) % 2 if is_odd : flagged_nibbles = tuple ( itertools . chain ( ( flag + 1 , ) , raw_nibbles , ) ) else : flagged_nibbles = tuple ( itertools . chain ( ( flag , 0 ) , raw_nibbles , ) ) prefixed_value = nibbles_to_bytes ( flagged_nibbles ) return prefixed_value
251,296
https://github.com/ethereum/py-trie/blob/d33108d21b54d59ee311f61d978496c84a6f1f8b/trie/utils/nibbles.py#L78-L104
[ "def", "decode_tile_data", "(", "codec", ",", "tidx", ",", "data", ",", "data_size", ",", "stream", ")", ":", "OPENJP2", ".", "opj_decode_tile_data", ".", "argtypes", "=", "[", "CODEC_TYPE", ",", "ctypes", ".", "c_uint32", ",", "ctypes", ".", "POINTER", "(...
The inverse of the Hex Prefix function
def decode_nibbles ( value ) : nibbles_with_flag = bytes_to_nibbles ( value ) flag = nibbles_with_flag [ 0 ] needs_terminator = flag in { HP_FLAG_2 , HP_FLAG_2 + 1 } is_odd_length = flag in { HP_FLAG_0 + 1 , HP_FLAG_2 + 1 } if is_odd_length : raw_nibbles = nibbles_with_flag [ 1 : ] else : raw_nibbles = nibbles_with_flag [ 2 : ] if needs_terminator : nibbles = add_nibbles_terminator ( raw_nibbles ) else : nibbles = raw_nibbles return nibbles
251,297
https://github.com/ethereum/py-trie/blob/d33108d21b54d59ee311f61d978496c84a6f1f8b/trie/utils/nibbles.py#L107-L127
[ "def", "_check_missing_manifests", "(", "self", ",", "segids", ")", ":", "manifest_paths", "=", "[", "self", ".", "_manifest_path", "(", "segid", ")", "for", "segid", "in", "segids", "]", "with", "Storage", "(", "self", ".", "vol", ".", "layer_cloudpath", ...
Get a local version of the file downloading it from the remote storage if required . The returned value should be used as a context manager to ensure any temporary files are cleaned up afterwards .
def get_local_file ( file ) : try : with open ( file . path ) : yield file . path except NotImplementedError : _ , ext = os . path . splitext ( file . name ) with NamedTemporaryFile ( prefix = 'wagtailvideo-' , suffix = ext ) as tmp : try : file . open ( 'rb' ) for chunk in file . chunks ( ) : tmp . write ( chunk ) finally : file . close ( ) tmp . flush ( ) yield tmp . name
251,298
https://github.com/neon-jungle/wagtailvideos/blob/05a43571ac4b5e7cf07fbb89e804e53447b699c2/wagtailvideos/models.py#L292-L311
[ "def", "add_mismatch", "(", "self", ",", "entity", ",", "*", "traits", ")", ":", "for", "trait", "in", "traits", ":", "self", ".", "index", "[", "trait", "]", ".", "add", "(", "entity", ")" ]
Calls rust method and does some error handling .
def rustcall ( func , * args ) : lib . semaphore_err_clear ( ) rv = func ( * args ) err = lib . semaphore_err_get_last_code ( ) if not err : return rv msg = lib . semaphore_err_get_last_message ( ) cls = exceptions_by_code . get ( err , SemaphoreError ) exc = cls ( decode_str ( msg ) ) backtrace = decode_str ( lib . semaphore_err_get_backtrace ( ) ) if backtrace : exc . rust_info = backtrace raise exc
251,299
https://github.com/getsentry/semaphore/blob/6f260b4092261e893b4debd9a3a7a78232f46c5e/py/semaphore/utils.py#L17-L30
[ "def", "_rdd", "(", "self", ")", ":", "columns", "=", "self", ".", "_schema_rdd", ".", "columns", "index_names", "=", "self", ".", "_index_names", "def", "fromRecords", "(", "records", ")", ":", "if", "not", "records", ":", "return", "[", "]", "else", ...