idx int64 0 63k | question stringlengths 53 5.28k | target stringlengths 5 805 |
|---|---|---|
39,100 | def print_graph ( self , format : str = 'turtle' ) -> str : print ( self . g . serialize ( format = format ) . decode ( 'utf-8' ) ) | prints serialized formated rdflib Graph |
39,101 | def updateTerms ( self , data : list , LIMIT : int = 20 , _print : bool = True , crawl : bool = False , ) -> list : url_base = self . base_url + '/api/1/term/edit/{id}' merged_data = [ ] old_data = self . identifierSearches ( [ d [ 'id' ] for d in data ] , LIMIT = LIMIT , _print = _print , crawl = crawl , ) for d in data : url = url_base . format ( id = str ( d [ 'id' ] ) ) if d [ 'ilx' ] != old_data [ int ( d [ 'id' ] ) ] [ 'ilx' ] : print ( d [ 'ilx' ] , old_data [ int ( d [ 'id' ] ) ] [ 'ilx' ] ) exit ( 'You might be using beta insead of production!' ) merged = scicrunch_client_helper . merge ( new = d , old = old_data [ int ( d [ 'id' ] ) ] ) merged = scicrunch_client_helper . superclasses_bug_fix ( merged ) merged_data . append ( ( url , merged ) ) resp = self . post ( merged_data , LIMIT = LIMIT , action = 'Updating Terms' , _print = _print , crawl = crawl , ) return resp | Updates existing entities |
39,102 | def getAnnotations_via_tid ( self , tids , LIMIT = 25 , _print = True , crawl = False ) : url_base = self . base_url + '/api/1/term/get-annotations/{tid}?key=' + self . api_key urls = [ url_base . format ( tid = str ( tid ) ) for tid in tids ] return self . get ( urls , LIMIT = LIMIT , _print = _print , crawl = crawl ) | tids = list of term ids that possess the annoations |
39,103 | def getAnnotations_via_id ( self , annotation_ids , LIMIT = 25 , _print = True , crawl = False ) : url_base = self . base_url + '/api/1/term/get-annotation/{id}?key=' + self . api_key urls = [ url_base . format ( id = str ( annotation_id ) ) for annotation_id in annotation_ids ] return self . get ( urls , LIMIT = LIMIT , _print = _print , crawl = crawl ) | tids = list of strings or ints that are the ids of the annotations themselves |
39,104 | def deleteAnnotations ( self , annotation_ids , LIMIT = 25 , _print = True , crawl = False , ) : url_base = self . base_url + '/api/1/term/edit-annotation/{annotation_id}' annotations = self . getAnnotations_via_id ( annotation_ids , LIMIT = LIMIT , _print = _print , crawl = crawl ) annotations_to_delete = [ ] for annotation_id in annotation_ids : annotation = annotations [ int ( annotation_id ) ] params = { 'value' : ' ' , 'annotation_tid' : ' ' , 'tid' : ' ' , 'term_version' : '1' , 'annotation_term_version' : '1' , } url = url_base . format ( annotation_id = annotation_id ) annotation . update ( { ** params } ) annotations_to_delete . append ( ( url , annotation ) ) return self . post ( annotations_to_delete , LIMIT = LIMIT , _print = _print , crawl = crawl ) | data = list of ids |
39,105 | def deprecate_entity ( self , ilx_id : str , note = None , ) -> None : term_id , term_version = [ ( d [ 'id' ] , d [ 'version' ] ) for d in self . ilxSearches ( [ ilx_id ] , crawl = True , _print = False ) . values ( ) ] [ 0 ] annotations = [ { 'tid' : term_id , 'annotation_tid' : '306375' , 'value' : 'True' , 'term_version' : term_version , 'annotation_term_version' : '1' , } ] if note : editor_note = { 'tid' : term_id , 'annotation_tid' : '306378' , 'value' : note , 'term_version' : term_version , 'annotation_term_version' : '1' , } annotations . append ( editor_note ) self . addAnnotations ( annotations , crawl = True , _print = False ) print ( annotations ) | Tagged term in interlex to warn this term is no longer used |
39,106 | def force_add_term ( self , entity : dict ) : needed = set ( [ 'label' , 'type' , ] ) url_ilx_add = self . base_url + '/api/1/ilx/add' url_term_add = self . base_url + '/api/1/term/add' url_term_update = self . base_url + '/api/1/term/edit/{id}' if ( set ( list ( entity ) ) & needed ) != needed : exit ( 'You need keys: ' + str ( needed - set ( list ( d ) ) ) ) random_string = '' . join ( random . choices ( string . ascii_uppercase + string . digits , k = 25 ) ) real_label = entity [ 'label' ] entity [ 'label' ] = entity [ 'label' ] + '_' + random_string entity [ 'term' ] = entity . pop ( 'label' ) primer_response = self . post ( [ ( url_ilx_add , entity . copy ( ) ) ] , _print = False , crawl = True ) [ 0 ] entity [ 'label' ] = entity . pop ( 'term' ) entity [ 'ilx' ] = primer_response [ 'fragment' ] if primer_response . get ( 'fragment' ) else primer_response [ 'ilx' ] entity = scicrunch_client_helper . superclasses_bug_fix ( entity ) response = self . post ( [ ( url_term_add , entity . copy ( ) ) ] , _print = False , crawl = True ) [ 0 ] old_data = self . identifierSearches ( [ response [ 'id' ] ] , _print = False , crawl = True , ) [ response [ 'id' ] ] old_data [ 'label' ] = real_label entity = old_data . copy ( ) url_term_update = url_term_update . format ( id = entity [ 'id' ] ) return self . post ( [ ( url_term_update , entity ) ] , _print = False , crawl = True ) | Need to add an entity that already has a label existing in InterLex? Well this is the function for you! |
39,107 | def create_html ( s1 , s2 , output = 'test.html' ) : html = difflib . HtmlDiff ( ) . make_file ( s1 . split ( ) , s2 . split ( ) ) with open ( output , 'w' ) as f : f . write ( html ) | creates basic html based on the diff of 2 strings |
39,108 | def traverse_data ( obj , key_target ) : if isinstance ( obj , str ) and '.json' in str ( obj ) : obj = json . load ( open ( obj , 'r' ) ) if isinstance ( obj , list ) : queue = obj . copy ( ) elif isinstance ( obj , dict ) : queue = [ obj . copy ( ) ] else : sys . exit ( 'obj needs to be a list or dict' ) count = 0 while not queue or count != 1000 : count += 1 curr_obj = queue . pop ( ) if isinstance ( curr_obj , dict ) : for key , value in curr_obj . items ( ) : if key == key_target : return curr_obj else : queue . append ( curr_obj [ key ] ) elif isinstance ( curr_obj , list ) : for co in curr_obj : queue . append ( co ) if count == 1000 : sys . exit ( 'traverse_data needs to be updated...' ) return False | will traverse nested list and dicts until key_target equals the current dict key |
39,109 | def memoryCheck ( vms_max_kb ) : safety_factor = 1.2 vms_max = vms_max_kb vms_gigs = vms_max / 1024 ** 2 buffer = safety_factor * vms_max buffer_gigs = buffer / 1024 ** 2 vm = psutil . virtual_memory ( ) free_gigs = vm . available / 1024 ** 2 if vm . available < buffer : raise MemoryError ( 'Running this requires quite a bit of memory ~ ' f'{vms_gigs:.2f}, you have {free_gigs:.2f} of the ' f'{buffer_gigs:.2f} needed' ) | Lookup vms_max using getCurrentVMSKb |
39,110 | def _sequence_query ( self ) : klass = self . __class__ query = klass . select ( ) . where ( klass . sequence . is_null ( False ) ) seq_scope_field_names = ( self . __seq_scope_field_name__ or '' ) . split ( ',' ) for name in seq_scope_field_names : seq_scope_field = getattr ( klass , name , None ) if seq_scope_field : seq_scope_field_value = getattr ( self , name ) query = query . where ( seq_scope_field == seq_scope_field_value ) return query | query all sequence rows |
39,111 | def add_types ( graph , phenotypes ) : collect = defaultdict ( set ) def recurse ( id_ , start , level = 0 ) : for t in graph . g . triples ( ( None , None , id_ ) ) : if level == 0 : if t [ 1 ] != rdflib . term . URIRef ( 'http://www.w3.org/2002/07/owl#someValuesFrom' ) : continue if type_check ( t , ( rdflib . term . URIRef , rdflib . term . URIRef , rdflib . term . BNode ) ) : collect [ start ] . add ( t [ 0 ] ) return if level > 1 : if t [ 1 ] == rdflib . URIRef ( 'http://www.w3.org/1999/02/22-rdf-syntax-ns#first' ) or t [ 1 ] == rdflib . URIRef ( 'http://www.w3.org/1999/02/22-rdf-syntax-ns#rest' ) : continue recurse ( t [ 0 ] , start , level + 1 ) for phenotype in phenotypes : recurse ( phenotype , phenotype ) return collect | Add disjoint union classes so that it is possible to see the invariants associated with individual phenotypes |
39,112 | def config ( self ) : config = { } if self . config_file . exists ( ) : with open ( self . config_file . as_posix ( ) , 'rt' ) as f : config = { k : self . _override [ k ] if k in self . _override else v for k , v in yaml . safe_load ( f ) . items ( ) } return config | Allows changing the config on the fly |
39,113 | def _get_service_account_info ( self ) : with open ( self . service_account_file , 'r' ) as f : info = json . load ( f ) self . service_account_email = info . get ( 'client_email' ) if not self . service_account_email : raise GCECloudException ( 'Service account JSON file is invalid for GCE. ' 'client_email key is expected. See getting started ' 'docs for information on GCE configuration.' ) self . service_account_project = info . get ( 'project_id' ) if not self . service_account_project : raise GCECloudException ( 'Service account JSON file is invalid for GCE. ' 'project_id key is expected. See getting started ' 'docs for information on GCE configuration.' ) | Retrieve json dict from service account file . |
39,114 | def _get_driver ( self ) : ComputeEngine = get_driver ( Provider . GCE ) return ComputeEngine ( self . service_account_email , self . service_account_file , project = self . service_account_project ) | Get authenticated GCE driver . |
39,115 | def _get_ssh_public_key ( self ) : key = ipa_utils . generate_public_ssh_key ( self . ssh_private_key_file ) return '{user}:{key} {user}' . format ( user = self . ssh_user , key = key . decode ( ) ) | Generate SSH public key from private key . |
39,116 | def _validate_region ( self ) : if not self . region : raise GCECloudException ( 'Zone is required for GCE cloud framework: ' 'Example: us-west1-a' ) try : zone = self . compute_driver . ex_get_zone ( self . region ) except Exception : zone = None if not zone : raise GCECloudException ( '{region} is not a valid GCE zone. ' 'Example: us-west1-a' . format ( region = self . region ) ) | Validate region was passed in and is a valid GCE zone . |
39,117 | def _set_instance_ip ( self ) : instance = self . _get_instance ( ) if instance . public_ips : self . instance_ip = instance . public_ips [ 0 ] elif instance . private_ips : self . instance_ip = instance . private_ips [ 0 ] else : raise GCECloudException ( 'IP address for instance: %s cannot be found.' % self . running_instance_id ) | Retrieve and set the instance ip address . |
39,118 | def grab_rdflib_graph_version ( g : Graph ) -> str : version = g . subject_objects ( predicate = URIRef ( OWL . versionIRI ) ) version = [ o for s , o in version ] if len ( version ) != 1 : print ( 'versioning isn\'t correct' ) else : version = str ( version [ 0 ] ) return version | Crap - shot for ontology iri if its properly in the header and correctly formated |
39,119 | def fix_ilx ( self , ilx_id : str ) -> str : ilx_id = ilx_id . replace ( 'http://uri.interlex.org/base/' , '' ) if ilx_id [ : 4 ] not in [ 'TMP:' , 'tmp_' , 'ILX:' , 'ilx_' ] : raise ValueError ( 'Need to provide ilx ID with format ilx_# or ILX:# for given ID ' + ilx_id ) return ilx_id . replace ( 'ILX:' , 'ilx_' ) . replace ( 'TMP:' , 'tmp_' ) | Database only excepts lower case and underscore version of ID |
39,120 | def pull_int_tail ( self , string : str ) -> str : int_tail = '' for element in string [ : : - 1 ] : try : int ( element ) int_tail = element + int_tail except : pass return int_tail | Useful for IDs that have giberish in the front of the real ID |
39,121 | def curie_search ( self , curie : str ) -> dict : ilx_row = self . curie2row . get ( curie ) if not ilx_row : return None else : return ilx_row | Returns the row in InterLex associated with the curie |
39,122 | def fragment_search ( self , fragement : str ) -> List [ dict ] : fragement = self . extract_fragment ( fragement ) ilx_rows = self . fragment2rows . get ( fragement ) if not ilx_rows : return None else : return ilx_rows | Returns the rows in InterLex associated with the fragment |
39,123 | def label_search ( self , label : str ) -> List [ dict ] : ilx_rows = self . label2rows ( self . local_degrade ( label ) ) if not ilx_rows : return None else : return ilx_rows | Returns the rows in InterLex associated with that label |
39,124 | def readyup_entity ( self , label : str , type : str , uid : Union [ int , str ] = None , comment : str = None , definition : str = None , superclass : str = None , synonyms : list = None , existing_ids : List [ dict ] = None , ) -> dict : entity = dict ( label = label , type = type , ) if uid : entity [ 'uid' ] = uid if definition : entity [ 'definition' ] = definition if comment : entity [ 'comment' ] = comment if superclass : entity [ 'superclass' ] = { 'ilx_id' : self . fix_ilx ( superclass ) } if synonyms : entity [ 'synonyms' ] = [ { 'literal' : syn } for syn in synonyms ] if existing_ids : if existing_ids [ 0 ] . get ( 'curie' ) and existing_ids [ 0 ] . get ( 'iri' ) : pass else : exit ( 'Need curie and iri for existing_ids in List[dict] form' ) entity [ 'existing_ids' ] = existing_ids return entity | Setups the entity to be InterLex ready |
39,125 | def exhaustive_label_check ( self , ontology : pd . DataFrame , label_predicate = 'rdfs:label' , diff : bool = True , ) -> Tuple [ list ] : inside , outside = [ ] , [ ] header = [ 'Index' ] + list ( ontology . columns ) for row in ontology . itertuples ( ) : row = { header [ i ] : val for i , val in enumerate ( row ) } label_obj = row [ label_predicate ] if isinstance ( label_obj , list ) : if len ( label_obj ) != 1 : exit ( 'Need to have only 1 label in the cell from the onotology.' ) else : label_obj = label_obj [ 0 ] entity_label = self . local_degrade ( label_obj ) ilx_rows = self . label2rows . get ( entity_label ) if ilx_rows : inside . append ( { 'external_ontology_row' : row , 'ilx_rows' : ilx_rows , } ) else : outside . append ( row ) if diff : diff = self . __exhaustive_diff ( inside ) return inside , outside , diff return inside , outside | All entities with conflicting labels gets a full diff |
39,126 | def exhaustive_iri_check ( self , ontology : pd . DataFrame , iri_predicate : str , diff : bool = True , ) -> Tuple [ list ] : inside , outside = [ ] , [ ] header = [ 'Index' ] + list ( ontology . columns ) for row in ontology . itertuples ( ) : row = { header [ i ] : val for i , val in enumerate ( row ) } entity_iri = row [ iri_predicate ] if isinstance ( entity_iri , list ) : if len ( entity_iri ) != 0 : exit ( 'Need to have only 1 iri in the cell from the onotology.' ) else : entity_iri = entity_iri [ 0 ] ilx_row = self . iri2row . get ( entity_iri ) if ilx_row : inside . append ( { 'external_ontology_row' : row , 'ilx_rows' : [ ilx_row ] , } ) else : outside . append ( row ) if diff : diff = self . __exhaustive_diff ( inside ) return inside , outside , diff return inside , outside | All entities with conflicting iris gets a full diff to see if they belong |
39,127 | def exhaustive_curie_check ( self , ontology : pd . DataFrame , curie_predicate : str , curie_prefix : str , diff : bool = True , ) -> Tuple [ list ] : inside , outside = [ ] , [ ] curie_prefix = curie_prefix . replace ( ':' , '' ) header = [ 'Index' ] + list ( ontology . columns ) for row in ontology . itertuples ( ) : row = { header [ i ] : val for i , val in enumerate ( row ) } entity_curie = row [ curie_predicate ] if isinstance ( entity_curie , list ) : if len ( entity_curie ) != 0 : exit ( 'Need to have only 1 iri in the cell from the onotology.' ) else : entity_curie = entity_curie [ 0 ] entity_curie = curie_prefix + ':' + self . extract_fragment ( entity_curie ) ilx_row = self . curie2row . get ( entity_curie ) if ilx_row : inside . append ( { 'external_ontology_row' : row , 'ilx_rows' : [ ilx_row ] , } ) else : outside . append ( row ) if diff : diff = self . __exhaustive_diff ( inside ) return inside , outside , diff return inside , outside | All entities with conflicting curies gets a full diff to see if they belong |
39,128 | def exhaustive_fragment_check ( self , ontology : pd . DataFrame , iri_curie_fragment_predicate : str = 'iri' , cross_reference_iris : bool = False , cross_reference_fragments : bool = False , diff : bool = True , ) -> Tuple [ list ] : inside , outside = [ ] , [ ] header = [ 'Index' ] + list ( ontology . columns ) for row in ontology . itertuples ( ) : row = { header [ i ] : val for i , val in enumerate ( row ) } entity_suffix = row [ iri_curie_fragment_predicate ] if isinstance ( entity_suffix , list ) : if len ( entity_suffix ) != 0 : exit ( 'Need to have only 1 iri in the cell from the onotology.' ) else : entity_suffix = entity_suffix [ 0 ] entity_fragment = self . extract_fragment ( entity_suffix ) ilx_rows = self . fragment2rows . get ( entity_fragment ) if cross_reference_fragments and ilx_rows : ilx_rows = [ row for row in ilx_rows if entity_fragment . lower ( ) in row [ 'iri' ] . lower ( ) ] if cross_reference_iris and ilx_rows : ilx_rows = [ row for row in ilx_rows if entity_suffix . rsplit ( '/' , 1 ) [ - 1 ] . lower ( ) in row [ 'iri' ] . lower ( ) ] if ilx_rows : inside . append ( { 'external_ontology_row' : row , 'ilx_rows' : ilx_rows , } ) else : outside . append ( row ) if diff : diff = self . __exhaustive_diff ( inside ) return inside , outside , diff return inside , outside | All entities with conflicting fragments gets a full diff to see if they belong |
39,129 | def exhaustive_ontology_ilx_diff_row_only ( self , ontology_row : dict ) -> dict : results = [ ] header = [ 'Index' ] + list ( self . existing_ids . columns ) for row in self . existing_ids . itertuples ( ) : row = { header [ i ] : val for i , val in enumerate ( row ) } check_list = [ { 'external_ontology_row' : ontology_row , 'ilx_rows' : [ row ] , } , ] result = self . __exhaustive_diff ( check_list ) [ 0 ] [ 0 ] if result [ 'same' ] : results . append ( result ) return results | WARNING RUNTIME IS AWEFUL |
39,130 | def combo_exhaustive_label_definition_check ( self , ontology : pd . DataFrame , label_predicate : str , definition_predicates : str , diff = True ) -> List [ List [ dict ] ] : inside , outside = [ ] , [ ] header = [ 'Index' ] + list ( ontology . columns ) for row in ontology . itertuples ( ) : row = { header [ i ] : val for i , val in enumerate ( row ) } label_obj = row [ label_predicate ] if isinstance ( label_obj , list ) : if len ( label_obj ) != 1 : exit ( 'Need to have only 1 label in the cell from the onotology.' ) else : label_obj = label_obj [ 0 ] entity_label = self . local_degrade ( label_obj ) label_search_results = self . label2rows . get ( entity_label ) label_ilx_rows = label_search_results if label_search_results else [ ] definition_ilx_rows = [ ] for definition_predicate in definition_predicates : definition_objs = row [ definition_predicate ] if not definition_objs : continue definition_objs = [ definition_objs ] if not isinstance ( definition_objs , list ) else definition_objs for definition_obj in definition_objs : definition_obj = self . local_degrade ( definition_obj ) definition_search_results = self . definition2rows . get ( definition_obj ) if definition_search_results : definition_ilx_rows . extend ( definition_search_results ) ilx_rows = [ dict ( t ) for t in { tuple ( d . items ( ) ) for d in ( label_ilx_rows + definition_ilx_rows ) } ] if ilx_rows : inside . append ( { 'external_ontology_row' : row , 'ilx_rows' : ilx_rows , } ) else : outside . append ( row ) if diff : diff = self . __exhaustive_diff ( inside ) return inside , outside , diff return inside , outside | Combo of label & definition exhaustive check out of convenience |
39,131 | def clear_cache ( ip = None ) : if ip : with ignored ( Exception ) : client = CLIENT_CACHE [ ip ] del CLIENT_CACHE [ ip ] client . close ( ) else : for client in CLIENT_CACHE . values ( ) : with ignored ( Exception ) : client . close ( ) CLIENT_CACHE . clear ( ) | Clear the client cache or remove key matching the given ip . |
39,132 | def establish_ssh_connection ( ip , ssh_private_key_file , ssh_user , port , attempts = 5 , timeout = None ) : client = paramiko . SSHClient ( ) client . set_missing_host_key_policy ( paramiko . AutoAddPolicy ( ) ) while attempts : try : client . connect ( ip , port = port , username = ssh_user , key_filename = ssh_private_key_file , timeout = timeout ) except : attempts -= 1 time . sleep ( 10 ) else : return client raise IpaSSHException ( 'Failed to establish SSH connection to instance.' ) | Establish ssh connection and return paramiko client . |
39,133 | def execute_ssh_command ( client , cmd ) : try : stdin , stdout , stderr = client . exec_command ( cmd ) err = stderr . read ( ) out = stdout . read ( ) if err : raise IpaSSHException ( out . decode ( ) + err . decode ( ) ) except : raise return out . decode ( ) | Execute given command using paramiko . |
39,134 | def extract_archive ( client , archive_path , extract_path = None ) : command = 'tar -xf {path}' . format ( path = archive_path ) if extract_path : command += ' -C {extract_path}' . format ( extract_path = extract_path ) out = execute_ssh_command ( client , command ) return out | Extract the archive in current path using the provided client . |
39,135 | def generate_public_ssh_key ( ssh_private_key_file ) : try : with open ( ssh_private_key_file , "rb" ) as key_file : key = key_file . read ( ) except FileNotFoundError : raise IpaUtilsException ( 'SSH private key file: %s cannot be found.' % ssh_private_key_file ) try : private_key = serialization . load_pem_private_key ( key , password = None , backend = default_backend ( ) ) except ValueError : raise IpaUtilsException ( 'SSH private key file: %s is not a valid key file.' % ssh_private_key_file ) return private_key . public_key ( ) . public_bytes ( serialization . Encoding . OpenSSH , serialization . PublicFormat . OpenSSH ) | Generate SSH public key from private key file . |
39,136 | def get_config_values ( config_path , section , default = 'default' ) : values = { } if not os . path . isfile ( config_path ) : raise IpaUtilsException ( 'Config file not found: %s' % config_path ) config = configparser . ConfigParser ( ) try : config . read ( config_path ) except Exception : raise IpaUtilsException ( 'Config file format invalid.' ) try : values . update ( config . items ( default ) ) except Exception : pass try : values . update ( config . items ( section ) ) except Exception : pass return values | Parse ini config file and return a dict of values . |
39,137 | def get_ssh_client ( ip , ssh_private_key_file , ssh_user = 'root' , port = 22 , timeout = 600 , wait_period = 10 ) : if ip in CLIENT_CACHE : return CLIENT_CACHE [ ip ] start = time . time ( ) end = start + timeout client = None while time . time ( ) < end : try : client = establish_ssh_connection ( ip , ssh_private_key_file , ssh_user , port , timeout = wait_period ) execute_ssh_command ( client , 'ls' ) except : if client : client . close ( ) wait_period += wait_period else : CLIENT_CACHE [ ip ] = client return client raise IpaSSHException ( 'Attempt to establish SSH connection failed.' ) | Attempt to establish and test ssh connection . |
39,138 | def get_yaml_config ( config_path ) : config_path = os . path . expanduser ( config_path ) if not os . path . isfile ( config_path ) : raise IpaUtilsException ( 'Config file not found: %s' % config_path ) with open ( config_path , 'r' ) as f : config = yaml . safe_load ( f ) return config | Load yaml config file and return dictionary . |
39,139 | def parse_sync_points ( names , tests ) : test_files = [ ] section = set ( ) for name in names : if name in SYNC_POINTS : if section : test_files . append ( section ) test_files . append ( name ) section = set ( ) else : section . add ( find_test_file ( name , tests ) ) if section : test_files . append ( section ) return test_files | Slice list of test names on sync points . |
39,140 | def put_file ( client , source_file , destination_file ) : try : sftp_client = client . open_sftp ( ) sftp_client . put ( source_file , destination_file ) except Exception as error : raise IpaUtilsException ( 'Error copying file to instance: {0}.' . format ( error ) ) finally : with ignored ( Exception ) : sftp_client . close ( ) | Copy file to instance using Paramiko client connection . |
39,141 | def redirect_output ( fileobj ) : old = sys . stdout sys . stdout = fileobj try : yield fileobj finally : sys . stdout = old | Redirect standard out to file . |
39,142 | def ssh_config ( ssh_user , ssh_private_key_file ) : try : ssh_file = NamedTemporaryFile ( delete = False , mode = 'w+' ) ssh_file . write ( 'Host *\n' ) ssh_file . write ( ' IdentityFile %s\n' % ssh_private_key_file ) ssh_file . write ( ' User %s' % ssh_user ) ssh_file . close ( ) yield ssh_file . name finally : with ignored ( OSError ) : os . remove ( ssh_file . name ) | Create temporary ssh config file . |
39,143 | def update_history_log ( history_log , clear = False , description = None , test_log = None ) : if not test_log and not clear : raise IpaUtilsException ( 'A test log or clear flag must be provided.' ) if clear : with ignored ( OSError ) : os . remove ( history_log ) else : history_dir = os . path . dirname ( history_log ) if not os . path . isdir ( history_dir ) : try : os . makedirs ( history_dir ) except OSError as error : raise IpaUtilsException ( 'Unable to create directory: %s' % error ) with open ( history_log , 'a+' ) as f : if description : description = '"%s"' % description out = '{} {}' . format ( test_log , description or '' ) f . write ( out . strip ( ) + '\n' ) | Update the history log file with item . If clear flag is provided the log file is deleted . |
39,144 | def create_pred2common ( self ) : self . pred2common = { } for common_name , ext_preds in self . common2preds . items ( ) : for pred in ext_preds : pred = pred . lower ( ) . strip ( ) self . pred2common [ pred ] = common_name | Takes list linked to common name and maps common name to accepted predicate and their respected suffixes to decrease sensitivity . |
39,145 | def clean_pred ( self , pred , ignore_warning = False ) : original_pred = pred pred = pred . lower ( ) . strip ( ) if 'http' in pred : pred = pred . split ( '/' ) [ - 1 ] elif ':' in pred : if pred [ - 1 ] != ':' : pred = pred . split ( ':' ) [ - 1 ] else : if not ignore_warning : exit ( 'Not a valid predicate: ' + original_pred + '. Needs to be an iri "/" or curie ":".' ) return pred | Takes the predicate and returns the suffix lower case stripped version |
39,146 | def _create_network_interface ( self , ip_config_name , nic_name , public_ip , region , resource_group_name , subnet , accelerated_networking = False ) : nic_config = { 'location' : region , 'ip_configurations' : [ { 'name' : ip_config_name , 'private_ip_allocation_method' : 'Dynamic' , 'subnet' : { 'id' : subnet . id } , 'public_ip_address' : { 'id' : public_ip . id } , } ] } if accelerated_networking : nic_config [ 'enable_accelerated_networking' ] = True try : nic_setup = self . network . network_interfaces . create_or_update ( resource_group_name , nic_name , nic_config ) except Exception as error : raise AzureCloudException ( 'Unable to create network interface: {0}.' . format ( error ) ) return nic_setup . result ( ) | Create a network interface in the resource group . |
39,147 | def _create_public_ip ( self , public_ip_name , resource_group_name , region ) : public_ip_config = { 'location' : region , 'public_ip_allocation_method' : 'Dynamic' } try : public_ip_setup = self . network . public_ip_addresses . create_or_update ( resource_group_name , public_ip_name , public_ip_config ) except Exception as error : raise AzureCloudException ( 'Unable to create public IP: {0}.' . format ( error ) ) return public_ip_setup . result ( ) | Create dynamic public IP address in the resource group . |
39,148 | def _create_resource_group ( self , region , resource_group_name ) : resource_group_config = { 'location' : region } try : self . resource . resource_groups . create_or_update ( resource_group_name , resource_group_config ) except Exception as error : raise AzureCloudException ( 'Unable to create resource group: {0}.' . format ( error ) ) | Create resource group if it does not exist . |
39,149 | def _create_storage_profile ( self ) : if self . image_publisher : storage_profile = { 'image_reference' : { 'publisher' : self . image_publisher , 'offer' : self . image_offer , 'sku' : self . image_sku , 'version' : self . image_version } , } else : for image in self . compute . images . list ( ) : if image . name == self . image_id : image_id = image . id break else : raise AzureCloudException ( 'Image with name {0} not found.' . format ( self . image_id ) ) storage_profile = { 'image_reference' : { 'id' : image_id } } return storage_profile | Create the storage profile for the instance . |
39,150 | def _create_subnet ( self , resource_group_name , subnet_id , vnet_name ) : subnet_config = { 'address_prefix' : '10.0.0.0/29' } try : subnet_setup = self . network . subnets . create_or_update ( resource_group_name , vnet_name , subnet_id , subnet_config ) except Exception as error : raise AzureCloudException ( 'Unable to create subnet: {0}.' . format ( error ) ) return subnet_setup . result ( ) | Create a subnet in the provided vnet and resource group . |
39,151 | def _create_virtual_network ( self , region , resource_group_name , vnet_name ) : vnet_config = { 'location' : region , 'address_space' : { 'address_prefixes' : [ '10.0.0.0/27' ] } } try : vnet_setup = self . network . virtual_networks . create_or_update ( resource_group_name , vnet_name , vnet_config ) except Exception as error : raise AzureCloudException ( 'Unable to create vnet: {0}.' . format ( error ) ) vnet_setup . wait ( ) | Create a vnet in the given resource group with default address space . |
39,152 | def _create_vm ( self , vm_config ) : try : vm_setup = self . compute . virtual_machines . create_or_update ( self . running_instance_id , self . running_instance_id , vm_config ) except Exception as error : raise AzureCloudException ( 'An exception occurred creating virtual machine: {0}' . format ( error ) ) vm_setup . wait ( ) | Attempt to create or update VM instance based on vm_parameters config . |
39,153 | def _create_vm_config ( self , interface ) : self . _process_image_id ( ) hardware_profile = { 'vm_size' : self . instance_type or AZURE_DEFAULT_TYPE } network_profile = { 'network_interfaces' : [ { 'id' : interface . id , 'primary' : True } ] } storage_profile = self . _create_storage_profile ( ) os_profile = { 'computer_name' : self . running_instance_id , 'admin_username' : self . ssh_user , 'linux_configuration' : { 'disable_password_authentication' : True , 'ssh' : { 'public_keys' : [ { 'path' : '/home/{0}/.ssh/authorized_keys' . format ( self . ssh_user ) , 'key_data' : self . ssh_public_key } ] } } } vm_config = { 'location' : self . region , 'os_profile' : os_profile , 'hardware_profile' : hardware_profile , 'storage_profile' : storage_profile , 'network_profile' : network_profile } return vm_config | Create the VM config dictionary . |
39,154 | def _get_instance ( self ) : try : instance = self . compute . virtual_machines . get ( self . running_instance_id , self . running_instance_id , expand = 'instanceView' ) except Exception as error : raise AzureCloudException ( 'Unable to retrieve instance: {0}' . format ( error ) ) return instance | Return the instance matching the running_instance_id . |
39,155 | def _get_instance_state ( self ) : instance = self . _get_instance ( ) statuses = instance . instance_view . statuses for status in statuses : if status . code . startswith ( 'PowerState' ) : return status . display_status | Retrieve state of instance . |
39,156 | def _get_management_client ( self , client_class ) : try : client = get_client_from_auth_file ( client_class , auth_path = self . service_account_file ) except ValueError as error : raise AzureCloudException ( 'Service account file format is invalid: {0}.' . format ( error ) ) except KeyError as error : raise AzureCloudException ( 'Service account file missing key: {0}.' . format ( error ) ) except Exception as error : raise AzureCloudException ( 'Unable to create resource management client: ' '{0}.' . format ( error ) ) return client | Return instance of resource management client . |
39,157 | def _launch_instance ( self ) : self . running_instance_id = ipa_utils . generate_instance_name ( 'azure-ipa-test' ) self . logger . debug ( 'ID of instance: %s' % self . running_instance_id ) self . _set_default_resource_names ( ) try : self . _create_resource_group ( self . region , self . running_instance_id ) if self . subnet_id : subnet = self . network . subnets . get ( self . vnet_resource_group , self . vnet_name , self . subnet_id ) else : self . subnet_id = '' . join ( [ self . running_instance_id , '-subnet' ] ) self . vnet_name = '' . join ( [ self . running_instance_id , '-vnet' ] ) self . _create_virtual_network ( self . region , self . running_instance_id , self . vnet_name ) subnet = self . _create_subnet ( self . running_instance_id , self . subnet_id , self . vnet_name ) public_ip = self . _create_public_ip ( self . public_ip_name , self . running_instance_id , self . region ) interface = self . _create_network_interface ( self . ip_config_name , self . nic_name , public_ip , self . region , self . running_instance_id , subnet , self . accelerated_networking ) vm_config = self . _create_vm_config ( interface ) self . _create_vm ( vm_config ) except Exception : try : self . _terminate_instance ( ) except Exception : pass raise else : self . _wait_on_instance ( 'VM running' , timeout = self . timeout ) | Create new test instance in a resource group with the same name . |
39,158 | def _process_image_id ( self ) : try : image_info = self . image_id . strip ( ) . split ( ':' ) self . image_publisher = image_info [ 0 ] self . image_offer = image_info [ 1 ] self . image_sku = image_info [ 2 ] self . image_version = image_info [ 3 ] except Exception : self . image_publisher = None | Split image id into component values . |
39,159 | def _set_default_resource_names ( self ) : self . ip_config_name = '' . join ( [ self . running_instance_id , '-ip-config' ] ) self . nic_name = '' . join ( [ self . running_instance_id , '-nic' ] ) self . public_ip_name = '' . join ( [ self . running_instance_id , '-public-ip' ] ) | Generate names for resources based on the running_instance_id . |
39,160 | def _set_image_id ( self ) : instance = self . _get_instance ( ) image_info = instance . storage_profile . image_reference if image_info . publisher : self . image_id = ':' . join ( [ image_info . publisher , image_info . offer , image_info . sku , image_info . version ] ) else : self . image_id = image_info . id . rsplit ( '/' , maxsplit = 1 ) [ 1 ] | If an existing instance is used get image id from deployment . |
39,161 | def _set_instance_ip ( self ) : try : ip_address = self . network . public_ip_addresses . get ( self . running_instance_id , self . public_ip_name ) . ip_address except Exception : try : ip_address = self . network . network_interfaces . get ( self . running_instance_id , self . nic_name ) . ip_configurations [ 0 ] . private_ip_address except Exception as error : raise AzureCloudException ( 'Unable to retrieve instance IP address: {0}.' . format ( error ) ) self . instance_ip = ip_address | Get the IP address based on instance ID . |
39,162 | def _terminate_instance ( self ) : try : self . resource . resource_groups . delete ( self . running_instance_id ) except Exception as error : raise AzureCloudException ( 'Unable to terminate resource group: {0}.' . format ( error ) ) | Terminate the resource group and instance . |
39,163 | def preferred_change ( data ) : ranking = [ 'CHEBI' , 'NCBITaxon' , 'COGPO' , 'CAO' , 'DICOM' , 'UBERON' , 'NLX' , 'NLXANAT' , 'NLXCELL' , 'NLXFUNC' , 'NLXINV' , 'NLXORG' , 'NLXRES' , 'NLXSUB' 'BIRNLEX' , 'SAO' , 'NDA.CDE' , 'PR' , 'IAO' , 'NIFEXT' , 'OEN' , 'ILX' , ] mock_rank = ranking [ : : - 1 ] score = [ ] old_pref_index = None for i , d in enumerate ( data [ 'existing_ids' ] ) : if not d . get ( 'preferred' ) : d [ 'preferred' ] = 0 if int ( d [ 'preferred' ] ) == 1 : old_pref_index = i if d . get ( 'curie' ) : pref = d [ 'curie' ] . split ( ':' ) [ 0 ] if pref in mock_rank : score . append ( mock_rank . index ( pref ) ) else : score . append ( - 1 ) else : score . append ( - 1 ) new_pref_index = score . index ( max ( score ) ) new_pref_iri = data [ 'existing_ids' ] [ new_pref_index ] [ 'iri' ] if new_pref_iri . rsplit ( '/' , 1 ) [ 0 ] == 'http://uri.interlex.org/base' : if old_pref_index : if old_pref_index != new_pref_index : return data for e in data [ 'existing_ids' ] : e [ 'preferred' ] = 0 data [ 'existing_ids' ] [ new_pref_index ] [ 'preferred' ] = 1 return data | Determines preferred existing id based on curie prefix in the ranking list |
39,164 | def main ( context , no_color ) : if context . obj is None : context . obj = { } context . obj [ 'no_color' ] = no_color | Ipa provides a Python API and command line utility for testing images . |
39,165 | def results ( context , history_log ) : if context . obj is None : context . obj = { } context . obj [ 'history_log' ] = history_log if context . invoked_subcommand is None : context . invoke ( show , item = 1 ) | Process provided history log and results files . |
39,166 | def delete ( context , item ) : history_log = context . obj [ 'history_log' ] no_color = context . obj [ 'no_color' ] try : with open ( history_log , 'r+' ) as f : lines = f . readlines ( ) history = lines . pop ( len ( lines ) - item ) f . seek ( 0 ) f . write ( '' . join ( lines ) ) f . flush ( ) f . truncate ( ) except IndexError : echo_style ( 'History result at index %s does not exist.' % item , no_color , fg = 'red' ) sys . exit ( 1 ) except Exception as error : echo_style ( 'Unable to delete result item {0}. {1}' . format ( item , error ) , no_color , fg = 'red' ) sys . exit ( 1 ) log_file = get_log_file_from_item ( history ) try : os . remove ( log_file ) except Exception : echo_style ( 'Unable to delete results file for item {0}.' . format ( item ) , no_color , fg = 'red' ) try : os . remove ( log_file . rsplit ( '.' , 1 ) [ 0 ] + '.results' ) except Exception : echo_style ( 'Unable to delete log file for item {0}.' . format ( item ) , no_color , fg = 'red' ) | Delete the specified history item from the history log . |
39,167 | def show ( context , log , results_file , verbose , item ) : history_log = context . obj [ 'history_log' ] no_color = context . obj [ 'no_color' ] if not results_file : try : with open ( history_log , 'r' ) as f : lines = f . readlines ( ) history = lines [ len ( lines ) - item ] except IndexError : echo_style ( 'History result at index %s does not exist.' % item , no_color , fg = 'red' ) sys . exit ( 1 ) except Exception : echo_style ( 'Unable to retrieve results history, ' 'provide results file or re-run test.' , no_color , fg = 'red' ) sys . exit ( 1 ) log_file = get_log_file_from_item ( history ) if log : echo_log ( log_file , no_color ) else : echo_results_file ( log_file . rsplit ( '.' , 1 ) [ 0 ] + '.results' , no_color , verbose ) elif log : echo_log ( results_file , no_color ) else : echo_results_file ( results_file , no_color , verbose ) | Print test results info from provided results json file . |
39,168 | def _get_ssh_client ( self ) : return ipa_utils . get_ssh_client ( self . instance_ip , self . ssh_private_key_file , self . ssh_user , timeout = self . timeout ) | Return a new or existing SSH client for given ip . |
39,169 | def _log_info ( self ) : if self . cloud == 'ssh' : self . results [ 'info' ] = { 'platform' : self . cloud , 'distro' : self . distro_name , 'image' : self . instance_ip , 'timestamp' : self . time_stamp , 'log_file' : self . log_file , 'results_file' : self . results_file } else : self . results [ 'info' ] = { 'platform' : self . cloud , 'region' : self . region , 'distro' : self . distro_name , 'image' : self . image_id , 'instance' : self . running_instance_id , 'timestamp' : self . time_stamp , 'log_file' : self . log_file , 'results_file' : self . results_file } self . _write_to_log ( '\n' . join ( '%s: %s' % ( key , val ) for key , val in self . results [ 'info' ] . items ( ) ) ) | Output test run information to top of log file . |
39,170 | def _write_to_log ( self , output ) : with open ( self . log_file , 'a' ) as log_file : log_file . write ( '\n' ) log_file . write ( output ) log_file . write ( '\n' ) | Write the output string to the log file . |
39,171 | def _merge_results ( self , results ) : self . results [ 'tests' ] += results [ 'tests' ] for key , value in results [ 'summary' ] . items ( ) : self . results [ 'summary' ] [ key ] += value | Combine results of test run with exisiting dict . |
39,172 | def _save_results ( self ) : with open ( self . results_file , 'w' ) as results_file : json . dump ( self . results , results_file ) | Save results dictionary to json file . |
39,173 | def _set_distro ( self ) : if self . distro_name == 'sles' : self . distro = SLES ( ) elif self . distro_name == 'opensuse_leap' : self . distro = openSUSE_Leap ( ) else : raise IpaCloudException ( 'Distribution: %s, not supported.' % self . distro_name ) | Determine distro for image and create instance of class . |
39,174 | def _set_results_dir ( self ) : if self . running_instance_id : self . results_dir = os . path . join ( self . results_dir , self . cloud , self . image_id , self . running_instance_id ) else : self . results_dir = os . path . join ( self . results_dir , self . cloud , self . instance_ip ) try : os . makedirs ( self . results_dir ) except OSError as error : if not os . path . isdir ( self . results_dir ) : raise IpaCloudException ( 'Unable to create ipa results directory: %s' % error ) self . time_stamp = datetime . now ( ) . strftime ( '%Y%m%d%H%M%S' ) self . log_file = '' . join ( [ self . results_dir , os . sep , self . time_stamp , '.log' ] ) self . logger . debug ( 'Created log file %s' % self . log_file ) self . results_file = '' . join ( [ self . results_dir , os . sep , self . time_stamp , '.results' ] ) self . logger . debug ( 'Created results file %s' % self . results_file ) file_handler = logging . FileHandler ( self . log_file ) file_handler . setLevel ( logging . DEBUG ) file_handler . setFormatter ( logging . Formatter ( '\n%(message)s\n' ) ) self . logger . addHandler ( file_handler ) | Create results directory if not exists . |
39,175 | def _collect_vm_info ( self ) : self . logger . info ( 'Collecting basic info about VM' ) client = self . _get_ssh_client ( ) out = self . distro . get_vm_info ( client ) self . _write_to_log ( out ) | Gather basic info about VM |
39,176 | def _update_history ( self ) : ipa_utils . update_history_log ( self . history_log , description = self . description , test_log = self . log_file ) | Save the current test information to history json . |
39,177 | def _wait_on_instance ( self , state , timeout = 600 , wait_period = 10 ) : current_state = 'Undefined' start = time . time ( ) end = start + timeout while time . time ( ) < end : current_state = self . _get_instance_state ( ) if state . lower ( ) == current_state . lower ( ) : return time . sleep ( wait_period ) raise IpaCloudException ( 'Instance has not arrived at the given state: {state}' . format ( state = state ) ) | Wait until instance is in given state . |
39,178 | def execute_ssh_command ( self , client , command ) : try : out = ipa_utils . execute_ssh_command ( client , command ) except Exception as error : raise IpaCloudException ( 'Command: "{0}", failed execution: {1}.' . format ( command , error ) ) else : self . _write_to_log ( out ) | Execute the provided command and log output . |
39,179 | def extract_archive ( self , client , archive_path , extract_path = None ) : try : out = ipa_utils . extract_archive ( client , archive_path , extract_path ) except Exception as error : raise IpaCloudException ( 'Failed to extract archive, "{0}": {1}.' . format ( archive_path , error ) ) else : self . _write_to_log ( out ) | Extract the archive files using the client in the current path . |
39,180 | def hard_reboot_instance ( self ) : self . _stop_instance ( ) self . _start_instance ( ) self . _set_instance_ip ( ) self . logger . debug ( 'IP of instance: %s' % self . instance_ip ) ipa_utils . clear_cache ( ) | Stop then start the instance . |
39,181 | def install_package ( self , client , package ) : try : out = self . distro . install_package ( client , package ) except Exception as error : raise IpaCloudException ( 'Failed installing package, "{0}"; {1}.' . format ( package , error ) ) else : self . _write_to_log ( out ) | Install package using distro specific install method . |
39,182 | def process_injection_file ( self , client ) : configuration = ipa_utils . get_yaml_config ( self . inject ) if configuration . get ( 'inject_packages' ) : inject_packages = configuration [ 'inject_packages' ] if not isinstance ( inject_packages , list ) : inject_packages = [ inject_packages ] for package in inject_packages : package_path = self . put_file ( client , package ) self . install_package ( client , package_path ) if configuration . get ( 'inject_archives' ) : inject_archives = configuration [ 'inject_archives' ] if not isinstance ( inject_archives , list ) : inject_archives = [ inject_archives ] for archive in inject_archives : archive_path = self . put_file ( client , archive ) self . extract_archive ( client , archive_path ) if configuration . get ( 'inject_files' ) : inject_files = configuration [ 'inject_files' ] if not isinstance ( inject_files , list ) : inject_files = [ inject_files ] for file_path in inject_files : self . put_file ( client , file_path ) if configuration . get ( 'execute' ) : execute = configuration [ 'execute' ] if not isinstance ( execute , list ) : execute = [ execute ] for command in execute : self . execute_ssh_command ( client , command ) if configuration . get ( 'install' ) : install = configuration [ 'install' ] if not isinstance ( install , list ) : install = [ install ] for package in install : self . install_package ( client , package ) | Load yaml file and process injection configuration . |
39,183 | def put_file ( self , client , source_file ) : try : file_name = os . path . basename ( source_file ) ipa_utils . put_file ( client , source_file , file_name ) except Exception as error : raise IpaCloudException ( 'Failed copying file, "{0}"; {1}.' . format ( source_file , error ) ) else : return file_name | Put file on instance in default SSH directory . |
39,184 | def decodeIlxResp ( resp ) : lines = [ _ for _ in resp . text . split ( '\n' ) if _ ] if 'successfull' in lines [ 0 ] : return [ ( _ . split ( '"' ) [ 1 ] , ilxIdFix ( _ . split ( ': ' ) [ - 1 ] ) ) for _ in lines [ 1 : ] ] elif 'errors' in lines [ 0 ] : return [ ( _ . split ( '"' ) [ 1 ] , ilxIdFix ( _ . split ( '(' ) [ 1 ] . split ( ')' ) [ 0 ] ) ) for _ in lines [ 1 : ] ] | We need this until we can get json back directly and this is SUPER nasty |
39,185 | def getSubOrder ( existing ) : alpha = list ( zip ( * sorted ( ( ( k , v [ 'rec' ] [ 'label' ] ) for k , v in existing . items ( ) ) , key = lambda a : a [ 1 ] ) ) ) [ 0 ] depths = { } def getDepth ( id_ ) : if id_ in depths : return depths [ id_ ] else : if id_ in existing : names_above = getDepth ( existing [ id_ ] [ 'sc' ] ) depths [ id_ ] = names_above + [ existing [ id_ ] [ 'rec' ] [ 'label' ] ] return depths [ id_ ] else : return [ '' ] for id_ in existing : getDepth ( id_ ) print ( sorted ( depths . values ( ) ) ) def key_ ( id_ ) : return depths [ id_ ] return sorted ( depths , key = key_ ) | Alpha sort by the full chain of parents . |
39,186 | def ilx_conv ( graph , prefix , ilx_start ) : to_sub = set ( ) for subject in graph . subjects ( rdflib . RDF . type , rdflib . OWL . Class ) : if PREFIXES [ prefix ] in subject : to_sub . add ( subject ) ilx_base = 'ilx_{:0>7}' ILX_base = 'ILX:{:0>7}' ilx_labels = { } replace = { } for sub in sorted ( to_sub ) : ilx_format = ilx_base . format ( ilx_start ) ILX_format = ILX_base . format ( ilx_start ) ilx_start += 1 prefix , url , suffix = graph . namespace_manager . compute_qname ( sub ) curie = prefix + ':' + suffix replace [ curie ] = ILX_format label = [ _ for _ in graph . objects ( sub , rdflib . RDFS . label ) ] [ 0 ] ilx_labels [ ilx_format ] = label new_sub = expand ( 'ilx:' + ilx_format ) for p , o in graph . predicate_objects ( sub ) : graph . remove ( ( sub , p , o ) ) graph . add ( ( new_sub , p , o ) ) for s , p in graph . subject_predicates ( sub ) : graph . remove ( ( s , p , sub ) ) graph . add ( ( s , p , new_sub ) ) return ilx_labels , replace | convert a set of temporary identifiers to ilx and modify the graph in place |
39,187 | def config ( remote_base = 'https://raw.githubusercontent.com/SciCrunch/NIF-Ontology/' , local_base = None , branch = devconfig . neurons_branch , core_graph_paths = [ 'ttl/phenotype-core.ttl' , 'ttl/phenotypes.ttl' ] , core_graph = None , in_graph_paths = tuple ( ) , out_graph_path = '/tmp/_Neurons.ttl' , out_imports = [ 'ttl/phenotype-core.ttl' ] , out_graph = None , prefixes = tuple ( ) , force_remote = False , checkout_ok = ont_checkout_ok , scigraph = None , iri = None , sources = tuple ( ) , source_file = None , use_local_import_paths = True , ignore_existing = True ) : graphBase . configGraphIO ( remote_base = remote_base , local_base = local_base , branch = branch , core_graph_paths = core_graph_paths , core_graph = core_graph , in_graph_paths = in_graph_paths , out_graph_path = out_graph_path , out_imports = out_imports , out_graph = out_graph , prefixes = prefixes , force_remote = force_remote , checkout_ok = checkout_ok , scigraph = scigraph , iri = iri , sources = sources , source_file = source_file , use_local_import_paths = use_local_import_paths , ignore_existing = ignore_existing ) pred = graphBase . _predicates return pred | Wraps graphBase . configGraphIO to provide a set of sane defaults for input ontologies and output files . |
39,188 | def add_version_iri ( graph , epoch ) : for ont in graph . subjects ( rdf . type , owl . Ontology ) : for versionIRI in graph . objects ( ont , owl . versionIRI ) : graph . remove ( ( ont , owl . versionIRI , versionIRI ) ) t = ont , owl . versionIRI , make_version_iri_from_iri ( ont , epoch ) graph . add ( t ) | Also remove the previous versionIRI if there was one . |
39,189 | def auth ( self ) : if self . oauth : return self . oauth return ( self . username , self . password ) | Return credentials for current Bitbucket user . |
39,190 | def authorize ( self , consumer_key , consumer_secret , callback_url = None , access_token = None , access_token_secret = None ) : self . consumer_key = consumer_key self . consumer_secret = consumer_secret if not access_token and not access_token_secret : if not callback_url : return ( False , "Callback URL required" ) oauth = OAuth1 ( consumer_key , client_secret = consumer_secret , callback_uri = callback_url ) r = requests . post ( self . url ( 'REQUEST_TOKEN' ) , auth = oauth ) if r . status_code == 200 : creds = parse_qs ( r . content ) self . access_token = creds . get ( 'oauth_token' ) [ 0 ] self . access_token_secret = creds . get ( 'oauth_token_secret' ) [ 0 ] else : return ( False , r . content ) else : self . finalize_oauth ( access_token , access_token_secret ) return ( True , None ) | Call this with your consumer key secret and callback URL to generate a token for verification . |
39,191 | def verify ( self , verifier , consumer_key = None , consumer_secret = None , access_token = None , access_token_secret = None ) : self . consumer_key = consumer_key or self . consumer_key self . consumer_secret = consumer_secret or self . consumer_secret self . access_token = access_token or self . access_token self . access_token_secret = access_token_secret or self . access_token_secret oauth = OAuth1 ( self . consumer_key , client_secret = self . consumer_secret , resource_owner_key = self . access_token , resource_owner_secret = self . access_token_secret , verifier = verifier ) r = requests . post ( self . url ( 'ACCESS_TOKEN' ) , auth = oauth ) if r . status_code == 200 : creds = parse_qs ( r . content ) else : return ( False , r . content ) self . finalize_oauth ( creds . get ( 'oauth_token' ) [ 0 ] , creds . get ( 'oauth_token_secret' ) [ 0 ] ) return ( True , None ) | After converting the token into verifier call this to finalize the authorization . |
39,192 | def finalize_oauth ( self , access_token , access_token_secret ) : self . access_token = access_token self . access_token_secret = access_token_secret self . oauth = OAuth1 ( self . consumer_key , client_secret = self . consumer_secret , resource_owner_key = self . access_token , resource_owner_secret = self . access_token_secret ) | Called internally once auth process is complete . |
39,193 | def dispatch ( self , method , url , auth = None , params = None , ** kwargs ) : r = Request ( method = method , url = url , auth = auth , params = params , data = kwargs ) s = Session ( ) resp = s . send ( r . prepare ( ) ) status = resp . status_code text = resp . text error = resp . reason if status >= 200 and status < 300 : if text : try : return ( True , json . loads ( text ) ) except TypeError : pass except ValueError : pass return ( True , text ) elif status >= 300 and status < 400 : return ( False , 'Unauthorized access, ' 'please check your credentials.' ) elif status >= 400 and status < 500 : return ( False , 'Service not found.' ) elif status >= 500 and status < 600 : return ( False , 'Server error.' ) else : return ( False , error ) | Send HTTP request with given method credentials and data to the given URL and return the success and the result on success . |
39,194 | def url ( self , action , ** kwargs ) : return self . URLS [ 'BASE' ] % self . URLS [ action ] % kwargs | Construct and return the URL for a specific API service . |
39,195 | def get_user ( self , username = None ) : username = username or self . username or '' url = self . url ( 'GET_USER' , username = username ) response = self . dispatch ( 'GET' , url ) try : return ( response [ 0 ] , response [ 1 ] [ 'user' ] ) except TypeError : pass return response | Returns user informations . If username is not defined tries to return own informations . |
39,196 | def get_tags ( self , repo_slug = None ) : repo_slug = repo_slug or self . repo_slug or '' url = self . url ( 'GET_TAGS' , username = self . username , repo_slug = repo_slug ) return self . dispatch ( 'GET' , url , auth = self . auth ) | Get a single repository on Bitbucket and return its tags . |
39,197 | def get_branches ( self , repo_slug = None ) : repo_slug = repo_slug or self . repo_slug or '' url = self . url ( 'GET_BRANCHES' , username = self . username , repo_slug = repo_slug ) return self . dispatch ( 'GET' , url , auth = self . auth ) | Get a single repository on Bitbucket and return its branches . |
39,198 | def get_privileges ( self ) : url = self . url ( 'GET_USER_PRIVILEGES' ) return self . dispatch ( 'GET' , url , auth = self . auth ) | Get privledges for this user . |
39,199 | def create ( self , repo_slug = None , key = None , label = None ) : key = '%s' % key repo_slug = repo_slug or self . bitbucket . repo_slug or '' url = self . bitbucket . url ( 'SET_DEPLOY_KEY' , username = self . bitbucket . username , repo_slug = repo_slug ) return self . bitbucket . dispatch ( 'POST' , url , auth = self . bitbucket . auth , key = key , label = label ) | Associate an ssh key with your repo and return it . |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.