idx
int64
0
251k
question
stringlengths
53
3.53k
target
stringlengths
5
1.23k
len_question
int64
20
893
len_target
int64
3
238
12,700
def is_tomodir ( subdirectories ) : required = ( 'exe' , 'config' , 'rho' , 'mod' , 'inv' ) is_tomodir = True for subdir in required : if subdir not in subdirectories : is_tomodir = False return is_tomodir
provided with the subdirectories of a given directory check if this is a tomodir
71
18
12,701
def check_if_needs_modeling ( tomodir ) : print ( 'check for modeling' , tomodir ) required_files = ( 'config' + os . sep + 'config.dat' , 'rho' + os . sep + 'rho.dat' , 'grid' + os . sep + 'elem.dat' , 'grid' + os . sep + 'elec.dat' , 'exe' + os . sep + 'crmod.cfg' , ) not_allowed = ( 'mod' + os . sep + 'volt.dat' , ) needs_modeling = True for filename in not_allowed : if os . path . isfile ( tomodir + os . sep + filename ) : needs_modeling = False for filename in required_files : full_file = tomodir + os . sep + filename if not os . path . isfile ( full_file ) : print ( 'does not exist: ' , full_file ) needs_modeling = False return needs_modeling
check of we need to run CRMod in a given tomodir
224
14
12,702
def check_if_needs_inversion ( tomodir ) : required_files = ( 'grid' + os . sep + 'elem.dat' , 'grid' + os . sep + 'elec.dat' , 'exe' + os . sep + 'crtomo.cfg' , ) needs_inversion = True for filename in required_files : if not os . path . isfile ( tomodir + os . sep + filename ) : needs_inversion = False # check for crmod OR modeling capabilities if not os . path . isfile ( tomodir + os . sep + 'mod' + os . sep + 'volt.dat' ) : if not check_if_needs_modeling ( tomodir ) : print ( 'no volt.dat and no modeling possible' ) needs_inversion = False # check if finished inv_ctr_file = tomodir + os . sep + 'inv' + os . sep + 'inv.ctr' if os . path . isfile ( inv_ctr_file ) : inv_lines = open ( inv_ctr_file , 'r' ) . readlines ( ) print ( 'inv_lines' , inv_lines [ - 1 ] ) if inv_lines [ - 1 ] . startswith ( '***finished***' ) : needs_inversion = False return needs_inversion
check of we need to run CRTomo in a given tomodir
295
15
12,703
def add_boundary ( self , p1 , p2 , btype ) : index = self . add_line ( p1 , p2 , self . char_lengths [ 'boundary' ] ) # self.Boundaries.append((p1_id,p2_id,btype)) self . BoundaryIndices . append ( index ) self . Boundaries . append ( ( p1 , p2 , btype ) )
Add a boundary line
95
4
12,704
def add_line ( self , p1 , p2 , char_length ) : p1_id = self . get_point_id ( p1 , char_length ) p2_id = self . get_point_id ( p2 , char_length ) self . Lines . append ( ( p1_id , p2_id ) ) return len ( self . Lines )
Add a line to the list . Check if the nodes already exist and add them if not .
83
19
12,705
def is_in ( self , search_list , pair ) : index = - 1 for nr , i in enumerate ( search_list ) : if ( np . all ( i == pair ) ) : return nr return index
If pair is in search_list return the index . Otherwise return - 1
49
15
12,706
def read_electrodes ( self , electrodes ) : for nr , electrode in enumerate ( electrodes ) : index = self . get_point_id ( electrode , self . char_lengths [ 'electrode' ] ) self . Electrodes . append ( index )
Read in electrodes check if points already exist
59
8
12,707
def write_electrodes ( self , filename ) : fid = open ( filename , 'w' ) for i in self . Electrodes : fid . write ( '{0} {1}\n' . format ( self . Points [ i ] [ 0 ] , self . Points [ i ] [ 1 ] ) ) fid . close ( )
Write X Y coordinates of electrodes
72
6
12,708
def write_boundaries ( self , filename ) : fid = open ( filename , 'w' ) for i in self . Boundaries : print ( i ) # fid.write('{0} {1} {2}\n'.format(i[0], i[1], i[2])) fid . write ( '{0} {1} {2} {3} {4}\n' . format ( i [ 0 ] [ 0 ] , i [ 0 ] [ 1 ] , i [ 1 ] [ 0 ] , i [ 1 ] [ 1 ] , i [ 2 ] ) ) fid . close ( )
Write boundary lines X1 Y1 X2 Y2 TYPE to file
132
14
12,709
def read_char_lengths ( self , filename , electrode_filename ) : if os . path . isfile ( filename ) : data = np . atleast_1d ( np . loadtxt ( filename ) ) if data . size == 4 : characteristic_length = data # check sign of first (electrode) length value if characteristic_length [ 0 ] < 0 : try : elec_positions = np . loadtxt ( electrode_filename ) except : raise IOError ( 'The was an error opening the electrode file' ) import scipy . spatial . distance distances = scipy . spatial . distance . pdist ( elec_positions ) characteristic_length [ 0 ] = min ( distances ) * np . abs ( characteristic_length [ 0 ] ) if characteristic_length [ 0 ] == 0 : raise Exception ( 'Error computing electrode ' + 'distances (got a minimal distance of zero' ) else : characteristic_length = np . ones ( 4 ) * data [ 0 ] else : characteristic_length = np . ones ( 4 ) if np . any ( characteristic_length <= 0 ) : raise Exception ( 'No negative characteristic lengths allowed ' + '(except for electrode length' ) self . char_lengths = { } for key , item in zip ( ( 'electrode' , 'boundary' , 'extra_line' , 'extra_node' ) , characteristic_length ) : self . char_lengths [ key ] = item
Read characteristic lengths from the given file .
312
8
12,710
def write_points ( self , fid ) : for nr , point in enumerate ( self . Points ) : fid . write ( 'Point({0}) = {{{1}, {2}, 0, {3}}};\n' . format ( nr + 1 , point [ 0 ] , point [ 1 ] , self . Charlengths [ nr ] ) )
Write the grid points to the GMSH - command file .
79
13
12,711
def get_output ( cls , response : requests . Response ) -> str : output = response . headers [ 'X-Lizzy-Output' ] # type: str output = output . replace ( '\\n' , '\n' ) # unescape new lines lines = ( '[AGENT] {}' . format ( line ) for line in output . splitlines ( ) ) return '\n' . join ( lines )
Extracts the senza cli output from the response
91
12
12,712
def new_stack ( self , keep_stacks : int , new_traffic : int , senza_yaml : dict , stack_version : str , disable_rollback : bool , parameters : List [ str ] , region : Optional [ str ] , dry_run : bool , tags : List [ str ] ) -> ( Dict [ str , str ] , str ) : # TODO put arguments in a more logical order header = make_header ( self . access_token ) data = { 'senza_yaml' : yaml . dump ( senza_yaml ) , 'stack_version' : stack_version , 'disable_rollback' : disable_rollback , 'dry_run' : dry_run , 'keep_stacks' : keep_stacks , 'new_traffic' : new_traffic , 'parameters' : parameters , 'tags' : tags } if region : data [ 'region' ] = region request = self . stacks_url . post ( json = data , headers = header , verify = False ) request . raise_for_status ( ) return request . json ( ) , self . get_output ( request )
Requests a new stack .
253
6
12,713
def pot_ana ( r , rho ) : I = 1.0 sigma = 1.0 / rho phi = np . divide ( I , ( 2.0 * np . pi * sigma * r ) ) return phi
Return the analytical potential in distance r over a homogeneous half - space
52
14
12,714
def compute_potentials_analytical_hs ( grid , configs_raw , rho ) : potentials = [ ] nodes_sorted = grid . nodes [ 'sorted' ] nodes_raw = grid . nodes [ 'sorted' ] for config in configs_raw : print ( 'potential configs' , config ) # determine distance of all nodes to both electrodes e1_node = grid . get_electrode_node ( config [ 0 ] ) print ( 'e1_node' , e1_node ) electrode1 = nodes_sorted [ e1_node ] [ 1 : 3 ] # electrode1 = nodes_sorted[config[0]][1:3] r1 = np . sqrt ( ( nodes_raw [ : , 1 ] - electrode1 [ 0 ] ) ** 2 + ( nodes_raw [ : , 2 ] - electrode1 [ 1 ] ) ** 2 ) # electrode2 = nodes_sorted[config[1]][1:3] e2_node = grid . get_electrode_node ( config [ 1 ] ) print ( 'e2_node' , e2_node ) electrode2 = nodes_sorted [ e2_node ] [ 1 : 3 ] r2 = np . sqrt ( ( nodes_raw [ : , 1 ] - electrode2 [ 0 ] ) ** 2 + ( nodes_raw [ : , 2 ] - electrode2 [ 1 ] ) ** 2 ) pot1 = pot_ana ( r1 , rho ) pot2 = - pot_ana ( r2 , rho ) pot12 = pot1 + pot2 potentials . append ( pot12 ) return potentials
Compute the potential superpositions of each current dipole in the configurations using the provided resistivity
360
20
12,715
def compute_voltages ( grid , configs_raw , potentials_raw ) : # we operate on 0-indexed arrays, config holds 1-indexed values # configs = configs_raw - 1 voltages = [ ] for config , potentials in zip ( configs_raw , potentials_raw ) : print ( 'config' , config ) e3_node = grid . get_electrode_node ( config [ 2 ] ) e4_node = grid . get_electrode_node ( config [ 3 ] ) print ( e3_node , e4_node ) print ( 'pot1' , potentials [ e3_node ] ) print ( 'pot2' , potentials [ e4_node ] ) voltage = potentials [ e3_node ] - potentials [ e4_node ] voltages . append ( voltage ) return voltages
Given a list of potential distribution and corresponding four - point spreads compute the voltages
191
16
12,716
def vcf_writer ( parser , keep , extract , args ) : # The output output = sys . stdout if args . output == "-" else open ( args . output , "w" ) try : # Getting the samples samples = np . array ( parser . get_samples ( ) , dtype = str ) k = _get_sample_select ( samples = samples , keep = keep ) # Writing the VCF header output . write ( _VCF_HEADER . format ( date = datetime . today ( ) . strftime ( "%Y%m%d" ) , version = __version__ , samples = "\t" . join ( samples [ k ] ) , ) ) # The data generator generator = _get_generator ( parser = parser , extract = extract , keep = k , check_maf = args . maf ) # The number of markers extracted nb_extracted = 0 for data in generator : # Keeping only the required genotypes genotypes = data . genotypes # Computing the alternative allele frequency af = np . nanmean ( genotypes ) / 2 print ( data . variant . chrom , data . variant . pos , data . variant . name , data . reference , data . coded , "." , "PASS" , "AF={}" . format ( af ) , "GT:DS" , sep = "\t" , end = "" , file = output ) for geno in genotypes : if np . isnan ( geno ) : output . write ( "\t./.:." ) else : rounded_geno = int ( round ( geno , 0 ) ) output . write ( "\t{}:{}" . format ( _VCF_GT_MAP [ rounded_geno ] , geno , ) ) output . write ( "\n" ) nb_extracted += 1 if nb_extracted == 0 : logger . warning ( "No markers matched the extract list" ) finally : output . close ( )
Writes the data in VCF format .
416
9
12,717
def csv_writer ( parser , keep , extract , args ) : # The output output = sys . stdout if args . output == "-" else open ( args . output , "w" ) try : # Getting the samples samples = np . array ( parser . get_samples ( ) , dtype = str ) k = _get_sample_select ( samples = samples , keep = keep ) # Writing the CSV header print ( "sample_id" , "variant_id" , "chromosome" , "position" , "reference" , "coded" , "dosage" , "hard_call" , sep = "," , file = output ) # The data generator generator = _get_generator ( parser = parser , extract = extract , keep = k , check_maf = args . maf ) # The number of markers extracted nb_extracted = 0 for data in generator : # Keeping only the required genotypes genotypes = data . genotypes # The hard call mapping hard_call_mapping = { 0 : "{ref}/{ref}" . format ( ref = data . reference ) , 1 : "{ref}/{alt}" . format ( ref = data . reference , alt = data . coded ) , 2 : "{alt}/{alt}" . format ( alt = data . coded ) , } for sample , geno in zip ( samples [ k ] , genotypes ) : # Is the genotype missing is_missing = np . isnan ( geno ) # Hard coding (NaN values are empty string) hard_coded = None if is_missing : geno = "" hard_coded = "" else : hard_coded = hard_call_mapping [ int ( round ( geno , 0 ) ) ] print ( sample , data . variant . name , data . variant . chrom , data . variant . pos , data . reference , data . coded , geno , hard_coded , sep = "," , file = output ) nb_extracted += 1 if nb_extracted == 0 : logger . warning ( "No markers matched the extract list" ) finally : output . close ( )
Writes the data in CSV format .
457
8
12,718
def _get_generator ( parser , extract , keep , check_maf ) : if extract is not None : parser = Extractor ( parser , names = extract ) for data in parser . iter_genotypes ( ) : data . genotypes = data . genotypes [ keep ] # Checking the MAF, if required if check_maf : data . code_minor ( ) yield data
Generates the data ( with extract markers and keep if required .
84
13
12,719
def bitterness ( self , ibu_method , early_og , batch_size ) : if ibu_method == "tinseth" : bitterness = 1.65 * math . pow ( 0.000125 , early_og - 1.0 ) * ( ( 1 - math . pow ( math . e , - 0.04 * self . time ) ) / 4.15 ) * ( ( self . alpha / 100.0 * self . amount * 1000000 ) / batch_size ) * self . utilization_factor ( ) elif ibu_method == "rager" : utilization = 18.11 + 13.86 * math . tanh ( ( self . time - 31.32 ) / 18.27 ) adjustment = max ( 0 , ( early_og - 1.050 ) / 0.2 ) bitterness = self . amount * 100 * utilization * self . utilization_factor ( ) * self . alpha / ( batch_size * ( 1 + adjustment ) ) else : raise Exception ( "Unknown IBU method %s!" % ibu_method ) return bitterness
Calculate bitterness based on chosen method
228
8
12,720
def _check_error ( response ) : if ( not response . ok ) or ( response . status_code != 200 ) : raise Exception ( response . json ( ) [ 'error' ] + ': ' + response . json ( ) [ 'error_description' ] )
Raises an exception if the Spark Cloud returned an error .
58
12
12,721
def _login ( self , username , password ) : data = { 'username' : username , 'password' : password , 'grant_type' : 'password' } r = self . spark_api . oauth . token . POST ( auth = ( 'spark' , 'spark' ) , data = data , timeout = self . timeout ) self . _check_error ( r ) return r . json ( ) [ 'access_token' ]
Proceed to login to the Spark Cloud and returns an access token .
98
14
12,722
def devices ( self ) : params = { 'access_token' : self . access_token } r = self . spark_api . GET ( params = params , timeout = self . timeout ) self . _check_error ( r ) json_list = r . json ( ) devices_dict = { } if json_list : # it is possible the keys in json responses varies from one device to another: compute the set of all keys allKeys = { 'functions' , 'variables' , 'api' , 'requires_deep_update' , 'status' } # added by device_info for device_json in json_list : allKeys . update ( device_json . keys ( ) ) Device = _BaseDevice . make_device_class ( self , allKeys , timeout = self . timeout ) for d in json_list : if d [ "connected" ] : info = self . _get_device_info ( d [ 'id' ] ) d [ 'functions' ] = info . get ( 'functions' ) d [ 'variables' ] = info . get ( 'variables' ) d [ 'api' ] = self . spark_api ( d [ 'id' ] ) d [ 'requires_deep_update' ] = d . get ( 'requires_deep_update' , False ) d [ 'status' ] = info . get ( 'status' ) # ensure the set of all keys is present in the dictionnary (Device constructor requires all keys present) [ d . setdefault ( key , None ) for key in allKeys ] devices_dict [ d [ 'name' ] ] = Device ( * * d ) return devices_dict
Create a dictionary of devices known to the user account .
360
11
12,723
def _get_device_info ( self , device_id ) : params = { 'access_token' : self . access_token } r = self . spark_api ( device_id ) . GET ( params = params , timeout = 30 ) self . _check_error ( r ) return r . json ( )
Queries the Spark Cloud for detailed information about a device .
68
12
12,724
def make_device_class ( spark_cloud , entries , timeout = 30 ) : attrs = list ( set ( list ( entries ) + [ 'requires_deep_update' , 'functions' , 'variables' , 'api' , 'status' ] ) ) return type ( 'Device' , ( _BaseDevice , namedtuple ( 'Device' , attrs ) ) , { '__slots__' : ( ) , 'spark_cloud' : spark_cloud , 'timeout' : timeout } )
Returns a dynamic Device class based on what a GET device list from the Spark Cloud returns . spark_cloud parameter should be the caller instance of SparkCloud . entries parameter should be the list of fields the Spark Cloud API is returning .
113
46
12,725
def report_metric ( metric_name : str , value : int , fail_silently : bool = True ) : if metricz is None : return configuration = Configuration ( ) try : lizzy_domain = urlparse ( configuration . lizzy_url ) . netloc lizzy_name , _ = lizzy_domain . split ( '.' , 1 ) except Exception : lizzy_name = 'UNKNOWN' tags = { 'version' : VERSION , 'lizzy' : lizzy_name } # noinspection PyBroadException try : writer = metricz . MetricWriter ( url = configuration . token_url , directory = configuration . credentials_dir , fail_silently = False ) writer . write_metric ( metric_name , value , tags , timeout = 10 ) except Exception : if not fail_silently : raise
Tries to report a metric ignoring all errors
180
9
12,726
def get_form_bound_field ( form , field_name ) : field = form . fields [ field_name ] field = field . get_bound_field ( form , field_name ) return field
Intends to get the bound field from the form regarding the field name
44
14
12,727
def read ( self , module_name ) : self . parser . read ( "{}/{}.ini" . format ( self . path , module_name . split ( "." ) [ - 1 ] ) )
Read a particular config file
45
5
12,728
def get_for_nearest_ancestor ( self , cls , attribute_name ) : for family_cls in family ( cls ) : if self . has ( family_cls . __module__ , family_cls . __name__ , attribute_name ) : return self . get ( family_cls . __module__ , family_cls . __name__ , attribute_name ) ini_filename = cls . __module__ . split ( "." ) [ - 1 ] raise exc . PriorException ( "The prior config at {}/{} does not contain {} in {} or any of its parents" . format ( self . path , ini_filename , attribute_name , cls . __name__ ) )
Find a prior with the attribute analysis_path from the config for this class or one of its ancestors
161
20
12,729
def fib ( number : int ) -> int : if number < 2 : return number return fib ( number - 1 ) + fib ( number - 2 )
Simple Fibonacci function .
31
6
12,730
def add_data ( self , data ) : subdata = np . atleast_2d ( data ) # we try to accommodate transposed input if subdata . shape [ 1 ] != self . grid . nr_of_nodes : if subdata . shape [ 0 ] == self . grid . nr_of_nodes : subdata = subdata . T else : raise Exception ( 'Number of values does not match the number of ' + 'nodes in the grid {0} grid nodes vs {1} data' . format ( self . grid . nr_of_nodes , subdata . shape , ) ) return_ids = [ ] for dataset in subdata : cid = self . _get_next_index ( ) self . nodevals [ cid ] = dataset . copy ( ) return_ids . append ( cid ) if len ( return_ids ) == 1 : return return_ids [ 0 ] else : return return_ids
Add data to the node value sets
208
7
12,731
def instance ( cls , * args , * * kwgs ) : if not hasattr ( cls , "_instance" ) : cls . _instance = cls ( * args , * * kwgs ) return cls . _instance
Will be the only instance
53
5
12,732
def configure_logger ( logger , filename , folder , log_level ) : fmt = logging . Formatter ( '%(asctime)s %(levelname)s: %(message)s' ) if folder is not None : log_file = os . path . join ( folder , filename ) hdl = logging . FileHandler ( log_file ) hdl . setFormatter ( fmt ) hdl . setLevel ( log_level ) logger . addHandler ( hdl ) shdl = logging . StreamHandler ( ) shdl . setLevel ( log_level ) shdl . setFormatter ( fmt ) logger . addHandler ( shdl ) logger . setLevel ( log_level )
Configure logging behvior for the simulations .
150
10
12,733
def _nargs ( f ) -> Optional [ int ] : if isinstance ( f , Function ) : return f . nargs spec = inspect . getfullargspec ( f ) if spec . varargs is not None : return None return len ( spec . args )
number of positional arguments values . Dynamically computed from the arguments attribute .
56
14
12,734
def _ndefs ( f ) : if isinstance ( f , Function ) : return f . ndefs spec = inspect . getfullargspec ( f ) if spec . defaults is None : return 0 return len ( spec . defaults )
number of any default values for positional or keyword parameters
51
10
12,735
def singledispatch ( * , nargs = None , nouts = None , ndefs = None ) : def wrapper ( f ) : return wraps ( f ) ( SingleDispatchFunction ( f , nargs = nargs , nouts = nouts , ndefs = ndefs ) ) return wrapper
singledispatch decorate of both functools . singledispatch and func
65
17
12,736
def multidispatch ( * , nargs = None , nouts = None ) : def wrapper ( f ) : return wraps ( f ) ( MultiDispatchFunction ( f , nargs = nargs , nouts = nouts ) ) return wrapper
multidispatch decorate of both functools . singledispatch and func
52
17
12,737
def flip ( f : Callable ) -> Function : nargs_ , nouts_ , ndefs_ = nargs ( f ) , nouts ( f ) , ndefs ( f ) return WrappedFunction ( lambda * args , * * kwargs : f ( args [ 1 ] , args [ 0 ] , * args [ 2 : ] , * * kwargs ) , nargs = nargs_ , nouts = nouts_ , ndefs = ndefs_ )
flip order of first two arguments to function .
106
10
12,738
def tagfunc ( nargs = None , ndefs = None , nouts = None ) : def wrapper ( f ) : return wraps ( f ) ( FunctionWithTag ( f , nargs = nargs , nouts = nouts , ndefs = ndefs ) ) return wrapper
decorate of tagged function
62
5
12,739
def fmap ( self , f : 'WrappedFunction' ) -> 'WrappedFunction' : if not isinstance ( f , WrappedFunction ) : f = WrappedFunction ( f ) return WrappedFunction ( lambda * args , * * kwargs : self ( f ( * args , * * kwargs ) ) , nargs = f . nargs , nouts = self . nouts )
function map for Wrapped Function . A forced transfermation to WrappedFunction would be applied . async def
87
22
12,740
def parse_atoms ( self ) : atom_site_header_tag = self . main_tag . getElementsByTagName ( "PDBx:atom_siteCategory" ) assert ( len ( atom_site_header_tag ) == 1 ) atom_site_header_tag = atom_site_header_tag [ 0 ] atom_site_tags = atom_site_header_tag . getElementsByTagName ( "PDBx:atom_site" ) residue_map = { } residues_read = { } int_type = types . IntType for t in atom_site_tags : r , seqres , ResidueAA , Residue3AA = PDBML_slow . parse_atom_site ( t , self . modified_residues ) if r : # skip certain ACE residues if not ( self . pdb_id in cases_with_ACE_residues_we_can_ignore and Residue3AA == 'ACE' ) : full_residue_id = str ( r ) if residues_read . get ( full_residue_id ) : assert ( residues_read [ full_residue_id ] == ( r . ResidueAA , seqres ) ) else : residues_read [ full_residue_id ] = ( r . ResidueAA , seqres ) residue_map [ r . Chain ] = residue_map . get ( r . Chain , { } ) assert ( type ( seqres ) == int_type ) residue_map [ r . Chain ] [ str ( r ) ] = seqres ## Create SequenceMap objects to map the ATOM Sequences to the SEQRES Sequences atom_to_seqres_sequence_maps = { } for chain_id , atom_seqres_mapping in residue_map . iteritems ( ) : atom_to_seqres_sequence_maps [ chain_id ] = SequenceMap . from_dict ( atom_seqres_mapping ) self . atom_to_seqres_sequence_maps = atom_to_seqres_sequence_maps
All ATOM lines are parsed even though only one per residue needs to be parsed . The reason for parsing all the lines is just to sanity - checks that the ATOMs within one residue are consistent with each other .
457
44
12,741
def parse_atom_site ( self , name , attributes ) : if name == "PDBx:pdbx_PDB_ins_code" : assert ( not ( self . current_atom_site . ATOMResidueiCodeIsNull ) ) if attributes . get ( 'xsi:nil' ) == 'true' : self . current_atom_site . ATOMResidueiCodeIsNull = True if name == "PDBx:auth_asym_id" : assert ( not ( self . current_atom_site . PDBChainIDIsNull ) ) if attributes . get ( 'xsi:nil' ) == 'true' : self . current_atom_site . PDBChainIDIsNull = True
Parse the atom tag attributes . Most atom tags do not have attributes .
162
15
12,742
def create_atom_data ( self ) : current_atom_site = self . current_atom_site # Only parse ATOM records if current_atom_site . IsHETATM : # Early out - do not parse HETATM records return None , None , None , None elif current_atom_site . IsATOM : return current_atom_site . convert_to_residue ( self . modified_residues ) else : raise Exception ( 'current_atom_site' )
The atom site work is split into two parts . This function type - converts the tags .
110
18
12,743
def import_source ( module , path , pass_errors = False ) : try : m = imp . load_source ( module , path ) return m except Exception as e : return None
Function imports a module given full path
39
7
12,744
def import_module ( module , pass_errors = False ) : frm = module . split ( '.' ) try : m = __import__ ( module , fromlist = [ frm [ 1 ] ] ) return m except ImportError as e : if pass_errors : return None else : print ( traceback . format_exc ( ) ) return None except Exception as e : print ( traceback . format_exc ( ) ) return None
Function imports a module given module name
93
7
12,745
def copytree ( src , dst , symlinks = False , ignore = None ) : if not os . path . exists ( dst ) : os . mkdir ( dst ) try : for item in os . listdir ( src ) : s = os . path . join ( src , item ) d = os . path . join ( dst , item ) if os . path . isdir ( s ) : shutil . copytree ( s , d , symlinks , ignore ) else : shutil . copy2 ( s , d ) except Exception as e : raise FolderExistsError ( "Folder already exists in %s" % dst )
Function recursively copies from directory to directory .
133
10
12,746
def empty ( key , dict ) : if key in dict . keys ( ) : if dict [ key ] : return False return True
Function determines if the dict key exists or it is empty
27
11
12,747
def lookup ( self , path , must_be_leaf = False ) : assert ( type ( path ) == type ( self . name ) ) d = self . color_scheme tokens = path . split ( '.' ) for t in tokens [ : - 1 ] : d = d . get ( t ) if d == None : raise Exception ( "Path '%s' not found." ) if must_be_leaf : assert ( type ( d [ tokens [ - 1 ] ] ) == type ( self . name ) ) return d [ tokens [ - 1 ] ]
Looks up a part of the color scheme . If used for looking up colors must_be_leaf should be True .
120
24
12,748
def resolve_pid ( fetched_pid ) : return PersistentIdentifier . get ( pid_type = fetched_pid . pid_type , pid_value = fetched_pid . pid_value , pid_provider = fetched_pid . provider . pid_provider )
Retrieve the real PID given a fetched PID .
62
11
12,749
def ordered ( self , ord = 'desc' ) : if ord not in ( 'asc' , 'desc' , ) : raise ord_f = getattr ( PIDRelation . index , ord ) ( ) return self . order_by ( ord_f )
Order the query result on the relations indexes .
56
9
12,750
def status ( self , status_in ) : if isinstance ( status_in , PIDStatus ) : status_in = [ status_in , ] return self . filter ( self . _filtered_pid_class . status . in_ ( status_in ) )
Filter the PIDs based on their status .
57
9
12,751
def _resolved_pid ( self ) : if not isinstance ( self . pid , PersistentIdentifier ) : return resolve_pid ( self . pid ) return self . pid
Resolve self . pid if it is a fetched pid .
38
13
12,752
def _get_child_relation ( self , child_pid ) : return PIDRelation . query . filter_by ( parent = self . _resolved_pid , child = child_pid , relation_type = self . relation_type . id ) . one ( )
Retrieve the relation between this node and a child PID .
58
12
12,753
def _check_child_limits ( self , child_pid ) : if self . max_children is not None and self . children . count ( ) >= self . max_children : raise PIDRelationConsistencyError ( "Max number of children is set to {}." . format ( self . max_children ) ) if self . max_parents is not None and PIDRelation . query . filter_by ( child = child_pid , relation_type = self . relation_type . id ) . count ( ) >= self . max_parents : raise PIDRelationConsistencyError ( "This pid already has the maximum number of parents." )
Check that inserting a child is within the limits .
138
10
12,754
def _connected_pids ( self , from_parent = True ) : to_pid = aliased ( PersistentIdentifier , name = 'to_pid' ) if from_parent : to_relation = PIDRelation . child_id from_relation = PIDRelation . parent_id else : to_relation = PIDRelation . parent_id from_relation = PIDRelation . child_id query = PIDQuery ( [ to_pid ] , db . session ( ) , _filtered_pid_class = to_pid ) . join ( PIDRelation , to_pid . id == to_relation ) # accept both PersistentIdentifier models and fake PIDs with just # pid_value, pid_type as they are fetched with the PID fetcher. if isinstance ( self . pid , PersistentIdentifier ) : query = query . filter ( from_relation == self . pid . id ) else : from_pid = aliased ( PersistentIdentifier , name = 'from_pid' ) query = query . join ( from_pid , from_pid . id == from_relation ) . filter ( from_pid . pid_value == self . pid . pid_value , from_pid . pid_type == self . pid . pid_type , ) return query
Follow a relationship to find connected PIDs . abs .
276
11
12,755
def insert_child ( self , child_pid ) : self . _check_child_limits ( child_pid ) try : # TODO: Here add the check for the max parents and the max children with db . session . begin_nested ( ) : if not isinstance ( child_pid , PersistentIdentifier ) : child_pid = resolve_pid ( child_pid ) return PIDRelation . create ( self . _resolved_pid , child_pid , self . relation_type . id , None ) except IntegrityError : raise PIDRelationConsistencyError ( "PID Relation already exists." )
Add the given PID to the list of children PIDs .
133
12
12,756
def index ( self , child_pid ) : if not isinstance ( child_pid , PersistentIdentifier ) : child_pid = resolve_pid ( child_pid ) relation = PIDRelation . query . filter_by ( parent = self . _resolved_pid , child = child_pid , relation_type = self . relation_type . id ) . one ( ) return relation . index
Index of the child in the relation .
85
8
12,757
def is_last_child ( self , child_pid ) : last_child = self . last_child if last_child is None : return False return last_child == child_pid
Determine if pid is the latest version of a resource .
40
13
12,758
def last_child ( self ) : return self . children . filter ( PIDRelation . index . isnot ( None ) ) . ordered ( ) . first ( )
Get the latest PID as pointed by the Head PID .
35
11
12,759
def next_child ( self , child_pid ) : relation = self . _get_child_relation ( child_pid ) if relation . index is not None : return self . children . filter ( PIDRelation . index > relation . index ) . ordered ( ord = 'asc' ) . first ( ) else : return None
Get the next child PID in the PID relation .
69
10
12,760
def insert_child ( self , child_pid , index = - 1 ) : self . _check_child_limits ( child_pid ) if index is None : index = - 1 try : with db . session . begin_nested ( ) : if not isinstance ( child_pid , PersistentIdentifier ) : child_pid = resolve_pid ( child_pid ) child_relations = self . _resolved_pid . child_relations . filter ( PIDRelation . relation_type == self . relation_type . id ) . order_by ( PIDRelation . index ) . all ( ) relation_obj = PIDRelation . create ( self . _resolved_pid , child_pid , self . relation_type . id , None ) if index == - 1 : child_relations . append ( relation_obj ) else : child_relations . insert ( index , relation_obj ) for idx , c in enumerate ( child_relations ) : c . index = idx except IntegrityError : raise PIDRelationConsistencyError ( "PID Relation already exists." )
Insert a new child into a PID concept .
233
9
12,761
def asset ( url = None ) : # fallback to url_for('static') if assets path not configured url = url . lstrip ( '/' ) assets_path = app . config . get ( 'ASSETS_PATH' ) if not assets_path : url_for = app . jinja_env . globals . get ( 'url_for' ) url = url_for ( 'static' , filename = url ) else : assets_path = assets_path . rstrip ( '/' ) url = assets_path + '/' + url version = app . config . get ( 'ASSETS_VERSION' ) if not version : return url sign = '?' if sign in url : sign = '&' pattern = '{url}{sign}v{version}' return pattern . format ( url = url , sign = sign , version = version )
Asset helper Generates path to a static asset based on configuration base path and support for versioning . Will easily allow you to move your assets away to a CDN without changing templates . Versioning allows you to cache your asset changes forever by the webserver .
184
52
12,762
def pick_a_model_randomly ( models : List [ Any ] ) -> Any : try : return random . choice ( models ) except IndexError as e : raise ModelPickerException ( cause = e )
Naive picking function return one of the models chosen randomly .
45
12
12,763
def link ( origin = None , rel = None , value = None , attributes = None , source = None ) : attributes = attributes or { } #rel = I(iri.absolutize(rel, ctx.base)) def _link ( ctx ) : if source : if not callable ( source ) : raise ValueError ( 'Link source must be a pattern action function' ) contexts = source ( ctx ) for ctx in contexts : ctx . output_model . add ( ctx . current_link [ ORIGIN ] , ctx . current_link [ RELATIONSHIP ] , ctx . current_link [ TARGET ] , attributes ) return ( o , r , v , a ) = ctx . current_link _origin = origin ( ctx ) if callable ( origin ) else origin o_list = [ o ] if _origin is None else ( _origin if isinstance ( _origin , list ) else [ _origin ] ) #_origin = _origin if isinstance(_origin, set) else set([_origin]) _rel = rel ( ctx ) if callable ( rel ) else rel r_list = [ r ] if _rel is None else ( _rel if isinstance ( _rel , list ) else [ _rel ] ) #_rel = _rel if isinstance(_rel, set) else set([_rel]) _value = value ( ctx ) if callable ( value ) else value v_list = [ v ] if _value is None else ( _value if isinstance ( _value , list ) else [ _value ] ) #_target = _target if isinstance(_target, set) else set([_target]) _attributes = attributes ( ctx ) if callable ( attributes ) else attributes #(ctx_o, ctx_r, ctx_t, ctx_a) = ctx.current_link #FIXME: Add test for IRI output via wrapper action function for ( o , r , v , a ) in [ ( o , r , v , a ) for o in o_list for r in r_list for v in v_list ] : ctx . output_model . add ( o , r , v , attributes ) return return _link
Action function generator to create a link based on the context s current link or on provided parameters
480
18
12,764
def attr ( aid ) : def _attr ( ctx ) : return ctx . current_link [ ATTRIBUTES ] . get ( aid ) return _attr
Action function generator to retrieve an attribute from the current link
37
11
12,765
def values ( * rels ) : #Action function generator to multiplex a relationship at processing time def _values ( ctx ) : ''' Versa action function Utility to specify a list of relationships :param ctx: Versa context used in processing (e.g. includes the prototype link :return: Tuple of key/value tuples from the attributes; suitable for hashing ''' computed_rels = [ rel ( ctx ) if callable ( rel ) else rel for rel in rels ] return computed_rels return _values
Action function generator to compute a set of relationships from criteria
114
11
12,766
def foreach ( origin = None , rel = None , target = None , attributes = None ) : def _foreach ( ctx ) : ''' Versa action function utility to compute a list of values from a list of expressions :param ctx: Versa context used in processing (e.g. includes the prototype link) ''' _origin = origin ( ctx ) if callable ( origin ) else origin _rel = rel ( ctx ) if callable ( rel ) else rel _target = target ( ctx ) if callable ( target ) else target _attributes = attributes ( ctx ) if callable ( attributes ) else attributes ( o , r , t , a ) = ctx . current_link o = [ o ] if _origin is None else ( _origin if isinstance ( _origin , list ) else [ _origin ] ) r = [ r ] if _rel is None else ( _rel if isinstance ( _rel , list ) else [ _rel ] ) t = [ t ] if _target is None else ( _target if isinstance ( _target , list ) else [ _target ] ) #a = [a] if _attributes is None else _attributes a = [ a ] if _attributes is None else ( _attributes if isinstance ( _attributes , list ) else [ _attributes ] ) #print([(curr_o, curr_r, curr_t, curr_a) for (curr_o, curr_r, curr_t, curr_a) # in product(o, r, t, a)]) return [ ctx . copy ( current_link = ( curr_o , curr_r , curr_t , curr_a ) ) for ( curr_o , curr_r , curr_t , curr_a ) in itertools . product ( o , r , t , a ) ] #for (curr_o, curr_r, curr_t, curr_a) in product(origin or [o], rel or [r], target or [t], attributes or [a]): # newctx = ctx.copy(current_link=(curr_o, curr_r, curr_t, curr_a)) #ctx.output_model.add(I(objid), VTYPE_REL, I(iri.absolutize(_typ, ctx.base)), {}) return _foreach
Action function generator to compute a combination of links
539
9
12,767
def res ( arg ) : def _res ( ctx ) : _arg = arg ( ctx ) if callable ( arg ) else arg return I ( arg ) return _res
Convert the argument into an IRI ref
38
9
12,768
def static_singleton ( * args , * * kwargs ) : def __static_singleton_wrapper ( cls ) : if cls not in __singleton_instances : __singleton_instances [ cls ] = cls ( * args , * * kwargs ) return __singleton_instances [ cls ] return __static_singleton_wrapper
STATIC Singleton Design Pattern Decorator Class is initialized with arguments passed into the decorator .
83
20
12,769
def get_method_documentation ( method ) : from inspect import getargspec result = { 'name' : method . __name__ , 'friendly_name' : ' ' . join ( [ name . capitalize ( ) for name in method . __name__ . split ( '_' ) ] ) , } arg_specs = getargspec ( method ) arguments = { } if not arg_specs . defaults : if len ( arg_specs . args [ 1 : ] ) > 0 : arguments [ 'required' ] = list ( arg_specs . args [ 1 : ] ) else : if len ( arg_specs . args [ 1 : - ( len ( arg_specs . defaults ) ) ] ) : arguments [ 'required' ] = list ( arg_specs . args [ 1 : - ( len ( arg_specs . defaults ) ) ] ) arguments [ 'optional' ] = { } for i in range ( len ( arg_specs . defaults ) ) : arguments [ 'optional' ] [ arg_specs . args [ - ( len ( arg_specs . defaults ) ) + i ] ] = arg_specs . defaults [ i ] if arguments != { } : result [ 'parameters' ] = arguments doc = method . __doc__ . strip ( ) if method . __doc__ else '' if ':' in method . __doc__ : doc = { 'summary' : method . __doc__ [ 0 : doc . find ( ' :' ) ] . strip ( ) } params = re . findall ( r":param ([^\s]*): (.*)\n" , method . __doc__ ) if len ( params ) > 0 : doc [ 'parameters' ] = { } for param in params : doc [ 'parameters' ] [ param [ 0 ] ] = param [ 1 ] . strip ( ) regex = re . compile ( r":returns:(.*)" , re . MULTILINE | re . DOTALL ) returns = regex . search ( method . __doc__ ) if returns and returns . group ( 0 ) : doc [ 'return' ] = returns . group ( 0 ) . replace ( ':returns:' , '' ) . replace ( '\n ' , '\n' ) . strip ( ) if doc != '' : result [ 'help' ] = doc return result
This function uses inspect to retrieve information about a method .
504
11
12,770
def sort_dictionary_list ( dict_list , sort_key ) : if not dict_list or len ( dict_list ) == 0 : return dict_list dict_list . sort ( key = itemgetter ( sort_key ) ) return dict_list
sorts a list of dictionaries based on the value of the sort_key
57
16
12,771
def safe_info ( self , dic = None ) : if dic is None and dic != { } : dic = self . to_dict ( ) output = { } for ( key , value ) in dic . items ( ) : if key [ 0 ] != '_' : if isinstance ( value , SerializableObject ) : output [ key ] = value . safe_info ( ) elif isinstance ( value , dict ) : output [ key ] = self . safe_info ( dic = value ) elif isinstance ( value , list ) : output [ key ] = [ ] for f in value : if isinstance ( f , SerializableObject ) : output [ key ] . append ( f . safe_info ( ) ) elif isinstance ( f , dict ) : output [ key ] . append ( self . safe_info ( dic = f ) ) else : output [ key ] . append ( f ) else : output [ key ] = value return output
Returns public information of the object
210
6
12,772
def run ( host = '0.0.0.0' , port = 5000 , reload = True , debug = True ) : from werkzeug . serving import run_simple app = bootstrap . get_app ( ) return run_simple ( hostname = host , port = port , application = app , use_reloader = reload , use_debugger = debug , )
Run development server
82
3
12,773
def shell ( ) : app = bootstrap . get_app ( ) context = dict ( app = app ) # and push app context app_context = app . app_context ( ) app_context . push ( ) # got ipython? ipython = importlib . util . find_spec ( "IPython" ) # run now if ipython : from IPython import embed embed ( user_ns = context ) else : import code code . interact ( local = context )
Start application - aware shell
100
5
12,774
def _push ( self , title , view , class_name , is_class , * * kwargs ) : # Set the page title set_view_attr ( view , "title" , title , cls_name = class_name ) module_name = view . __module__ method_name = view . __name__ _endpoint = build_endpoint_route_name ( view , "index" if is_class else method_name , class_name ) endpoint = kwargs . pop ( "endpoint" , _endpoint ) kwargs . setdefault ( "endpoint_kwargs" , { } ) order = kwargs . pop ( "order" , 0 ) # Tags _nav_tags = get_view_attr ( view , "nav_tags" , [ "default" ] , cls_name = class_name ) tags = kwargs . pop ( "tags" , _nav_tags ) if not isinstance ( tags , list ) : _ = tags tags = [ _ ] kwargs [ "tags" ] = tags # visible: accepts a bool or list of callback to execute visible = kwargs . pop ( "visible" , [ True ] ) if not isinstance ( visible , list ) : visible = [ visible ] if get_view_attr ( view , "nav_visible" , cls_name = class_name ) is False : visible = False kwargs [ "view" ] = view kwargs [ "visible" ] = visible kwargs [ "active" ] = False kwargs [ "key" ] = class_name if is_class : # class menu kwargs [ "endpoint" ] = endpoint kwargs [ "has_subnav" ] = True else : kwargs [ "has_subnav" ] = False kwargs . update ( { "order" : order , "has_subnav" : False , "title" : title , "endpoint" : endpoint , } ) self . _title_map [ endpoint ] = title path = "%s.%s" % ( module_name , method_name if is_class else class_name ) attach_to = kwargs . pop ( "attach_to" , [ ] ) if not attach_to : attach_to . append ( path ) for path in attach_to : if path not in self . MENU : self . MENU [ path ] = { "title" : None , "endpoint" : None , "endpoint_kwargs" : { } , "order" : None , "subnav" : [ ] , "kwargs" : { } } if is_class : # class menu self . MENU [ path ] [ "title" ] = title self . MENU [ path ] [ "order" ] = order self . MENU [ path ] [ "kwargs" ] = kwargs else : # sub menu self . MENU [ path ] [ "subnav" ] . append ( kwargs )
Push nav data stack
650
4
12,775
def render ( self ) : menu_list = [ ] menu_index = 0 for _ , menu in copy . deepcopy ( self . MENU ) . items ( ) : subnav = [ ] menu [ "kwargs" ] [ "_id" ] = str ( menu_index ) menu [ "kwargs" ] [ "active" ] = False if "visible" in menu [ "kwargs" ] : menu [ "kwargs" ] [ "visible" ] = self . _test_visibility ( menu [ "kwargs" ] [ "visible" ] ) for s in menu [ "subnav" ] : if s [ "title" ] : s [ "title" ] = self . _get_title ( s [ "title" ] ) if s [ "endpoint" ] == request . endpoint : s [ "active" ] = True menu [ "kwargs" ] [ "active" ] = True s [ "visible" ] = self . _test_visibility ( s [ "visible" ] ) menu_index += 1 s [ "_id" ] = str ( menu_index ) subnav . append ( s ) _kwargs = menu [ "kwargs" ] if menu [ "title" ] : _kwargs . update ( { "subnav" : self . _sort ( subnav ) , "order" : menu [ "order" ] , "title" : self . _get_title ( menu [ "title" ] ) } ) menu_list . append ( _kwargs ) else : menu_list += subnav menu_index += 1 return self . _sort ( menu_list )
Render the menu into a sorted by order multi dict
348
10
12,776
def add_qtl_to_marker ( marker , qtls ) : cnt = 0 for qtl in qtls : if qtl [ - 1 ] == marker [ 0 ] : cnt = cnt + 1 marker . append ( str ( cnt ) ) return marker
Add the number of QTLs found for a given marker .
62
13
12,777
def add_qtl_to_map ( qtlfile , mapfile , outputfile = 'map_with_qtls.csv' ) : qtl_list = read_input_file ( qtlfile , ',' ) map_list = read_input_file ( mapfile , ',' ) map_list [ 0 ] . append ( '# QTLs' ) markers = [ ] markers . append ( map_list [ 0 ] ) qtl_cnt = 0 for marker in map_list [ 1 : ] : markers . append ( add_qtl_to_marker ( marker , qtl_list [ 1 : ] ) ) qtl_cnt = qtl_cnt + int ( markers [ - 1 ] [ - 1 ] ) LOG . info ( '- %s markers processed in %s' % ( len ( markers ) , mapfile ) ) LOG . info ( '- %s QTLs located in the map: %s' % ( qtl_cnt , outputfile ) ) write_matrix ( outputfile , markers )
This function adds to a genetic map for each marker the number of significant QTLs found .
233
19
12,778
def send_command ( self , data , read_delay = None ) : self . _write ( data ) if read_delay : time . sleep ( read_delay ) return self . _read ( )
Write data to the port and return the response form it
43
11
12,779
def serialize_relations ( pid ) : data = { } relations = PIDRelation . get_child_relations ( pid ) . all ( ) for relation in relations : rel_cfg = resolve_relation_type_config ( relation . relation_type ) dump_relation ( rel_cfg . api ( relation . parent ) , rel_cfg , pid , data ) parent_relations = PIDRelation . get_parent_relations ( pid ) . all ( ) rel_cfgs = set ( [ resolve_relation_type_config ( p ) for p in parent_relations ] ) for rel_cfg in rel_cfgs : dump_relation ( rel_cfg . api ( pid ) , rel_cfg , pid , data ) return data
Serialize the relations for given PID .
156
8
12,780
def dump_relation ( api , rel_cfg , pid , data ) : schema_class = rel_cfg . schema if schema_class is not None : schema = schema_class ( ) schema . context [ 'pid' ] = pid result , errors = schema . dump ( api ) data . setdefault ( rel_cfg . name , [ ] ) . append ( result )
Dump a specific relation to a data dict .
79
10
12,781
def add_item ( self , url , title = None , selection = None , jsonp = None , redirect = None , response_info = False ) : parameters = { 'username' : self . user , 'password' : self . password , 'url' : url , } # look for optional parameters title and selection if title is not None : parameters [ 'title' ] = title else : parameters [ 'auto-title' ] = 1 if selection is not None : parameters [ 'selection' ] = selection if redirect is not None : parameters [ 'redirect' ] = redirect if jsonp is not None : parameters [ 'jsonp' ] = jsonp # make query with the chosen parameters status , headers = self . _query ( self . addurl , parameters ) # return the callback call if we want jsonp if jsonp is not None : return status statustxt = self . add_status_codes [ int ( status ) ] # if response headers are desired, return them also if response_info : return ( int ( status ) , statustxt , headers [ 'title' ] , headers [ 'location' ] ) else : return ( int ( status ) , statustxt )
Method to add a new item to a instapaper account
251
12
12,782
def _query ( self , url = None , params = "" ) : if url is None : raise NoUrlError ( "No URL was provided." ) # return values headers = { 'location' : None , 'title' : None } headerdata = urllib . urlencode ( params ) try : request = urllib2 . Request ( url , headerdata ) response = urllib2 . urlopen ( request ) # return numeric HTTP status code unless JSONP was requested if 'jsonp' in params : status = response . read ( ) else : status = response . getcode ( ) info = response . info ( ) try : headers [ 'location' ] = info [ 'Content-Location' ] except KeyError : pass try : headers [ 'title' ] = info [ 'X-Instapaper-Title' ] except KeyError : pass return ( status , headers ) except urllib2 . HTTPError as exception : # handle API not returning JSONP response on 403 if 'jsonp' in params : return ( '%s({"status":%d})' % ( params [ 'jsonp' ] , exception . code ) , headers ) else : return ( exception . code , headers ) except IOError as exception : return ( exception . code , headers )
method to query a URL with the given parameters
271
9
12,783
def cors ( * args , * * kwargs ) : def decorator ( fn ) : cors_fn = flask_cors . cross_origin ( automatic_options = False , * args , * * kwargs ) if inspect . isclass ( fn ) : apply_function_to_members ( fn , cors_fn ) else : return cors_fn ( fn ) return fn return decorator
A wrapper around flask - cors cross_origin to also act on classes
89
15
12,784
def get_residue_mapping ( self ) : if len ( self . sequence_ids ) == 2 : if not self . alignment_output : self . align ( ) assert ( self . alignment_output ) return self . _create_residue_map ( self . _get_alignment_lines ( ) , self . sequence_ids [ 1 ] , self . sequence_ids [ 2 ] ) else : return None
Returns a mapping between the sequences ONLY IF there are exactly two . This restriction makes the code much simpler .
92
21
12,785
def realign ( self , cut_off , chains_to_skip = set ( ) ) : if cut_off != self . cut_off : self . cut_off = cut_off # Wipe any existing information for chains not in chains_to_skip for c in self . chains : if c not in chains_to_skip : self . clustal_matches [ c ] = None self . substring_matches [ c ] = None if self . alignment . get ( c ) : del self . alignment [ c ] if self . seqres_to_uniparc_sequence_maps . get ( c ) : del self . seqres_to_uniparc_sequence_maps [ c ] # Run alignment for the remaining chains self . _align_with_clustal ( chains_to_skip = chains_to_skip ) self . _align_with_substrings ( chains_to_skip = chains_to_skip ) self . _check_alignments ( chains_to_skip = chains_to_skip ) self . _get_residue_mapping ( chains_to_skip = chains_to_skip )
Alter the cut - off and run alignment again . This is much quicker than creating a new PDBUniParcSequenceAligner object as the UniParcEntry creation etc . in the constructor does not need to be repeated .
249
50
12,786
def _determine_representative_chains ( self ) : # todo: This logic should be moved into the FASTA class or a more general module (maybe a fast exists which uses a C/C++ library?) but at present it is easier to write here since we do not need to worry about other PDB IDs. equivalence_fiber = { } matched_chains = set ( ) for chain_id , equivalent_chains in self . identical_sequences . iteritems ( ) : matched_chains . add ( chain_id ) equivalent_chain_ids = set ( ) for equivalent_chain in equivalent_chains : assert ( len ( equivalent_chain ) == 6 ) assert ( ( equivalent_chain [ : 5 ] == '%s_' % self . pdb_id ) or ( equivalent_chain [ : 5 ] == '%s:' % self . pdb_id ) ) # ClustalW changes e.g. 1KI1:A to 1KI1_A in its output equivalent_chain_ids . add ( equivalent_chain [ 5 ] ) found = False for equivalent_chain_id in equivalent_chain_ids : if equivalence_fiber . get ( equivalent_chain_id ) : found = True assert ( equivalence_fiber [ equivalent_chain_id ] == equivalent_chain_ids . union ( set ( [ chain_id ] ) ) ) break if not found : equivalence_fiber [ chain_id ] = set ( equivalent_chain_ids ) equivalence_fiber [ chain_id ] . add ( chain_id ) for c in self . chains : if c not in matched_chains : equivalence_fiber [ c ] = set ( [ c ] ) self . equivalence_fiber = equivalence_fiber self . representative_chains = equivalence_fiber . keys ( )
Quotient the chains to get equivalence classes of chains . These will be used for the actual mapping .
398
22
12,787
def _get_uniparc_sequences_through_uniprot_ACs ( self , mapping_pdb_id , uniprot_ACs , cache_dir ) : # Map the UniProt ACs to the UniParc IDs m = uniprot_map ( 'ACC' , 'UPARC' , uniprot_ACs , cache_dir = cache_dir ) UniParcIDs = [ ] for _ , v in m . iteritems ( ) : UniParcIDs . extend ( v ) # Create a mapping from the mapping_pdb_id to the UniParcEntry objects. This must match the return type from pdb_to_uniparc. mapping = { mapping_pdb_id : [ ] } for UniParcID in UniParcIDs : entry = UniParcEntry ( UniParcID , cache_dir = cache_dir ) mapping [ mapping_pdb_id ] . append ( entry ) return mapping
Get the UniParc sequences associated with the UniProt accession number .
210
15
12,788
def _get_residue_mapping ( self , chains_to_skip = set ( ) ) : for c in self . representative_chains : # Skip specified chains if c not in chains_to_skip : if self . alignment . get ( c ) : uniparc_entry = self . get_uniparc_object ( c ) sa = SequenceAligner ( ) sa . add_sequence ( c , self . fasta [ c ] ) sa . add_sequence ( uniparc_entry . UniParcID , uniparc_entry . sequence ) sa . align ( ) residue_mapping , residue_match_mapping = sa . get_residue_mapping ( ) # Create a SequenceMap s = PDBUniParcSequenceMap ( ) assert ( sorted ( residue_mapping . keys ( ) ) == sorted ( residue_match_mapping . keys ( ) ) ) for k , v in residue_mapping . iteritems ( ) : s . add ( k , ( uniparc_entry . UniParcID , v ) , residue_match_mapping [ k ] ) self . seqres_to_uniparc_sequence_maps [ c ] = s else : self . seqres_to_uniparc_sequence_maps [ c ] = PDBUniParcSequenceMap ( ) # Use the representatives' alignments for their respective equivalent classes. This saves memory as the same SequenceMap is used. for c_1 , related_chains in self . equivalence_fiber . iteritems ( ) : for c_2 in related_chains : if self . seqres_to_uniparc_sequence_maps . get ( c_1 ) : self . seqres_to_uniparc_sequence_maps [ c_2 ] = self . seqres_to_uniparc_sequence_maps [ c_1 ]
Creates a mapping between the residues of the chains and the associated UniParc entries .
412
18
12,789
def get_corresponding_chains ( self , from_pdb_id , from_chain_id , to_pdb_id ) : chains = self . chain_map . get ( from_pdb_id , { } ) . get ( from_chain_id , { } ) . get ( to_pdb_id , [ ] ) return sorted ( chains )
Should be called after get_mutations .
82
9
12,790
def get_mapping_from_db3_file ( db_path ) : import sqlite3 # should be moved to the top but we do this here for CentOS 5 support conn = sqlite3 . connect ( db_path ) results = conn . cursor ( ) . execute ( ''' SELECT chain_id, pdb_residue_number, insertion_code, residues.struct_id, residues.resNum, residues.name3, residues.res_type FROM residue_pdb_identification INNER JOIN residues ON residue_pdb_identification.struct_id=residues.struct_id AND residue_pdb_identification.residue_number=residues.resNum ''' ) # Create the mapping from PDB residues to Rosetta residues rosetta_residue_ids = [ ] mapping = { } for r in results : mapping [ "%s%s%s" % ( r [ 0 ] , str ( r [ 1 ] ) . rjust ( 4 ) , r [ 2 ] ) ] = { 'pose_residue_id' : r [ 4 ] , 'name3' : r [ 5 ] , 'res_type' : r [ 6 ] } rosetta_residue_ids . append ( r [ 4 ] ) # Ensure that the the range of the map is exactly the set of Rosetta residues i.e. the map from (a subset of) the PDB residues to the Rosetta residues is surjective raw_residue_list = [ r for r in conn . cursor ( ) . execute ( '''SELECT resNum, name3 FROM residues ORDER BY resNum''' ) ] assert ( sorted ( [ r [ 0 ] for r in raw_residue_list ] ) == sorted ( rosetta_residue_ids ) ) return mapping
Does the work of reading the Rosetta SQLite3 . db3 file to retrieve the mapping
400
19
12,791
def add_company_quarter ( self , company_name , quarter_name , dt , calendar_id = 'notices' ) : assert ( calendar_id in self . configured_calendar_ids . keys ( ) ) calendarId = self . configured_calendar_ids [ calendar_id ] quarter_name = quarter_name . title ( ) quarter_numbers = { 'Spring' : 1 , 'Summer' : 2 , 'Fall' : 3 , 'Winter' : 4 } assert ( quarter_name in quarter_numbers . keys ( ) ) start_time = datetime ( year = dt . year , month = dt . month , day = dt . day , hour = 0 , minute = 0 , second = 0 , tzinfo = self . timezone ) + timedelta ( days = - 1 ) end_time = start_time + timedelta ( days = 3 , seconds = - 1 ) summary = '%s %s Quarter begins' % ( company_name , quarter_name ) # Do not add the quarter multiple times events = self . get_events ( start_time . isoformat ( ) , end_time . isoformat ( ) , ignore_cancelled = True ) for event in events : if event . summary . find ( summary ) != - 1 : return False event_body = { 'summary' : summary , 'description' : summary , 'start' : { 'date' : dt . isoformat ( ) , 'timeZone' : self . timezone_string } , 'end' : { 'date' : dt . isoformat ( ) , 'timeZone' : self . timezone_string } , 'status' : 'confirmed' , 'gadget' : { 'display' : 'icon' , 'iconLink' : 'https://guybrush.ucsf.edu/images/Q%d_32.png' % quarter_numbers [ quarter_name ] , 'title' : summary , } , 'extendedProperties' : { 'shared' : { 'event_type' : '%s quarter' % company_name , 'quarter_name' : quarter_name } } } colortext . warning ( '\n%s\n' % pprint . pformat ( event_body ) ) created_event = self . service . events ( ) . insert ( calendarId = self . configured_calendar_ids [ calendar_id ] , body = event_body ) . execute ( ) return True
Adds a company_name quarter event to the calendar . dt should be a date object . Returns True if the event was added .
536
27
12,792
def create_space ( self ) : cur = self . _conn . cursor ( ) cur . executescript ( SQL_MODEL ) self . _conn . commit ( ) cur . close ( ) return
Set up a new table space for the first time
42
10
12,793
def drop_space ( self ) : cur = self . _conn . cursor ( ) cur . executescript ( DROP_SQL_MODEL ) self . _conn . commit ( ) cur . close ( ) return
Dismantle an existing table space
45
7
12,794
def eval ( self , text ) : program = Program ( text , echo = self . echo , transforms = self . transforms ) tokens = program . gen_tokens ( ) for sentence in program . gen_sentences ( tokens , self . aliases ) : if self . echo : self . terminal . debug ( str ( sentence ) ) program . interpret ( sentence , self . commands )
Respond to text entered by the user .
80
9
12,795
def interact ( self ) : lines = "" for line in self . read ( ) : lines += line try : self . eval ( lines ) except ValueError : pass except KeyboardInterrupt as e : raise e except : self . terminal . error ( traceback . format_exc ( ) ) break else : break
Get a command from the user and respond to it .
64
11
12,796
def serve_forever ( self , banner = None ) : if banner : print ( banner ) while True : try : self . interact ( ) except KeyboardInterrupt : # program interrupted by the user print # do not print on the same line as ^C pass except SystemExit : # exit from the interpreter break
Handle one interaction at a time until shutdown .
64
9
12,797
def process_results ( output_dir , config ) : print ( '\nanalyzing results...\n' ) res = output_results ( output_dir , config ) if res : print ( 'created: %s/results.html\n' % output_dir ) else : print ( 'results cannot be processed' )
Process results and output them
70
5
12,798
def copy_config ( project_path , output_dir ) : project_config = os . path . join ( project_path , 'config.json' ) saved_config = os . path . join ( output_dir , 'config.json' ) shutil . copy ( project_config , saved_config )
Copy current config file to output directory
67
7
12,799
def start_hq ( output_dir , config , topic , is_master = True , * * kwargs ) : HightQuarter = get_hq_class ( config . get ( 'hq_class' ) ) hq = HightQuarter ( output_dir , config , topic , * * kwargs ) hq . setup ( ) if is_master : hq . wait_turrets ( config . get ( "min_turrets" , 1 ) ) hq . run ( ) hq . tear_down ( )
Start a HQ
120
3