idx
int64
0
252k
question
stringlengths
48
5.28k
target
stringlengths
5
1.23k
6,200
def _validate_config ( config ) : if not isinstance ( config , list ) : raise TypeError ( 'Config must be a list' ) for config_dict in config : if not isinstance ( config_dict , dict ) : raise TypeError ( 'Config must be a list of dictionaries' ) label = config_dict . keys ( ) [ 0 ] cfg = config_dict [ label ] if not i...
Validate that the provided configuration is valid .
6,201
def _parse_configs ( self , config ) : for config_dict in config : label = config_dict . keys ( ) [ 0 ] cfg = config_dict [ label ] dbpath = cfg [ 'dbpath' ] pattern = self . _parse_dbpath ( dbpath ) read_preference = cfg . get ( 'read_preference' , 'primary' ) . upper ( ) read_preference = self . _get_read_preference ...
Builds a dict with information to connect to Clusters .
6,202
def _parse_dbpath ( dbpath ) : if isinstance ( dbpath , list ) : dbpath = '|' . join ( dbpath ) if not dbpath . endswith ( '$' ) : dbpath = '(%s)$' % dbpath return dbpath
Converts the dbpath to a regexp pattern .
6,203
def _get_read_preference(read_preference):
    """Convert *read_preference* from string to a pymongo.ReadPreference value.

    Raises:
        ValueError: if the name matches no ``pymongo.ReadPreference``
            attribute.
    """
    preference = getattr(pymongo.ReadPreference, read_preference, None)
    if preference is None:
        # Report the *original* string: the previous code rebound the
        # parameter before formatting, so the message always said "None".
        raise ValueError('Invalid read preference: %s' % read_preference)
    return preference
Converts read_preference from string to pymongo . ReadPreference value .
6,204
def set_timeout(self, network_timeout):
    """Set the timeout for existing and future clients.

    A changed value forces a disconnect so new connections pick it up;
    an unchanged value is a no-op.
    """
    if network_timeout != self._network_timeout:
        self._network_timeout = network_timeout
        self._disconnect()
Set the timeout for existing and future Clients .
6,205
def _disconnect ( self ) : for cluster in self . _clusters : if 'connection' in cluster : connection = cluster . pop ( 'connection' ) connection . close ( ) for dbname in self . _mapped_databases : self . __delattr__ ( dbname ) self . _mapped_databases = [ ]
Disconnect from all MongoDB Clients .
6,206
def _get_connection ( self , cluster ) : if 'connection' not in cluster : cluster [ 'connection' ] = self . _connection_class ( socketTimeoutMS = self . _network_timeout , w = 1 , j = self . j , ** cluster [ 'params' ] ) return cluster [ 'connection' ]
Return a connection to a Cluster .
6,207
def _match_dbname ( self , dbname ) : for config in self . _clusters : if re . match ( config [ 'pattern' ] , dbname ) : return config raise Exception ( 'No such database %s.' % dbname )
Map a database name to the Cluster that holds the database .
6,208
def try_ntime(max_try, func, *args, **kwargs):
    """Call ``func(*args, **kwargs)`` up to *max_try* times.

    Returns the first successful result; if every attempt raises, the
    last exception is re-raised.

    Raises:
        ValueError: if *max_try* is less than 1 (previously raised with
            no message at all).
    """
    if max_try < 1:
        raise ValueError('max_try must be at least 1, got %r' % (max_try,))
    last_exception = None
    for _ in range(max_try):
        try:
            return func(*args, **kwargs)
        except Exception as exc:
            last_exception = exc
    raise last_exception
Try execute a function n times until no exception raised or tried max_try times .
6,209
def highlightjs_javascript ( jquery = None ) : javascript = '' if jquery is None : jquery = get_highlightjs_setting ( 'include_jquery' , False ) if jquery : url = highlightjs_jquery_url ( ) if url : javascript += '<script src="{url}"></script>' . format ( url = url ) url = highlightjs_url ( ) if url : javascript += '<s...
Return HTML for highlightjs JavaScript .
6,210
def repo(name: str, owner: str) -> snug.Query[dict]:
    """A repository lookup by owner and name (relative-URL generator style)."""
    response = yield f'/repos/{owner}/{name}'
    return json.loads(response.content)
a repository lookup by owner and name
6,211
def repo(name: str, owner: str) -> snug.Query[dict]:
    """Look up a GitHub repository by owner and name."""
    url = f'https://api.github.com/repos/{owner}/{name}'
    response = yield snug.GET(url)
    return json.loads(response.content)
a repo lookup by owner and name
6,212
def follow(name: str) -> snug.Query[bool]:
    """Follow another user; True when the API answers HTTP 204 (success)."""
    url = f'https://api.github.com/user/following/{name}'
    response = yield snug.PUT(url)
    return response.status_code == 204
follow another user
6,213
def taskinfo ( self ) : task_input = { 'taskName' : 'QueryTask' , 'inputParameters' : { "Task_Name" : self . _name } } info = taskengine . execute ( task_input , self . _engine , cwd = self . _cwd ) task_def = info [ 'outputParameters' ] [ 'DEFINITION' ] task_def [ 'name' ] = str ( task_def . pop ( 'NAME' ) ) task_def ...
Retrieve the Task Information
6,214
def despeckle_simple ( B , th2 = 2 ) : A = np . copy ( B ) n1 = A . shape [ 0 ] dist = { u : np . diag ( A , u ) for u in range ( n1 ) } medians , stds = { } , { } for u in dist : medians [ u ] = np . median ( dist [ u ] ) stds [ u ] = np . std ( dist [ u ] ) for nw , j in itertools . product ( range ( n1 ) , range ( n...
Single - chromosome despeckling
6,215
def bin_sparse ( M , subsampling_factor = 3 ) : try : from scipy . sparse import coo_matrix except ImportError as e : print ( str ( e ) ) print ( "I am peforming dense binning by default." ) return bin_dense ( M . todense ( ) ) N = M . tocoo ( ) n , m = N . shape row , col , data = N . row , N . col , N . data binned_r...
Perform the bin_dense procedure for sparse matrices . Remaining rows and cols are lumped with the rest at the end .
6,216
def bin_matrix(M, subsampling_factor=3):
    """Bin either sparse or dense matrices.

    Dispatches to bin_sparse for scipy sparse input and to bin_dense
    otherwise, including when scipy itself is unavailable. (The original
    jumped to the dense path by deliberately raising ImportError.)
    """
    try:
        from scipy.sparse import issparse
    except ImportError:
        issparse = None
    if issparse is not None and issparse(M):
        return bin_sparse(M, subsampling_factor=subsampling_factor)
    return bin_dense(M, subsampling_factor=subsampling_factor)
Bin either sparse or dense matrices .
6,217
def bin_annotation ( annotation = None , subsampling_factor = 3 ) : if annotation is None : annotation = np . array ( [ ] ) n = len ( annotation ) binned_positions = [ annotation [ i ] for i in range ( n ) if i % subsampling_factor == 0 ] if len ( binned_positions ) == 0 : binned_positions . append ( 0 ) return np . ar...
Perform binning on genome annotations such as contig information or bin positions .
6,218
def build_pyramid(M, subsampling_factor=3):
    """Iteratively bin matrix M into smaller and smaller matrices.

    Repeatedly applies bin_matrix until the smallest dimension reaches 1,
    returning the list of all levels (original matrix first).

    Raises:
        ValueError: if *subsampling_factor* is lower than 2. The previous
            check (``subs < 1``) contradicted its own error message and
            let a factor of 1 through, which never shrinks the matrix and
            loops forever.
    """
    subs = int(subsampling_factor)
    if subs < 2:
        raise ValueError(
            "Subsampling factor needs to be an integer greater than 1.")
    N = [M]
    while min(N[-1].shape) > 1:
        N.append(bin_matrix(N[-1], subsampling_factor=subs))
    return N
Iterate over a given number of times on matrix M so as to compute smaller and smaller matrices with bin_dense .
6,219
def bin_exact_kb_dense ( M , positions , length = 10 ) : unit = 10 ** 3 ul = unit * length units = positions / ul n = len ( positions ) idx = [ i for i in range ( n - 1 ) if np . ceil ( units [ i ] ) < np . ceil ( units [ i + 1 ] ) ] m = len ( idx ) - 1 N = np . zeros ( ( m , m ) ) remainders = [ 0 ] + [ np . abs ( uni...
Perform the kb - binning procedure with total bin lengths being exactly set to that of the specified input . Fragments overlapping two potential bins will be split and related contact counts will be divided according to overlap proportions in each bin .
6,220
def bin_kb_sparse ( M , positions , length = 10 ) : try : from scipy . sparse import coo_matrix except ImportError as e : print ( str ( e ) ) print ( "I am peforming dense normalization by default." ) return bin_kb_dense ( M . todense ( ) , positions = positions ) r = M . tocoo ( ) unit = 10 ** 3 ul = unit * length uni...
Perform the exact kb - binning procedure on a sparse matrix .
6,221
def trim_sparse ( M , n_std = 3 , s_min = None , s_max = None ) : try : from scipy . sparse import coo_matrix except ImportError as e : print ( str ( e ) ) print ( "I am peforming dense normalization by default." ) return trim_dense ( M . todense ( ) ) r = M . tocoo ( ) sparsity = np . array ( r . sum ( axis = 1 ) ) . ...
Apply the trimming procedure to a sparse matrix .
6,222
def normalize_dense ( M , norm = "frag" , order = 1 , iterations = 3 ) : s = np . array ( M , np . float64 ) floatorder = np . float64 ( order ) if norm == "SCN" : for _ in range ( 0 , iterations ) : sumrows = s . sum ( axis = 1 ) maskrows = ( sumrows != 0 ) [ : , None ] * ( sumrows != 0 ) [ None , : ] sums_row = sumro...
Apply one of the many normalization types to input dense matrix . Will also apply any callable norms such as a user - made or a lambda function .
6,223
def normalize_sparse ( M , norm = "frag" , order = 1 , iterations = 3 ) : try : from scipy . sparse import csr_matrix except ImportError as e : print ( str ( e ) ) print ( "I am peforming dense normalization by default." ) return normalize_dense ( M . todense ( ) ) r = csr_matrix ( M ) if norm == "SCN" : for _ in range...
Applies a normalization type to a sparse matrix .
6,224
def GC_wide ( genome , window = 1000 ) : GC = [ ] from Bio import SeqIO with open ( genome ) as handle : sequence = "" . join ( [ str ( record . seq ) for record in SeqIO . parse ( handle , "fasta" ) ] ) n = len ( sequence ) for i in range ( 0 , n , window ) : portion = sequence [ i : min ( i + window , n ) ] GC . appe...
Compute GC across a window of given length .
6,225
def to_dade_matrix ( M , annotations = "" , filename = None ) : n , m = M . shape A = np . zeros ( ( n + 1 , m + 1 ) ) A [ 1 : , 1 : ] = M if not annotations : annotations = np . array ( [ "" for _ in n ] , dtype = str ) A [ 0 , : ] = annotations A [ : , 0 ] = annotations . T if filename : try : np . savetxt ( filename...
Returns a Dade matrix from input numpy matrix . Any annotations are added as header . If filename is provided and valid said matrix is also saved as text .
6,226
def largest_connected_component ( matrix ) : try : import scipy . sparse n , components = scipy . sparse . csgraph . connected_components ( matrix , directed = False ) print ( "I found " + str ( n ) + " connected components." ) component_dist = collections . Counter ( components ) print ( "Distribution of components: "...
Compute the adjacency matrix of the largest connected component of the graph whose input matrix is adjacent .
6,227
def to_structure ( matrix , alpha = 1 ) : connected = largest_connected_component ( matrix ) distances = to_distance ( connected , alpha ) n , m = connected . shape bary = np . sum ( np . triu ( distances , 1 ) ) / ( n ** 2 ) d = np . array ( np . sum ( distances ** 2 , 0 ) / n - bary ) gram = np . array ( [ ( d [ i ] ...
Compute best matching 3D genome structure from underlying input matrix using ShRec3D - derived method from Lesne et al . 2014 .
6,228
def get_missing_bins ( original , trimmed ) : original_diag = np . diag ( original ) trimmed_diag = np . diag ( trimmed ) index = [ ] m = min ( original . shape ) for j in range ( min ( trimmed . shape ) ) : k = 0 while original_diag [ j + k ] != trimmed_diag [ j ] and k < 2 * m : k += 1 index . append ( k + j ) return...
Retrieve indices of a trimmed matrix with respect to the original matrix . Fairly fast but is only correct if diagonal values are different which is always the case in practice .
6,229
def distance_to_contact ( D , alpha = 1 ) : if callable ( alpha ) : distance_function = alpha else : try : a = np . float64 ( alpha ) def distance_function ( x ) : return 1 / ( x ** ( 1 / a ) ) except TypeError : print ( "Alpha parameter must be callable or an array-like" ) raise except ZeroDivisionError : raise ValueE...
Compute contact matrix from input distance matrix . Distance values of zeroes are given the largest contact count otherwise inferred non - zero distance values .
6,230
def pdb_to_structure ( filename ) : try : from Bio . PDB import PDB except ImportError : print ( "I can't import Biopython which is needed to handle PDB files." ) raise p = PDB . PDBParser ( ) structure = p . get_structure ( 'S' , filename ) for _ in structure . get_chains ( ) : atoms = [ np . array ( atom . get_coord ...
Import a structure object from a PDB file .
6,231
def positions_to_contigs ( positions ) : if isinstance ( positions , np . ndarray ) : flattened_positions = positions . flatten ( ) else : try : flattened_positions = np . array ( [ pos for contig in positions for pos in contig ] ) except TypeError : flattened_positions = np . array ( positions ) if ( np . diff ( posit...
Flattens and converts a positions array to a contigs array if applicable .
6,232
def distance_diagonal_law ( matrix , positions = None ) : n = min ( matrix . shape ) if positions is None : return np . array ( [ np . average ( np . diagonal ( matrix , j ) ) for j in range ( n ) ] ) else : contigs = positions_to_contigs ( positions ) def is_intra ( i , j ) : return contigs [ i ] == contigs [ j ] max_...
Compute a distance law trend using the contact averages of equal distances . Specific positions can be supplied if needed .
6,233
def rippe_parameters ( matrix , positions , lengths = None , init = None , circ = False ) : n , _ = matrix . shape if lengths is None : lengths = np . abs ( np . diff ( positions ) ) measurements , bins = [ ] , [ ] for i in range ( n ) : for j in range ( 1 , i ) : mean_length = ( lengths [ i ] + lengths [ j ] ) / 2. if...
Estimate parameters from the model described in Rippe et al . 2001 .
6,234
def scalogram ( M , circ = False ) : if not type ( M ) is np . ndarray : M = np . array ( M ) if M . shape [ 0 ] != M . shape [ 1 ] : raise ValueError ( "Matrix is not square." ) try : n = min ( M . shape ) except AttributeError : n = M . size N = np . zeros ( M . shape ) for i in range ( n ) : for j in range ( n ) : i...
Computes so - called scalograms used to easily visualize contacts at different distance scales . Edge cases have been painstakingly taken care of .
6,235
def asd(M1, M2):
    """Fourier-transform based distance between two matrices.

    Compares the amplitude spectra of M1 and M2 and returns the norm of
    their difference.
    """
    from scipy.fftpack import fft2

    def amplitude(M):
        return np.abs(fft2(M))

    return np.linalg.norm(amplitude(M2) - amplitude(M1))
Compute a Fourier transform based distance between two matrices .
6,236
def remove_intra(M, contigs):
    """Return a copy of M with intrachromosomal contacts zeroed out.

    Every entry (i, j) for which contigs[i] == contigs[j] is set to 0;
    the input matrix is left untouched.
    """
    N = np.copy(M)
    n = len(N)
    assert n == len(contigs)
    for i in range(n):
        for j in range(n):
            if contigs[i] == contigs[j]:
                N[i, j] = 0
    return N
Remove intrachromosomal contacts
6,237
def positions_to_contigs(positions):
    """Label contigs according to relative positions.

    Each zero in *positions* marks the start of a new contig; every entry
    receives the (1-based) index of the contig it belongs to.
    """
    labels = np.zeros_like(positions)
    current = 0
    for index, position in enumerate(positions):
        current += position == 0
        labels[index] = current
    return labels
Label contigs according to relative positions
6,238
def contigs_to_positions(contigs, binning=10000):
    """Build genomic positions from contig labels.

    Positions restart at 0 at each new contig and advance by *binning*
    within a contig.

    Bug fixes versus the previous version: ``itertools.groubpy`` was a
    typo for ``groupby``; ``len`` cannot be taken on a groupby group
    iterator (it must be materialized first); and ``np.arange`` was
    called on a list instead of the chunk length.
    """
    positions = np.zeros_like(contigs)
    index = 0
    for _, group in itertools.groupby(contigs):
        length = len(list(group))
        positions[index:index + length] = np.arange(length) * binning
        index += length
    return positions
Build positions from contig labels
6,239
def split_matrix(M, contigs):
    """Yield the diagonal sub-matrix of each chromosome/contig.

    *contigs* holds one label per bin; each run of consecutive identical
    labels delimits the square block yielded for that contig.

    Bug fixes versus the previous version: ``itertools.groubpy`` was a
    typo for ``groupby``, and ``len`` cannot be taken on a groupby group
    iterator — it is materialized first.
    """
    index = 0
    for _, group in itertools.groupby(contigs):
        length = len(list(group))
        yield M[index:index + length, index:index + length]
        index += length
Split multiple chromosome matrix
6,240
def find_nearest(sorted_list, x):
    """Return the item of ascending *sorted_list* nearest to *x*.

    Values outside the range clamp to the first/last element; ties
    between the two bracketing values favour the lower one.
    """
    if x <= sorted_list[0]:
        return sorted_list[0]
    if x >= sorted_list[-1]:
        return sorted_list[-1]
    lower, upper = find_le(sorted_list, x), find_ge(sorted_list, x)
    return upper if (x - lower) > (upper - x) else lower
Find the nearest item of x from sorted array .
6,241
def format_x_tick ( axis , major_locator = None , major_formatter = None , minor_locator = None , minor_formatter = None ) : if major_locator : axis . xaxis . set_major_locator ( major_locator ) if major_formatter : axis . xaxis . set_major_formatter ( major_formatter ) if minor_locator : axis . xaxis . set_minor_locat...
Set the x axis's format .
6,242
def set_legend(axis, lines, legend):
    """Attach *legend* labels for *lines* to *axis*.

    A falsy *legend* is a no-op. Errors raised by the plotting backend
    are re-raised as ValueError with the original exception chained
    (the previous version discarded the traceback, and needlessly put
    the un-raisable ``if`` test inside the try block).
    """
    if not legend:
        return
    try:
        axis.legend(lines, legend)
    except Exception as e:
        raise ValueError("invalid 'legend', Error: %s" % e) from e
Set line legend .
6,243
def get_max(array):
    """Return the maximum numeric value in *array*, skipping entries that
    cannot be compared (e.g. None or strings mixed with numbers).

    Raises:
        ValueError: if no comparable numeric value is found.
    """
    largest = float('-inf')
    for item in array:
        try:
            if item > largest:
                largest = item
        except (TypeError, ValueError):
            # Non-comparable entry (invalid data): ignore, preserving the
            # original best-effort contract without a bare ``except``.
            pass
    if np.isinf(largest):
        raise ValueError("there's no numeric value in array!")
    return largest
Get maximum value of an array . Automatically ignore invalid data .
6,244
def get_min(array):
    """Return the minimum numeric value in *array*, skipping entries that
    cannot be compared (e.g. None or strings mixed with numbers).

    Raises:
        ValueError: if no comparable numeric value is found.
    """
    smallest = float('inf')
    for item in array:
        try:
            if item < smallest:
                smallest = item
        except (TypeError, ValueError):
            # Non-comparable entry (invalid data): ignore, preserving the
            # original best-effort contract without a bare ``except``.
            pass
    if np.isinf(smallest):
        raise ValueError("there's no numeric value in array!")
    return smallest
Get minimum value of an array . Automatically ignore invalid data .
6,245
def get_yAxis_limit ( y , lower = 0.05 , upper = 0.2 ) : smallest = get_min ( y ) largest = get_max ( y ) gap = largest - smallest if gap >= 0.000001 : y_min = smallest - lower * gap y_max = largest + upper * gap else : y_min = smallest - lower * abs ( smallest ) y_max = largest + upper * abs ( largest ) return y_min ,...
Find optimal y_min and y_max that guarantee enough space for legend and plot .
6,246
def create_figure(width=20, height=10):
    """Create a matplotlib figure holding a single axes instance.

    Returns:
        (figure, axis) tuple.
    """
    figure = plt.figure(figsize=(width, height))
    return figure, figure.add_subplot(1, 1, 1)
Create a figure instance .
6,247
def preprocess_x_y ( x , y ) : def is_iterable_slicable ( a ) : if hasattr ( a , "__iter__" ) and hasattr ( a , "__getitem__" ) : return True else : return False if is_iterable_slicable ( x ) : if is_iterable_slicable ( x [ 0 ] ) : return x , y else : return ( x , ) , ( y , ) else : raise ValueError ( "invalid input!" ...
Preprocess x y input data . Returns list of list style .
6,248
def execute ( input_params , engine , cwd = None ) : try : taskengine_exe = config . get ( 'engine' ) except NoConfigOptionError : raise TaskEngineNotFoundError ( "Task Engine config option not set." + "\nPlease verify the 'engine' configuration setting." ) if not os . path . exists ( taskengine_exe ) : raise TaskEngin...
Execute a task with the provided input parameters
6,249
def run ( self , wrappers = [ "" , "" ] ) : opened_file = open ( self . lyfile , 'w' ) lilystring = self . piece_obj . toLily ( ) opened_file . writelines ( wrappers [ 0 ] + "\\version \"2.18.2\" \n" + lilystring + wrappers [ 1 ] ) opened_file . close ( ) os . system ( self . lily_script + " --loglevel=WARNING --output...
run the lilypond script on the hierarchy class
6,250
def extract_fasta ( partition_file , fasta_file , output_dir , chunk_size = DEFAULT_CHUNK_SIZE , max_cores = DEFAULT_MAX_CORES , ) : genome = { record . id : record . seq for record in SeqIO . parse ( fasta_file , "fasta" ) } data_chunks = list ( zip ( * np . genfromtxt ( partition_file , usecols = ( 0 , 1 ) , dtype = ...
Extract sequences from bins
6,251
def merge_fasta ( fasta_file , output_dir ) : def chunk_lexicographic_order ( chunk ) : chunk_fields = chunk . split ( "_" ) chunk_name = chunk_fields [ : - 1 ] chunk_id = chunk_fields [ - 1 ] return ( chunk_name , int ( chunk_id ) ) def are_consecutive ( chunk1 , chunk2 ) : if None in { chunk1 , chunk2 } : return Fals...
Merge chunks into complete FASTA bins
6,252
def monitor():
    """Schedule the console coroutine on the event loop and block forever."""
    logger = logging.getLogger(__name__)
    event_loop = asyncio.get_event_loop()
    asyncio.ensure_future(console(event_loop, logger))
    event_loop.run_forever()
Wrapper to call console with a loop .
6,253
def make_object(cls, data):
    """Create an API object of class *cls* with its _data set to *data*.

    Subclasses of Object get a fresh instance built without running their
    constructor; any other *cls* passes *data* through unchanged.
    """
    if not issubclass(cls, Object):
        return data
    instance = object.__new__(cls)
    instance._data = data
    return instance
Creates an API object of class cls setting its _data to data . Subclasses of Object are required to use this to build a new empty instance without using their constructor .
6,254
def String(length=None, **kwargs):
    """A string-valued property with an optional maximum *length*."""
    return Property(
        length=length,
        types=stringy_types,
        convert=to_string,
        **kwargs)
A string valued property with max . length .
6,255
def Datetime(null=True, **kwargs):
    """A datetime property: parsed with dateutil, converted to local time."""
    return Property(
        types=datetime.datetime,
        convert=util.local_timezone,
        load=dateutil.parser.parse,
        null=null,
        **kwargs)
A datetime property .
6,256
def InstanceOf(cls, **kwargs):
    """A property whose value is an instance of *cls*, loaded via cls.load."""
    loader = cls.load
    return Property(types=cls, load=loader, **kwargs)
A property that is an instance of cls .
6,257
def ListOf(cls, **kwargs):
    """A property that is a list of *cls* instances."""
    def _load_items(value):
        # Load each raw element through cls.load.
        return [cls.load(item) for item in value]

    return Property(types=list, load=_load_items, default=list, **kwargs)
A property that is a list of cls .
6,258
def add_dimension(self, name, data=None):
    """Add a named dimension to this entity.

    Creates a fresh __dimtype__ value (or wraps *data* via make_object),
    registers it under *name* in _data and as an attribute, and returns it.
    """
    self.dimensions.add(name)
    value = (self.__dimtype__() if data is None
             else make_object(self.__dimtype__, data))
    self._data[name] = value
    setattr(self, name, value)
    return value
Add a named dimension to this entity .
6,259
def print_block ( self , section_key , f = sys . stdout , file_format = "mwtab" ) : if file_format == "mwtab" : for key , value in self [ section_key ] . items ( ) : if section_key == "METABOLOMICS WORKBENCH" and key not in ( "VERSION" , "CREATED_ON" ) : continue if key in ( "VERSION" , "CREATED_ON" ) : cw = 20 - len (...
Print mwtab section into a file or stdout .
6,260
def _is_mwtab ( string ) : if isinstance ( string , str ) : lines = string . split ( "\n" ) elif isinstance ( string , bytes ) : lines = string . decode ( "utf-8" ) . split ( "\n" ) else : raise TypeError ( "Expecting <class 'str'> or <class 'bytes'>, but {} was passed" . format ( type ( string ) ) ) lines = [ line for...
Test if input string is in mwtab format .
6,261
def getTraceIdsBySpanName(self, service_name, span_name, end_ts, limit, order):
    """Fetch trace ids by service and span name.

    Returns at most *limit* entries from before *end_ts*. Thrift client
    wrapper: sends the request, then blocks on the reply.
    """
    self.send_getTraceIdsBySpanName(service_name, span_name, end_ts, limit, order)
    reply = self.recv_getTraceIdsBySpanName()
    return reply
Fetch trace ids by service and span name . Gets limit number of entries from before the end_ts .
6,262
def getTraceIdsByServiceName(self, service_name, end_ts, limit, order):
    """Fetch trace ids by service name.

    Returns at most *limit* entries from before *end_ts*. Thrift client
    wrapper: sends the request, then blocks on the reply.
    """
    self.send_getTraceIdsByServiceName(service_name, end_ts, limit, order)
    reply = self.recv_getTraceIdsByServiceName()
    return reply
Fetch trace ids by service name . Gets limit number of entries from before the end_ts .
6,263
def getTraceIdsByAnnotation(self, service_name, annotation, value, end_ts, limit, order):
    """Fetch trace ids carrying a particular annotation.

    Returns at most *limit* entries from before *end_ts*. Thrift client
    wrapper: sends the request, then blocks on the reply.
    """
    self.send_getTraceIdsByAnnotation(service_name, annotation, value, end_ts, limit, order)
    reply = self.recv_getTraceIdsByAnnotation()
    return reply
Fetch trace ids with a particular annotation . Gets limit number of entries from before the end_ts .
6,264
def getTracesByIds(self, trace_ids, adjust):
    """Get the full traces associated with the given trace ids.

    Thrift client wrapper: sends the request, then blocks on the reply.
    """
    self.send_getTracesByIds(trace_ids, adjust)
    reply = self.recv_getTracesByIds()
    return reply
Get the full traces associated with the given trace ids .
6,265
def getTraceSummariesByIds(self, trace_ids, adjust):
    """Fetch trace summaries for the given trace ids.

    Thrift client wrapper: sends the request, then blocks on the reply.
    """
    self.send_getTraceSummariesByIds(trace_ids, adjust)
    reply = self.recv_getTraceSummariesByIds()
    return reply
Fetch trace summaries for the given trace ids .
6,266
def getTraceCombosByIds(self, trace_ids, adjust):
    """Fetch combined trace data (summaries plus timelines) for the ids.

    Thrift client wrapper: sends the request, then blocks on the reply.
    """
    self.send_getTraceCombosByIds(trace_ids, adjust)
    reply = self.recv_getTraceCombosByIds()
    return reply
Not content with just one of traces summaries or timelines? Want it all? This is the method for you .
6,267
def setTraceTimeToLive(self, trace_id, ttl_seconds):
    """Change the TTL of a trace worth keeping for further investigation.

    Thrift client wrapper: the reply is awaited but its value discarded.
    """
    self.send_setTraceTimeToLive(trace_id, ttl_seconds)
    self.recv_setTraceTimeToLive()
Change the TTL of a trace . If we find an interesting trace we want to keep around for further investigation .
6,268
def discover_datasource_columns(datastore_str, datasource_id):
    """Find the datasource identified by *datasource_id* in the datastore
    and return its columns; RASTER datasources have no columns."""
    datasource = DataStore(datastore_str).get_datasource(datasource_id)
    if datasource.type == "RASTER":
        return []
    return datasource.list_columns()
Loop through the datastore s datasources to find the datasource identified by datasource_id return the matching datasource s columns .
6,269
def _get_column_type(self, column):
    """Classify *column* as 'numeric' (OGR integer/real) or 'string'."""
    numeric_types = (ogr.OFTInteger, ogr.OFTReal)
    return 'numeric' if column.GetType() in numeric_types else 'string'
Return numeric if the column is of type integer or real otherwise return string .
6,270
def _get_default_mapfile_excerpt(self):
    """Build a dict representation of a MapFile LAYER block carrying one
    default-styled class."""
    layerobj = self._get_layer_stub()
    classobj = mapscript.classObj()
    layerobj.insertClass(classobj)
    classobj.insertStyle(self._get_default_style())
    return mapserializer.layerobj_to_dict(layerobj, None)
Given an OGR string an OGR connection and an OGR layer create and return a representation of a MapFile LAYER block .
6,271
def _get_layer_stub ( self ) : layerobj = mapscript . layerObj ( ) layerobj . name = self . name layerobj . status = mapscript . MS_ON projection = self . ogr_layer . GetSpatialRef ( ) featureIdColumn = self . _get_featureId_column ( ) if featureIdColumn is not None and featureIdColumn != '' : layerobj . metadata . set...
builds a minimal mapscript layerobj with no styling
6,272
def reelect_app ( self , request , app ) : app . disconnect ( ) endpoints_size = len ( app . locator . endpoints ) for _ in xrange ( 0 , endpoints_size + 1 ) : if len ( app . locator . endpoints ) == 0 : request . logger . info ( "giving up on connecting to dist-info hosts, falling back to common pool processing" ) app...
tries to connect to the same app on a different host from dist - info
6,273
def RecordHelloWorld(handler, t):
    """Demonstration of recording a message."""
    record_url = "%s/receive_recording.py" % THIS_URL
    t.startRecording(record_url)
    t.say("Hello, World.")
    t.stopRecording()
    rendered = t.RenderJson()
    logging.info("RecordHelloWorld json: %s" % rendered)
    handler.response.out.write(rendered)
Demonstration of recording a message .
6,274
def RedirectDemo(handler, t):
    """Demonstration of redirecting a call to another number."""
    t.redirect(SIP_PHONE)
    rendered = t.RenderJson()
    logging.info("RedirectDemo json: %s" % rendered)
    handler.response.out.write(rendered)
Demonstration of redirecting to another number .
6,275
def TransferDemo(handler, t):
    """Demonstration of transferring a call to another number."""
    t.say("One moment please.")
    t.transfer(MY_PHONE)
    t.say("Hi. I am a robot")
    rendered = t.RenderJson()
    logging.info("TransferDemo json: %s" % rendered)
    handler.response.out.write(rendered)
Demonstration of transferring to another number
6,276
def retry ( ExceptionToCheck , tries = 4 , delay = 3 , backoff = 2 , status_codes = [ ] , logger = None ) : if backoff is None or backoff <= 0 : raise ValueError ( "backoff must be a number greater than 0" ) tries = math . floor ( tries ) if tries < 0 : raise ValueError ( "tries must be a number 0 or greater" ) if dela...
Decorator function for retrying the decorated function using an exponential or fixed backoff .
6,277
def _custom_response_edit ( self , method , url , headers , body , response ) : if self . get_implementation ( ) . is_mock ( ) : delay = self . get_setting ( "MOCKDATA_DELAY" , 0.0 ) time . sleep ( delay ) self . _edit_mock_response ( method , url , headers , body , response )
This method allows a service to edit a response .
6,278
def postURL(self, url, headers=None, body=None):
    """Request *url* using the HTTP method POST.

    A fresh empty header dict is used per call by default; the previous
    mutable ``headers={}`` default was shared between all calls.
    """
    effective_headers = {} if headers is None else headers
    return self._load_resource("POST", url, effective_headers, body)
Request a URL using the HTTP method POST .
6,279
def putURL(self, url, headers, body=None):
    """Request *url* using the HTTP method PUT."""
    http_method = "PUT"
    return self._load_resource(http_method, url, headers, body)
Request a URL using the HTTP method PUT .
6,280
def patchURL(self, url, headers, body):
    """Request *url* using the HTTP method PATCH."""
    http_method = "PATCH"
    return self._load_resource(http_method, url, headers, body)
Request a URL using the HTTP method PATCH .
6,281
def setup_dir(f):
    """Decorate *f* to run inside the directory where setup.py resides.

    The wrapper now preserves f's metadata (name, docstring) via
    functools.wraps, and the local no longer shadows the decorator name.
    """
    from functools import wraps

    directory = os.path.dirname(os.path.abspath(__file__))

    @wraps(f)
    def wrapped(*args, **kwargs):
        with chdir(directory):
            return f(*args, **kwargs)

    return wrapped
Decorate f to run inside the directory where setup . py resides .
6,282
def feedback_form ( context ) : user = None url = None if context . get ( 'request' ) : url = context [ 'request' ] . path if context [ 'request' ] . user . is_authenticated ( ) : user = context [ 'request' ] . user return { 'form' : FeedbackForm ( url = url , user = user ) , 'background_color' : FEEDBACK_FORM_COLOR , ...
Template tag to render a feedback form .
6,283
def select(self, *itms):
    """Append a SELECT clause for *itms* (default '*') on the current table.

    Returns self so calls can be chained.
    """
    columns = ', '.join(itms) if itms else '*'
    self.terms.append("select %s from %s" % (columns, self.table))
    return self
Joins the items to be selected and inserts the current table name
6,284
def _in ( self , * lst ) : self . terms . append ( 'in (%s)' % ', ' . join ( [ '"%s"' % x for x in lst ] ) ) return self
Build out the in clause . Using _in due to shadowing for in
6,285
def compile(self):
    """Join the collected clause fragments into the final YQL query and
    store its URL-quoted form in compiled_str.

    Returns self for chaining.
    """
    query = ""
    for fragment in self.terms:
        query = fragment if not query else query + " " + fragment
    self.compiled_str = urllib.parse.quote(query)
    return self
Take all of the parts components and build the complete query to be passed to Yahoo YQL
6,286
def read_files ( * sources , ** kwds ) : filenames = _generate_filenames ( sources ) filehandles = _generate_handles ( filenames ) for fh , source in filehandles : try : f = mwtab . MWTabFile ( source ) f . read ( fh ) if kwds . get ( 'validate' ) : validator . validate_file ( mwtabfile = f , section_schema_mapping = m...
Construct a generator that yields file instances .
6,287
def is_url(path):
    """Test whether *path* looks like a valid URL.

    Scheme, network location AND path must all be non-empty, so e.g.
    'http://example.com' (no path) is rejected.
    """
    try:
        parsed = urlparse(path)
    except ValueError:
        return False
    return bool(parsed.scheme and parsed.netloc and parsed.path)
Test if path represents a valid URL .
6,288
def AuthMiddleware ( app ) : basic_redirect_form = BasicRedirectFormPlugin ( login_form_url = "/signin" , login_handler_path = "/login" , post_login_url = "/" , logout_handler_path = "/logout" , post_logout_url = "/signin" , rememberer_name = "cookie" ) return setup_sql_auth ( app , user_class = model . User , group_cl...
Add authentication and authorization middleware to the app .
6,289
def _get_full_path ( self , path , environ ) : if path . startswith ( '/' ) : path = environ . get ( 'SCRIPT_NAME' , '' ) + path return path
Return the full path to path by prepending the SCRIPT_NAME . If path is a URL do nothing .
6,290
def _replace_qs ( self , url , qs ) : url_parts = list ( urlparse ( url ) ) url_parts [ 4 ] = qs return urlunparse ( url_parts )
Replace the query string of url with qs and return the new URL .
6,291
def write(self):
    """Persist the current settings to the config file, then refresh
    storage so the new values are picked up."""
    with open(storage.config_file, 'w') as handle:
        yaml.dump(self.as_dict(), handle, default_flow_style=False)
    storage.refresh()
write the current settings to the config file
6,292
def process_bind_param(self, value, dialect):
    """Serialize a Python object to its JSON string form for storage;
    None passes through unchanged."""
    if value is None:
        return None
    return simplejson.dumps(value)
convert value from python object to json
6,293
def process_result_value(self, value, dialect):
    """Deserialize a stored JSON string back into a Python object;
    None passes through unchanged."""
    if value is None:
        return None
    return simplejson.loads(value)
convert value from json to a python object
6,294
def getBriefModuleInfoFromFile(fileName):
    """Build the brief module info for the given source file."""
    info = BriefModuleInfo()
    _cdmpyparser.getBriefModuleInfoFromFile(info, fileName)
    info.flush()
    return info
Builds the brief module info from file
6,295
def getBriefModuleInfoFromMemory(content):
    """Build the brief module info from an in-memory source buffer."""
    info = BriefModuleInfo()
    _cdmpyparser.getBriefModuleInfoFromMemory(info, content)
    info.flush()
    return info
Builds the brief module info from memory
6,296
def getDisplayName(self):
    """Provide a display name, honouring the import alias when set."""
    if self.alias != "":
        return "%s as %s" % (self.name, self.alias)
    return self.name
Provides a name for display purpose respecting the alias
6,297
def flush(self):
    """Flush the collected information.

    Merges any objects still on the stack down to level 0, then records
    the last pending import, if any.
    """
    self.__flushLevel(0)
    pending = self.__lastImport
    if pending is not None:
        self.imports.append(pending)
Flushes the collected information
6,298
def __flushLevel ( self , level ) : objectsCount = len ( self . objectsStack ) while objectsCount > level : lastIndex = objectsCount - 1 if lastIndex == 0 : if self . objectsStack [ 0 ] . __class__ . __name__ == "Class" : self . classes . append ( self . objectsStack [ 0 ] ) else : self . functions . append ( self . ob...
Merge the found objects to the required level
6,299
def _onEncoding(self, encString, line, pos, absPosition):
    """Memorize the module encoding as an Encoding instance."""
    encoding = Encoding(encString, line, pos, absPosition)
    self.encoding = encoding
Memorizes module encoding