idx
int64
0
252k
question
stringlengths
48
5.28k
target
stringlengths
5
1.23k
2,400
def parse(self, file_name):
    """Parse an entire file and return the resulting parsed object.

    :param file_name: path of the file to read (decoded as UTF-8)
    :return: the freshly built ``self.object``
    """
    self.object = self.parsed_class()
    with open(file_name, encoding='utf-8') as handle:
        self.parse_str(handle.read())
    return self.object
Parse entire file and return relevant object .
2,401
def has_next(self):
    """Return True when the paginator holds one more item than this page shows.

    Indexing at ``per_page`` probes for an item beyond the current page;
    an IndexError means this is the last page.
    """
    try:
        self.paginator.object_list[self.paginator.per_page]
    except IndexError:
        return False
    return True
Checks for one more item than last on this page .
2,402
def parse_miss_cann(node, m, c):
    """Split total missionary/cannibal counts between the two river banks.

    node -- (missionaries, cannibals, boat_flag); a truthy boat_flag means
    the counts stored in node belong to the first bank, otherwise to the
    second bank.
    Returns (m1, c1, m2, c2) -- counts on bank one and bank two.
    """
    if node[2]:
        m1, c1 = node[0], node[1]
        m2, c2 = m - node[0], c - node[1]
    else:
        m2, c2 = node[0], node[1]
        m1, c1 = m - node[0], c - node[1]
    return m1, c1, m2, c2
extracts names from the node to get counts of miss + cann on both sides
2,403
def solve ( m , c ) : G = { ( m , c , 1 ) : [ ] } frontier = [ ( m , c , 1 ) ] while len ( frontier ) > 0 : hold = list ( frontier ) for node in hold : newnode = [ ] frontier . remove ( node ) newnode . extend ( pick_next_boat_trip ( node , m , c , frontier ) ) for neighbor in newnode : if neighbor not in G : G [ node ...
run the algorithm to find the path list
2,404
def create_script_fact ( self ) : self . ddl_text += '---------------------------------------------\n' self . ddl_text += '-- CREATE Fact Table - ' + self . fact_table + '\n' self . ddl_text += '---------------------------------------------\n' self . ddl_text += 'DROP TABLE ' + self . fact_table + ' CASCADE CONSTRAINTS...
appends the CREATE TABLE index etc to self . ddl_text
2,405
def create_script_staging_table ( self , output_table , col_list ) : self . ddl_text += '---------------------------------------------\n' self . ddl_text += '-- CREATE Staging Table - ' + output_table + '\n' self . ddl_text += '---------------------------------------------\n' self . ddl_text += 'DROP TABLE ' + output_t...
appends the CREATE TABLE index etc to another table
2,406
def distinct_values ( t_old , t_new ) : res = [ ] res . append ( [ ' -- NOT IN check -- ' ] ) for new_col in t_new . header : dist_new = t_new . get_distinct_values_from_cols ( [ new_col ] ) for old_col in t_old . header : if old_col == new_col : dist_old = t_old . get_distinct_values_from_cols ( [ old_col ] ) not_in_n...
for all columns check which values are not in the other table
2,407
def aikif_web_menu ( cur = '' ) : pgeHdg = '' pgeBlurb = '' if cur == '' : cur = 'Home' txt = get_header ( cur ) txt += '<div id = "container">\n' txt += ' <div id = "header">\n' txt += ' <!-- Banner txt += ' <img src = "' + os . path . join ( '/static' , 'aikif_banner.jpg' ) + '" alt="AIKIF Banner"/>\n' txt += ...
returns the web page header containing standard AIKIF top level web menu
2,408
def main ( ) : print ( "Generating research notes..." ) if os . path . exists ( fname ) : os . remove ( fname ) append_rst ( '================================================\n' ) append_rst ( 'Comparison of Information Aggregation Techniques\n' ) append_rst ( '================================================\n\n' ) ap...
This generates the research document based on the results of the various programs and includes RST imports for introduction and summary
2,409
def find(self, txt):
    """Return the list of records in self.data that contain txt."""
    return [record for record in self.data if txt in record]
returns a list of records containing text
2,410
def schema_complete ( ) : return Schema ( { 'stage' : And ( str , len ) , 'timestamp' : int , 'status' : And ( str , lambda s : s in [ 'started' , 'succeeded' , 'failed' ] ) , Optional ( 'matrix' , default = 'default' ) : And ( str , len ) , Optional ( 'information' , default = { } ) : { Optional ( Regex ( r'([a-z][_a-...
Schema for data in CollectorUpdate .
2,411
def schema_event_items():
    """Schema for event items: a positive integer timestamp plus an optional
    'information' mapping whose keys are lowercase snake_case identifiers."""
    return {
        'timestamp': And(int, lambda n: n > 0),
        Optional('information', default={}): {
            Optional(Regex(r'([a-z][_a-z]*)')): object
        },
    }
Schema for event items .
2,412
def schema_complete():
    """Schema for data held in a CollectorStage."""
    valid_status = ['started', 'succeeded', 'failed']
    return Schema({
        'stage': And(str, len),
        'status': And(str, lambda s: s in valid_status),
        Optional('events', default=[]): And(
            len, [CollectorStage.schema_event_items()]),
    })
Schema for data in CollectorStage .
2,413
def add ( self , timestamp , information ) : try : item = Schema ( CollectorStage . schema_event_items ( ) ) . validate ( { 'timestamp' : timestamp , 'information' : information } ) self . events . append ( item ) except SchemaError as exception : Logger . get_logger ( __name__ ) . error ( exception ) raise RuntimeErro...
Add event information .
2,414
def duration(self):
    """Seconds elapsed between the first and last recorded event.

    Returns 0.0 when no events have been recorded.
    """
    if not self.events:
        return 0.0
    first = datetime.fromtimestamp(self.events[0]['timestamp'])
    last = datetime.fromtimestamp(self.events[-1]['timestamp'])
    return (last - first).total_seconds()
Calculate how long the stage took .
2,415
def count_stages(self, matrix_name):
    """Number of registered stages for the given matrix name (0 if unknown)."""
    if matrix_name not in self.data:
        return 0
    return len(self.data[matrix_name])
Number of registered stages for given matrix name .
2,416
def get_stage(self, matrix_name, stage_name):
    """Return the first stage named stage_name inside the given matrix,
    or None when the matrix or the stage does not exist."""
    if matrix_name not in self.data:
        return None
    matches = Select(self.data[matrix_name]).where(
        lambda entry: entry.stage == stage_name).build()
    return matches[0] if len(matches) > 0 else None
Get Stage of a concrete matrix .
2,417
def get_duration(self, matrix_name):
    """Total duration across all stages of a matrix (0.0 if matrix unknown)."""
    if matrix_name not in self.data:
        return 0.0
    return sum(stage.duration() for stage in self.data[matrix_name])
Get duration for a concrete matrix .
2,418
def update ( self , item ) : if item . matrix not in self . data : self . data [ item . matrix ] = [ ] result = Select ( self . data [ item . matrix ] ) . where ( lambda entry : entry . stage == item . stage ) . build ( ) if len ( result ) > 0 : stage = result [ 0 ] stage . status = item . status stage . add ( item . t...
Add a collector item .
2,419
def run(self):
    """Collector main loop: consume queue items until the None sentinel.

    Each item updates the store and regenerates the HTML report in the
    current working directory.
    """
    while True:
        item = self.queue.get()
        if item is None:
            # None is the shutdown sentinel posted by the producer.
            Logger.get_logger(__name__).info("Stopping collector process ...")
            break
        self.store.update(item)
        generate(self.store, 'html', os.getcwd())
Collector main loop .
2,420
def read_map(fname):
    """Read a saved text file and return its lines (newlines kept) as a list."""
    with open(fname, "r") as handle:
        return handle.readlines()
reads a saved text file to list
2,421
def show_grid_from_file(self, fname):
    """Read a saved grid file and paint every character onto the canvas,
    one draw_cell call per (row, column, value)."""
    with open(fname, "r") as handle:
        for row_num, line in enumerate(handle):
            for col_num, cell in enumerate(line):
                self.draw_cell(row_num, col_num, cell)
reads a saved grid file and paints it on the canvas
2,422
def draw_cell ( self , row , col , val ) : if val == 'T' : self . paint_target ( row , col ) elif val == '#' : self . paint_block ( row , col ) elif val == 'X' : self . paint_hill ( row , col ) elif val == '.' : self . paint_land ( row , col ) elif val in [ 'A' ] : self . paint_agent_location ( row , col ) elif val in ...
draw a cell as position row col containing val
2,423
def paint_agent_trail(self, y, x, val):
    """Paint the interior of cell (y, x) in the colour for agent val.

    A one-pixel border is left unpainted so overlapping agent trails in the
    same cell remain distinguishable.

    Fix: the original looked up self.agent_color(val) once per pixel; the
    colour is loop-invariant, so it is now resolved once.
    """
    colour = self.agent_color(val)
    for j in range(1, self.cell_height - 1):
        for i in range(1, self.cell_width - 1):
            self.img.put(colour,
                         (x * self.cell_width + i, y * self.cell_height + j))
paint an agent trail as ONE pixel to allow for multiple agent trails to be seen in the same cell
2,424
def agent_color ( self , val ) : if val == '0' : colour = 'blue' elif val == '1' : colour = 'navy' elif val == '2' : colour = 'firebrick' elif val == '3' : colour = 'blue' elif val == '4' : colour = 'blue2' elif val == '5' : colour = 'blue4' elif val == '6' : colour = 'gray22' elif val == '7' : colour = 'gray57' elif v...
gets a colour for agent 0 - 9
2,425
def create_random_population ( num = 100 ) : people = [ ] for _ in range ( num ) : nme = 'blah' tax_min = random . randint ( 1 , 40 ) / 100 tax_max = tax_min + random . randint ( 1 , 40 ) / 100 tradition = random . randint ( 1 , 100 ) / 100 equity = random . randint ( 1 , 100 ) / 100 pers = mod_hap_env . Person ( nme ,...
create a list of people with randomly generated names and stats
2,426
def cleanup ( self ) : if self . data . hooks and len ( self . data . hooks . cleanup ) > 0 : env = self . data . env_list [ 0 ] . copy ( ) env . update ( { 'PIPELINE_RESULT' : 'SUCCESS' , 'PIPELINE_SHELL_EXIT_CODE' : '0' } ) config = ShellConfig ( script = self . data . hooks . cleanup , model = self . model , env = e...
Run cleanup script of pipeline when hook is configured .
2,427
def process ( self , pipeline ) : output = [ ] for entry in pipeline : key = list ( entry . keys ( ) ) [ 0 ] if key == "env" : self . data . env_list [ 0 ] . update ( entry [ key ] ) self . logger . debug ( "Updating environment at level 0 with %s" , self . data . env_list [ 0 ] ) continue stage = Stage ( self , re . m...
Processing the whole pipeline definition .
2,428
def process ( self , txt , mode ) : result = '' if mode == 'ADD' : if txt in self . all_commands [ 'cmd' ] [ 0 ] : self . show_output ( 'Returning to Command mode' ) mode = 'COMMAND' self . prompt = '> ' else : self . show_output ( 'Adding Text : ' , txt ) result = self . cmd_add ( txt ) elif mode == 'QUERY' : if txt i...
Top level function to process the command, mainly depending on mode. This should work by using the function name defined in all_commands
2,429
def cmd_add(self, txt):
    """Add txt to the raw data store, echo progress and return a summary."""
    self.show_output('Adding ', txt)
    self.raw.add(txt)
    print(self.raw)
    return 'Added ' + txt
Enter add mode - all text entered now will be processed as adding information until cancelled
2,430
def cmd_query(self, txt):
    """Search the raw store for txt, display each hit, return a count summary."""
    self.show_output('Searching for ', txt)
    matches = self.raw.find(txt)
    for match in matches:
        self.show_output(match)
    return str(len(matches)) + ' results for ' + txt
search and query the AIKIF
2,431
def verify_integrity ( self ) : if not self . __integrity_check : if not self . __appid : raise Exception ( 'U2F_APPID was not defined! Please define it in configuration file.' ) if self . __facets_enabled and not len ( self . __facets_list ) : raise Exception ( ) undefined_message = 'U2F {name} handler is not defined!...
Verifies that all required functions been injected .
2,432
def devices ( self ) : self . verify_integrity ( ) if session . get ( 'u2f_device_management_authorized' , False ) : if request . method == 'GET' : return jsonify ( self . get_devices ( ) ) , 200 elif request . method == 'DELETE' : response = self . remove_device ( request . json ) if response [ 'status' ] == 'ok' : re...
Manages users enrolled u2f devices
2,433
def facets ( self ) : self . verify_integrity ( ) if self . __facets_enabled : data = json . dumps ( { 'trustedFacets' : [ { 'version' : { 'major' : 1 , 'minor' : 0 } , 'ids' : self . __facets_list } ] } , sort_keys = True , indent = 2 , separators = ( ',' , ': ' ) ) mime = 'application/fido.trusted-apps+json' resp = R...
Provides facets support . REQUIRES VALID HTTPS!
2,434
def get_enroll(self):
    """Start a U2F registration round and return the new enroll challenge.

    The challenge is also stashed in the session for the later
    verify_enroll step.
    """
    registered = [DeviceRegistration.wrap(device)
                  for device in self.__get_u2f_devices()]
    enroll = start_register(self.__appid, registered)
    enroll['status'] = 'ok'
    session['_u2f_enroll_'] = enroll.json
    return enroll
Returns new enroll seed
2,435
def verify_enroll ( self , response ) : seed = session . pop ( '_u2f_enroll_' ) try : new_device , cert = complete_register ( seed , response , self . __facets_list ) except Exception as e : if self . __call_fail_enroll : self . __call_fail_enroll ( e ) return { 'status' : 'failed' , 'error' : 'Invalid key handle!' } f...
Verifies and saves U2F enroll
2,436
def get_signature_challenge ( self ) : devices = [ DeviceRegistration . wrap ( device ) for device in self . __get_u2f_devices ( ) ] if devices == [ ] : return { 'status' : 'failed' , 'error' : 'No devices been associated with the account!' } challenge = start_authenticate ( devices ) challenge [ 'status' ] = 'ok' sess...
Returns new signature challenge
2,437
def remove_device ( self , request ) : devices = self . __get_u2f_devices ( ) for i in range ( len ( devices ) ) : if devices [ i ] [ 'keyHandle' ] == request [ 'id' ] : del devices [ i ] self . __save_u2f_devices ( devices ) return { 'status' : 'ok' , 'message' : 'Successfully deleted your device!' } return { 'status'...
Removes device specified by id
2,438
def verify_counter(self, signature, counter):
    """Verify that counter is strictly greater than the stored value for the
    device matching signature['keyHandle'].

    On success the stored counter is updated and persisted and True is
    returned. A stale/replayed counter returns False.

    Fix: an unknown key handle previously fell off the end of the loop and
    returned None; it now returns an explicit False.
    """
    devices = self.__get_u2f_devices()
    for device in devices:
        if device['keyHandle'] == signature['keyHandle']:
            if counter > device['counter']:
                device['counter'] = counter
                self.__save_u2f_devices(devices)
                return True
            return False
    return False
Verifies that counter value is greater than previous signature
2,439
def validate(data):
    """Validate data against Validator.SCHEMA.

    Returns the validated data, or None (after logging the error) when
    validation fails.
    """
    try:
        return Schema(Validator.SCHEMA).validate(data)
    except SchemaError as error:
        logging.getLogger(__name__).error(error)
        return None
Validate data against the schema .
2,440
def include(self, node):
    """Handle the !include constructor by loading the referenced YAML file.

    Only scalar nodes (plain filename strings) are supported; any other
    node type raises RuntimeError.
    """
    if not isinstance(node, ScalarNode):
        raise RuntimeError("Not supported !include on type %s" % type(node))
    return Loader.include_file(self.construct_scalar(node))
Include the defined yaml file .
2,441
def load(filename):
    """Load a YAML file using the custom include-aware Loader.

    Raises RuntimeError when the file does not exist.
    """
    if not os.path.isfile(filename):
        raise RuntimeError("File %s doesn't exist!" % filename)
    with open(filename) as handle:
        return yaml_load(handle, Loader=Loader)
Load yaml file with specific include loader .
2,442
def pivot(self):
    """Transpose ip_data (rows become columns) into op_data."""
    self.op_data = [list(column) for column in zip(*self.ip_data)]
transposes rows and columns
2,443
def key_value_pairs(self):
    """Convert ip_data (header row followed by data rows) into op_data as
    [row_id, column_name, value] triples, using each data row's first
    column as its id so any dataset can later be transposed/manipulated."""
    headers = self.ip_data[0]
    self.op_data = []
    for row in self.ip_data[1:]:
        row_id = row[0]
        for col_num, value in enumerate(row):
            self.op_data.append([row_id, headers[col_num], value])
convert list to key value pairs. This should also create unique ids to allow for any dataset to be transposed and then later manipulated r1c1 r1c2 r1c3 r2c1 r2c2 r2c3 should be converted to ID COLNUM VAL r1c1
2,444
def links_to_data ( self , col_name_col_num , col_val_col_num , id_a_col_num , id_b_col_num ) : print ( 'Converting links to data' ) self . op_data unique_ids = [ ] unique_vals = [ ] self . op_data . append ( [ 'Name' , self . ip_data [ 1 ] [ col_name_col_num ] ] ) for r in self . ip_data [ 1 : ] : if r [ id_a_col_num ...
This is the reverse of data_to_links and takes a links table and generates a data table as follows Input Table Output Table Cat_Name CAT_val Person_a person_b NAME Location Location Perth John Fred John Perth Location Perth John Cindy Cindy Perth Location Perth Fred Cindy Fred Perth
2,445
def find_best_plan(self):
    """Run every plan against every strategy."""
    for plan in self.plans:
        for strategy in self.strategy:
            self.run_plan(plan, strategy)
try each strategy with different amounts
2,446
def load_data ( fname ) : print ( 'Loading ' + fname + ' to redis' ) r = redis . StrictRedis ( host = '127.0.0.1' , port = 6379 , db = 0 ) with open ( fname , 'r' ) as f : for line_num , row in enumerate ( f ) : if row . strip ( '' ) != '' : if line_num < 100000000 : l_key , l_val = parse_n3 ( row , 'csv' ) if line_num...
loads previously exported CSV file to redis database
2,447
def parse_n3 ( row , src = 'csv' ) : if row . strip ( ) == '' : return '' , '' l_root = 'opencyc' key = '' val = '' if src == 'csv' : cols = row . split ( ',' ) if len ( cols ) < 3 : return '' , '' key = '' val = '' key = l_root + ':' + cols [ 1 ] . strip ( '"' ) . strip ( ) + ':' + cols [ 2 ] . strip ( '"' ) . strip (...
takes a row from an n3 file and returns the triple NOTE - currently parses a CSV line already split via cyc_extract . py
2,448
def summarise_file_as_html ( fname ) : txt = '<H1>' + fname + '</H1>' num_lines = 0 print ( 'Reading OpenCyc file - ' , fname ) with open ( ip_folder + os . sep + fname , 'r' ) as f : txt += '<PRE>' for line in f : if line . strip ( ) != '' : num_lines += 1 if num_lines < 80 : txt += str ( num_lines ) + ': ' + escape_h...
takes a large data file and produces a HTML summary as html
2,449
def main ( ) : iterations = 9 years = 3 width = 22 height = 78 time_delay = 0.03 lg = mod_log . Log ( 'test' ) lg . record_process ( 'Game of Life' , 'game_of_life_console.py' ) for _ in range ( iterations ) : s , e = run_game_of_life ( years , width , height , time_delay , 'N' ) lg . record_result ( "Started with " + ...
Example to show AIKIF logging of results . Generates a sequence of random grids and runs the Game of Life saving results
2,450
def run_game_of_life ( years , width , height , time_delay , silent = "N" ) : lfe = mod_grid . GameOfLife ( width , height , [ '.' , 'x' ] , 1 ) set_random_starting_grid ( lfe ) lg . record_source ( lfe , 'game_of_life_console.py' ) print ( lfe ) start_cells = lfe . count_filled_positions ( ) for ndx , dummy_idx in enu...
run a single game of life for years and log start and end living cells to aikif
2,451
def print_there(x, y, text):
    """Write text at console position (x, y) using ANSI save/restore-cursor
    escapes, so the caller's cursor position is untouched. Test-quality
    console display helper, not production grade."""
    sys.stdout.write("\x1b7\x1b[%d;%df%s\x1b8" % (x, y, text))
    sys.stdout.flush()
allows display of a game of life on a console via resetting cursor position to a set point - looks ok for testing but not production quality .
2,452
def identify_col_pos(txt):
    """Guess fixed-width column start positions from the first line of txt.

    A column starts wherever a non-whitespace character follows whitespace
    (per the _is_white_space helper). The last appended value is the index
    of the final scanned character, acting as an end marker.

    Fix: an empty first line previously raised NameError because col_pos
    was never bound; it now returns an empty list.
    """
    res = []
    first_line = txt.split('\n')[0]
    if not first_line:
        return res
    prev_ch = ''
    for col_pos, ch in enumerate(first_line):
        if _is_white_space(ch) is False and _is_white_space(prev_ch) is True:
            res.append(col_pos)
        prev_ch = ch
    res.append(col_pos)
    return res
assume no delimiter in this file so guess the best fixed column widths to split by
2,453
def load_tbl_from_csv(fname):
    """Read a CSV file into a list of rows without worrying about odd
    characters (cp1252, undecodable bytes ignored).

    Fix: the original constructed two csv.reader objects over the same
    file handle (the first, with an explicit delimiter, was never used);
    only one reader is needed.
    """
    import csv
    with open(fname, 'r', encoding='cp1252', errors='ignore') as csvfile:
        return list(csv.reader(csvfile))
read a CSV file to list without worrying about odd characters
2,454
def _get_dict_char_count ( txt ) : dct = { } for letter in txt : if letter in dct : dct [ letter ] += 1 else : dct [ letter ] = 1 return dct
reads the characters in txt and returns a dictionary of all letters
2,455
def creator ( entry , config ) : template_file = os . path . join ( os . path . dirname ( __file__ ) , 'templates/docker-container.sh.j2' ) with open ( template_file ) as handle : template = handle . read ( ) wrapped_script = render ( template , container = { 'image' : 'centos:7' if 'image' not in entry else entry [ 'i...
Creator function for creating an instance of a Bash .
2,456
def creator ( entry , config ) : dockerfile = render ( config . script , model = config . model , env = config . env , variables = config . variables , item = config . item ) filename = "dockerfile.dry.run.see.comment" if not config . dry_run : temp = tempfile . NamedTemporaryFile ( prefix = "dockerfile-" , mode = 'w+t...
Creator function for creating an instance of a Docker image script .
2,457
def stdout_redirector():
    """Temporarily replace sys.stdout with a Stream, yielding it to the
    caller; the replacement is closed and the original stdout restored
    even if the caller raises."""
    saved = sys.stdout
    sys.stdout = Stream()
    try:
        yield sys.stdout
    finally:
        sys.stdout.close()
        sys.stdout = saved
Simplify redirect of stdout .
2,458
def write_temporary_file(content, prefix='', suffix=''):
    """Write content to a new temporary file and return its path.

    The file is created with delete=False, so the caller owns cleanup.
    """
    handle = tempfile.NamedTemporaryFile(prefix=prefix, suffix=suffix,
                                         mode='w+t', delete=False)
    handle.writelines(content)
    handle.close()
    return handle.name
Generating a temporary file with content .
2,459
def print_new(ctx, name, migration_type):
    """Print the filename a new migration of the given type would receive."""
    filename = ctx.obj.repository.generate_migration_name(name, migration_type)
    click.echo(filename)
Prints filename of a new migration
2,460
def start(self):
    """Mark the agent as running and log the start via standard logging."""
    self.running = True
    self.status = 'RUNNING'
    self.mylog.record_process('agent', self.name + ' - starting')
Starts an agent with standard logging
2,461
def set_coords(self, x=0, y=0, z=0, t=0):
    """Place the agent at (x, y, z) at time t in an arbitrary world."""
    self.coords = {'x': x, 'y': y, 'z': z, 't': t}
set coords of agent in an arbitrary world
2,462
def from_file ( file_path , incl_pot = True ) : filename , ext = os . path . splitext ( file_path ) am_file_path = filename + '.AM' pot_file_path = filename + '.PT' parser_by_ext = { '.cd3' : parsers . Cd3Parser , '.xml' : parsers . XmlCatchmentParser } catchment = parser_by_ext [ ext . lower ( ) ] ( ) . parse ( file_p...
Load catchment object from a . CD3 or . xml file .
2,463
def to_db ( catchment , session , method = 'create' , autocommit = False ) : if not catchment . id : raise ValueError ( "Catchment/station number (`catchment.id`) must be set." ) if method == 'create' : session . add ( catchment ) elif method == 'update' : session . merge ( catchment ) else : raise ValueError ( "Method...
Load catchment object into the database .
2,464
def userdata_to_db(session, method='update', autocommit=False):
    """Import catchments from the user folder named in config into the
    database; silently does nothing when no import folder is configured."""
    try:
        folder = config['import']['folder']
    except KeyError:
        return
    if folder:
        folder_to_db(folder, session, method=method, autocommit=autocommit)
Add catchments from a user folder to the database .
2,465
def send_text ( hwnd , txt ) : try : for c in txt : if c == '\n' : win32api . SendMessage ( hwnd , win32con . WM_KEYDOWN , win32con . VK_RETURN , 0 ) win32api . SendMessage ( hwnd , win32con . WM_KEYUP , win32con . VK_RETURN , 0 ) else : win32api . SendMessage ( hwnd , win32con . WM_CHAR , ord ( c ) , 0 ) except Except...
sends the text txt to the window handle hwnd using SendMessage
2,466
def launch_app ( app_path , params = [ ] , time_before_kill_app = 15 ) : import subprocess try : res = subprocess . call ( [ app_path , params ] , timeout = time_before_kill_app , shell = True ) print ( 'res = ' , res ) if res == 0 : return True else : return False except Exception as ex : print ( 'error launching app ...
start an app
2,467
def app_activate(caption):
    """Use WScript.Shell to bring the window with this caption to the front;
    failures are reported to stdout rather than raised."""
    try:
        shell = win32com.client.Dispatch("WScript.Shell")
        shell.AppActivate(caption)
    except Exception:
        print('error calling win32com.client.Dispatch (AppActivate)')
use shell to bring the application with caption to front
2,468
def most_similar_catchments ( self , subject_catchment , similarity_dist_function , records_limit = 500 , include_subject_catchment = 'auto' ) : if include_subject_catchment not in [ 'auto' , 'force' , 'exclude' ] : raise ValueError ( "Parameter `include_subject_catchment={}` invalid." . format ( include_subject_catchm...
Return a list of catchments sorted by hydrological similarity defined by similarity_distance_function
2,469
def readSAM ( SAMfile , header = False ) : if header == True : f = open ( SAMfile , "r+" ) head = [ ] for line in f . readlines ( ) : if line [ 0 ] == "@" : head . append ( line ) else : continue f . close ( ) sam = pd . read_table ( SAMfile , sep = "this_gives_one_column" , comment = "@" , header = None ) sam = pd . D...
Reads and parses a sam file .
2,470
def SAMflags ( x ) : flags = [ ] if x & 1 : l = "1: Read paired" else : l = "0: Read unpaired" flags . append ( l ) if x & 2 : l = "1: Read mapped in proper pair" else : l = "0: Read not mapped in proper pair" flags . append ( l ) if x & 4 : l = "1: Read unmapped" else : l = "0: Read mapped" flags . append ( l ) if x &...
Explains a SAM flag .
2,471
def get_bias_details(self):
    """Return the bias details as an aligned multi-line string report.

    Rows with fewer than three fields are skipped.
    """
    parts = ['Bias File Details\n']
    for detail in self.bias_details:
        if len(detail) > 2:
            parts.append(detail[0].ljust(35)
                         + detail[1].ljust(35)
                         + detail[2].ljust(9) + '\n')
    return ''.join(parts)
returns a string representation of the bias details
2,472
def _read_bias_rating ( self , short_filename ) : res = { } full_name = os . path . join ( root_fldr , 'aikif' , 'data' , 'ref' , short_filename ) lg . record_process ( 'bias.py' , 'reading ' + full_name ) with open ( full_name , 'r' ) as f : for line in f : if line . strip ( '' ) == '' : break bias_line = [ ] cols = l...
read the bias file based on the short_filename and return as a dictionary
2,473
def get_root_folder ( ) : locations = { 'linux' : { 'hme' : '/home/duncan/' , 'core_folder' : '/home/duncan/dev/src/python/AIKIF' } , 'win32' : { 'hme' : 'T:\\user\\' , 'core_folder' : 'T:\\user\\dev\\src\\python\\AIKIF' } , 'cygwin' : { 'hme' : os . getcwd ( ) + os . sep , 'core_folder' : os . getcwd ( ) } , 'darwin' ...
returns the home folder and program root depending on OS
2,474
def read_credentials(fname):
    """Read username (line 1) and password (line 2) from a private text file.

    Returns (username, password) with trailing newlines stripped.
    """
    with open(fname, 'r') as handle:
        username = handle.readline().strip('\n')
        password = handle.readline().strip('\n')
    return username, password
read a simple text file from a private location to get username and password
2,475
def show_config ( ) : res = '' res += '\n---------- Folder Locations ---------\n' for k , v in fldrs . items ( ) : res += str ( k ) + ' = ' + str ( v ) + '\n' res += '\n---------- Logfiles ---------\n' for k , v in logs . items ( ) : res += str ( k ) + ' = ' + str ( v ) + '\n' res += '\n---------- Parameters ---------\...
module intended to be imported in most AIKIF utils to manage folder paths user settings etc . Modify the parameters at the top of this file to suit
2,476
def filterMotifs ( memeFile , outFile , minSites ) : with open ( memeFile , "r" ) as mF : oldMEME = mF . readlines ( ) newMEME = oldMEME [ : 7 ] i = 7 while i < len ( oldMEME ) : if oldMEME [ i ] . split ( " " ) [ 0 ] == "MOTIF" : print ( oldMEME [ i ] . split ( "\n" ) [ 0 ] , int ( oldMEME [ i + 2 ] . split ( "nsites=...
Selectes motifs from a meme file based on the number of sites .
2,477
def _read_file ( self ) : self . raw = [ ] with open ( self . fname , 'r' ) as f : for line in f : if line . startswith ( '#' ) : pass elif line . strip ( '\n' ) == '' : pass else : self . raw . append ( line . strip ( '\n' ) )
reads the file and cleans into standard text ready for parsing
2,478
def reset(self):
    """Restore the default configuration: delete the user's config file
    (if present), drop every section, then reload the defaults."""
    try:
        os.remove(self._user_config_file)
    except FileNotFoundError:
        # No user config saved yet - nothing to delete.
        pass
    for section_name in self.sections():
        self.remove_section(section_name)
    self.read_defaults()
Restore the default configuration and remove the user s config file .
2,479
def save(self):
    """Persist the current configuration to the user's config file (UTF-8)."""
    with open(self._user_config_file, 'w', encoding='utf-8') as handle:
        self.write(handle)
Write data to user config file .
2,480
def _magic_data(filename=os.path.join(here, 'magic_data.json')):
    """Read the magic-number JSON data file.

    Returns (headers, footers): lists of puremagic entries built from the
    file's 'headers' and 'footers' arrays.
    """
    with open(filename) as handle:
        data = json.load(handle)
    headers = [_create_puremagic(row) for row in data['headers']]
    footers = [_create_puremagic(row) for row in data['footers']]
    return headers, footers
Read the magic file
2,481
def _max_lengths():
    """Return (max_header_length, max_footer_length): the longest magic byte
    string plus its offset, over the header and footer arrays respectively."""
    max_header = max(len(entry.byte_match) + entry.offset
                     for entry in magic_header_array)
    max_footer = max(len(entry.byte_match) + abs(entry.offset)
                     for entry in magic_footer_array)
    return max_header, max_footer
The length of the largest magic string + its offset
2,482
def _confidence ( matches , ext = None ) : results = [ ] for match in matches : con = ( 0.8 if len ( match . extension ) > 9 else float ( "0.{0}" . format ( len ( match . extension ) ) ) ) if ext == match . extension : con = 0.9 results . append ( PureMagicWithConfidence ( confidence = con , ** match . _asdict ( ) ) ) ...
Rough confidence based on string length and file extension
2,483
def _identify_all ( header , footer , ext = None ) : matches = list ( ) for magic_row in magic_header_array : start = magic_row . offset end = magic_row . offset + len ( magic_row . byte_match ) if end > len ( header ) : continue if header [ start : end ] == magic_row . byte_match : matches . append ( magic_row ) for m...
Attempt to identify data by its magic numbers
2,484
def _magic(header, footer, mime, ext=None):
    """Discover the file type from raw header/footer bytes.

    Returns the mime type when mime is truthy, otherwise the extension of
    the best match. Raises ValueError when header is empty.
    """
    if not header:
        raise ValueError("Input was empty")
    info = _identify_all(header, footer, ext)[0]
    if mime:
        return info.mime_type
    if isinstance(info.extension, list):
        return info[0].extension
    return info.extension
Discover what type of file it is based on the incoming string
2,485
def _file_details(filename):
    """Grab the start and end of the file.

    Returns (head, foot): the first max-header-length bytes and the last
    max-footer-length bytes of the file.
    """
    max_head, max_foot = _max_lengths()
    with open(filename, "rb") as fin:
        head = fin.read(max_head)
        try:
            fin.seek(-max_foot, os.SEEK_END)
        except IOError:
            # File shorter than the footer window: take everything.
            fin.seek(0)
        foot = fin.read()
    return head, foot
Grab the start and end of the file
2,486
def ext_from_filename ( filename ) : try : base , ext = filename . lower ( ) . rsplit ( "." , 1 ) except ValueError : return '' ext = ".{0}" . format ( ext ) all_exts = [ x . extension for x in chain ( magic_header_array , magic_footer_array ) ] if base [ - 4 : ] . startswith ( "." ) : long_ext = base [ - 4 : ] + ext i...
Scan a filename for its extension.
2,487
def from_file(filename, mime=False):
    """Identify a file's content by magic number and return its extension,
    or the mime type when mime is True."""
    head, foot = _file_details(filename)
    return _magic(head, foot, mime, ext_from_filename(filename))
Opens file attempts to identify content based off magic number and will return the file extension . If mime is True it will return the mime type instead .
2,488
def from_string(string, mime=False, filename=None):
    """Identify string content by magic number and return its extension, or
    the mime type when mime is True. A filename, when provided, is used to
    refine the guess."""
    head, foot = _string_details(string)
    ext = ext_from_filename(filename) if filename else None
    return _magic(head, foot, mime, ext)
Reads in string attempts to identify content based off magic number and will return the file extension . If mime is True it will return the mime type instead . If filename is provided it will be used in the computation .
2,489
def retrieve_GTF_field ( field , gtf ) : inGTF = gtf . copy ( ) def splits ( x ) : l = x . split ( ";" ) l = [ s . split ( " " ) for s in l ] res = np . nan for s in l : if field in s : if '"' in s [ - 1 ] : res = s [ - 1 ] [ 1 : - 1 ] else : res = s [ - 1 ] return res inGTF [ field ] = inGTF [ 'attribute' ] . apply ( ...
Returns a field of choice from the attribute column of the GTF
2,490
def attributesGTF ( inGTF ) : df = pd . DataFrame ( inGTF [ 'attribute' ] . str . split ( ";" ) . tolist ( ) ) desc = [ ] for i in df . columns . tolist ( ) : val = df [ [ i ] ] . dropna ( ) val = pd . DataFrame ( val [ i ] . str . split ( ' "' ) . tolist ( ) ) [ 0 ] val = list ( set ( val ) ) for v in val : if len ( v...
List the type of attributes in a the attribute section of a GTF file
2,491
def parseGTF(inGTF):
    """Expand a GTF dataframe's 'attribute' column into one column per
    attribute key, returning the new dataframe (original columns minus
    'attribute', plus one column per attribute)."""
    desc = attributesGTF(inGTF)
    ref = inGTF.copy()
    ref.reset_index(inplace=True, drop=True)
    df = ref.drop(['attribute'], axis=1).copy()
    for attribute in desc:
        column = retrieve_GTF_field(attribute, ref)
        df = pd.concat([df, column], axis=1)
    return df
Reads and extracts all attributes in the attributes section of a GTF and constructs a new dataframe with one column per attribute instead of the attributes column
2,492
def writeGTF ( inGTF , file_path ) : cols = inGTF . columns . tolist ( ) if len ( cols ) == 9 : if 'attribute' in cols : df = inGTF else : df = inGTF [ cols [ : 8 ] ] df [ 'attribute' ] = "" for c in cols [ 8 : ] : if c == cols [ len ( cols ) - 1 ] : df [ 'attribute' ] = df [ 'attribute' ] + c + ' "' + inGTF [ c ] . as...
Write a GTF dataframe into a file
2,493
def GTFtoBED ( inGTF , name ) : bed = inGTF . copy ( ) bed . reset_index ( inplace = True , drop = True ) if name not in bed . columns . tolist ( ) : field = retrieve_GTF_field ( name , bed ) bed = pd . concat ( [ bed , field ] , axis = 1 ) bed = bed [ [ 'seqname' , 'start' , 'end' , name , 'score' , 'strand' ] ] bed ....
Transform a GTF dataframe into a bed dataframe
2,494
def MAPGenoToTrans ( parsedGTF , feature ) : GenTransMap = parsedGTF [ parsedGTF [ "feature" ] == feature ] def getExonsPositions ( df ) : start = int ( df [ "start" ] ) stop = int ( df [ "end" ] ) strand = df [ "strand" ] r = range ( start , stop + 1 ) if strand == "-" : r . sort ( reverse = True ) r = [ str ( s ) for...
Gets all positions of all bases in an exon
2,495
def GetTransPosition(df, field, dic, refCol="transcript_id"):
    """Map a genomic position (df[field]) to a 1-based transcript position.

    dic maps transcript ids to a comma-separated string of genomic
    positions in transcript order. Returns np.nan when the position or
    transcript cannot be resolved.

    Fix: the original used a bare ``except:``, which also swallowed
    KeyboardInterrupt/SystemExit; now only the expected lookup/parse
    errors are caught.
    """
    try:
        genomic = str(int(df[field]))
        positions = dic.get(df[refCol]).split(",")
        bases = positions.index(genomic) + 1
    except (KeyError, ValueError, TypeError, AttributeError):
        bases = np.nan
    return bases
Maps a genome position to transcript positon
2,496
def get_protected_page ( url , user , pwd , filename ) : import requests r = requests . get ( url , auth = ( user , pwd ) ) print ( r . status_code ) if r . status_code == 200 : print ( 'success' ) with open ( filename , 'wb' ) as fd : for chunk in r . iter_content ( 4096 ) : fd . write ( chunk ) lg . record_result ( "...
having problems with urllib on a specific site so trying requests
2,497
def read_rawFilesTable ( filename ) : exp = pd . read_table ( filename ) expected_columns = { 'File' , 'Exists' , 'Size' , 'Data format' , 'Parameter group' , 'Experiment' , 'Fraction' } found_columns = set ( exp . columns ) if len ( expected_columns - found_columns ) > 0 : message = '\n' . join ( [ 'The raw files tabl...
parse the rawFilesTable . txt file into a pandas dataframe
2,498
def add_method(self, m, **kwargs):
    """Register a callback: plain functions are stored under the key
    ('function', id(function)); bound methods under (underlying function,
    id(instance)) via get_method_vars."""
    if isinstance(m, types.FunctionType):
        self['function', id(m)] = m
    else:
        func, instance = get_method_vars(m)
        self[func, id(instance)] = instance
Add an instance method or function
2,499
def del_method(self, m):
    """Remove a previously-registered callback if present; no-op otherwise.

    Plain (non-coroutine) functions are keyed by ('function', id); anything
    else is resolved through get_method_vars like add_method does.
    """
    if isinstance(m, types.FunctionType) and not iscoroutinefunction(m):
        key = ('function', id(m))
    else:
        func, instance = get_method_vars(m)
        key = (func, id(instance))
    if key in self:
        del self[key]
Remove an instance method or function if it exists