idx
int64
0
251k
question
stringlengths
53
3.53k
target
stringlengths
5
1.23k
len_question
int64
20
893
len_target
int64
3
238
16,000
def add(self, command_template, job_class):
    """Wrap a command template in a JobTemplate and push it onto the queue."""
    template_job = JobTemplate(
        command_template.alias,
        command_template=command_template,
        depends_on=command_template.depends_on,
        queue=self.queue,
        job_class=job_class,
    )
    self.queue.push(template_job)
Given a command template add it as a job to the queue .
67
13
16,001
def run(self):
    """Drive the queue's tick generator until it is exhausted.

    Returns the number of completed iterations.
    """
    ticker = self.queue.tick()
    completed = 0
    while True:
        try:
            next(ticker)
        except StopIteration:
            break
        completed += 1
        sleep(self.sleep_time)
    return completed
Begins the runtime execution .
46
6
16,002
def make_dynamic_class(typename, field_names):
    """A factory function to create a type dynamically.

    Builds a new class named `typename` whose properties are derived from
    `field_names` (either an iterable of names or a single comma/space
    separated string).  Instances behave like simple records with dict-style
    item access and iteration over their attributes.

    NOTE(review): uses `basestring` and relies on `map` returning a list
    (the result is iterated twice) — this code is Python 2 only.
    """
    # Accept a single "a, b c" style string as well as an iterable of names.
    if isinstance(field_names, basestring):
        field_names = field_names.replace(",", " ").split()
    field_names = map(str, field_names)
    # Sanitise each name into a valid/safe attribute name (helper defined
    # elsewhere in this module).
    safe_fields_names = map(_encode_property_name, field_names)
    # One property per field, keyed by the sanitised name but reading the
    # original name from the instance dict.
    attr = dict((safe_name, _property(name))
                for name, safe_name in zip(field_names, safe_fields_names))
    attr['__doc__'] = typename
    attr['__identifier__'] = "dolphin"
    attr['__init__'] = _dynamic__init
    # Dict-style access delegates straight to the instance __dict__.
    attr['__getitem__'] = lambda self, key: self.__dict__.get(key)
    attr['__setitem__'] = _dynamic__setitem
    attr['__iter__'] = lambda self: iter(self.__dict__)
    # repr shows sorted key=value pairs, e.g. "{a=1, b='x'}"
    attr['__repr__'] = lambda self: "{%s}" % (', '.join(["%s=%r" % (key, self[key]) for key in sorted(self.__dict__.keys())]))
    return type(typename, (object,), attr)
a factory function to create type dynamically
296
7
16,003
def get_memory_usage():
    """Return the resident set size (RSS) of the current process in MiB."""
    rss_bytes = psutil.Process(os.getpid()).memory_info().rss
    return rss_bytes / (1024 * 1024)
Gets RAM memory usage
43
5
16,004
def create_db(file_pth):
    """Create an empty SQLite database with the library-spectra schema.

    Any pre-existing tables with the same names are dropped first, so this
    can also be used to reset an existing database file.

    Fix over the original: the connection is now committed and closed, so
    the schema is flushed to disk and the file handle is not leaked.

    Args:
        file_pth (str): Path of the SQLite database file to (re)create.
    """
    conn = sqlite3.connect(file_pth)
    try:
        c = conn.cursor()
        c.execute('DROP TABLE IF EXISTS library_spectra_source')
        c.execute('''CREATE TABLE library_spectra_source (
                          id integer PRIMARY KEY,
                          name text NOT NULL,
                          created_at date,
                          parsing_software text
                    )''')
        c.execute('DROP TABLE IF EXISTS metab_compound')
        c.execute('''CREATE TABLE metab_compound (
                          inchikey_id text PRIMARY KEY,
                          name text,
                          pubchem_id text,
                          chemspider_id text,
                          other_names text,
                          exact_mass real,
                          molecular_formula text,
                          molecular_weight real,
                          compound_class text,
                          smiles text,
                          created_at date,
                          updated_at date
                    )''')
        c.execute('DROP TABLE IF EXISTS library_spectra_meta')
        c.execute('''CREATE TABLE library_spectra_meta (
                          id integer PRIMARY KEY,
                          name text,
                          collision_energy text,
                          ms_level real,
                          accession text NOT NULL,
                          resolution text,
                          polarity integer,
                          fragmentation_type text,
                          precursor_mz real,
                          precursor_type text,
                          instrument_type text,
                          instrument text,
                          copyright text,
                          column text,
                          mass_accuracy real,
                          mass_error real,
                          origin text,
                          splash text,
                          retention_index real,
                          retention_time real,
                          library_spectra_source_id integer NOT NULL,
                          inchikey_id text NOT NULL,
                          FOREIGN KEY(library_spectra_source_id) REFERENCES library_spectra_source(id),
                          FOREIGN KEY(inchikey_id) REFERENCES metab_compound(inchikey_id)
                    )''')
        c.execute('DROP TABLE IF EXISTS library_spectra')
        c.execute('''CREATE TABLE library_spectra (
                          id integer PRIMARY KEY,
                          mz real NOT NULL,
                          i real NOT NULL,
                          other text,
                          library_spectra_meta_id integer NOT NULL,
                          FOREIGN KEY (library_spectra_meta_id) REFERENCES library_spectra_meta(id)
                    )''')
        c.execute('DROP TABLE IF EXISTS library_spectra_annotation')
        c.execute('''CREATE TABLE library_spectra_annotation (
                          id integer PRIMARY KEY,
                          mz real,
                          tentative_formula text,
                          mass_error real,
                          library_spectra_meta_id integer NOT NULL,
                          FOREIGN KEY (library_spectra_meta_id) REFERENCES library_spectra_meta(id)
                    )''')
        conn.commit()
    finally:
        conn.close()
Create an empty SQLite database for library spectra .
597
11
16,005
def get_connection(db_type, db_pth, user=None, password=None, name=None):
    """Get a connection to a SQL database.

    Supports SQLite, MySQL (mysql-connector) and Django's MySQL connection.

    Args:
        db_type (str): one of 'sqlite', 'mysql', 'django_mysql'.
        db_pth (str): database file path (used for 'sqlite' only).
        user, password, name: credentials/database name (used for 'mysql').

    Returns:
        An open connection object for the requested backend.

    Raises:
        ValueError: if `db_type` is not one of the supported choices.
            (Fix: the original printed a message and then hit
            `return conn` with `conn` unbound, raising UnboundLocalError.)
    """
    if db_type == 'sqlite':
        print(db_pth)
        conn = sqlite3.connect(db_pth)
    elif db_type == 'mysql':
        import mysql.connector
        conn = mysql.connector.connect(user=user, password=password, database=name)
    elif db_type == 'django_mysql':
        from django.db import connection as conn
    else:
        raise ValueError(
            'unsupported database type: {}, choices are "sqlite", "mysql" or '
            '"django_mysql"'.format(db_type))
    return conn
Get a connection to a SQL database . Can be used for SQLite MySQL or Django MySQL database
155
19
16,006
def db_dict(c):
    """Dump the library-spectra tables reachable from cursor `c`.

    Returns a dict mapping a result key to a list of rows (each row a list).
    Note the 'library_spectra_annotation' table is stored under the key
    'library_spectra_annotations' (plural), matching the original behaviour.
    """
    tables_to_keys = [
        ('library_spectra', 'library_spectra'),
        ('library_spectra_meta', 'library_spectra_meta'),
        ('library_spectra_annotation', 'library_spectra_annotations'),
        ('library_spectra_source', 'library_spectra_source'),
        ('metab_compound', 'metab_compound'),
    ]
    db_d = {}
    for table, key in tables_to_keys:
        c.execute('SELECT * FROM {}'.format(table))
        db_d[key] = [list(row) for row in c]
    return db_d
Get a dictionary of the library spectra from a database
213
11
16,007
def insert_query_m(data, table, conn, columns=None, db_type='mysql'):
    """Insert a python list of tuples into an SQL table.

    Very large inputs are broken into chunks: _chunk_query calls back into
    this function with at most 10000 rows at a time until all data has
    been inserted.

    Args:
        data (list of tuple): rows to insert.
        table (str): target table name.
        conn: open DB connection (sqlite3 or MySQL style).
        columns (str): optional comma-separated column list to insert into.
        db_type (str): 'sqlite' uses '?' placeholders, anything else '%s'.
    """
    if len(data) > 10000:
        _chunk_query(data, 10000, columns, conn, table, db_type)
        return
    # sqlite and mysql differ in their parameter placeholder syntax
    placeholder = '?' if db_type == 'sqlite' else '%s'
    # e.g. "?, ?, ?" when inserting 3 columns of data
    # (fix: the original built this by string multiplication into a local
    # named `type`, shadowing the builtin)
    placeholders = ', '.join([placeholder] * len(data[0]))
    if columns:
        # insert into the specified columns only
        stmt = "INSERT INTO " + table + "( " + columns + ") VALUES (" + placeholders + ")"
    else:
        stmt = "INSERT INTO " + table + " VALUES (" + placeholders + ")"
    cursor = conn.cursor()
    cursor.executemany(stmt, data)
    conn.commit()
Insert python list of tuples into SQL table
279
9
16,008
def _chunk_query(l, n, cn, conn, table, db_type):
    """Insert `l` into `table` in chunks of `n` rows via insert_query_m."""
    for start in range(0, len(l), n):
        insert_query_m(l[start:start + n], table, conn, cn, db_type)
Call for inserting SQL query in chunks based on n rows
79
11
16,009
async def send(from_addr, to_addrs, subject="Ellis", msg="", **kwargs):
    """Send an e-mail to the provided address(es).

    Any extra keyword arguments are appended to the message body as a
    human-readable "key: value" listing of caught variables.
    """
    async with SMTP() as client:
        payload = "Subject: {0}\n\n{1}".format(subject, msg)
        if kwargs:
            # Render the caught keyword arguments as "key: value" lines
            # and append them to the message body.
            values = "\n".join(["{0}: {1}".format(k, v) for k, v in kwargs.items()])
            payload = ("{0}\n\nThe following variables have been caught:"
                       "\n{1}".format(payload, values))
        try:
            await client.sendmail(from_addr, to_addrs, payload)
        except:  # FIXME: print a friendly message to stdout.
            raise
Sends an e - mail to the provided address .
198
11
16,010
def show_correlation_matrix(self, correlation_matrix):
    """Plot the given correlation matrix and display it as an image."""
    cr_plot.create_correlation_matrix_plot(
        correlation_matrix, self.title, self.headers_to_test
    )
    pyplot.show()
Shows the given correlation matrix as image
54
8
16,011
def save_to_file(self, out_file):
    """Save the correlation matrix of the selected headers to an image file."""
    matrix = self.get_correlation_matrix_from_columns()
    cr_plot.create_correlation_matrix_plot(matrix, self.title, self.headers_to_test)
    figure = pyplot.gcf()  # reference to the current figure
    figure.set_size_inches(23.4, 23.4)
    pyplot.savefig(out_file, dpi=120)
Saves correlation matrix of selected headers
111
7
16,012
def save_correlation_matrix_from_folder(folder_path):
    """Save a correlation-matrix image for every CSV file in the folder.

    Output images go into a timestamped "output-<epoch>" subfolder of
    `folder_path`, one PNG per CSV file.
    """
    run_name = "output-" + str(int(time.time()))
    output_folder = os.path.join(folder_path, run_name)
    os.makedirs(output_folder)  # make necessary folders for the output files
    for entry in list_content(folder_path, False, False):
        if not (is_file(entry) and str(entry).endswith("csv")):
            continue
        print("Analysing file ", str(entry))
        base_name = Document(entry).name.strip()
        output_file_path = os.path.join(output_folder, base_name + ".png")
        headers, data = CSVParser.get_headers_data(entry)  # parse
        matrix = CorrelationMatrix(
            "Correlation of logs data for file " + base_name,
            headers, headers, data
        )
        matrix.save_to_file(output_file_path)
Saves each file's correlation matrix of common headers
236
10
16,013
def run(self, *args, **kwargs):
    """Launch the GuerillaMGMT tool parented to the main Maya window."""
    plugin_manager = MayaPluginManager.get()
    guerilla_plugin = plugin_manager.get_plugin("GuerillaMGMT")
    main_window = maya_main_window()
    guerilla_plugin.run(parent=main_window)
Start the tool
66
3
16,014
def login_github(token_path=None, token=None):
    """Log into GitHub using an existing token and return the client.

    The token is resolved either from `token` directly or loaded from
    `token_path`; the current rate limit is logged at debug level.
    """
    resolved_token = codetools.github_token(token_path=token_path, token=token)
    client = Github(resolved_token)
    debug_ratelimit(client)
    return client
Log into GitHub using an existing token .
53
8
16,015
def find_tag_by_name(repo, tag_name, safe=True):
    """Find a tag by name in a GitHub repository.

    Returns the git ref for the tag, or None when it does not exist and
    `safe` is True; with `safe=False` the lookup error is re-raised.
    """
    ref_path = 'tags/{ref}'.format(ref=tag_name)
    try:
        found = repo.get_git_ref(ref_path)
    except github.UnknownObjectException:
        if not safe:
            raise
    else:
        if found and found.ref:
            return found
    return None
Find tag by name in a github Repository
79
9
16,016
def debug_ratelimit(g):
    """Log (debug level) the GitHub rate-limit info from the last API call."""
    # Guard against being handed something other than a Github client.
    assert isinstance(g, github.MainClass.Github), type(g)
    debug("github ratelimit: {rl}".format(rl=g.rate_limiting))
Log debug of github ratelimit information from last API call
51
11
16,017
def get_default_ref(repo):
    """Return a github.GitRef object for the HEAD of the default branch.

    Rate-limit errors propagate unchanged; any other GitHub error is
    wrapped in CaughtRepositoryError.
    """
    assert isinstance(repo, github.Repository.Repository), type(repo)
    # XXX this probably should be resolved via repos.yaml
    branch = repo.default_branch
    branch_ref = "heads/{ref}".format(ref=branch)
    # if accessing the default branch fails something is seriously wrong...
    try:
        head = repo.get_git_ref(branch_ref)
    except github.RateLimitExceededException:
        raise
    except github.GithubException as e:
        msg = "error getting ref: {ref}".format(ref=branch_ref)
        raise CaughtRepositoryError(repo, e, msg) from None
    return head
Return a github . GitRef object for the HEAD of the default branch .
165
15
16,018
def main(argv=None):
    """Exercise CrfTokenizer interactively; called when run from the command line.

    Fix over the original: the Python-2-only `print x` statements are
    rewritten as `print(x)` calls, which behave identically on Python 2
    (single argument) and are also valid Python 3.
    """
    t = CrfTokenizer()
    # default settings
    print(t.tokenize("This is a sentence."))
    print(t.tokenize("Buy???This...Now!!!"))
    print(t.tokenize("The <bold>only</bold> source."))
    print(t.tokenize("The<bold>only</bold>source."))
    print(t.tokenize("Big&gt;little."))
    print(t.tokenize("Big & little."))
    print(t.tokenize("blond&curly."))
    print(t.tokenize("&brokenHtml"))
    # grouped punctuation + HTML tag/entity recognition
    t.setGroupPunctuation(True)
    t.setRecognizeHtmlTags(True)
    t.setRecognizeHtmlEntities(True)
    print(t.tokenize("Buy???This...Now!!!"))
    print(t.tokenize("The <bold>only</bold> source."))
    print(t.tokenize("The<bold>only</bold>source."))
    print(t.tokenize("Big&gt;little."))
    print(t.tokenize("Big & little."))
    print(t.tokenize("blond&curly."))
    print(t.tokenize("&brokenHtml"))
    # skip HTML tags and entities entirely
    t.setSkipHtmlTags(True)
    t.setSkipHtmlEntities(True)
    print(t.tokenize("Buy???This...Now!!!"))
    print(t.tokenize("The <bold>only</bold> source."))
    print(t.tokenize("The<bold>only</bold>source."))
    print(t.tokenize("Big&gt;little."))
    print(t.tokenize("Big & little."))
    print(t.tokenize("blond&curly."))
    print(t.tokenize("&brokenHtml"))
    # token prefixing on and off
    t.setTokenPrefix("X:")
    print(t.tokenize("Tokenize with prefixes."))
    t.setTokenPrefix(None)
    print(t.tokenize("No more prefixes."))
    # punctuation recognition disabled
    t.setRecognizePunctuation(False)
    print(t.tokenize("This is a sentence."))
    print(t.tokenize("Buy???This...Now!!!"))
    print(t.tokenize("The <bold>only</bold> source."))
    print(t.tokenize("The<bold>only</bold>source."))
    print(t.tokenize("Big&gt;little."))
    print(t.tokenize("Big & little."))
    print(t.tokenize("blond&curly."))
    print(t.tokenize("&brokenHtml"))
    print(t.tokenize("A line break goes here\n\t \rand a new line starts"))
    # line-break recognition
    t.setRecognizeLinebreaks(True)
    print(t.tokenize("A line break goes here\n\r \rand a new line starts"))
this is called if run from command line
633
8
16,019
def updateUnitLabels(self, tscale, fscale):
    """Propagate a GUI unit-scale change to every labelled field.

    Updates the main window's own time/frequency inputs and notifies all
    other registered widgets to rescale their unit labels as well.
    """
    AbstractEditorWidget.updateScales(tscale, fscale)
    SmartDelegate.updateScales(tscale, fscale)
    # purge stored label references from deleted parent widgets
    AbstractEditorWidget.purgeDeletedWidgets()
    self.tscale = tscale
    # our own time inputs plus every registered time-unit field out there
    for field in self.timeInputs + AbstractEditorWidget.tunit_fields:
        field.setScale(tscale)
    self.fscale = fscale
    # our own frequency inputs plus every registered frequency-unit field
    for field in self.frequencyInputs + AbstractEditorWidget.funit_fields:
        field.setScale(fscale)
When the GUI unit scale changes it is neccessary to update the unit labels on all fields throughout the GUI . This handles The main window and also notifys other windows to update
216
37
16,020
def reset_device_channels(self):
    """Update the input channel selection boxes for the current device.

    Re-populates the AO channel box for the device named in
    self.advanced_options['device_name'], filters the stored AI channels to
    those present on the device, and rebuilds the response plots to match.
    """
    # clear boxes first
    self.ui.aochanBox.clear()
    devname = self.advanced_options['device_name']
    device_list = get_devices()
    if devname in device_list:
        # device is present: refresh AO channels and keep only AI
        # channels/details that still exist on this device
        cnames = get_ao_chans(devname)
        self.ui.aochanBox.addItems(cnames)
        cnames = get_ai_chans(devname)
        # filter list for channels that are present in current device
        self._aichans = [chan for chan in self._aichans if chan in cnames]
        self._aichan_details = {chan: deets for chan, deets in
                                self._aichan_details.items() if chan in cnames}
    elif devname == '' and len(device_list) > 0:
        # no device selected yet: fall back to the first available device
        # and start with an empty AI channel selection
        devname = device_list[0]
        cnames = get_ao_chans(devname)
        self.ui.aochanBox.addItems(cnames)
        self.advanced_options['device_name'] = devname
        self._aichans = []
        self._aichan_details = {}
    else:
        # named device is gone and there is nothing to fall back to
        self._aichans = []
        self._aichan_details = {}
    self.ui.chanNumLbl.setText(str(len(self._aichans)))
    # remove all plots and re-add from new list
    self.display.removeResponsePlot(*self.display.responseNameList())
    self.display.addResponsePlot(*self._aichans)
    # update details on plots
    for name, deets in self._aichan_details.items():
        self.display.setThreshold(deets['threshold'], name)
        self.display.setRasterBounds(deets['raster_bounds'], name)
        self.display.setAbs(deets['abs'], name)
    # can't find a function in DAQmx that gets the trigger
    # channel names, so add manually
    self.ui.trigchanBox.addItems(['/' + devname + '/PFI0', '/' + devname + '/PFI1'])
Updates the input channel selection boxes based on the current device name stored in this object
508
17
16,021
def saveInputs(self, fname):
    """Save the current GUI input values to a JSON file in the app directory.

    The values can then be re-loaded the next time the GUI is run.
    `fname` is a bare file name; it is placed inside the application data
    directory, which is created if missing.  A failure to write is logged
    rather than raised.
    """
    # save current inputs to file for loading next time
    if not fname:
        return
    appdir = systools.get_appdir()
    if not os.path.isdir(appdir):
        os.makedirs(appdir)
    fname = os.path.join(appdir, fname)
    savedict = {}
    # acquisition settings
    savedict['binsz'] = self.ui.binszSpnbx.value()
    savedict['aifs'] = self.ui.aifsSpnbx.value()
    savedict['tscale'] = self.tscale
    savedict['fscale'] = self.fscale
    savedict['saveformat'] = self.saveformat
    savedict['ex_nreps'] = self.ui.exploreStimEditor.repCount()
    savedict['reprate'] = self.ui.reprateSpnbx.value()
    savedict['windowsz'] = self.ui.windowszSpnbx.value()
    savedict['specargs'] = self.specArgs
    savedict['viewSettings'] = self.viewSettings
    # calibration settings
    savedict['calvals'] = self.calvals
    savedict['calparams'] = self.acqmodel.calibration_template()
    savedict['calreps'] = self.ui.calibrationWidget.ui.nrepsSpnbx.value()
    savedict['mphonesens'] = self.ui.mphoneSensSpnbx.value()
    savedict['mphonedb'] = self.ui.mphoneDBSpnbx.value()
    savedict['vocalpaths'] = Vocalization.paths
    savedict['aichans'] = self._aichans
    savedict['aichan_details'] = self._aichan_details
    # parameter settings -- save all tracks present
    savedict['explorestims'] = self.ui.exploreStimEditor.saveTemplate()
    savedict['advanced_options'] = self.advanced_options
    savedict['stim_view_defaults'] = StimulusView.getDefaults()
    savedict['tuning_curve'] = TCFactory.defaultInputs
    # convert any non-native python types that are not json serializable
    savedict = convert2native(savedict)
    try:
        with open(fname, 'w') as jf:
            json.dump(savedict, jf)
    except:  # NOTE(review): bare except — deliberately best-effort, only logs
        logger = logging.getLogger('main')
        logger.exception("Unable to save app data to file: {}".format(fname))
Save the values in the input fields so they can be loaded next time the GUI is run
613
18
16,022
def closeEvent(self, event):
    """Stop listener threads, persist GUI state, and re-enable GC on close."""
    self.acqmodel.stop_listening()  # close listener threads
    self.saveInputs(self.inputsFilename)
    # save GUI size and window state via Qt settings
    settings = QtCore.QSettings("audiolab")
    settings.setValue("geometry", self.saveGeometry())
    settings.setValue("windowState", self.saveState())
    logging.getLogger('main').info('All user settings saved')
    self.garbage_timer.stop()
    gc.enable()
Closes listening threads and saves GUI data for later use .
127
12
16,023
def ordered_async_call(func_list):
    """Run a list of (func, args, kwargs) asynchronously in processes.

    Returns the list of results in the same order as `func_list`, even
    though the workers may finish in any order.  If any function raised,
    its exception is re-raised here.
    """
    def worker(function, f_args, f_kwargs, queue, index):
        """Run one function and put its result (or exception) on the queue."""
        response = {
            'index': index,  # For tracking the index of each function in actual list.
                             # Since, this function is called asynchronously, order in
                             # queue may differ
            'data': None,
            'error': None
        }
        # Handle error in the function call
        try:
            response['data'] = function(*f_args, **f_kwargs)
        except Exception as e:
            response['error'] = e  # send back the exception along with the queue
        queue.put(response)
    queue = Queue()  # For preserving state across threads
    processes = [Process(target=worker, args=(func, args, kwargs, queue, i))
                 for i, (func, args, kwargs) in enumerate(func_list)]
    for process in processes:
        process.start()
    response_list = []
    for process in processes:
        # Wait for process to finish
        process.join()
        # Get back the response from the queue
        # NOTE(review): this response is not necessarily from the process
        # just joined — ordering is restored below via the 'index' field
        response = queue.get()
        if response['error']:
            raise response['error']  # Raise exception if the function call failed
        response_list.append(response)
    # Restore the original submission order before extracting the data
    return [content['data'] for content in sorted(response_list, key=lambda x: x['index'])]
Runs the list of function asynchronously returns the response maintaining the order
327
15
16,024
def add_params_to_url(url, params):
    """Return `url` with the mapping `params` merged into its query string."""
    parts = list(urlparse.urlparse(url))       # split url into its 6 parts
    query = dict(urlparse.parse_qsl(parts[4])) # existing query as a dict
    query.update(params)                       # merge in the new params
    parts[4] = urlencode(query)
    return urlparse.urlunparse(parts)
Adds params to url
92
4
16,025
def is_internet_on(host="8.8.8.8", port=53, timeout=3):
    """Check whether the machine has an internet connection.

    Attempts a TCP connection to `host`:`port` (default: Google public DNS).

    Returns:
        bool: True if the connection succeeded, False otherwise.

    Fix over the original: it returned None on success and raised on
    failure, so callers could never use it as a boolean test; the socket
    was also never closed.
    """
    socket.setdefaulttimeout(timeout)
    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    try:
        sock.connect((host, port))
        return True
    except OSError:
        return False
    finally:
        sock.close()
Checks if machine has internet connection
64
7
16,026
def wait_until_internet(time_between_attempts=3, max_attempts=10):
    """Poll until the machine has internet; False if attempts run out."""
    attempts = 0
    while not is_internet_on():
        time.sleep(time_between_attempts)  # wait until internet is on
        attempts += 1
        if attempts > max_attempts:
            return False
    return True
Waits until machine has internet
73
6
16,027
def transform_key(startkey, seed_key, seed_rand, rounds):
    """Create the key used to decrypt the database.

    The start key is AES-ECB-encrypted `rounds` times with `seed_key`,
    hashed, then hashed again together with the random seed.
    """
    masterkey = startkey
    cipher = AES.new(seed_key, AES.MODE_ECB)
    # Encrypt the created hash <rounds> times
    for _ in range(rounds):
        masterkey = cipher.encrypt(masterkey)
    # Finally, hash it again...
    digest = hashlib.sha256(masterkey).digest()
    # ...and hash the result together with the randomseed
    return hashlib.sha256(seed_rand + digest).digest()
This method creates the key to decrypt the database .
128
10
16,028
def complete(command_line, current_token, position, shell: arg(choices=('bash', 'fish'))):
    """Find completions for the current command line.

    Prints candidate completions (command names, option names, option
    values, or filesystem paths) for the shell to consume, based on the
    partial `command_line` up to cursor `position`.
    """
    position = int(position)
    # Only consider the part of the line before the cursor.
    tokens = shlex.split(command_line[:position])
    all_argv, run_argv, command_argv = run.partition_argv(tokens[1:])
    run_args = run.parse_args(run_argv)
    # Resolve the commands module (explicit, or fall back to the default).
    module = run_args.get('commands_module')
    module = module or DEFAULT_COMMANDS_MODULE
    module = normalize_path(module)
    try:
        collection = Collection.load_from_module(module)
    except Exception:
        # A broken/missing commands module still allows path completion.
        collection = {}
    found_command = find_command(collection, tokens) or run
    if current_token:
        # Completing either a command name, option name, or path.
        if current_token.startswith('-'):
            if current_token not in found_command.option_map:
                print_command_options(found_command, current_token)
        else:
            print_commands(collection, shell)
            # Also offer matching filesystem paths (with ~ and $VARS expanded).
            path = os.path.expanduser(current_token)
            path = os.path.expandvars(path)
            paths = glob.glob('%s*' % path)
            if paths:
                for entry in paths:
                    if os.path.isdir(entry):
                        print('%s/' % entry)
                    else:
                        print(entry)
    else:
        # Completing option value. If a value isn't expected, show the
        # options for the current command and the list of commands
        # instead.
        option = found_command.option_map.get(tokens[-1])
        if option and option.takes_value:
            if option.choices:
                for choice in option.choices:
                    print(choice)
            else:
                # No fixed choices: offer entries of the current directory.
                for entry in os.listdir():
                    if os.path.isdir(entry):
                        print('%s/' % entry)
                    else:
                        print(entry)
        else:
            print_command_options(found_command)
            print_commands(collection, shell)
Find completions for current command .
446
7
16,029
def install_package(self, name, index=None, force=False, update=False):
    """Install a given package via pip.

    Optional flags add --force-reinstall, --update and --index-url to the
    generated pip command, in that order.
    """
    parts = ['install']
    if force:
        parts.append('--force-reinstall')
    if update:
        parts.append('--update')
    if index:
        parts.append('--index-url {0}'.format(index))
    parts.append(name)
    self.pip(' '.join(parts))
Install a given package .
129
5
16,030
def install_requirements(self, path, index=None):
    """Install packages from a requirements.txt file, optionally via an index."""
    if index:
        command = 'install --index-url {0} -r {1}'.format(index, path)
    else:
        command = 'install -r {0}'.format(path)
    self.pip(command)
Install packages from a requirements . txt file .
63
10
16,031
def get_next(weekday, including_today=False):
    """Return the date of the next occurrence of `weekday`.

    If today already is that weekday, today is returned when
    `including_today` is set, otherwise the same day next week.
    """
    now = datetime.datetime.now()
    today_matches = now.weekday() == weekday.value
    if today_matches and including_today:
        delta = datetime.timedelta(days=0)
    elif today_matches:
        delta = datetime.timedelta(days=7)
    else:
        # days remaining until the next instance of the requested weekday
        delta = datetime.timedelta((7 + weekday.value - now.weekday()) % 7)
    return Day(now + delta).get_just_date()
Gets next day of week
128
6
16,032
def get_just_date(self):
    """Return this day's datetime with the time component stripped (midnight)."""
    source = self.date_time
    return datetime.datetime(source.year, source.month, source.day)
Parses just date from date - time
41
9
16,033
def is_date_in_between(self, start, end, include_start=True, include_end=True):
    """Check whether this day lies between `start` and `end`.

    Endpoint matches count only when the corresponding include flag is set.
    """
    start_date = Day(start).get_just_date()
    today = self.get_just_date()
    end_date = Day(end).get_just_date()
    if start_date < today < end_date:
        return True
    if include_start and today == start_date:
        return True
    if include_end and today == end_date:
        return True
    return False
Checks if date is in between dates
100
8
16,034
def get_next_weekday(self, including_today=False):
    """Return the date of the next occurrence of this day's weekday."""
    current_weekday = self.date_time.weekday()
    return Weekday.get_next(current_weekday, including_today=including_today)
Gets next week day
46
5
16,035
def get_last_weekday(self, including_today=False):
    """Return the date of the previous occurrence of this day's weekday."""
    current_weekday = self.date_time.weekday()
    return Weekday.get_last(current_weekday, including_today=including_today)
Gets last week day
46
5
16,036
def cli(context, verbose, quiet, database, sense):
    """Position Independent Programming For Humans.

    Set up stderr logging (each -v lowers / each -q raises the threshold by
    one level) and attach the Database and Sense objects to the click
    context for the subcommands.
    """
    logger = logging.getLogger()
    handler = logging.StreamHandler(sys.stderr)
    handler.setFormatter(LevelFormatter())
    logger.addHandler(handler)
    logger.setLevel(logging.WARNING + (quiet - verbose) * 10)
    logging.debug(_('Subcommand: %s'), context.invoked_subcommand)
    context.obj['database'] = Database(database)
    try:
        context.obj['sense'] = SenseWithExport(sense).__enter__()
    except Exception:
        # fix: was a silent `pass`, which hid the reason 'sense' was
        # missing in downstream subcommands; still best-effort
        logging.exception(_('Failed to open sense database: %s'), sense)
Position Independent Programming For Humans .
137
6
16,037
def search(context, keywords, module, raw, kind):
    """Query Windows identifiers and locations.

    Exits 0 if at least one keyword produced output, 1 otherwise.
    """
    logging.info(_('Entering search mode'))
    sense = context.obj['sense']
    query = sense.query_names if module else sense.query_info
    found_any = False
    for keyword in keywords:
        output = query(keyword, raw, kind)
        if output:
            found_any = True
            print(output)
        else:
            logging.warning(_('No results: %s'), keyword)
    sys.exit(0 if found_any else 1)
Query Windows identifiers and locations .
110
6
16,038
def winapi(context, names):
    """Query Win32 API declarations.

    Exits 0 if at least one name resolved, 1 otherwise.
    """
    logging.info(_('Entering winapi mode'))
    sense = context.obj['sense']
    found_any = False
    for name in names:
        code = sense.query_args(name)
        if code:
            found_any = True
            print(stylify_code(code))
        else:
            logging.warning(_('Function not found: %s'), name)
    sys.exit(0 if found_any else 1)
Query Win32 API declarations .
98
6
16,039
def kinds(context, show_all, ids_or_names):
    """Operate on IntelliSense kind ids and names.

    With --all, dumps every kind; otherwise resolves each given id or name.
    Exits 0 if anything was printed, 1 otherwise.
    """
    logging.info(_('Entering kind mode'))
    logging.debug('args: %s', ids_or_names)
    sense = context.obj['sense']
    found_any = False
    if show_all:
        found_any = True
        print(sense.query_kinds(None))
    else:
        for id_or_name in ids_or_names:
            resolved = sense.query_kinds(id_or_name)
            if resolved:
                found_any = True
                print(resolved)
    sys.exit(0 if found_any else 1)
Operate on IntelliSense kind ids and names .
139
12
16,040
def export(context, keywords, module, update):
    """Operate on libraries and exported functions.

    Three modes:
      * --update: treat `keywords` as PE file paths, read their export
        tables and store them in the database.
      * --module: treat `keywords` as module names, print each module's
        exported functions.
      * default: treat `keywords` as function names, print the module
        that exports each one.
    Exits 0 if anything succeeded, 1 otherwise.
    """
    logging.info(_('Export Mode'))
    database = context.obj['sense']
    none = True
    if update:
        exports = OrderedDict()
        from .executables.pe import PE
        for filename in keywords:
            # NOTE(review): reuses the `module` parameter as the loop-local
            # module name derived from the file name
            module = split_ext(filename, basename=True)[0]
            with open(filename, 'rb') as stream:
                exports.update({module: PE(stream).get_export_table()})
        database.make_export(exports)
        none = False
    elif module:
        for module_name in keywords:
            funcs = database.query_module_funcs(module_name)
            if funcs:
                none = False
                print(', '.join(map(str, funcs)))
            else:
                logging.warning(_('No function for module: %s'), module_name)
    else:
        for func_name in keywords:
            module_name = database.query_func_module(func_name)
            if module_name:
                none = False
                print(repr(module_name))
            else:
                logging.warning(_('No module for function: %s'), func_name)
    sys.exit(1 if none else 0)
Operate on libraries and exported functions .
269
8
16,041
def add(context, filenames):
    """Add data on Linux system calls to the database, then exit 0."""
    logging.info(_('Current Mode: Add Linux data'))
    context.obj['database'].add_data(filenames)
    sys.exit(0)
Add data on Linux system calls .
49
7
16,042
def make(filenames, x64, cl_args, link_args, output):
    """Make binaries from the given sources via MSBuild."""
    from .msbuild import Builder
    Builder().build(
        list(filenames),
        x64=x64,
        cl_args=cl_args,
        link_args=link_args,
        out_dir=output,
    )
Make binaries from sources .
71
5
16,043
def info(context, keywords, x86, x64, x32, common):
    """Find keywords in the Linux system call tables.

    For each keyword, prints the matching syscall entries for the selected
    ABIs, followed by their declarations (with source file), then exits 0.
    """
    logging.info(_('Current Mode: Find in Linux'))
    database = context.obj['database']
    for keyword in keywords:
        # Narrow the ABI list according to the flags; a later flag
        # overrides an earlier one, matching the original precedence.
        abis = ['i386', 'x64', 'common', 'x32']
        if x86:
            abis = ['i386']
        if x64:
            abis = ['x64', 'common']
        if x32:
            abis = ['x32', 'common']
        if common:
            abis = ['common']
        items = database.query_item(keyword, abis)
        if not items:
            logging.warning(_('Item not found: %s %s'), keyword, abis)
            continue
        for item in items:
            print(item.name, item.abi, item.number)
            decls = database.query_decl(name=item.name)
            if not decls:
                logging.warning(_('Decl not found: %s'), item.name)
                continue
            # fix: the original reused the outer loop variable `one` here,
            # shadowing the keyword being processed
            for declaration in decls:
                print(declaration.decl(), '/* {} */'.format(declaration.filename))
    sys.exit(0)
Find in the Linux system calls .
263
7
16,044
def conv(arg, source, target, filename, section):
    """Convert binary between representations.

    When `source` is 'sec', `arg` is a path whose named `section` is read
    from the file; otherwise `arg` is passed to the converter directly.
    The result is written to `filename` (binary mode for target 'bin') or
    printed to stdout.  Returns 0.

    Fixes over the original: the duplicated `if source == 'sec'` checks are
    merged, and the input file is closed even if the conversion raises.
    """
    logging.info(_('This is Binary Conversion mode.'))
    section = section.encode('utf-8')
    kwargs = dict()
    if source == 'sec':
        # a section dump is read from a file on disk
        arg = open(arg, 'rb')
        kwargs = dict(section_name=section)
    try:
        result = Converter.uni_from(source, arg, **kwargs).uni_to(target)
        if result:
            if filename:
                logging.info(_('Writing shellcode to the file: %s'), filename)
                mode = 'wb' if target == 'bin' else 'w'
                with open(filename, mode) as output:
                    output.write(result)
            else:
                print(result)
        else:
            logging.error(_('Failed.'))
    finally:
        if source == 'sec':
            arg.close()
    return 0
Convert binary .
206
4
16,045
def get_source(label, source_type, **kwargs):
    """Get a config source based on type and keyword args.

    Args:
        label (str): label for the config source.
        source_type (str): one of yapconf.ALL_SUPPORTED_SOURCES.
        **kwargs: forwarded to the concrete source constructor.

    Returns:
        A concrete ConfigSource for the requested type.

    Raises:
        YapconfSourceError: if the type is invalid or its client library
            is not installed.
        NotImplementedError: if a supported type has no implementation here.
    """
    if source_type not in yapconf.ALL_SUPPORTED_SOURCES:
        raise YapconfSourceError(
            'Invalid source type %s. Supported types are %s.'
            % (source_type, yapconf.ALL_SUPPORTED_SOURCES))
    if source_type not in yapconf.SUPPORTED_SOURCES:
        raise YapconfSourceError(
            'Unsupported source type "%s". If you want to use this type, you '
            'will need to install the correct client for it (try `pip install '
            'yapconf[%s]. Currently supported types are %s. All supported '
            'types are %s'
            % (source_type, source_type, yapconf.SUPPORTED_SOURCES,
               yapconf.ALL_SUPPORTED_SOURCES))
    # We pop arguments from kwargs because the individual config sources
    # have better error messages if a keyword argument is missed.
    if source_type == 'dict':
        return DictConfigSource(label, data=kwargs.get('data'))
    elif source_type == 'json':
        return JsonConfigSource(label, **kwargs)
    elif source_type == 'yaml':
        # fix: get-then-conditional-pop collapsed to a single pop with default
        filename = kwargs.pop('filename', None)
        return YamlConfigSource(label, filename, **kwargs)
    elif source_type == 'environment':
        return EnvironmentConfigSource(label)
    elif source_type == 'etcd':
        return EtcdConfigSource(label, kwargs.get('client'), kwargs.get('key', '/'))
    elif source_type == 'kubernetes':
        name = kwargs.pop('name', None)
        client = kwargs.pop('client', None)
        return KubernetesConfigSource(label, client, name, **kwargs)
    else:
        raise NotImplementedError(
            'No implementation for source type %s' % source_type)
Get a config source based on type and keyword args .
519
11
16,046
def make_simple_merged_vcf_with_no_combinations(self, ref_seq):
    """Merge all variants in this cluster into one VCF record.

    Assumes one ALT per variant and uses each record's ALT.  If any record
    intersects the merge accumulated so far, the cluster is left untouched.
    """
    if len(self) <= 1:
        return
    merged = self.vcf_records[0]
    for record in self.vcf_records[1:]:
        if record.intersects(merged):
            return
        merged = merged.merge(record, ref_seq)
    self.vcf_records = [merged]
Does a simple merging of all variants in this cluster . Assumes one ALT in each variant . Uses the ALT for each variant making one new vcf_record that has all the variants put together
145
41
16,047
def make_simple_gt_aware_merged_vcf_with_no_combinations ( self , ref_seq ) : if len ( self ) <= 1 : return merged_vcf_record = self . vcf_records [ 0 ] for i in range ( 1 , len ( self . vcf_records ) , 1 ) : if self . vcf_records [ i ] . intersects ( merged_vcf_record ) : return else : merged_vcf_record = merged_vcf_record . gt_aware_merge ( self . vcf_records [ i ] , ref_seq ) self . vcf_records = [ merged_vcf_record ]
Does a simple merging of all variants in this cluster . Assumes one ALT in each variant . Uses the called allele for each variant making one new vcf_record that has all the variants put together
155
41
16,048
def make_separate_indels_and_one_alt_with_all_snps_no_combinations ( self , ref_seq ) : final_start_position = min ( [ x . POS for x in self . vcf_records ] ) final_end_position = max ( [ x . ref_end_pos ( ) for x in self . vcf_records ] ) snps = [ ] new_vcf_records = [ ] for record in self . vcf_records : if record . is_snp ( ) : snps . append ( copy . copy ( record ) ) else : new_record = copy . copy ( record ) new_record . add_flanking_seqs ( ref_seq , final_start_position , final_end_position ) new_vcf_records . append ( new_record ) if len ( snps ) : new_record = copy . copy ( snps [ 0 ] ) for snp in snps [ 1 : ] : merged = new_record . merge ( snp , ref_seq ) if merged is not None : new_record = merged new_record . add_flanking_seqs ( ref_seq , final_start_position , final_end_position ) new_vcf_records . append ( new_record ) alts = ',' . join ( sorted ( list ( set ( [ x . ALT [ 0 ] for x in new_vcf_records ] ) ) ) ) new_record = vcf_record . VcfRecord ( '\t' . join ( [ self . vcf_records [ 0 ] . CHROM , str ( final_start_position + 1 ) , '.' , new_vcf_records [ 0 ] . REF , alts , '.' , 'PASS' , '.' ] ) ) return new_record
Returns a VCF record where each indel from this cluster is in a separate ALT . Then all the remaining SNPs are applied to make one ALT . If > 1 SNP in same place either one might be used
409
45
16,049
def _get_header ( self ) : out = self . _get_row ( self . labels ) out += "\n" out += self . _get_row ( [ "---" ] * len ( self . labels ) ) # line below headers return out
Gets header of table
55
5
16,050
def setModel ( self , model ) : self . paramList . setModel ( model ) model . hintRequested . connect ( self . hintRequested ) model . rowsInserted . connect ( self . updateTitle ) model . rowsRemoved . connect ( self . updateTitle ) self . updateTitle ( )
sets the model for the auto parameters
64
7
16,051
def updateTitle ( self ) : title = 'Auto Parameters ({})' . format ( self . paramList . model ( ) . rowCount ( ) ) self . titleChange . emit ( title ) self . setWindowTitle ( title )
Updates the Title of this widget according to how many parameters are currently in the model
49
17
16,052
def showEvent ( self , event ) : selected = self . paramList . selectedIndexes ( ) model = self . paramList . model ( ) self . visibilityChanged . emit ( 1 ) if len ( selected ) > 0 : # select the correct components in the StimulusView self . paramList . parameterChanged . emit ( model . selection ( selected [ 0 ] ) ) self . hintRequested . emit ( 'Select parameter to edit. \n\nParameter must have selected components in order to edit fields' ) elif model . rowCount ( ) > 0 : # just select first item self . paramList . selectRow ( 0 ) self . paramList . parameterChanged . emit ( model . selection ( model . index ( 0 , 0 ) ) ) self . hintRequested . emit ( 'Select parameter to edit. \n\nParameter must have selected components in order to edit fields' ) else : model . emptied . emit ( True ) self . hintRequested . emit ( 'To add a parameter, Drag "Add" onto empty auto-parameter table' )
When this widget is shown it has an effect of putting other widgets in the parent widget into different editing modes emits signal to notify other widgets . Restores the previous selection the last time this widget was visible
225
40
16,053
def closeEvent ( self , event ) : self . visibilityChanged . emit ( 0 ) model = self . paramList . model ( ) model . hintRequested . disconnect ( ) model . rowsInserted . disconnect ( ) model . rowsRemoved . disconnect ( )
Emits a signal to update start values on components
54
10
16,054
def authenticate_with_access_token ( access_token ) : credentials = Credentials ( access_token = access_token ) client = YamcsClient ( 'localhost:8090' , credentials = credentials ) for link in client . list_data_links ( 'simulator' ) : print ( link )
Authenticate using an existing access token .
67
8
16,055
def pretty_format_table ( labels , data , num_format = "{:.3f}" , line_separator = "\n" ) : table = SqlTable ( labels , data , num_format , line_separator ) return table . build ( )
Parses and creates pretty table
57
7
16,056
def _parse ( self ) : for i in range ( len ( self . data ) ) : self . _parse_row ( i )
Parses raw data
29
5
16,057
def _calculate_optimal_column_widths ( self ) : columns = len ( self . data [ 0 ] ) # number of columns str_labels = [ parse_colorama ( str ( l ) ) for l in self . labels ] # labels as strings str_data = [ [ parse_colorama ( str ( col ) ) for col in row ] for row in self . data ] # values as strings widths = [ 0 ] * columns # length of longest string in each column for row in str_data : # calculate max width in each column widths = [ max ( w , len ( c ) ) for w , c in zip ( widths , row ) ] # check if label name is longer than data for col , label in enumerate ( str_labels ) : if len ( label ) > widths [ col ] : widths [ col ] = len ( label ) self . widths = widths
Calculates widths of columns
199
7
16,058
def get_blank_row ( self , filler = "-" , splitter = "+" ) : return self . get_pretty_row ( [ "" for _ in self . widths ] , # blanks filler , # fill with this splitter , # split columns with this )
Gets blank row
59
4
16,059
def build ( self ) : self . _calculate_optimal_column_widths ( ) pretty_table = self . get_blank_row ( ) + self . new_line # first row pretty_table += self . pretty_format_row ( self . labels ) + self . new_line pretty_table += self . get_blank_row ( ) + self . new_line for row in self . data : # append each row pretty_table += self . pretty_format_row ( row ) + self . new_line pretty_table += self . get_blank_row ( ) # ending line return pretty_table
Builds pretty - formatted table
136
6
16,060
def from_df ( data_frame ) : labels = data_frame . keys ( ) . tolist ( ) data = data_frame . values . tolist ( ) return SqlTable ( labels , data , "{:.3f}" , "\n" )
Parses data and builds an instance of this class
56
11
16,061
def editheaders ( ) : # Read stdin - this will be the output from samtools view for line in fileinput . input ( ) : try : # Get the flag value from the input columns = line . split ( '\t' ) # The FLAG is in the second column flag = int ( columns [ 1 ] ) # Subtracts 256 from the flag if the & bitwise operator evaluates to true # See http://www.tutorialspoint.com/python/bitwise_operators_example.htm # For the test case, flags of 256 became 0, and flags of 272 became 16 columns [ 1 ] = str ( ( flag - 256 ) if ( flag & 256 ) else flag ) # update = [columns[0], str(flag), columns[2:]] sys . stdout . write ( '\t' . join ( columns ) ) # Don't fail on IOErrors, or ValueErrors, and still print line to stdout except ( IOError , ValueError ) : sys . stdout . write ( line ) pass # Try except statements to get rid of file closing errors try : sys . stdout . flush ( ) sys . stdout . close ( ) except : pass try : sys . stderr . close ( ) except : pass
Edits the headers of SAM files to remove secondary alignments
271
12
16,062
def ref_string_matches_ref_sequence ( self , ref_sequence ) : # you never know what you're gonna get... if self . POS < 0 : return False end_pos = self . ref_end_pos ( ) if end_pos >= len ( ref_sequence ) : return False return self . REF == ref_sequence [ self . POS : end_pos + 1 ]
Returns true iff the REF string in the record agrees with the given ref_sequence
85
18
16,063
def ref_string_matches_dict_of_ref_sequences ( self , ref_sequences ) : return self . CHROM in ref_sequences and self . ref_string_matches_ref_sequence ( ref_sequences [ self . CHROM ] )
Returns true iff there is a sequence called self . CHROM in the dict of ref_sequences and the REF string matches
60
27
16,064
def is_snp ( self ) : nucleotides = { 'A' , 'C' , 'G' , 'T' } return len ( self . REF ) == 1 and self . REF in nucleotides and set ( self . ALT ) . issubset ( nucleotides )
Returns true iff this variant is a SNP
66
9
16,065
def add_flanking_seqs ( self , ref_seq , new_start , new_end ) : if new_start > self . POS or new_end < self . ref_end_pos ( ) : raise Error ( 'new start and end positions must not try to shrink VCF record. new_start=' + str ( new_start ) + ', new_end=' + str ( new_end ) + '. VCF=' + str ( self ) ) new_start_nucleotides = ref_seq [ new_start : self . POS ] new_end_nucleotodes = ref_seq [ self . ref_end_pos ( ) + 1 : new_end + 1 ] self . POS = new_start self . REF = new_start_nucleotides + self . REF + new_end_nucleotodes self . ALT = [ new_start_nucleotides + x + new_end_nucleotodes for x in self . ALT ]
Adds new_start many nucleotides at the start and new_end many nucleotides at the end from the appropriate nucleotides in reference sequence ref_seq .
219
35
16,066
def remove_useless_start_nucleotides ( self ) : if len ( self . REF ) == 1 or len ( self . ALT ) != 1 : return i = 0 while i < len ( self . REF ) and i < len ( self . ALT [ 0 ] ) and self . REF [ i ] == self . ALT [ 0 ] [ i ] : i += 1 if i > 0 : self . REF = self . REF [ i - 1 : ] self . ALT = [ self . ALT [ 0 ] [ i - 1 : ] ] self . POS += i - 1
Removes duplicated nucleotides at the start of REF and ALT . But always leaves at least one nucleotide in each of REF and ALT . eg if variant is at position 42 REF = GCTGA ALT = GCA then sets position = 41 REF = CTGA ALT = CA . Assumes only one ALT and does nothing if there is > 1 ALT
134
83
16,067
def inferred_var_seqs_plus_flanks ( self , ref_seq , flank_length ) : flank_start = max ( 0 , self . POS - flank_length ) flank_end = min ( len ( ref_seq ) - 1 , self . ref_end_pos ( ) + flank_length ) seqs = [ ref_seq [ flank_start : self . POS ] + self . REF + ref_seq [ self . ref_end_pos ( ) + 1 : flank_end + 1 ] ] for alt in self . ALT : seqs . append ( ref_seq [ flank_start : self . POS ] + alt + ref_seq [ self . ref_end_pos ( ) + 1 : flank_end + 1 ] ) return flank_start , seqs
Returns start position of first flank sequence plus a list of sequences - the REF plus one for each ALT . sequence . Order same as in ALT column
172
32
16,068
def total_coverage ( self ) : if 'COV' in self . FORMAT : return sum ( [ int ( x ) for x in self . FORMAT [ 'COV' ] . split ( ',' ) ] ) else : return None
Returns the sum of COV data if present . Otherwise returns None
53
13
16,069
def set_parent ( self , child , parent ) : parents = cmds . listConnections ( "%s.parent" % child , plugs = True , source = True ) if parents : # there is only one parent at a time cmds . disconnectAttr ( "%s.parent" % child , "%s" % parents [ 0 ] ) if parent : cmds . connectAttr ( "%s.parent" % child , "%s.children" % parent , force = True , nextAvailable = True )
Set the parent of the child reftrack node
108
10
16,070
def get_children ( self , refobj ) : children = cmds . listConnections ( "%s.children" % refobj , d = False ) if not children : children = [ ] return children
Get the children reftrack nodes of the given node
43
11
16,071
def get_typ ( self , refobj ) : enum = cmds . getAttr ( "%s.type" % refobj ) try : return JB_ReftrackNode . types [ enum ] except IndexError : raise ValueError ( "The type on the node %s could not be associated with an available type: %s" % ( refobj , JB_ReftrackNode . types ) )
Return the entity type of the given reftrack node
88
11
16,072
def set_typ ( self , refobj , typ ) : try : enum = JB_ReftrackNode . types . index ( typ ) except ValueError : raise ValueError ( "The given type %s could not be found in available types: %" % ( typ , JB_ReftrackNode . types ) ) cmds . setAttr ( "%s.type" % refobj , enum )
Set the type of the given refobj
88
8
16,073
def create_refobj ( self , ) : n = cmds . createNode ( "jb_reftrack" ) cmds . lockNode ( n , lock = True ) return n
Create and return a new reftrack node
41
9
16,074
def referenced_by ( self , refobj ) : try : ref = cmds . referenceQuery ( refobj , referenceNode = True ) return ref except RuntimeError as e : if str ( e ) . endswith ( "' is not from a referenced file.\n" ) : return None else : raise e
Return the reference that holds the given reftrack node .
66
12
16,075
def delete_refobj ( self , refobj ) : with common . locknode ( refobj , lock = False ) : cmds . delete ( refobj )
Delete the given reftrack node
34
7
16,076
def get_current_element ( self , ) : n = jbscene . get_current_scene_node ( ) if not n : return None tfid = cmds . getAttr ( "%s.taskfile_id" % n ) try : tf = djadapter . taskfiles . get ( pk = tfid ) return tf . task . element except djadapter . models . TaskFile . DoesNotExist : raise djadapter . models . TaskFile . DoesNotExist ( "Could not find the taskfile that was set on the scene node. Id was %s" % tfid )
Return the currently open Shot or Asset
132
7
16,077
def set_reference ( self , refobj , reference ) : refnodeattr = "%s.referencenode" % refobj if reference : cmds . connectAttr ( "%s.message" % reference , refnodeattr , force = True ) ns = cmds . referenceQuery ( reference , namespace = True ) cmds . setAttr ( "%s.namespace" % refobj , ns , type = "string" ) else : conns = cmds . listConnections ( refnodeattr , plugs = True ) if not conns : return for c in conns : cmds . disconnectAttr ( c , refnodeattr )
Connect the given reftrack node with the given refernce node
139
14
16,078
def get_reference ( self , refobj ) : c = cmds . listConnections ( "%s.referencenode" % refobj , d = False ) return c [ 0 ] if c else None
Return the reference node that the reftrack node is connected to or None if it is imported .
46
20
16,079
def get_status ( self , refobj ) : reference = self . get_reference ( refobj ) return Reftrack . IMPORTED if not reference else Reftrack . LOADED if cmds . referenceQuery ( reference , isLoaded = True ) else Reftrack . UNLOADED
Return the status of the given reftrack node
64
10
16,080
def get_taskfile ( self , refobj ) : tfid = cmds . getAttr ( "%s.taskfile_id" % refobj ) try : return djadapter . taskfiles . get ( pk = tfid ) except djadapter . models . TaskFile . DoesNotExist : raise djadapter . models . TaskFile . DoesNotExist ( "Could not find the taskfile that was set on the node %s. Id was %s" % ( refobj , tfid ) )
Return the taskfile that is loaded and represented by the refobj
112
13
16,081
def connect_reftrack_scenenode ( self , refobj , scenenode ) : conns = [ ( "%s.scenenode" % refobj , "%s.reftrack" % scenenode ) , ( "%s.taskfile_id" % scenenode , "%s.taskfile_id" % refobj ) ] for src , dst in conns : if not cmds . isConnected ( src , dst ) : cmds . connectAttr ( src , dst , force = True )
Connect the given reftrack node with the given scene node
116
12
16,082
def get_search_page ( self , query ) : query_web_page = Webpage ( self . url + self . parse_query ( query ) ) query_web_page . get_html_source ( ) # get html source return query_web_page . source
Gets HTML source
59
4
16,083
def set ( constants ) : if not constants : return constants = wrap ( constants ) for k , new_value in constants . leaves ( ) : errors = [ ] try : old_value = mo_dots_set_attr ( sys . modules , k , new_value ) continue except Exception as e : errors . append ( e ) # ONE MODULE IS MISSING, THE CALLING MODULE try : caller_globals = sys . _getframe ( 1 ) . f_globals caller_file = caller_globals [ "__file__" ] if not caller_file . endswith ( ".py" ) : raise Exception ( "do not know how to handle non-python caller" ) caller_module = caller_file [ : - 3 ] . replace ( "/" , "." ) path = split_field ( k ) for i , p in enumerate ( path ) : if i == 0 : continue prefix = join_field ( path [ : 1 ] ) name = join_field ( path [ i : ] ) if caller_module . endswith ( prefix ) : old_value = mo_dots_set_attr ( caller_globals , name , new_value ) if DEBUG : from mo_logs import Log Log . note ( "Changed {{module}}[{{attribute}}] from {{old_value}} to {{new_value}}" , module = prefix , attribute = name , old_value = old_value , new_value = new_value ) break except Exception as e : errors . append ( e ) if errors : from mo_logs import Log Log . error ( "Can not set constant {{path}}" , path = k , cause = errors )
REACH INTO THE MODULES AND OBJECTS TO SET CONSTANTS . THINK OF THIS AS PRIMITIVE DEPENDENCY INJECTION FOR MODULES . USEFUL FOR SETTING DEBUG FLAGS .
364
50
16,084
def values ( self ) : lower = float ( self . lowerSpnbx . value ( ) ) upper = float ( self . upperSpnbx . value ( ) ) return ( lower , upper )
Gets the user enter max and min values of where the raster points should appear on the y - axis
42
22
16,085
def _delete ( self , ) : for k in self . keys ( ) : try : self [ k ] . _delete ( ) except KeyError : pass if self . __parent is not None : del self . __parent [ self . __name ] self . __parent = None cmds . deleteUI ( self . __menustring )
Delete the menu and remove it from parent
72
8
16,086
def create_menu ( self , name , parent = None , * * kwargs ) : m = Menu ( name , parent , * * kwargs ) if parent is None : self . menus [ name ] = m return m
Creates a maya menu or menu item
49
9
16,087
def delete_menu ( self , menu ) : if menu . parent is None : del self . menus [ menu . name ( ) ] menu . _delete ( )
Delete the specified menu
34
4
16,088
def delete_all_menus ( self , ) : for m in self . menus . itervalues ( ) : m . _delete ( ) self . menus . clear ( )
Delete all menues managed by this manager
39
8
16,089
def add_mismatch ( self , entity , * traits ) : for trait in traits : self . index [ trait ] . add ( entity )
Add a mismatching entity to the index .
31
9
16,090
def add_match ( self , entity , * traits ) : # The index traits of `traits_indexed_by` might have already been used to index some other entities. Those # relations are to be preserved. If the trait was not used to index any entity, we initialize them to mismatch # all matching entities known so far. for trait in traits : if trait not in self . index : self . index [ trait ] = self . mismatch_unknown . copy ( ) # Now each known trait this entity is not matching, will explicitly mismatch currently added entity. for existing_trait in self . index : if existing_trait not in traits : self . index [ existing_trait ] . add ( entity ) # From now on, any new matching or mismatching index will mismatch this entity by default. self . mismatch_unknown . add ( entity )
Add a matching entity to the index .
179
8
16,091
def hist_axis_func ( axis_type : enum . Enum ) -> Callable [ [ Hist ] , Axis ] : def axis_func ( hist : Hist ) -> Axis : """ Retrieve the axis associated with the ``HistAxisRange`` object for a given hist. Args: hist: Histogram from which the selected axis should be retrieved. axis_type: Enumeration corresponding to the axis to be restricted. The numerical value of the enum should be axis number (for a THnBase). Returns: ROOT.TAxis: The axis associated with the ``HistAxisRange`` object. """ # Determine the axis_type value # Use try here instead of checking for a particular type to protect against type changes # (say in the enum) try : # Try to extract the value from an enum hist_axis_type = axis_type . value except AttributeError : # Seems that we received an int, so just use that value hist_axis_type = axis_type if hasattr ( hist , "ProjectionND" ) and hasattr ( hist , "Projection" ) : # THnBase defines ProjectionND and Projection, so we will use those as proxies. # Return the proper THn access #logger.debug(f"From hist: {hist}, hist_axis_type: {hist_axis_type}, axis: {hist.GetAxis(hist_axis_type.value)}") return hist . GetAxis ( hist_axis_type ) else : # If it's not a THn, then it must be a TH1 derived axis_function_map = { TH1AxisType . x_axis . value : hist . GetXaxis , TH1AxisType . y_axis . value : hist . GetYaxis , TH1AxisType . z_axis . value : hist . GetZaxis } # Retrieve the axis function and execute it. It is done separately to # clarify any possible errors. return_func = axis_function_map [ hist_axis_type ] return return_func ( ) return axis_func
Wrapper to retrieve the axis of a given histogram .
446
12
16,092
def axis ( self ) -> Callable [ [ Any ] , Any ] : axis_func = hist_axis_func ( axis_type = self . axis_type ) return axis_func
Determine the axis to return based on the hist type .
40
13
16,093
def apply_range_set ( self , hist : Hist ) -> None : # Do individual assignments to clarify which particular value is causing an error here. axis = self . axis ( hist ) #logger.debug(f"axis: {axis}, axis(): {axis.GetName()}") # Help out mypy assert not isinstance ( self . min_val , float ) assert not isinstance ( self . max_val , float ) # Evaluate the functions to determine the values. min_val = self . min_val ( axis ) max_val = self . max_val ( axis ) # NOTE: Using SetRangeUser() here was a bug, since I've been passing bin values! In general, # passing bin values is more flexible, but requires the values to be passed to # ``apply_func_to_find_bin()`` to be shifted by some small epsilon to get the desired bin. self . axis ( hist ) . SetRange ( min_val , max_val )
Apply the associated range set to the axis of a given hist .
213
13
16,094
def apply_func_to_find_bin ( func : Union [ None , Callable [ ... , Union [ float , int , Any ] ] ] , values : Optional [ float ] = None ) -> Callable [ [ Any ] , Union [ float , int ] ] : def return_func ( axis ) -> Any : """ Apply the stored function and value to a given axis. Args: axis (TAxis or similar): Axis to which the function should be applied. Returns: any: The value returned by the function. Often a float or int, but not necessarily. """ #logger.debug(f"func: {func}, values: {values}") if func : if values is not None : return func ( axis , values ) else : return func ( axis ) else : return values return return_func
Closure to determine the bin associated with a value on an axis .
172
13
16,095
def call_projection_function ( self , hist : Hist ) -> Hist : # Restrict projection axis ranges for axis in self . projection_axes : logger . debug ( f"Apply projection axes hist range: {axis.name}" ) axis . apply_range_set ( hist ) projected_hist = None if hasattr ( hist , "ProjectionND" ) and hasattr ( hist , "Projection" ) : # THnBase defines ProjectionND and Projection, so we will use those as proxies. projected_hist = self . _project_THn ( hist = hist ) elif hasattr ( hist , "ProjectionZ" ) and hasattr ( hist , "Project3D" ) : # TH3 defines ProjectionZ and Project3D, so we will use those as proxies. projected_hist = self . _project_TH3 ( hist = hist ) elif hasattr ( hist , "ProjectionX" ) and hasattr ( hist , "ProjectionY" ) : # TH2 defines ProjectionX and ProjectionY, so we will use those as proxies. projected_hist = self . _project_TH2 ( hist = hist ) else : raise TypeError ( type ( hist ) , f"Could not recognize hist {hist} of type {type(hist)}" ) # Cleanup restricted axes self . cleanup_cuts ( hist , cut_axes = self . projection_axes ) return projected_hist
Calls the actual projection function for the hist .
307
10
16,096
def _project_THn ( self , hist : Hist ) -> Any : # THnBase projections args are given as a list of axes, followed by any possible options. projection_axes = [ axis . axis_type . value for axis in self . projection_axes ] # Handle ROOT THnBase quirk... # 2D projection are called as (y, x, options), so we should reverse the order so it performs # as expected if len ( projection_axes ) == 2 : # Reverses in place projection_axes . reverse ( ) # Test calculating errors # Add "E" to ensure that errors will be calculated args = projection_axes + [ "E" ] # Do the actual projection logger . debug ( f"hist: {hist.GetName()} args: {args}" ) if len ( projection_axes ) > 3 : # Project into a THnBase object. projected_hist = hist . ProjectionND ( * args ) else : # Project a TH1 derived object. projected_hist = hist . Projection ( * args ) return projected_hist
Perform the actual THn - > THn or TH1 projection .
233
15
16,097
def _project_TH3 ( self , hist : Hist ) -> Any : # Axis length validation if len ( self . projection_axes ) < 1 or len ( self . projection_axes ) > 2 : raise ValueError ( len ( self . projection_axes ) , "Invalid number of axes" ) # Need to concatenate the names of the axes together projection_axis_name = "" for axis in self . projection_axes : # Determine the axis name based on the name of the axis type. # [:1] returns just the first letter. For example, we could get "xy" if the first axis as # x_axis and the second was y_axis. # NOTE: Careful. This depends on the name of the enumerated values!!! Since this isn't terribly # safe, we then perform additional validation on the same to ensure that it is one of the # expected axis names. proj_axis_name = axis . axis_type . name [ : 1 ] if proj_axis_name not in [ "x" , "y" , "z" ] : raise ValueError ( f"Projection axis name {proj_axis_name} is not 'x', 'y', or 'z'. Please check your configuration." ) projection_axis_name += proj_axis_name # Handle ROOT Project3D quirk... # 2D projection are called as (y, x, options), so we should reverse the order so it performs # as expected. # NOTE: This isn't well documented in TH3. It is instead described in THnBase.Projection(...) if len ( self . projection_axes ) == 2 : # Reverse the axes projection_axis_name = projection_axis_name [ : : - 1 ] # Do the actual projection logger . info ( f"Projecting onto axes \"{projection_axis_name}\" from hist {hist.GetName()}" ) projected_hist = hist . Project3D ( projection_axis_name ) return projected_hist
Perform the actual TH3 - > TH1 projection .
433
12
16,098
def _project_TH2 ( self , hist : Hist ) -> Any : if len ( self . projection_axes ) != 1 : raise ValueError ( len ( self . projection_axes ) , "Invalid number of axes" ) #logger.debug(f"self.projection_axes[0].axis: {self.projection_axes[0].axis}, axis range name: {self.projection_axes[0].name}, axis_type: {self.projection_axes[0].axis_type}") # NOTE: We cannot use TH3.ProjectionZ(...) because it has different semantics than ProjectionX # and ProjectionY. In particular, it doesn't respect the axis limits of axis onto which it # is projected. So we have to separate the projection by histogram type as opposed to axis # length. projection_func_map = { TH1AxisType . x_axis . value : hist . ProjectionX , TH1AxisType . y_axis . value : hist . ProjectionY } # Determine the axis_type value # Use try here instead of checking for a particular type to protect against type changes (say # in the enum) try : # Try to extract the value from an enum axis_type = self . projection_axes [ 0 ] . axis_type . value except ValueError : # Seems that we received an int, so just use that value axis_type = self . axis_type # type: ignore projection_func = projection_func_map [ axis_type ] # Do the actual projection logger . info ( f"Projecting onto axis range {self.projection_axes[0].name} from hist {hist.GetName()}" ) projected_hist = projection_func ( ) return projected_hist
Perform the actual TH2 - > TH1 projection .
386
12
16,099
def _project_single_observable ( self , * * kwargs : Dict [ str , Any ] ) -> Hist : # Help out mypy assert isinstance ( self . output_attribute_name , str ) # Run the actual projection. output_hist , projection_name , projection_name_args , = self . _project_observable ( input_key = "single_observable" , input_observable = self . observable_to_project_from , * * kwargs , ) # Store the output. output_hist_args = projection_name_args output_hist_args . update ( { # type: ignore "output_hist" : output_hist , "projection_name" : projection_name } ) # Store the final histogram. output_hist = self . output_hist ( * * output_hist_args ) # type: ignore # Store the final output hist if not hasattr ( self . output_observable , self . output_attribute_name ) : raise ValueError ( f"Attempted to assign hist to non-existent attribute {self.output_attribute_name} of object {self.output_observable}. Check the attribute name!" ) # Actually store the histogram. setattr ( self . output_observable , self . output_attribute_name , output_hist ) # Return the observable return output_hist
Driver function for projecting and storing a single observable .
301
10