idx
int64
0
63k
question
stringlengths
61
4.03k
target
stringlengths
6
1.23k
24,600
def run_pipelines(pipeline_id_pattern, root_dir,
                  use_cache=True, dirty=False, force=False,
                  concurrency=1, verbose_logs=True,
                  progress_cb=None, slave=False):
    """Run pipelines whose id matches ``pipeline_id_pattern``.

    Executes pipelines (locally when ``slave`` is true, otherwise via the
    thread pool) and returns a list of ExecutionResult objects.
    """
    with concurrent.futures.ThreadPoolExecutor(max_workers=concurrency,
                                               thread_name_prefix='T') as executor:
        try:
            results = []
            pending_futures = set()
            done_futures = set()
            finished_futures = []
            progress_thread = None
            progress_queue = None
            status_manager = status_mgr(root_dir)
            if progress_cb is not None:
                # Progress callbacks are serviced on a dedicated thread fed
                # through a queue.
                progress_queue = Queue()
                progress_thread = threading.Thread(target=progress_report_handler,
                                                   args=(progress_cb, progress_queue))
                progress_thread.start()

            # specs_to_execute is a generator-based protocol: we feed the id
            # of each completed pipeline back in via send().
            all_specs = specs_to_execute(pipeline_id_pattern, root_dir,
                                         status_manager, force, dirty, results)
            while True:
                done = None
                if len(done_futures) > 0:
                    done = done_futures.pop()
                    finished_futures.append(done)
                    done = done.result()[0]
                try:
                    spec = all_specs.send(done)
                except StopIteration:
                    spec = None
                if spec is None:
                    # No spec ready: drain completed futures, wait for
                    # pending ones, or stop when nothing is left.
                    if len(done_futures) == 0:
                        if len(pending_futures) > 0:
                            done_futures, pending_futures = concurrent.futures.wait(
                                pending_futures,
                                return_when=concurrent.futures.FIRST_COMPLETED)
                            continue
                        else:
                            break
                    else:
                        continue
                if len(spec.validation_errors) > 0:
                    # Invalid spec: record an init failure, never execute it.
                    results.append(ExecutionResult(
                        spec.pipeline_id, False, {},
                        ['init'] + list(map(str, spec.validation_errors))))
                    continue
                if slave:
                    # Slave mode runs the pipeline synchronously in-process.
                    ps = status_manager.get(spec.pipeline_id)
                    ps.init(spec.pipeline_details, spec.source_details,
                            spec.validation_errors, spec.cache_hash)
                    eid = gen_execution_id()
                    if ps.queue_execution(eid, 'manual'):
                        success, stats, errors = execute_pipeline(spec, eid,
                                                                  use_cache=use_cache)
                        results.append(ExecutionResult(spec.pipeline_id,
                                                       success, stats, errors))
                    else:
                        results.append(ExecutionResult(spec.pipeline_id,
                                                       False, None,
                                                       ['Already Running']))
                else:
                    # Normal mode: hand the pipeline to the worker pool.
                    f = executor.submit(remote_execute_pipeline, spec, root_dir,
                                        use_cache, verbose_logs, progress_queue)
                    pending_futures.add(f)
            for f in finished_futures:
                ret = f.result()
                results.append(ExecutionResult(*ret))
        except KeyboardInterrupt:
            pass
        finally:
            if slave:
                finalize()
            if progress_thread is not None:
                # Sentinel None stops the progress thread.
                progress_queue.put(None)
                progress_thread.join()
    return results
Run a pipeline by pipeline - id . pipeline - id supports the % wildcard for any - suffix matching . Use all or % for running all pipelines
24,601
def insert ( self , index , key ) : if key in self . map : return size = len ( self . items ) if index < 0 : index = size + index if size + index > 0 else 0 else : index = index if index < size else size self . items . insert ( index , key ) for k , v in self . map . items ( ) : if v >= index : self . map [ k ] = v + 1 self . map [ key ] = index
Adds an element at a dedicated position in an OrderedSet .
24,602
def pop ( self , index = None ) : if not self . items : raise KeyError ( 'Set is empty' ) def remove_index ( i ) : elem = self . items [ i ] del self . items [ i ] del self . map [ elem ] return elem if index is None : elem = remove_index ( - 1 ) else : size = len ( self . items ) if index < 0 : index = size + index if index < 0 : raise IndexError ( 'assignement index out of range' ) elif index >= size : raise IndexError ( 'assignement index out of range' ) elem = remove_index ( index ) for k , v in self . map . items ( ) : if v >= index and v > 0 : self . map [ k ] = v - 1 return elem
Removes an element at the tail of the OrderedSet or at a dedicated position .
24,603
def EMetaclass ( cls ) : superclass = cls . __bases__ if not issubclass ( cls , EObject ) : sclasslist = list ( superclass ) if object in superclass : index = sclasslist . index ( object ) sclasslist . insert ( index , EObject ) sclasslist . remove ( object ) else : sclasslist . insert ( 0 , EObject ) superclass = tuple ( sclasslist ) orig_vars = cls . __dict__ . copy ( ) slots = orig_vars . get ( '__slots__' ) if slots is not None : if isinstance ( slots , str ) : slots = [ slots ] for slots_var in slots : orig_vars . pop ( slots_var ) orig_vars . pop ( '__dict__' , None ) orig_vars . pop ( '__weakref__' , None ) return MetaEClass ( cls . __name__ , superclass , orig_vars )
Class decorator for creating PyEcore metaclass .
24,604
def getEAnnotation ( self , source ) : for annotation in self . eAnnotations : if annotation . source == source : return annotation return None
Return the annotation with a matching source attribute .
24,605
def get_week_URL ( date , day = 0 ) : if day < 1 : day = 1 date = datetime ( year = date . year , month = date . month , day = day , tzinfo = utc ) return reverse ( 'calendar_week' , kwargs = { 'year' : date . isocalendar ( ) [ 0 ] , 'week' : date . isocalendar ( ) [ 1 ] } )
Returns the week view URL for a given date .
24,606
def monday_of_week ( year , week ) : str_time = time . strptime ( '{0} {1} 1' . format ( year , week ) , '%Y %W %w' ) date = timezone . datetime ( year = str_time . tm_year , month = str_time . tm_mon , day = str_time . tm_mday , tzinfo = timezone . utc ) if timezone . datetime ( year , 1 , 4 ) . isoweekday ( ) > 4 : date -= timezone . timedelta ( days = 7 ) return date
Returns a datetime for the monday of the given week of the given year .
24,607
def get_occurrence ( self , occ ) : return self . lookup . pop ( ( occ . event , occ . original_start , occ . original_end ) , occ )
Return a persisted occurrence matching the given occ, and remove it from the lookup since it has already been matched .
24,608
def _request ( self , uri , method = 'GET' , params = None , files = None , headers = None , auth = None ) : for i in range ( 3 ) : try : full_url = '{b}{u}' . format ( b = self . api_url , u = uri ) response = None if method == 'POST' : response = requests . post ( full_url , data = params , files = files , headers = headers , verify = self . verify_ssl , auth = auth , proxies = self . proxies ) else : response = requests . get ( full_url , params = params , headers = headers , verify = self . verify_ssl , auth = auth , proxies = self . proxies ) if response . status_code >= 500 : self . server_available = False raise SandboxError ( "server returned {c} status code on {u}, assuming unavailable..." . format ( c = response . status_code , u = response . url ) ) else : return response except requests . exceptions . RequestException : time . sleep ( random . uniform ( 0 , 4 ** i * 100 / 1000.0 ) ) self . server_available = False msg = "exceeded 3 attempts with sandbox API: {u}, p:{p}, f:{f}" . format ( u = full_url , p = params , f = files ) try : msg += "\n" + response . content . decode ( 'utf-8' ) except AttributeError : pass raise SandboxError ( msg )
Robustness wrapper . Tries up to 3 times to dance with the Sandbox API .
24,609
def analyses ( self ) : response = self . _request ( "tasks/list" ) return json . loads ( response . content . decode ( 'utf-8' ) ) [ 'tasks' ]
Retrieve a list of analyzed samples .
24,610
def check ( self , item_id ) : response = self . _request ( "tasks/view/{id}" . format ( id = item_id ) ) if response . status_code == 404 : return False try : content = json . loads ( response . content . decode ( 'utf-8' ) ) status = content [ 'task' ] [ "status" ] if status == 'completed' or status == "reported" : return True except ValueError as e : raise sandboxapi . SandboxError ( e ) return False
Check if an analysis is complete
24,611
def delete ( self , item_id ) : try : response = self . _request ( "tasks/delete/{id}" . format ( id = item_id ) ) if response . status_code == 200 : return True except sandboxapi . SandboxError : pass return False
Delete the reports associated with the given item_id .
24,612
def is_available ( self ) : if self . server_available : return True else : try : response = self . _request ( "cuckoo/status" ) if response . status_code == 200 : self . server_available = True return True except sandboxapi . SandboxError : pass self . server_available = False return False
Determine if the Cuckoo Sandbox API servers are alive or in maintenance mode .
24,613
def queue_size ( self ) : response = self . _request ( "tasks/list" ) tasks = json . loads ( response . content . decode ( 'utf-8' ) ) [ "tasks" ] return len ( [ t for t in tasks if t [ 'status' ] == 'pending' ] )
Determine Cuckoo sandbox queue length
24,614
def is_available ( self ) : if self . server_available : return True else : try : self . server_available = self . jbx . server_online ( ) return self . server_available except jbxapi . JoeException : pass self . server_available = False return False
Determine if the Joe Sandbox API server is alive .
24,615
def get_isolated_cpus ( ) : path = sysfs_path ( 'devices/system/cpu/isolated' ) isolated = read_first_line ( path ) if isolated : return parse_cpu_list ( isolated ) cmdline = read_first_line ( proc_path ( 'cmdline' ) ) if cmdline : match = re . search ( r'\bisolcpus=([^ ]+)' , cmdline ) if match : isolated = match . group ( 1 ) return parse_cpu_list ( isolated ) return None
Get the list of isolated CPUs .
24,616
def has_same_unique_benchmark ( self ) : "True if all suites have one benchmark with the same name" if any ( len ( suite ) > 1 for suite in self . suites ) : return False names = self . suites [ 0 ] . get_benchmark_names ( ) return all ( suite . get_benchmark_names ( ) == names for suite in self . suites [ 1 : ] )
True if all suites have one benchmark with the same name
24,617
def send_message ( message , params , site , logger ) : client . capture ( 'Message' , message = message , params = tuple ( params ) , data = { 'site' : site , 'logger' : logger , } , )
Send a message to the Sentry server
24,618
def get_command_line_args ( ) : parser = argparse . ArgumentParser ( description = 'Send logs to Django Sentry.' ) parser . add_argument ( '--sentryconfig' , '-c' , default = None , help = 'A configuration file (.ini, .yaml) of some ' 'Sentry integration to extract the Sentry DSN from' ) parser . add_argument ( '--sentrydsn' , '-s' , default = "" , help = 'The Sentry DSN string (overrides -c)' ) parser . add_argument ( '--daemonize' , '-d' , default = False , action = 'store_const' , const = True , help = 'Run this script in background' ) parser . add_argument ( '--follow' , '-f' , default = "all" , help = 'Which logs to follow, default ALL' ) parser . add_argument ( '--nginxerrorpath' , '-n' , default = None , help = 'Nginx error log path' ) return parser . parse_args ( )
CLI command line arguments handling
24,619
def process_arguments ( args ) : if args . sentryconfig : print ( 'Parsing DSN from %s' % args . sentryconfig ) os . environ [ 'SENTRY_DSN' ] = parse_sentry_configuration ( args . sentryconfig ) if args . sentrydsn : print ( 'Using the DSN %s' % args . sentrydsn ) os . environ [ 'SENTRY_DSN' ] = args . sentrydsn if args . nginxerrorpath : print ( 'Using the Nginx error log path %s' % args . nginxerrorpath ) os . environ [ 'NGINX_ERROR_PATH' ] = args . nginxerrorpath from . . conf import settings if args . daemonize : print ( 'Running process in background' ) from . . daemonize import create_daemon create_daemon ( )
Deal with arguments passed on the command line
24,620
def parse_sentry_configuration ( filename ) : filetype = os . path . splitext ( filename ) [ - 1 ] [ 1 : ] . lower ( ) if filetype == 'ini' : config = ConfigParser ( ) config . read ( filename ) ini_key = 'dsn' ini_sections = [ 'sentry' , 'filter:raven' ] for section in ini_sections : if section in config : print ( '- Using value from [{section}]:[{key}]' . format ( section = section , key = ini_key ) ) try : return config [ section ] [ ini_key ] except KeyError : print ( '- Warning: Key "{key}" not found in section ' '[{section}]' . format ( section = section , key = ini_key ) ) raise SystemExit ( 'No DSN found in {file}. Tried sections [{sec_list}]' . format ( file = filename , sec_list = '], [' . join ( ini_sections ) , ) ) elif filetype == 'py' : raise SystemExit ( 'Parsing configuration from pure Python (Django,' 'Flask, Bottle, etc.) not implemented yet.' ) else : raise SystemExit ( 'Configuration file type not supported for parsing: ' '%s' % filetype )
Parse Sentry DSN out of an application or Sentry configuration file
24,621
def read_file ( filename ) : with open ( join ( abspath ( dirname ( __file__ ) ) , filename ) ) as file : return file . read ( )
Read the contents of a file located relative to setup . py
24,622
def parse ( self , line ) : csv_list = line . split ( "," ) date_time_message = csv_list . pop ( 0 ) . split ( " " , 2 ) otherinfo = dict ( ) for item in csv_list : key_value_pair = item . split ( ":" , 1 ) key = key_value_pair [ 0 ] . strip ( ) if len ( key_value_pair ) > 1 : value = key_value_pair [ 1 ] . strip ( ) if not value : value = "-" else : value = "-" otherinfo [ key ] = value self . message = '%s\n' 'Date: %s\n' 'Time: %s\n' 'Request: %s\n' 'Referrer: %s\n' 'Server: %s\n' 'Client: %s\n' 'Host: %s\n' 'Upstream: %s\n' self . params = [ date_time_message [ 2 ] , date_time_message [ 0 ] , date_time_message [ 1 ] , otherinfo . get ( "request" , "-" ) , otherinfo . get ( "referrer" , "-" ) , otherinfo . get ( "server" , "-" ) , otherinfo . get ( "client" , "-" ) , otherinfo . get ( "host" , "-" ) , otherinfo . get ( "upstream" , "-" ) , ] self . site = otherinfo . get ( "referrer" , "-" )
Parse a line of the Nginx error log
24,623
def validate_json ( self ) : if not hasattr ( self , 'guidance_json' ) : return False checksum = self . guidance_json . get ( 'checksum' ) contents = self . guidance_json . get ( 'db' ) hash_key = ( "{}{}" . format ( json . dumps ( contents , sort_keys = True ) , self . assignment . endpoint ) . encode ( ) ) digest = hashlib . md5 ( hash_key ) . hexdigest ( ) if not checksum : log . warning ( "Checksum on guidance not found. Invalidating file" ) return False if digest != checksum : log . warning ( "Checksum %s did not match actual digest %s" , checksum , digest ) return False return True
Ensure that the checksum matches .
24,624
def set_tg ( self ) : if not os . path . isfile ( self . current_working_dir + LOCAL_TG_FILE ) : cur_email = self . assignment . get_student_email ( ) log . info ( "Current email is %s" , cur_email ) if not cur_email : self . tg_id = - 1 return EMPTY_MISUCOUNT_TGID_PRNTEDMSG tg_url = ( "{}{}/{}{}" . format ( TGSERVER , cur_email , self . assignment_name , TG_SERVER_ENDING ) ) try : log . info ( "Accessing treatment server at %s" , tg_url ) data = requests . get ( tg_url , timeout = 1 ) . json ( ) except IOError : data = { "tg" : - 1 } log . warning ( "Failed to communicate to server" , exc_info = True ) if data . get ( "tg" ) is None : log . warning ( "Server returned back a bad treatment group ID." ) data = { "tg" : - 1 } with open ( self . current_working_dir + LOCAL_TG_FILE , "w" ) as fd : fd . write ( str ( data [ "tg" ] ) ) tg_file = open ( self . current_working_dir + LOCAL_TG_FILE , 'r' ) self . tg_id = int ( tg_file . read ( ) )
Try to grab the treatment group number for the student . If there is no treatment group number available request it from the server .
24,625
def prompt_with_prob ( self , orig_response = None , prob = None ) : if self . load_error : return 'Failed to read guidance config file' if hasattr ( self . assignment , 'is_test' ) : log . info ( "Skipping prompt due to test mode" ) return "Test response" if prob is None : prob = self . prompt_probability if random . random ( ) > prob : log . info ( "Did not prompt for rationale: Insufficient Probability" ) return "Did not prompt for rationale" with format . block ( style = "-" ) : rationale = prompt . explanation_msg ( EXPLANTION_PROMPT , short_msg = CONFIRM_BLANK_EXPLANATION ) if prob is None : self . prompt_probability = 0 if orig_response : print ( 'Thanks! Your original response was: {}' . format ( '\n' . join ( orig_response ) ) ) return rationale
Ask for rationale with a specific level of probability .
24,626
def patch_requests ( ) : config . create_config_directory ( ) ca_certs_file = config . CERT_FILE ca_certs_contents = requests . __loader__ . get_data ( 'requests/cacert.pem' ) should_write_certs = True if os . path . isfile ( ca_certs_file ) : with open ( ca_certs_file , 'rb' ) as f : existing_certs = f . read ( ) if existing_certs != ca_certs_contents : should_write_certs = True print ( "Updating local SSL certificates" ) else : should_write_certs = False if should_write_certs : with open ( ca_certs_file , 'wb' ) as f : f . write ( ca_certs_contents ) os . environ [ 'REQUESTS_CA_BUNDLE' ] = ca_certs_file
Customize the cacerts . pem file that requests uses . Automatically updates the cert file if the contents are different .
24,627
def run ( self , messages ) : files = { } if self . args . submit : files [ 'submit' ] = True for file in self . assignment . src : if not self . is_file ( file ) : contents = '' log . warning ( 'File {} does not exist' . format ( file ) ) else : contents = self . read_file ( file ) log . info ( 'Loaded contents of {} to send to server' . format ( file ) ) files [ file ] = contents messages [ 'file_contents' ] = files
Find all source files and return their complete contents .
24,628
def run ( self , messages ) : if not self . args . lock : return format . print_line ( '~' ) print ( 'Locking tests' ) print ( ) for test in self . assignment . test_map . values ( ) : log . info ( 'Locking {}' . format ( test . name ) ) test . lock ( self . _hash_fn )
Responsible for locking each test .
24,629
def pick_free_port ( hostname = REDIRECT_HOST , port = 0 ) : import socket s = socket . socket ( socket . AF_INET , socket . SOCK_STREAM ) try : s . bind ( ( hostname , port ) ) except OSError as e : log . warning ( "Could not bind to %s:%s %s" , hostname , port , e ) if port == 0 : print ( 'Unable to find an open port for authentication.' ) raise AuthenticationException ( e ) else : return pick_free_port ( hostname , 0 ) addr , port = s . getsockname ( ) s . close ( ) return port
Try to bind a port . Default = 0 selects a free port .
24,630
def make_token_post ( server , data ) : try : response = requests . post ( server + TOKEN_ENDPOINT , data = data , timeout = TIMEOUT ) body = response . json ( ) except Exception as e : log . warning ( 'Other error when exchanging code' , exc_info = True ) raise OAuthException ( error = 'Authentication Failed' , error_description = str ( e ) ) if 'error' in body : log . error ( body ) raise OAuthException ( error = body . get ( 'error' , 'Unknown Error' ) , error_description = body . get ( 'error_description' , '' ) ) return body
Try getting an access token from the server . If successful returns the JSON response . If unsuccessful raises an OAuthException .
24,631
def authenticate ( cmd_args , endpoint = '' , force = False ) : server = server_url ( cmd_args ) network . check_ssl ( ) access_token = None try : assert not force access_token = refresh_local_token ( server ) except Exception : print ( 'Performing authentication' ) access_token = perform_oauth ( get_code , cmd_args , endpoint ) email = display_student_email ( cmd_args , access_token ) if not email : log . warning ( 'Could not get login email. Try logging in again.' ) log . debug ( 'Authenticated with access token={}' . format ( access_token ) ) return access_token
Returns an OAuth token that can be passed to the server for identification . If FORCE is False it will attempt to use a cached token or refresh the OAuth token .
24,632
def notebook_authenticate ( cmd_args , force = False , silent = True ) : server = server_url ( cmd_args ) network . check_ssl ( ) access_token = None if not force : try : access_token = refresh_local_token ( server ) except OAuthException as e : if not silent : raise e return notebook_authenticate ( cmd_args , force = True , silent = False ) if not access_token : access_token = perform_oauth ( get_code_via_terminal , cmd_args , copy_msg = NOTEBOOK_COPY_MESSAGE , paste_msg = NOTEBOOK_PASTE_MESSAGE ) email = display_student_email ( cmd_args , access_token ) if email is None and not force : return notebook_authenticate ( cmd_args , force = True ) elif email is None : log . warning ( 'Could not get login email. You may have been logged out. ' ' Try logging in again.' ) return access_token
Similar to authenticate, but prints student emails after all calls and uses a different way to get codes . If SILENT is True it will suppress the error message and redirect to FORCE = True
24,633
def get_student_email ( cmd_args , endpoint = '' ) : log . info ( "Attempting to get student email" ) if cmd_args . local : return None access_token = authenticate ( cmd_args , endpoint = endpoint , force = False ) if not access_token : return None try : return get_info ( cmd_args , access_token ) [ 'email' ] except IOError as e : return None
Attempts to get the student's email . Returns the email or None .
24,634
def get_identifier ( cmd_args , endpoint = '' ) : student_email = get_student_email ( cmd_args , endpoint ) if not student_email : return "Unknown" return hashlib . md5 ( student_email . encode ( ) ) . hexdigest ( )
Obtain an anonymized identifier .
24,635
def check_version(server, version, filename, timeout=SHORT_TIMEOUT):
    """Check the server for a newer OK release and install it into *filename*.

    Returns True when already up to date or successfully updated, False on
    any network, parse, or write failure.
    """
    address = VERSION_ENDPOINT.format(server=server)
    print('Checking for software updates...')
    log.info('Existing OK version: %s', version)
    log.info('Checking latest version from %s', address)
    try:
        response = requests.get(address, timeout=timeout)
        response.raise_for_status()
    except (requests.exceptions.RequestException,
            requests.exceptions.BaseHTTPError) as e:
        print('Network error when checking for updates.')
        log.warning('Network error when checking version from %s: %s',
                    address, str(e), stack_info=True)
        return False
    response_json = response.json()
    if not _validate_api_response(response_json):
        print('Error while checking updates: malformed server response')
        log.info('Malformed response from %s: %s', address, response.text)
        return False
    current_version = response_json['data']['results'][0]['current_version']
    if current_version == version:
        print('OK is up to date')
        return True
    # A newer version exists: download and install it over *filename*.
    download_link = response_json['data']['results'][0]['download_link']
    log.info('Downloading version %s from %s', current_version, download_link)
    try:
        response = requests.get(download_link, timeout=timeout)
        response.raise_for_status()
    except (requests.exceptions.RequestException,
            requests.exceptions.BaseHTTPError) as e:
        print('Error when downloading new version of OK')
        log.warning('Error when downloading new version of OK: %s', str(e),
                    stack_info=True)
        return False
    log.info('Writing new version to %s', filename)
    zip_binary = response.content
    try:
        _write_zip(filename, zip_binary)
    except IOError as e:
        print('Error when downloading new version of OK')
        log.warning('Error writing to %s: %s', filename, str(e))
        return False
    else:
        print('Updated to version: {}'.format(current_version))
        log.info('Successfully wrote to %s', filename)
        return True
Check for the latest version of OK and update accordingly .
24,636
def run ( self ) : self . console . load ( self . lines , setup = self . setup , teardown = self . teardown ) return self . console . interpret ( )
Implements the GradedTestCase interface .
24,637
def unlock ( self , unique_id_prefix , case_id , interact ) : print ( self . setup . strip ( ) ) prompt_num = 0 current_prompt = [ ] try : for line in self . lines : if isinstance ( line , str ) and line : print ( line ) current_prompt . append ( line ) elif isinstance ( line , CodeAnswer ) : prompt_num += 1 if not line . locked : print ( '\n' . join ( line . output ) ) continue unique_id = self . _construct_unique_id ( unique_id_prefix , self . lines ) line . output = interact ( unique_id , case_id + ' > Prompt {}' . format ( prompt_num ) , '\n' . join ( current_prompt ) , line . output , line . choices ) line . locked = False current_prompt = [ ] self . locked = False finally : self . _sync_code ( )
Unlocks the CodeCase .
24,638
def split_code ( cls , code , PS1 , PS2 ) : processed_lines = [ ] for line in textwrap . dedent ( code ) . splitlines ( ) : if not line or line . startswith ( PS1 ) or line . startswith ( PS2 ) : processed_lines . append ( line ) continue assert len ( processed_lines ) > 0 , 'code improperly formatted: {}' . format ( code ) if not isinstance ( processed_lines [ - 1 ] , CodeAnswer ) : processed_lines . append ( CodeAnswer ( ) ) processed_lines [ - 1 ] . update ( line ) return processed_lines
Splits the given string of code based on the provided PS1 and PS2 symbols .
24,639
def _sync_code ( self ) : new_code = [ ] for line in self . lines : if isinstance ( line , CodeAnswer ) : new_code . append ( line . dump ( ) ) else : new_code . append ( line ) self . code = '\n' . join ( new_code )
Syncs the current state of self . lines with self . code the serializable string representing the set of code .
24,640
def _construct_unique_id ( self , id_prefix , lines ) : text = [ ] for line in lines : if isinstance ( line , str ) : text . append ( line ) elif isinstance ( line , CodeAnswer ) : text . append ( line . dump ( ) ) return id_prefix + '\n' + '\n' . join ( text )
Constructs a unique ID for a particular prompt in this case based on the id_prefix and the lines in the prompt .
24,641
def interpret ( self ) : if not self . _interpret_lines ( self . _setup ) : return False success = self . _interpret_lines ( self . _code , compare_all = True ) success &= self . _interpret_lines ( self . _teardown ) return success
Interprets the console on the loaded code .
24,642
def _interpret_lines ( self , lines , compare_all = False ) : current = [ ] for line in lines + [ '' ] : if isinstance ( line , str ) : if current and ( line . startswith ( self . PS1 ) or not line ) : try : if compare_all : self . _compare ( CodeAnswer ( ) , '\n' . join ( current ) ) else : self . evaluate ( '\n' . join ( current ) ) except ConsoleException : return False current = [ ] if line : print ( line ) line = self . _strip_prompt ( line ) current . append ( line ) elif isinstance ( line , CodeAnswer ) : assert len ( current ) > 0 , 'Answer without a prompt' try : self . _compare ( line , '\n' . join ( current ) ) except ConsoleException : return False current = [ ] return True
Interprets the set of lines .
24,643
def dump ( self ) : result = list ( self . output_lines ( ) ) if self . locked : result . append ( '# locked' ) if self . choices : for choice in self . choices : result . append ( '# choice: ' + choice ) if self . explanation : result . append ( '# explanation: ' + self . explanation ) return '\n' . join ( result )
Serialize a test case to a string .
24,644
def output_lines ( self ) : if self . exception : return [ self . EXCEPTION_HEADERS [ 0 ] , ' ...' ] + self . exception_detail else : return self . output
Return a sequence of lines suitable for printing or comparing answers .
24,645
def prettyjson ( json , indentation = ' ' ) : if isinstance ( json , int ) or isinstance ( json , float ) : return str ( json ) elif isinstance ( json , str ) : if '\n' in json : return 'r' return repr ( json ) elif isinstance ( json , list ) : lst = [ indent ( prettyjson ( el , indentation ) , indentation ) for el in json ] return '[\n' + ',\n' . join ( lst ) + '\n]' elif isinstance ( json , dict ) : pairs = [ ] for k , v in sorted ( json . items ( ) ) : k = prettyjson ( k , indentation ) v = prettyjson ( v , indentation ) pairs . append ( indent ( k + ': ' + v , indentation ) ) return '{\n' + ',\n' . join ( pairs ) + '\n}' else : raise exceptions . SerializeException ( 'Invalid json type: {}' . format ( json ) )
Formats a Python - object into a string in a JSON like way but uses triple quotes for multiline strings .
24,646
def validate_contents ( file_contents ) : for name , contents in file_contents . items ( ) : if os . path . splitext ( name ) [ 1 ] != '.ipynb' : continue if not contents : return False try : json_object = json . loads ( contents ) except ValueError : return False return True
Ensures that all ipynb files in FILE_CONTENTS are valid JSON files .
24,647
def wait_for_save ( filename , timeout = 5 ) : modification_time = os . path . getmtime ( filename ) start_time = time . time ( ) while time . time ( ) < start_time + timeout : if ( os . path . getmtime ( filename ) > modification_time and os . path . getsize ( filename ) > 0 ) : return True time . sleep ( 0.2 ) return False
Waits for FILENAME to update waiting up to TIMEOUT seconds . Returns True if a save was detected and False otherwise .
24,648
def score ( self , env = None , score_out = None ) : messages = { } self . assignment . set_args ( score = True , score_out = score_out , ) if env is None : import __main__ env = __main__ . __dict__ self . run ( 'scoring' , messages , env = env ) return messages [ 'scoring' ]
Run the scoring protocol .
24,649
def save_notebook ( self ) : try : from IPython . display import display , Javascript except ImportError : log . warning ( "Could not import IPython Display Function" ) print ( "Make sure to save your notebook before sending it to OK!" ) return if self . mode == "jupyter" : display ( Javascript ( 'IPython.notebook.save_checkpoint();' ) ) display ( Javascript ( 'IPython.notebook.save_notebook();' ) ) elif self . mode == "jupyterlab" : display ( Javascript ( 'document.querySelector(\'[data-command="docmanager:save"]\').click();' ) ) print ( 'Saving notebook...' , end = ' ' ) ipynbs = [ path for path in self . assignment . src if os . path . splitext ( path ) [ 1 ] == '.ipynb' ] if ipynbs : if wait_for_save ( ipynbs [ 0 ] ) : print ( "Saved '{}'." . format ( ipynbs [ 0 ] ) ) else : log . warning ( "Timed out waiting for IPython save" ) print ( "Could not automatically save \'{}\'" . format ( ipynbs [ 0 ] ) ) print ( "Make sure your notebook" " is correctly named and saved before submitting to OK!" . format ( ipynbs [ 0 ] ) ) return False else : print ( "No valid file sources found" ) return True
Saves the current notebook by injecting JavaScript to save to . ipynb file .
24,650
def main ( ) : args = parse_input ( ) args . lock = True args . question = [ ] args . all = False args . timeout = 0 args . verbose = False args . interactive = False try : assign = assignment . load_assignment ( args . config , args ) msgs = messages . Messages ( ) lock . protocol ( args , assign ) . run ( msgs ) except ( ex . LoadingException , ex . SerializeException ) as e : log . warning ( 'Assignment could not instantiate' , exc_info = True ) print ( 'Error: ' + str ( e ) . strip ( ) ) exit ( 1 ) except ( KeyboardInterrupt , EOFError ) : log . info ( 'Quitting...' ) else : assign . dump_tests ( )
Run the LockingProtocol .
24,651
def write_tree ( zipf , src_directory , dst_directory ) : if not os . path . exists ( src_directory ) : abort ( 'Tree ' + src_directory + ' does not exist.' ) for root , _ , files in os . walk ( src_directory ) : for filename in files : if not filename . endswith ( ( '.py' , '.pem' ) ) : continue fullname = os . path . join ( root , filename ) arcname = fullname . replace ( src_directory , dst_directory ) zipf . write ( fullname , arcname = arcname )
Write all . py files in a source directory to a destination directory inside a zip archive .
24,652
def new_log ( self ) : log_id = self . _num_logs self . _logs [ log_id ] = [ ] self . _num_logs += 1 return log_id
Registers a new log so that calls to write will append to the log .
24,653
def display_breakdown(scores, outfile=None):
    """Write the point breakdown and total score.

    Args:
        scores: mapping of test name -> (score, max_score).
        outfile: path of a file to write to, or None for stdout.

    Returns:
        {'Total': total} where total is the sum of all scores.
    """
    total = 0
    out = open(outfile, 'w') if outfile else sys.stdout
    try:
        format.print_line('-')
        print('Point breakdown', file=out)
        for name, (score, max_score) in scores.items():
            print(' {}: {}/{}'.format(name, score, max_score), file=out)
            total += score
        print(file=out)
        print('Score:', file=out)
        print(' Total: {}'.format(total), file=out)
    finally:
        # Close only handles we opened; the original leaked the file
        # descriptor whenever an outfile path was supplied.
        if outfile:
            out.close()
    return {'Total': total}
Writes the point breakdown to outfile given a dictionary of scores . outfile should be a string . If outfile is None write to stdout .
24,654
def run(self, messages, env=None):
    """Score each specified test and store the breakdown in messages.

    Does nothing unless --score was requested and we are not in testing
    mode. Results are written to messages['scoring'] and also printed
    via display_breakdown (to self.args.score_out when set).
    """
    if not self.args.score or self.args.testing:
        return
    format.print_line('~')
    print('Scoring tests')
    print()
    raw_scores = OrderedDict()
    for test in self.assignment.specified_tests:
        assert isinstance(test, sources_models.Test), 'ScoringProtocol received invalid test'
        log.info('Scoring test {}'.format(test.name))
        # Only OkTest.score accepts an environment; other test types
        # are scored without one.
        if type(test) == ok_test_models.OkTest:
            score = test.score(env=env)
        else:
            score = test.score()
        raw_scores[test.name] = (score, test.points)
    messages['scoring'] = display_breakdown(raw_scores, self.args.score_out)
    print()
Score tests and print results . Tests are taken from self . assignment . specified_tests . A score breakdown by question and the total score are both printed .
24,655
def lock(key, text):
    """Lock (HMAC) the given text with the given key and return the hex digest.

    The digest algorithm is pinned to MD5 explicitly: hmac.new() without a
    digestmod historically defaulted to MD5, but omitting it raises a
    TypeError on Python 3.8+.
    """
    return hmac.new(key.encode('utf-8'), text.encode('utf-8'),
                    digestmod='md5').hexdigest()
Locks the given text using the given key and returns the result
24,656
def run(self, messages):
    """Drive the interactive unlocking session for each specified test.

    Does nothing unless --unlock was requested. Ctrl-C/EOF aborts the
    whole session; per-question analytics collected during interaction
    are stored in messages['unlock'].
    """
    if not self.args.unlock:
        return
    format.print_line('~')
    print('Unlocking tests')
    print()
    print('At each "{}", type what you would expect the output to be.'.format(self.PROMPT))
    print('Type {} to quit'.format(self.EXIT_INPUTS[0]))
    print()
    for test in self.assignment.specified_tests:
        log.info('Unlocking test {}'.format(test.name))
        self.current_test = test.name
        # Reset the guidance prompt probability for every test.
        self.guidance_util.prompt_probability = guidance.DEFAULT_PROMPT_PROBABILITY
        try:
            test.unlock(self.interact)
        except (KeyboardInterrupt, EOFError):
            # A second interrupt while printing the exit banner is ignored.
            try:
                print()
                print('-- Exiting unlocker --')
            except (KeyboardInterrupt, EOFError):
                pass
            print()
            break
    messages['unlock'] = self.analytics
Responsible for unlocking each test .
24,657
def interact(self, unique_id, case_id, question_prompt, answer, choices=None, randomize=True):
    """Prompt the student for the locked answer until they get it right.

    Args:
        unique_id: stable id for this prompt (used by guidance/analytics).
        case_id: id of the enclosing test case.
        question_prompt: question text, recorded in analytics.
        answer: list of locked answer lines the student must reproduce.
        choices: optional multiple-choice options; when given, answer must
            be a single line and the student types a choice number.
        randomize: shuffle the choices before displaying them.

    Returns:
        The list of verified (normalized) answer lines.

    Raises:
        EOFError: if the student types one of EXIT_INPUTS.
    """
    if randomize and choices:
        choices = random.sample(choices, len(choices))
    correct = False
    while not correct:
        if choices:
            assert len(answer) == 1, 'Choices must have 1 line of output'
            choice_map = self._display_choices(choices)
        question_timestamp = datetime.now()
        input_lines = []
        for line_number, line in enumerate(answer):
            # Single-line answers get the bare prompt; multi-line answers
            # number each line.
            if len(answer) == 1:
                prompt = self.PROMPT
            else:
                prompt = '(line {}){}'.format(line_number + 1, self.PROMPT)
            student_input = format.normalize(self._input(prompt))
            self._add_history(student_input)
            if student_input in self.EXIT_INPUTS:
                raise EOFError
            # Map a typed choice number back to the choice text.
            if choices and student_input in choice_map:
                student_input = choice_map[student_input]
            correct_answer = self._verify_student_input(student_input, line)
            if correct_answer:
                input_lines.append(correct_answer)
            else:
                # A wrong line records the raw input and restarts the
                # whole answer from the top.
                input_lines.append(student_input)
                break
        else:
            correct = True
        tg_id = -1
        misU_count_dict = {}
        rationale = "Unknown - Default Value"
        if not correct:
            guidance_data = self.guidance_util.show_guidance_msg(unique_id, input_lines, self.hash_key)
            misU_count_dict, tg_id, printed_msg, rationale = guidance_data
        else:
            rationale = self.guidance_util.prompt_with_prob()
            print("-- OK! --")
            printed_msg = ["-- OK! --"]
        # One analytics record per attempt, successful or not.
        self.analytics.append({
            'id': unique_id,
            'case_id': case_id,
            'question timestamp': self.unix_time(question_timestamp),
            'answer timestamp': self.unix_time(datetime.now()),
            'prompt': question_prompt,
            'answer': input_lines,
            'correct': correct,
            'treatment group id': tg_id,
            'rationale': rationale,
            'misU count': misU_count_dict,
            'printed msg': printed_msg
        })
        print()
    return input_lines
Reads student input for unlocking tests until the student answers correctly .
24,658
def _verify_student_input ( self , student_input , locked ) : guesses = [ student_input ] try : guesses . append ( repr ( ast . literal_eval ( student_input ) ) ) except Exception : pass if student_input . title ( ) in self . SPECIAL_INPUTS : guesses . append ( student_input . title ( ) ) for guess in guesses : if self . _verify ( guess , locked ) : return guess
If the student's answer is correct, returns the normalized answer. Otherwise, returns None.
24,659
def _display_choices(self, choices):
    """Print numbered choices and return {number-string: normalized choice}."""
    print("Choose the number of the correct choice:")
    mapping = {}
    for index, choice in enumerate(choices):
        key = str(index)
        shown = format.indent(choice, ' ' * (len(key) + 2)).strip()
        print('{}) {}'.format(key, shown))
        mapping[key] = format.normalize(choice)
    return mapping
Prints a mapping of numbers to choices and returns the mapping as a dictionary .
24,660
def timed(timeout, fn, args=(), kargs={}):
    """Evaluate fn(*args, **kargs), enforcing a timeout in seconds.

    A timeout of 0 runs the call directly in the current thread. Otherwise
    the call runs in a worker thread; exceptions.Timeout is raised if it
    does not finish in time, and any error raised by fn is re-raised here.
    """
    if timeout == 0:
        return fn(*args, **kargs)
    worker = __ReturningThread(fn, args, kargs)
    worker.start()
    worker.join(timeout)
    if worker.is_alive():
        raise exceptions.Timeout(timeout)
    if worker.error is not None:
        raise worker.error
    return worker.result
For a nonzero timeout evaluates a call expression in a separate thread . If the timeout is 0 the expression is evaluated in the main thread .
24,661
def grade(self, question, env=None, skip_locked_cases=False):
    """Run tests for a particular question and return its grading results.

    Args:
        question: name of the question to grade.
        env: namespace to grade in; defaults to __main__'s globals so
            definitions from the running session are visible.
        skip_locked_cases: propagated to each suite and its console so
            locked cases are skipped instead of prompting.

    Returns:
        The entry for this test from messages['grading'].
    """
    if env is None:
        import __main__
        env = __main__.__dict__
    messages = {}
    tests = self._resolve_specified_tests([question], all_tests=False)
    for test in tests:
        try:
            for suite in test.suites:
                suite.skip_locked_cases = skip_locked_cases
                suite.console.skip_locked_cases = skip_locked_cases
                suite.console.hash_key = self.name
        except AttributeError:
            # Not every test type exposes suites/consoles.
            pass
    test_name = tests[0].name
    # Module-level grade() fills messages['grading'].
    grade(tests, messages, env)
    return messages['grading'][test_name]
Runs tests for a particular question . The setup and teardown will always be executed .
24,662
def run(self, messages):
    """Collect analytics about this run into messages['analytics'], then log it."""
    stats = {
        'time': str(datetime.now()),
        'time-utc': str(datetime.utcnow()),
        'unlock': self.args.unlock,
    }
    if self.args.question:
        stats['question'] = [t.name for t in self.assignment.specified_tests]
        stats['requested-questions'] = self.args.question
    if self.args.suite:
        stats['requested-suite'] = self.args.suite
    if self.args.case:
        stats['requested-case'] = self.args.case
    messages['analytics'] = stats
    self.log_run(messages)
Returns some analytics about this autograder run .
24,663
def log_run(self, messages):
    """Record this autograder run (attempts/solved per question) locally.

    Updates the pickled history in self.ANALYTICS_FILE and mirrors the
    updated history into messages['analytics']['history'].
    """
    history = self.read_history()
    history['all_attempts'] += 1
    questions = messages['analytics'].get('question', [])
    grading = messages.get('grading')
    if not questions and grading:
        # No explicit question requested: attribute the run to the first
        # failing test, and refresh 'solved' flags from grading results.
        failed = first_failed_test(self.assignment.specified_tests, grading)
        logging.info('First failed test: {}'.format(failed))
        if failed:
            questions = [failed]
        for saved_q, details in history['questions'].items():
            finished = details['solved']
            if not finished and saved_q in grading:
                scoring = grading[saved_q]
                details['solved'] = is_correct(scoring)
    history['question'] = questions
    for question in questions:
        detail = history['questions']
        if grading and question in grading:
            scoring = is_correct(grading[question])
        else:
            scoring = False
        if question in history['questions']:
            q_info = detail[question]
            if grading and question in grading:
                if q_info['solved'] != True:
                    q_info['solved'] = scoring
                else:
                    # Already solved: don't count further attempts.
                    continue
            q_info['attempts'] += 1
        else:
            detail[question] = {'attempts': 1, 'solved': scoring}
        # BUG FIX: the original passed history['questions'] (a dict) as the
        # %d attempt count; log the actual number of attempts instead.
        logging.info('Attempt %d for Question %s : %r',
                     detail[question]['attempts'], question, scoring)
    with open(self.ANALYTICS_FILE, 'wb') as f:
        log.info('Saving history to %s', self.ANALYTICS_FILE)
        pickle.dump(history, f)
        os.fsync(f)
    messages['analytics']['history'] = history
Record this run of the autograder to a local file .
24,664
def check_ssl():
    """Return the ssl module, exiting the program if it cannot be imported."""
    try:
        import ssl
    except ImportError:
        # Only a missing module is expected here; the original bare
        # except also swallowed unrelated errors (e.g. KeyboardInterrupt).
        log.warning('Error importing SSL module', stack_info=True)
        print(SSL_ERROR_MESSAGE)
        sys.exit(1)
    else:
        log.info('SSL module is available')
        return ssl
Attempts to import SSL or raises an exception .
24,665
def run(self, messages, env=None):
    """Run the gradeable tests and record results via grade().

    Skipped entirely in scoring, unlocking, or testing mode. Honors
    --suite / --case restrictions by setting run_only on the chosen test
    and suite; exits with an error if the suite number is out of range.
    """
    if self.args.score or self.args.unlock or self.args.testing:
        return
    tests = self.assignment.specified_tests
    for test in tests:
        if self.args.suite and hasattr(test, 'suites'):
            test.run_only = int(self.args.suite)
            try:
                # --suite is 1-based on the command line.
                suite = test.suites[int(self.args.suite) - 1]
            except IndexError as e:
                sys.exit(('python3 ok: error: '
                          'Suite number must be valid.({})'.format(len(test.suites))))
            if self.args.case:
                suite.run_only = [int(c) for c in self.args.case]
    grade(tests, messages, env, verbose=self.args.verbose)
Run gradeable tests and print results and return analytics .
24,666
def coerce(self, value):
    """Return value unchanged after validating it.

    Subclasses override this method to perform real type coercion.

    Raises:
        ex.SerializeException: if value is not valid for this type.
    """
    if self.is_valid(value):
        return value
    raise ex.SerializeException('{} is not a valid value for '
                                'type {}'.format(value, self.__class__.__name__))
Subclasses should override this method for type coercion .
24,667
def to_json(self, value):
    """Return value as-is for JSON encoding after validating it.

    Subclasses override this method to produce a JSON-compatible form.

    Raises:
        ex.SerializeException: if value is not valid for this type.
    """
    if self.is_valid(value):
        return value
    raise ex.SerializeException('Invalid value: {}'.format(value))
Subclasses should override this method for JSON encoding .
24,668
def parse_content_encoding(self, response_headers, response_data):
    """Decode response_data according to its Content-Encoding header.

    Supports gzip and deflate; any other encoding raises errors.TestError.
    (Python 2: uses the StringIO module.)
    """
    encoding = response_headers['content-encoding']
    if encoding == 'gzip':
        wrapped = StringIO.StringIO(response_data)
        response_data = gzip.GzipFile(fileobj=wrapped).read()
    elif encoding == 'deflate':
        inflated = StringIO.StringIO(zlib.decompress(response_data))
        response_data = inflated.read()
    else:
        raise errors.TestError(
            'Received unknown Content-Encoding',
            {
                'content-encoding': str(encoding),
                'function': 'http.HttpResponse.parse_content_encoding'
            })
    return response_data
Parses a response that contains Content - Encoding to retrieve response_data
24,669
def process_response(self):
    """Parse the raw HTTP response into status line, headers, cookies, data.

    Populates self.status, self.status_msg, self.version,
    self.response_line, self.headers and self.data. Set-Cookie headers are
    validated and appended to the shared cookiejar; Content-Encoding'd
    bodies are decoded before being stored.

    Raises:
        errors.TestError: on malformed headers, bad cookies, or a status
            line that is not three space-separated fields.
    """
    split_response = self.response.split(self.CRLF)
    response_line = split_response[0]
    response_headers = {}
    response_data = None
    data_line = None
    # Headers run until the first blank line; the body starts just after.
    for line_num in range(1, len(split_response[1:])):
        if split_response[line_num] == '':
            data_line = line_num + 1
            break
        else:
            header = split_response[line_num].split(':', 1)
            if len(header) != 2:
                raise errors.TestError(
                    'Did not receive a response with valid headers',
                    {
                        'header_rcvd': str(header),
                        'function': 'http.HttpResponse.process_response'
                    })
            # Header names are normalized to lowercase.
            response_headers[header[0].lower()] = header[1].lstrip()
    if 'set-cookie' in response_headers.keys():
        try:
            cookie = Cookie.SimpleCookie()
            cookie.load(response_headers['set-cookie'])
        except Cookie.CookieError as err:
            raise errors.TestError(
                'Error processing the cookie content into a SimpleCookie',
                {
                    'msg': str(err),
                    'set_cookie': str(response_headers['set-cookie']),
                    'function': 'http.HttpResponse.process_response'
                })
        if self.check_for_cookie(cookie) is False:
            raise errors.TestError(
                'An invalid cookie was specified',
                {
                    'set_cookie': str(response_headers['set-cookie']),
                    'function': 'http.HttpResponse.process_response'
                })
        else:
            self.cookiejar.append((cookie, self.dest_addr))
    if data_line is not None and data_line < len(split_response):
        response_data = self.CRLF.join(split_response[data_line:])
        if 'content-encoding' in response_headers.keys():
            response_data = self.parse_content_encoding(response_headers, response_data)
    if len(response_line.split(' ', 2)) != 3:
        raise errors.TestError(
            'The HTTP response line returned the wrong args',
            {
                'response_line': str(response_line),
                'function': 'http.HttpResponse.process_response'
            })
    try:
        self.status = int(response_line.split(' ', 2)[1])
    except ValueError:
        raise errors.TestError(
            'The status num of the response line isn\'t convertable',
            {
                'msg': 'This may be an HTTP 1.0 \'Simple Req\\Res\', it \
doesn\'t have HTTP headers and FTW will not parse these',
                'response_line': str(response_line),
                'function': 'http.HttpResponse.process_response'
            })
    self.status_msg = response_line.split(' ', 2)[2]
    self.version = response_line.split(' ', 2)[0]
    self.response_line = response_line
    self.headers = response_headers
    self.data = response_data
Parses an HTTP response after an HTTP request is sent
24,670
def send_request(self, http_request):
    """Send an HTTP request over a freshly built socket and read the response.

    Side effects: stores http_request on self, builds self.sock and
    self.request, and populates the response via get_response().

    Raises:
        errors.TestError: if writing to the socket fails.
    """
    self.request_object = http_request
    self.build_socket()
    self.build_request()
    try:
        self.sock.send(self.request)
    except socket.error as err:
        raise errors.TestError(
            'We were unable to send the request to the socket',
            {
                'msg': err,
                'function': 'http.HttpUA.send_request'
            })
    finally:
        # NOTE(review): get_response() runs even when send() failed, which
        # can mask the TestError above with a read error — confirm intended.
        self.get_response()
Send a request and get response
24,671
def build_socket(self):
    """Create self.sock (TCP), wrapping it with SSL for https requests.

    Raises:
        errors.TestError: if connecting to the server fails.
    """
    try:
        self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        self.sock.settimeout(self.SOCKET_TIMEOUT)
        self.sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        if self.request_object.protocol == 'https':
            # NOTE(review): ssl.wrap_socket is deprecated (removed in
            # Python 3.12) and performs no certificate validation here.
            self.sock = ssl.wrap_socket(self.sock, ciphers=self.CIPHERS)
        self.sock.connect((self.request_object.dest_addr,
                           self.request_object.port))
    except socket.error as msg:
        raise errors.TestError(
            'Failed to connect to server',
            {
                'host': self.request_object.dest_addr,
                'port': self.request_object.port,
                'proto': self.request_object.protocol,
                'message': msg,
                'function': 'http.HttpUA.build_socket'
            })
Generate either an HTTPS or HTTP socket
24,672
def find_cookie(self):
    """Return the list of stored cookies applicable to the request's domain.

    A cookie with an empty domain attribute matches only the exact host
    that set it; otherwise the cookie's domain must occur within the
    origin host name (case-insensitive substring match).
    """
    return_cookies = []
    origin_domain = self.request_object.dest_addr
    for cookie, source_host in self.cookiejar:
        for morsel in cookie.values():
            cover_domain = morsel['domain']
            if cover_domain == '':
                # Host-only cookie: valid only for the host that set it.
                if origin_domain == source_host:
                    return_cookies.append(cookie)
            else:
                # BUG FIX: str.find returns -1 (never False) on a miss, so
                # the original `is not False` check accepted every cookie.
                if origin_domain.lower().find(cover_domain.lower()) != -1:
                    return_cookies.append(cookie)
    return return_cookies
Find a list of all cookies for a given domain
24,673
def get_response(self):
    """Read the full response off the (non-blocking) socket, then close it.

    Data is accumulated until HTTP_TIMEOUT seconds of silence once data
    has arrived, or twice that with no data at all. An empty read raises
    TestError, as do socket errors other than the would-block conditions.
    """
    self.sock.setblocking(0)
    our_data = []
    begin = time.time()
    while True:
        # Stop after HTTP_TIMEOUT of silence once data has arrived, or
        # after 2x HTTP_TIMEOUT if nothing ever arrived.
        if our_data and time.time() - begin > self.HTTP_TIMEOUT:
            break
        elif time.time() - begin > self.HTTP_TIMEOUT * 2:
            break
        try:
            data = self.sock.recv(self.RECEIVE_BYTES)
            if data:
                our_data.append(data)
                begin = time.time()
            else:
                time.sleep(self.HTTP_TIMEOUT)
        except socket.error as err:
            # EAGAIN / WSAEWOULDBLOCK simply mean "no data yet".
            if err.errno == errno.EAGAIN:
                pass
            elif sys.platform == 'win32' and err.errno == errno.WSAEWOULDBLOCK:
                pass
            elif (self.request_object.protocol == 'https' and
                    err[0] == ssl.SSL_ERROR_WANT_READ):
                # NOTE(review): err[0] indexing is Python 2 only.
                continue
            else:
                raise errors.TestError(
                    'Failed to connect to server',
                    {
                        'host': self.request_object.dest_addr,
                        'port': self.request_object.port,
                        'proto': self.request_object.protocol,
                        'message': err,
                        'function': 'http.HttpUA.get_response'
                    })
    if ''.join(our_data) == '':
        raise errors.TestError(
            'No response from server. Request likely timed out.',
            {
                'host': self.request_object.dest_addr,
                'port': self.request_object.port,
                'proto': self.request_object.protocol,
                'msg': 'Please send the request and check Wireshark',
                'function': 'http.HttpUA.get_response'
            })
    self.response_object = HttpResponse(''.join(our_data), self)
    try:
        self.sock.shutdown(1)
        self.sock.close()
    except socket.error as err:
        raise errors.TestError(
            'We were unable to close the socket as expected.',
            {
                'msg': err,
                'function': 'http.HttpUA.get_response'
            })
Get the response from the socket
24,674
def process_regex(self, key):
    """Compile self.output_dict[key] as a regex and return it, or None if absent."""
    if key in self.output_dict:
        return re.compile(self.output_dict[key])
    return None
Extract the value of key from dictionary if available and process it as a python regex
24,675
def instantiate_database(sqlite_file='ftwj.sqlite'):
    """Create the journal database (table 'ftw') used to record FTW runs."""
    columns = (
        ('rule_id', 'INTEGER'),
        ('test_id', 'STRING'),
        ('time_start', 'TEXT'),
        ('time_end', 'TEXT'),
        ('response_blob', 'TEXT'),
        ('status_code', 'INTEGER'),
        ('stage', 'INTEGER'),
    )
    column_sql = ','.join('{} {}'.format(name, ctype) for name, ctype in columns)
    conn = sqlite3.connect(sqlite_file)
    cur = conn.cursor()
    cur.execute('CREATE TABLE ftw({})'.format(column_sql))
    conn.commit()
    conn.close()
Create journal database for FTW runs
24,676
def get_rulesets(ruledir, recurse):
    """Load Ruleset objects from a YAML file or a directory of YAML files.

    Args:
        ruledir: path to a .yaml file or a directory containing them.
        recurse: when ruledir is a directory, also walk subdirectories.

    Returns:
        list of ruleset.Ruleset objects, one per YAML file loaded.
    """
    # Default to an empty list: a path that is neither a file nor a
    # directory previously left yaml_files unbound (NameError).
    yaml_files = []
    if os.path.isdir(ruledir):
        if recurse:
            yaml_files = [y for x in os.walk(ruledir)
                          for y in glob(os.path.join(x[0], '*.yaml'))]
        else:
            yaml_files = get_files(ruledir, 'yaml')
    elif os.path.isfile(ruledir):
        yaml_files = [ruledir]
    extracted_files = extract_yaml(yaml_files)
    return [ruleset.Ruleset(extracted_yaml) for extracted_yaml in extracted_files]
List of ruleset objects extracted from the yaml directory
24,677
def extract_yaml(yaml_files):
    """Parse each YAML file in yaml_files and return the loaded documents.

    Re-raises any read or parse error after printing which file failed.
    """
    documents = []
    for path in yaml_files:
        try:
            with open(path, 'r') as handle:
                documents.append(yaml.safe_load(handle))
        except IOError as e:
            print('Error reading file', path)
            raise e
        except yaml.YAMLError as e:
            print('Error parsing file', path)
            raise e
        except Exception as e:
            print('General error')
            raise e
    return documents
Take a list of yaml_files and load them to return back to the testing program
24,678
def deserialize(cls, raw_bytes):
    """Parse raw_bytes from offset 0 and return the resulting instance.

    The remaining-offset half of cls.parse's return value is discarded.
    """
    instance, _remainder = cls.parse(raw_bytes, offset=0)
    return instance
Deserializes the given raw bytes into an instance .
24,679
def render(self, parts=None):
    """Render this message's parts into a (struct format, values) tuple.

    Primitive parts are instantiated from the matching attribute value;
    composite parts are taken from the attribute and rendered recursively.
    """
    if not parts:
        parts = self.parts
    fmt_pieces = []
    values = []
    for name, part_class in parts:
        attr = getattr(self, name, None)
        part = part_class(attr) if issubclass(part_class, Primitive) else attr
        piece_fmt, piece_data = part.render()
        fmt_pieces.extend(piece_fmt)
        values.extend(piece_data)
    return "".join(fmt_pieces), values
Returns a two - element tuple with the struct format and values .
24,680
def render(self):
    """Return (struct format, [size, value]) for this length-prefixed field.

    A value of None renders as just the size slot holding -1.
    """
    prefix = self.size_primitive.fmt
    if self.value is None:
        return prefix, [-1]
    rendered = self.render_value(self.value)
    length = len(rendered)
    return "%s%ds" % (prefix, length), [length, rendered]
Returns the struct format and list of the size and value .
24,681
def of(cls, part_class):
    """Return a new Vector subclass whose item_class is part_class."""
    specialized_name = "VectorOf%s" % part_class.__name__
    specialized = type(specialized_name, cls.__bases__, dict(cls.__dict__))
    specialized.item_class = part_class
    return specialized
Creates a new class with the item_class attribute properly set .
24,682
def render(self):
    """Render the vector as (format, data): an Int count, then each item."""
    items = self.value
    if items is None:
        items = []
    fmt_pieces = [Int.fmt]
    data = [len(items)]
    for raw in items:
        part = self.item_class(raw) if issubclass(self.item_class, Primitive) else raw
        item_fmt, item_data = part.render()
        fmt_pieces.extend(item_fmt)
        data.extend(item_data)
    return "".join(fmt_pieces), data
Creates a composite struct format and the data to render with it .
24,683
def parse(cls, buff, offset):
    """Parse an Int count followed by `count` item_class values from buff.

    Returns (list of parsed values, new offset past the consumed bytes).
    """
    count, offset = Int.parse(buff, offset)
    parsed = []
    for _ in range(count):
        item, offset = cls.item_class.parse(buff, offset)
        parsed.append(item)
    return parsed, offset
Parses a raw buffer at offset and returns the resulting array value .
24,684
def round_robin(members, items):
    """Allocate items to members one at a time, cycling through members.

    Returns a defaultdict(set) mapping each member to its allocated items.
    """
    allocation = collections.defaultdict(set)
    assignments = zip(itertools.cycle(members), items)
    for member, item in assignments:
        allocation[member].add(item)
    return allocation
Default allocator with a round robin approach .
24,685
def xfrange(start, stop, step=1, maxSize=-1):
    """Return a generator yielding frames from start to stop *inclusive*.

    The step direction is forced to match the direction of start -> stop,
    and stop is widened by one frame so the stop value itself is produced.

    Raises:
        exceptions.MaxSizeException: if maxSize >= 0 and the range would
            contain more than maxSize frames.
    """
    # Normalize direction, widening stop by one so the range is inclusive.
    if start <= stop:
        stop, step = stop + 1, abs(step)
    else:
        stop, step = stop - 1, -abs(step)
    if maxSize >= 0:
        size = lenRange(start, stop, step)
        if size > maxSize:
            raise exceptions.MaxSizeException(
                "Size %d > %s (MAX_FRAME_SIZE)" % (size, maxSize))
    # NOTE(review): xrange is Python 2 only; this module targets py2.
    return (f for f in xrange(start, stop, step))
Returns a generator that yields the frames from start to stop inclusive . In other words it adds or subtracts a frame as necessary to return the stop value as well if the stepped range would touch that value .
24,686
def unique(seen, *iterables):
    """Yield items from iterables in order, skipping any already in seen.

    Mutates `seen` as the returned generator is consumed, so it can be
    shared across successive calls to dedupe globally.
    """
    mark = seen.add
    # set.add returns None, so `not mark(i)` is always True while also
    # recording i as seen — each unseen item is yielded exactly once.
    return (i for i in chain(*iterables) if i not in seen and not mark(i))
Get the unique items in iterables while preserving order . Note that this mutates the seen set provided only when the returned generator is used .
24,687
def copy(self):
    """Return a deep copy of this sequence (its FrameSet is copied too)."""
    duplicate = self.__class__.__new__(self.__class__)
    duplicate.__dict__ = self.__dict__.copy()
    duplicate._frameSet = None
    if self._frameSet is not None:
        duplicate._frameSet = self._frameSet.copy()
    return duplicate
Create a deep copy of this sequence
24,688
def format(self, template="{basename}{range}{padding}{extension}"):
    """Return this file sequence formatted according to template.

    Supported fields: basename, extension, start, end, length, padding,
    range, inverted, dirname. The inverted frame range is only computed
    when the template actually references {inverted}.
    """
    if "{inverted}" in template:
        inverted = self.invertedFrameRange() or ""
    else:
        inverted = ""
    return template.format(
        basename=self.basename(),
        extension=self.extension(),
        start=self.start(),
        end=self.end(),
        length=len(self),
        padding=self.padding(),
        range=self.frameRange() or "",
        inverted=inverted,
        dirname=self.dirname())
Return the file sequence as a formatted string according to the given template .
24,689
def setDirname(self, dirname):
    """Set the sequence's directory, ensuring it ends with a path separator."""
    separator = utils._getPathSep(dirname)
    if not dirname.endswith(separator):
        dirname += separator
    self._dir = utils.asString(dirname)
Set a new directory name for the sequence .
24,690
def setExtension(self, ext):
    """Set a new file extension for the sequence, ensuring a leading dot."""
    dotted = ext if ext[0] == "." else "." + ext
    self._ext = utils.asString(dotted)
Set a new file extension for the sequence .
24,691
def frame(self, frame):
    """Return the path to the given frame of this sequence.

    Integer-like values are zero-padded to the sequence's zfill width;
    anything non-numeric (e.g. '#') is passed through unchanged. With a
    zfill of 0 the frame component is omitted entirely.
    """
    try:
        padded = str(int(frame)).zfill(self._zfill)
    except ValueError:
        padded = frame
    if self._zfill == 0:
        padded = ""
    return "".join((self._dir, self._base, padded, self._ext))
Return a path go the given frame in the sequence . Integer or string digits are treated as a frame number and padding is applied all other values are passed though .
24,692
def yield_sequences_in_list(paths):
    """Yield the discrete FileSequences found among paths.

    Paths are grouped by (dirname, basename, extension); the frame
    numbers collected for each group become the sequence's FrameSet.
    No disk access is performed — existence is assumed.
    (Python 2: uses ifilter/imap/iteritems.)
    """
    seqs = {}
    _check = DISK_RE.match
    # Group every parseable path by its (dir, base, ext) key, collecting
    # the frame strings seen for that key.
    for match in ifilter(None, imap(_check, imap(utils.asString, paths))):
        dirname, basename, frame, ext = match.groups()
        if not basename and not ext:
            continue
        key = (dirname, basename, ext)
        seqs.setdefault(key, set())
        if frame:
            seqs[key].add(frame)
    for (dirname, basename, ext), frames in seqs.iteritems():
        # Build the FileSequence without running __init__ on a raw path.
        seq = FileSequence.__new__(FileSequence)
        seq._dir = dirname or ''
        seq._base = basename or ''
        seq._ext = ext or ''
        if frames:
            seq._frameSet = FrameSet(set(imap(int, frames))) if frames else None
            # Padding is derived from the shortest frame string seen.
            seq._pad = FileSequence.getPaddingChars(min(imap(len, frames)))
        else:
            seq._frameSet = None
            seq._pad = ''
        seq.__init__(str(seq))
        yield seq
Yield the discrete sequences within paths . This does not try to determine if the files actually exist on disk it assumes you already know that .
24,693
def findSequencesOnDisk(cls, pattern, include_hidden=False, strictPadding=False):
    """Return the file sequences found on disk matching pattern.

    pattern may be a directory (all sequences inside it) or a
    dir/name-with-padding pattern, optionally containing glob-style
    *, ? and {a,b} alternation in the basename.

    Args:
        include_hidden: also consider dot-files.
        strictPadding: only accept frames whose padding width matches the
            pattern exactly, and conform the found sequences' padding.

    Raises:
        FileSeqException: if the basename pattern is not a valid regex
            after translation.
    """
    _not_hidden = lambda f: not f.startswith('.')
    _match_pattern = None
    _filter_padding = None
    _join = os.path.join
    seq = None
    dirpath = pattern
    if not os.path.isdir(pattern):
        # Split into directory + file pattern and translate the file
        # pattern (globs and {a,b} groups) into a regex.
        dirpath, filepat = os.path.split(pattern)
        if not os.path.isdir(dirpath):
            return []
        seq = cls(filepat)
        patt = seq.basename().replace('.', r'\.')
        if seq.padding():
            patt += '\d+'
        if seq.extension():
            patt += seq.extension()
        # Rewrite {a,b,c} alternation groups into regex (a|b|c) in place,
        # working backwards so earlier spans stay valid.
        view = bytearray(patt)
        matches = re.finditer(r'{(.*?)(?:,(.*?))*}', patt)
        for match in reversed(list(matches)):
            i, j = match.span()
            view[i:j] = '(%s)' % '|'.join([m.strip() for m in match.groups()])
        view = view.replace('*', '.*')
        view = view.replace('?', '.')
        view += '$'
        try:
            _match_pattern = re.compile(str(view)).match
        except re.error:
            msg = 'Invalid file pattern: {}'.format(filepat)
            raise FileSeqException(msg)
        if seq.padding() and strictPadding:
            _filter_padding = functools.partial(cls._filterByPaddingNum, num=seq.zfill())
    # Only the top level of dirpath is scanned (no recursion).
    ret = next(os.walk(dirpath), None)
    files = ret[-1] if ret else []
    if not include_hidden:
        files = ifilter(_not_hidden, files)
    if _match_pattern:
        files = ifilter(_match_pattern, files)
    if _filter_padding:
        files = _filter_padding(files)
    sep = utils._getPathSep(dirpath)
    if not dirpath.endswith(sep):
        dirpath += sep
    files = (_join(dirpath, f) for f in files)
    files = list(files)
    seqs = list(FileSequence.yield_sequences_in_list(files))
    if _filter_padding and seq:
        # Force each found sequence to report the requested padding.
        pad = cls.conformPadding(seq.padding())
        for s in seqs:
            s.setPadding(pad)
    return seqs
Yield the sequences found in the given directory .
24,694
def findSequenceOnDisk(cls, pattern, strictPadding=False):
    """Search the disk for the single sequence matching pattern.

    A pattern with no frame range and no padding is treated as a literal
    file path. Otherwise the directory is globbed and the first sequence
    whose basename and extension match the pattern is returned.

    Raises:
        FileSeqException: if no matching sequence is found.
    """
    seq = cls(pattern)
    if seq.frameRange() == '' and seq.padding() == '':
        # Bare file path: return it directly if it exists.
        if os.path.isfile(pattern):
            return seq
    patt = seq.format('{dirname}{basename}*{extension}')
    ext = seq.extension()
    basename = seq.basename()
    pad = seq.padding()
    globbed = iglob(patt)
    if pad and strictPadding:
        globbed = cls._filterByPaddingNum(globbed, seq.zfill())
        pad = cls.conformPadding(pad)
    matches = cls.yield_sequences_in_list(globbed)
    for match in matches:
        if match.basename() == basename and match.extension() == ext:
            if pad and strictPadding:
                match.setPadding(pad)
            return match
    msg = 'no sequence found on disk matching {0}'
    raise FileSeqException(msg.format(pattern))
Search for a specific sequence on disk .
24,695
def _filterByPaddingNum(cls, iterable, num):
    """Yield only paths whose frame component matches padding width num.

    Paths without any frame component pass only when num <= 0. Frames
    with an explicit leading zero (or '-0') must match the width exactly;
    unpadded frames only need to be at least num digits wide.
    """
    _check = DISK_RE.match
    for item in iterable:
        matches = _check(item)
        if not matches:
            if num <= 0:
                yield item
            continue
        frame = matches.group(3) or ''
        if not frame:
            if num <= 0:
                yield item
            continue
        if frame[0] == '0' or frame[:2] == '-0':
            # Explicitly zero-padded: width must match exactly.
            if len(frame) == num:
                yield item
            continue
        # Unpadded frame numbers may exceed the padding width.
        if len(frame) >= num:
            yield item
            continue
Yield only path elements from iterable which have a frame padding that matches the given target padding number
24,696
def getPaddingNum(chars):
    """Return the total padding width represented by a padding string.

    Accepts either printf-style syntax ("%04d" -> 4) or a string of
    padding characters whose individual widths are summed via PAD_MAP.

    Raises:
        ValueError: if chars contains an unsupported padding character.
    """
    match = PRINTF_SYNTAX_PADDING_RE.match(chars)
    if match:
        return int(match.group(1))
    try:
        return sum(PAD_MAP[char] for char in chars)
    except KeyError as err:
        # BUG FIX: the original formatted the message with the loop
        # variable `char`, which is out of scope outside the comprehension
        # on Python 3 (NameError); report the character from the KeyError.
        msg = "Detected an unsupported padding character: \"{}\"."
        msg += " Supported padding characters: {} or printf syntax padding"
        msg += " %<int>d"
        raise ValueError(msg.format(err.args[0], str(PAD_MAP.keys())))
Given a supported group of padding characters return the amount of padding .
24,697
def conformPadding(cls, chars):
    """Return chars converted to the canonical PAD_MAP padding characters.

    Input that is empty or already uses PAD_MAP characters is returned
    unchanged.
    """
    if chars and chars[0] not in PAD_MAP:
        return cls.getPaddingChars(cls.getPaddingNum(chars))
    return chars
Ensure alternate input padding formats are conformed to formats defined in PAD_MAP
24,698
def _cast_to_frameset(cls, other):
    """Coerce other to a FrameSet, or NotImplemented if it cannot be.

    Used by comparison operators so Python can fall back to the other
    operand's implementation.
    """
    if isinstance(other, FrameSet):
        return other
    try:
        return FrameSet(other)
    except Exception:
        return NotImplemented
Private method to simplify comparison operations .
24,699
def issubset(self, other):
    """Report whether every item of self is contained in other.

    Returns NotImplemented when other cannot be coerced to a FrameSet.
    """
    coerced = self._cast_to_frameset(other)
    if coerced is NotImplemented:
        return NotImplemented
    return self.items <= coerced.items
Check if the contents of self is a subset of the contents of other .