idx
int64
0
63k
question
stringlengths
61
4.03k
target
stringlengths
6
1.23k
15,100
def process(self):
    """Collect the results of a completed GRR flow.

    Resolves the client by hostname, waits for the flow to finish, and
    downloads the collected files. On success, records a
    (client FQDN, downloaded path) tuple on the state output.
    """
    grr_client = self._get_client_by_hostname(self.host)
    self._await_flow(grr_client, self.flow_id)
    downloaded_path = self._download_files(grr_client, self.flow_id)
    if downloaded_path:
        print('{0:s}: Downloaded: {1:s}'.format(self.flow_id, downloaded_path))
        fqdn = grr_client.data.os_info.fqdn.lower()
        self.state.output.append((fqdn, downloaded_path))
Collect the results .
15,101
def process(self):
    """Copy each queued remote disk into the analysis project.

    For every disk gathered during setup(): snapshot it, create a copy
    in the analysis project, attach the copy to the analysis VM, and
    delete the temporary snapshot. Records (analysis VM name, new disk)
    tuples on the state output.
    """
    for source_disk in self.disks_to_copy:
        print("Disk copy of {0:s} started...".format(source_disk.name))
        snapshot = source_disk.snapshot()
        disk_copy = self.analysis_project.create_disk_from_snapshot(
            snapshot, disk_name_prefix="incident" + self.incident_id)
        self.analysis_vm.attach_disk(disk_copy)
        snapshot.delete()
        print("Disk {0:s} successfully copied to {1:s}".format(
            source_disk.name, disk_copy.name))
        self.state.output.append((self.analysis_vm.name, disk_copy))
Copy a disk to the analysis project .
15,102
def setup(self,
          analysis_project_name,
          remote_project_name,
          incident_id,
          zone,
          boot_disk_size,
          cpu_cores,
          remote_instance_name=None,
          disk_names=None,
          all_disks=False,
          image_project="ubuntu-os-cloud",
          image_family="ubuntu-1604-lts"):
    """Sets up a Google Cloud collector.

    Creates the analysis project handle and analysis VM, then builds the
    list of remote disks to copy — either from an explicit
    comma-separated disk list or from a remote instance (boot disk only,
    or all attached disks). Credential and HTTP failures are translated
    into user-actionable critical errors on the state.
    """
    disk_names = disk_names.split(",") if disk_names else []
    self.analysis_project = libcloudforensics.GoogleCloudProject(
        analysis_project_name, default_zone=zone)
    remote_project = libcloudforensics.GoogleCloudProject(remote_project_name)
    if not (remote_instance_name or disk_names):
        self.state.add_error(
            "You need to specify at least an instance name or disks to copy",
            critical=True)
        return
    self.incident_id = incident_id
    analysis_vm_name = "gcp-forensics-vm-{0:s}".format(incident_id)
    print("Your analysis VM will be: {0:s}".format(analysis_vm_name))
    print("Complimentary gcloud command:")
    print("gcloud compute ssh --project {0:s} {1:s} --zone {2:s}".format(
        analysis_project_name, analysis_vm_name, zone))
    try:
        self.analysis_vm, _ = libcloudforensics.start_analysis_vm(
            self.analysis_project.project_id,
            analysis_vm_name,
            zone,
            boot_disk_size,
            int(cpu_cores),
            attach_disk=None,
            image_project=image_project,
            image_family=image_family)
        if disk_names:
            for name in disk_names:
                try:
                    self.disks_to_copy.append(remote_project.get_disk(name))
                except RuntimeError:
                    self.state.add_error(
                        "Disk '{0:s}' was not found in project {1:s}".format(
                            name, remote_project_name),
                        critical=True)
                    break
        elif remote_instance_name:
            remote_instance = remote_project.get_instance(remote_instance_name)
            if all_disks:
                self.disks_to_copy = [
                    remote_project.get_disk(disk_name)
                    for disk_name in remote_instance.list_disks()
                ]
            else:
                self.disks_to_copy = [remote_instance.get_boot_disk()]
        if not self.disks_to_copy:
            self.state.add_error(
                "Could not find any disks to copy", critical=True)
    except AccessTokenRefreshError as err:
        self.state.add_error("Something is wrong with your gcloud access token.")
        self.state.add_error(err, critical=True)
    except ApplicationDefaultCredentialsError as err:
        self.state.add_error(
            "Something is wrong with your Application Default "
            "Credentials. Try running:\n"
            " $ gcloud auth application-default login")
        self.state.add_error(err, critical=True)
    except HttpError as err:
        if err.resp.status == 403:
            self.state.add_error(
                "Make sure you have the appropriate permissions on the project")
        if err.resp.status == 404:
            self.state.add_error(
                "GCP resource not found. Maybe a typo in the project / instance / "
                "disk name?")
        self.state.add_error(err, critical=True)
Sets up a Google cloud collector .
15,103
def setup ( self , timezone = None ) : self . _timezone = timezone self . _output_path = tempfile . mkdtemp ( )
Sets up the _timezone attribute .
15,104
def process(self):
    """Run log2timeline against every path in the state input.

    For each (description, path) pair, builds a log2timeline.py command
    line, executes it, and appends (description, plaso storage file
    path) to the state output. Failures are recorded as critical errors
    on the state.
    """
    for description, path in self.state.input:
        log_file_path = os.path.join(self._output_path, 'plaso.log')
        print('Log file: {0:s}'.format(log_file_path))
        cmd = ['log2timeline.py', '-q', '--status_view', 'none']
        if self._timezone:
            cmd.extend(['-z', self._timezone])
        cmd.extend(['--partition', 'all'])
        cmd.extend(['--logfile', log_file_path])
        plaso_storage_file_path = os.path.join(
            self._output_path, '{0:s}.plaso'.format(uuid.uuid4().hex))
        cmd.extend([plaso_storage_file_path, path])
        full_cmd = ' '.join(cmd)
        print('Running external command: "{0:s}"'.format(full_cmd))
        try:
            l2t_proc = subprocess.Popen(
                cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
            _, error = l2t_proc.communicate()
            l2t_status = l2t_proc.wait()
            if l2t_status:
                message = ('The log2timeline command {0:s} failed: {1:s}.'
                           ' Check log file for details.').format(full_cmd, error)
                self.state.add_error(message, critical=True)
            self.state.output.append((description, plaso_storage_file_path))
        except OSError as exception:
            self.state.add_error(exception, critical=True)
        except Exception as exception:  # pylint: disable=broad-except
            self.state.add_error(exception, critical=True)
Execute the Plaso process .
15,105
def setup(self, reason, grr_server_url, grr_username, grr_password,
          approvers=None, verify=True):
    """Initialize the GRR hunt result collector.

    Parses the comma-separated approvers list, opens an HTTP connection
    to the GRR server, and prepares a temporary download directory.
    """
    self.approvers = []
    if approvers:
        self.approvers = [entry.strip() for entry in approvers.strip().split(',')]
    self.grr_api = grr_api.InitHttp(
        api_endpoint=grr_server_url,
        auth=(grr_username, grr_password),
        verify=verify)
    self.output_path = tempfile.mkdtemp()
    self.reason = reason
Initializes a GRR hunt result collector .
15,106
def _check_approval_wrapper ( self , grr_object , grr_function , * args , ** kwargs ) : approval_sent = False while True : try : return grr_function ( * args , ** kwargs ) except grr_errors . AccessForbiddenError as exception : print ( 'No valid approval found: {0!s}' . format ( exception ) ) if approval_sent : print ( 'Approval not yet granted, waiting {0:d}s' . format ( self . _CHECK_APPROVAL_INTERVAL_SEC ) ) time . sleep ( self . _CHECK_APPROVAL_INTERVAL_SEC ) continue if not self . approvers : message = ( 'GRR needs approval but no approvers specified ' '(hint: use --approvers)' ) self . state . add_error ( message , critical = True ) return None grr_object . CreateApproval ( reason = self . reason , notified_users = self . approvers ) approval_sent = True print ( '{0!s}: approval request sent to: {1!s} (reason: {2:s})' . format ( grr_object , self . approvers , self . reason ) )
Wraps a call to GRR functions checking for approval .
15,107
def _create_session(self, username, password):
    """Create an authenticated HTTP session against the Timesketch host.

    Fetches the login page to harvest the CSRF token, then posts the
    credentials. Returns the requests.Session on success, or False when
    the host is unreachable.
    """
    session = requests.Session()
    # NOTE(review): TLS certificate validation is intentionally disabled
    # here — confirm this is acceptable for the deployment.
    session.verify = False
    try:
        response = session.get(self.host_url)
    except requests.exceptions.ConnectionError:
        return False
    soup = BeautifulSoup(response.text, 'html.parser')
    csrf_token = soup.find('input', dict(name='csrf_token'))['value']
    session.headers.update(
        {'x-csrftoken': csrf_token, 'referer': self.host_url})
    login_data = dict(username=username, password=password)
    _ = session.post('{0:s}/login/'.format(self.host_url), data=login_data)
    return session
Create HTTP session .
15,108
def create_sketch(self, name, description):
    """Create a new Timesketch sketch and return its ID.

    Args:
      name: sketch name.
      description: sketch description.

    Returns:
      The numeric ID of the newly created sketch.
    """
    resource_url = '{0:s}/sketches/'.format(self.api_base_url)
    response = self.session.post(
        resource_url, json={'name': name, 'description': description})
    return response.json()['objects'][0]['id']
Create a new sketch with the specified name and description .
15,109
def upload_timeline(self, timeline_name, plaso_storage_path):
    """Upload a Plaso storage file as a new Timesketch timeline.

    Args:
      timeline_name: name to give the new timeline.
      plaso_storage_path: path to the .plaso file to upload.

    Returns:
      The index ID of the uploaded timeline.

    Raises:
      RuntimeError: if Timesketch returns a response that is not JSON.
    """
    resource_url = '{0:s}/upload/'.format(self.api_base_url)
    # Use a context manager so the file handle is closed even if the
    # request or JSON decoding fails (the original leaked the handle).
    with open(plaso_storage_path, 'rb') as storage_file:
        files = {'file': storage_file}
        data = {'name': timeline_name}
        response = self.session.post(resource_url, files=files, data=data)
    try:
        response_dict = response.json()
    except ValueError:
        # {1!s} instead of {1:s}: response.content is bytes on Python 3,
        # and '{:s}' would raise TypeError while reporting the error.
        raise RuntimeError(
            'Could not decode JSON response from Timesketch'
            ' (Status {0:d}):\n{1!s}'.format(
                response.status_code, response.content))
    index_id = response_dict['objects'][0]['id']
    return index_id
Create a timeline with the specified name from the given plaso file .
15,110
def export_artifacts(self, processed_artifacts, sketch_id):
    """Upload processed artifacts and attach them to the given sketch.

    Args:
      processed_artifacts: iterable of (timeline name, artifact path) pairs.
      sketch_id: ID of the sketch to attach the timelines to.

    Returns:
      The sketch ID the artifacts were added to.
    """
    for timeline_name, artifact_path in processed_artifacts:
        print('Uploading {0:s} to timeline {1:s}'.format(
            artifact_path, timeline_name))
        new_timeline_id = self.upload_timeline(timeline_name, artifact_path)
        self.add_timeline_to_sketch(sketch_id, new_timeline_id)
    return sketch_id
Upload the provided artifacts to the specified sketch, creating a new sketch if it does not exist.
15,111
def add_timeline_to_sketch(self, sketch_id, index_id):
    """Associate an uploaded timeline index with a sketch."""
    resource_url = '{0:s}/sketches/{1:d}/timelines/'.format(
        self.api_base_url, sketch_id)
    form_data = {'timeline': [index_id]}
    self.session.post(resource_url, json=form_data)
Associate the specified timeline and sketch .
15,112
def get_sketch(self, sketch_id):
    """Fetch a sketch's metadata from Timesketch.

    Returns:
      The decoded JSON response for the sketch.

    Raises:
      ValueError: if the sketch is missing or access is denied.
    """
    resource_url = '{0:s}/sketches/{1:d}/'.format(self.api_base_url, sketch_id)
    response_dict = self.session.get(resource_url).json()
    if 'objects' not in response_dict:
        raise ValueError('Sketch does not exist or you have no access')
    return response_dict
Get information on the specified sketch .
15,113
def setup ( self , keywords = None ) : self . _keywords = keywords self . _output_path = tempfile . mkdtemp ( )
Sets up the _keywords attribute .
15,114
def process(self):
    """Grep every file under the input paths for the configured keywords.

    Walks each input path, scanning PDFs via grepPDF() and all other
    files line by line. Matches are lower-cased, comma-joined per file,
    accumulated into self._final_output, and echoed to stdout. Any OS or
    unexpected error is recorded as critical and aborts processing.
    """
    for _, path in self.state.input:
        log_file_path = os.path.join(self._output_path, 'grepper.log')
        print('Log file: {0:s}'.format(log_file_path))
        print('Walking through dir (absolute) = ' + os.path.abspath(path))
        try:
            for root, _, files in os.walk(path):
                for filename in files:
                    found = set()
                    fullpath = '{0:s}/{1:s}'.format(
                        os.path.abspath(root), filename)
                    if mimetypes.guess_type(filename)[0] == 'application/pdf':
                        found = self.grepPDF(fullpath)
                    else:
                        with open(fullpath, 'r') as fp:
                            for line in fp:
                                found.update(
                                    hit.lower() for hit in re.findall(
                                        self._keywords, line, re.IGNORECASE))
                    if [item for item in found if item]:
                        output = '{0:s}/{1:s}:{2:s}'.format(
                            path, filename, ','.join(filter(None, found)))
                        if self._final_output:
                            self._final_output += '\n' + output
                        else:
                            self._final_output = output
                        print(output)
        except OSError as exception:
            self.state.add_error(exception, critical=True)
            return
        except Exception as exception:  # pylint: disable=broad-except
            self.state.add_error(exception, critical=True)
            return
Execute the grep command
15,115
def grepPDF(self, path):
    """Extract a PDF's text and return the set of matched keywords.

    Args:
      path: path to the PDF file to scan.

    Returns:
      Set of lower-cased keyword matches found in the document text.
    """
    matches = set()
    with open(path, 'rb') as pdf_file_obj:
        text = ''
        pdf_reader = PyPDF2.PdfFileReader(pdf_file_obj)
        for page_number in range(pdf_reader.numPages):
            text += '\n' + pdf_reader.getPage(page_number).extractText()
        matches.update(
            hit.lower() for hit in re.findall(
                self._keywords, text, re.IGNORECASE))
    return matches
Parse PDF files text content for keywords .
15,116
def load_recipe(self, recipe):
    """Instantiate every module declared in a recipe into the module pool.

    Args:
      recipe: recipe dict with a 'modules' list of module descriptions.
    """
    self.recipe = recipe
    for module_description in recipe['modules']:
        module_name = module_description['name']
        module_class = self.config.get_module(module_name)
        self._module_pool[module_name] = module_class(self)
Populates the internal module pool with modules declared in a recipe .
15,117
def store_container(self, container):
    """Thread-safely append a container to the state's store.

    Containers are bucketed by their CONTAINER_TYPE attribute.
    """
    with self._store_lock:
        bucket = self.store.setdefault(container.CONTAINER_TYPE, [])
        bucket.append(container)
Thread-safe method to store data in the state's store.
15,118
def get_containers(self, container_class):
    """Thread-safely fetch the stored containers of a given type.

    Returns:
      The list stored under container_class.CONTAINER_TYPE, or [] when
      nothing of that type has been stored.
    """
    with self._store_lock:
        return self.store.get(container_class.CONTAINER_TYPE, [])
Thread-safe method to retrieve data from the state's store.
15,119
def setup_modules(self, args):
    """Run setup() for every module in the pool, one thread per module.

    Arguments declared in the recipe are resolved against the parsed CLI
    args before being handed to each module's setup(). A threading.Event
    is registered per module for later inter-module synchronization, and
    critical errors abort via check_errors().
    """
    def _setup_module_thread(module_description):
        # Resolve @-token placeholders against the CLI namespace.
        new_args = utils.import_args_from_dict(
            module_description['args'], vars(args), self.config)
        module = self._module_pool[module_description['name']]
        try:
            module.setup(**new_args)
        except Exception as error:  # pylint: disable=broad-except
            self.add_error(
                'An unknown error occurred: {0!s}\nFull traceback:\n{1:s}'.format(
                    error, traceback.format_exc()),
                critical=True)
        self.events[module_description['name']] = threading.Event()
        self.cleanup()

    threads = []
    for module_description in self.recipe['modules']:
        thread = threading.Thread(
            target=_setup_module_thread, args=(module_description,))
        threads.append(thread)
        thread.start()
    for thread in threads:
        thread.join()
    self.check_errors(is_global=True)
Performs setup tasks for each module in the module pool .
15,120
def run_modules(self):
    """Run process() for every module in the pool, one thread per module.

    Each thread first waits on the completion events of the modules its
    recipe entry 'wants', then calls process(). Both expected
    (DFTimewolfError) and unexpected exceptions are recorded; critical
    errors abort via check_errors().
    """
    def _run_module_thread(module_description):
        # Block until every declared dependency has signalled completion.
        for blocker in module_description['wants']:
            self.events[blocker].wait()
        module = self._module_pool[module_description['name']]
        try:
            module.process()
        except DFTimewolfError as error:
            self.add_error(error.message, critical=True)
        except Exception as error:  # pylint: disable=broad-except
            self.add_error(
                'An unknown error occurred: {0!s}\nFull traceback:\n{1:s}'.format(
                    error, traceback.format_exc()),
                critical=True)
        print('Module {0:s} completed'.format(module_description['name']))
        self.events[module_description['name']].set()
        self.cleanup()

    threads = []
    for module_description in self.recipe['modules']:
        thread = threading.Thread(
            target=_run_module_thread, args=(module_description,))
        threads.append(thread)
        thread.start()
    for thread in threads:
        thread.join()
    self.check_errors(is_global=True)
Performs the actual processing for each module in the module pool .
15,121
def add_error(self, error, critical=False):
    """Record an error (message or exception) with its criticality."""
    self.errors.append((error, critical))
Adds an error to the state .
15,122
def cleanup(self):
    """Roll per-module errors into the global list and rotate the I/O.

    The previous module's output becomes the next module's input.
    """
    self.global_errors.extend(self.errors)
    self.errors = []
    self.input = self.output
    self.output = []
Basic cleanup after modules .
15,123
def check_errors(self, is_global=False):
    """Print collected errors and exit if any of them is critical.

    Args:
      is_global: check self.global_errors instead of self.errors.
    """
    errors = self.global_errors if is_global else self.errors
    if not errors:
        return
    print('dfTimewolf encountered one or more errors:')
    for error, critical in errors:
        # {1!s} instead of {1:s}: errors recorded via add_error() may be
        # exception objects, and '{:s}'.format(exception) raises
        # TypeError — crashing error reporting itself.
        print('{0:s} {1!s}'.format('CRITICAL: ' if critical else '', error))
        if critical:
            print('Critical error found. Aborting.')
            sys.exit(-1)
Checks for errors and exits if any of them are critical .
15,124
def setup ( self , disk_name , project , turbinia_zone ) : if project is None or turbinia_zone is None : self . state . add_error ( 'project or turbinia_zone are not all specified, bailing out' , critical = True ) return self . disk_name = disk_name self . project = project self . turbinia_zone = turbinia_zone try : turbinia_config . LoadConfig ( ) self . turbinia_region = turbinia_config . TURBINIA_REGION self . instance = turbinia_config . PUBSUB_TOPIC if turbinia_config . PROJECT != self . project : self . state . add_error ( 'Specified project {0:s} does not match Turbinia configured ' 'project {1:s}. Use gcp_turbinia_import recipe to copy the disk ' 'into the same project.' . format ( self . project , turbinia_config . PROJECT ) , critical = True ) return self . _output_path = tempfile . mkdtemp ( ) self . client = turbinia_client . TurbiniaClient ( ) except TurbiniaException as e : self . state . add_error ( e , critical = True ) return
Sets up the object attributes .
15,125
def _print_task_data ( self , task ) : print ( ' {0:s} ({1:s})' . format ( task [ 'name' ] , task [ 'id' ] ) ) paths = task . get ( 'saved_paths' , [ ] ) if not paths : return for path in paths : if path . endswith ( 'worker-log.txt' ) : continue if path . endswith ( '{0:s}.log' . format ( task . get ( 'id' ) ) ) : continue if path . startswith ( '/' ) : continue print ( ' ' + path )
Pretty - prints task data .
15,126
def display_task_progress(self, instance, project, region,
                          request_id=None, user=None, poll_interval=60):
    """Poll Turbinia and print task progress until all tasks complete.

    A status block is printed whenever the completed count grows (or
    nothing has completed yet); the loop sleeps poll_interval seconds
    between polls and returns once every task reports a 'successful'
    value.
    """
    total_completed = 0
    while True:
        task_results = self.client.get_task_data(
            instance, project, region, request_id=request_id, user=user)
        tasks = {task['id']: task for task in task_results}
        completed_tasks = set()
        pending_tasks = set()
        for task in tasks.values():
            # 'successful' is None while the task is still running.
            if task.get('successful') is not None:
                completed_tasks.add(task['id'])
            else:
                pending_tasks.add(task['id'])
        if len(completed_tasks) > total_completed or not completed_tasks:
            total_completed = len(completed_tasks)
            print('Task status update (completed: {0:d} | pending: {1:d})'.format(
                len(completed_tasks), len(pending_tasks)))
            print('Completed tasks:')
            for task_id in completed_tasks:
                self._print_task_data(tasks[task_id])
            print('Pending tasks:')
            for task_id in pending_tasks:
                self._print_task_data(tasks[task_id])
        if len(completed_tasks) == len(task_results) and completed_tasks:
            print('All {0:d} Tasks completed'.format(len(task_results)))
            return
        time.sleep(poll_interval)
Displays the overall progress of tasks in a Turbinia job .
15,127
def generate_help():
    """Build help text listing registered recipes, sorted by name."""
    help_text = '\nAvailable recipes:\n\n'
    recipes = config.Config.get_registered_recipes()
    for contents, _, _ in sorted(recipes, key=lambda k: k[0]['name']):
        description = contents.get('short_description', 'No description')
        help_text += ' {0:<35s}{1:s}\n'.format(contents['name'], description)
    return help_text
Generates help text with alphabetically sorted recipes .
15,128
def main():
    """Entry point: parse CLI args, load the recipe, set up and run modules."""
    parser = argparse.ArgumentParser(
        formatter_class=argparse.RawDescriptionHelpFormatter,
        description=generate_help())
    subparsers = parser.add_subparsers()
    for registered_recipe in config.Config.get_registered_recipes():
        recipe, recipe_args, documentation = registered_recipe
        subparser = subparsers.add_parser(
            recipe['name'],
            formatter_class=utils.DFTimewolfFormatterClass,
            description='{0:s}'.format(documentation))
        subparser.set_defaults(recipe=recipe)
        for switch, help_text, default in recipe_args:
            subparser.add_argument(switch, help=help_text, default=default)
        # Expose extra config-level defaults on every recipe subparser.
        subparser.set_defaults(**config.Config.get_extra())
    args = parser.parse_args()
    recipe = args.recipe

    state = DFTimewolfState(config.Config)
    print('Loading recipes...')
    state.load_recipe(recipe)
    print('Loaded recipe {0:s} with {1:d} modules'.format(
        recipe['name'], len(recipe['modules'])))
    print('Setting up modules...')
    state.setup_modules(args)
    print('Modules successfully set up!')
    print('Running modules...')
    state.run_modules()
    print('Recipe {0:s} executed successfully.'.format(recipe['name']))
Main function for DFTimewolf .
15,129
def setup(self, endpoint=None, username=None, password=None,
          incident_id=None, sketch_id=None):
    """Connect to Timesketch and ensure a sketch exists to export into.

    When no sketch_id is supplied, a new sketch is created, named after
    the incident ID if one was given. Connection failures are recorded
    as critical errors on the state.
    """
    self.timesketch_api = timesketch_utils.TimesketchApiClient(
        endpoint, username, password)
    self.incident_id = None
    self.sketch_id = int(sketch_id) if sketch_id else None
    if not self.timesketch_api.session:
        message = 'Could not connect to Timesketch server at ' + endpoint
        self.state.add_error(message, critical=True)
        return
    if self.sketch_id:
        return
    if incident_id:
        sketch_name = 'Sketch for incident ID: ' + incident_id
    else:
        sketch_name = 'Untitled sketch'
    sketch_description = 'Sketch generated by dfTimewolf'
    self.sketch_id = self.timesketch_api.create_sketch(
        sketch_name, sketch_description)
    print('Sketch {0:d} created'.format(self.sketch_id))
Setup a connection to a Timesketch server and create a sketch if needed .
15,130
def process(self):
    """Export all input timelines to Timesketch and print the sketch URL.

    Unnamed timelines receive a placeholder description derived from
    their path; upload failures are recorded as critical errors.
    """
    if not self.timesketch_api.session:
        message = 'Could not connect to Timesketch server'
        self.state.add_error(message, critical=True)
    named_timelines = []
    for description, path in self.state.input:
        if not description:
            description = 'untitled timeline for ' + path
        named_timelines.append((description, path))
    try:
        self.timesketch_api.export_artifacts(named_timelines, self.sketch_id)
    except RuntimeError as e:
        self.state.add_error(
            'Error occurred while working with Timesketch: {0:s}'.format(str(e)),
            critical=True)
        return
    sketch_url = self.timesketch_api.get_sketch_url(self.sketch_id)
    print('Your Timesketch URL is: {0:s}'.format(sketch_url))
    self.state.output = sketch_url
Executes a Timesketch export .
15,131
def setup ( self , target_directory = None ) : self . _target_directory = target_directory if not target_directory : self . _target_directory = tempfile . mkdtemp ( ) elif not os . path . exists ( target_directory ) : try : os . makedirs ( target_directory ) except OSError as exception : message = 'An unknown error occurred: {0!s}' . format ( exception ) self . state . add_error ( message , critical = True )
Sets up the _target_directory attribute .
15,132
def _copy_file_or_directory ( self , source , destination_directory ) : if os . path . isdir ( source ) : for item in os . listdir ( source ) : full_source = os . path . join ( source , item ) full_destination = os . path . join ( destination_directory , item ) shutil . copytree ( full_source , full_destination ) else : shutil . copy2 ( source , destination_directory )
Recursively copies files from source to destination_directory .
15,133
def import_args_from_dict(value, args, config):
    """Recursively substitute @token placeholders in value using args.

    Strings have each @token (per TOKEN_REGEX) replaced with args[token];
    a token bound to a non-string replaces the whole value. Lists, dicts
    and tuples are processed recursively; any other type passes through
    unchanged.
    """
    if isinstance(value, six.string_types):
        for match in TOKEN_REGEX.finditer(str(value)):
            token = match.group(1)
            if token not in args:
                continue
            replacement = args[token]
            if isinstance(replacement, six.string_types):
                value = value.replace("@" + token, replacement)
            else:
                # Non-string parameters replace the value wholesale.
                value = replacement
    elif isinstance(value, list):
        return [import_args_from_dict(item, args, config) for item in value]
    elif isinstance(value, dict):
        return {
            key: import_args_from_dict(val, args, config)
            for key, val in value.items()
        }
    elif isinstance(value, tuple):
        return tuple(
            import_args_from_dict(item, args, config) for item in value)
    return value
Replaces some arguments by those specified by a key - value dictionary .
15,134
def get_version():
    """Extract __version__ from requests_kerberos/__init__.py via regex.

    Returns:
      The version string, e.g. '1.2.3'.

    Raises:
      RuntimeError: if no __version__ assignment is found.
    """
    version_re = re.compile(r'__version__ = [\'"]([^\'"]*)[\'"]')
    with open('requests_kerberos/__init__.py') as fd:
        for line in fd:
            match = version_re.match(line)
            if match:
                return match.group(1)
    raise RuntimeError(
        'Could not find the version information for requests_kerberos')
Simple function to extract the current version using regular expressions .
15,135
def _negotiate_value ( response ) : if hasattr ( _negotiate_value , 'regex' ) : regex = _negotiate_value . regex else : regex = re . compile ( '(?:.*,)*\s*Negotiate\s*([^,]*),?' , re . I ) _negotiate_value . regex = regex authreq = response . headers . get ( 'www-authenticate' , None ) if authreq : match_obj = regex . search ( authreq ) if match_obj : return match_obj . group ( 1 ) return None
Extracts the GSSAPI authentication token from the appropriate header.
15,136
def generate_request_header(self, response, host, is_preemptive=False):
    """Build the 'Negotiate <token>' header value via the kerberos API.

    Runs authGSSClientInit/Step/Response for the target host, honouring
    delegation, an optional hostname override, and channel bindings.

    Raises:
      KerberosExchangeError: when any GSSAPI stage fails.
    """
    gssflags = kerberos.GSS_C_MUTUAL_FLAG | kerberos.GSS_C_SEQUENCE_FLAG
    if self.delegate:
        gssflags |= kerberos.GSS_C_DELEG_FLAG
    try:
        kerb_stage = "authGSSClientInit()"
        kerb_host = (host if self.hostname_override is None
                     else self.hostname_override)
        kerb_spn = "{0}@{1}".format(self.service, kerb_host)
        result, self.context[host] = kerberos.authGSSClientInit(
            kerb_spn, gssflags=gssflags, principal=self.principal)
        if result < 1:
            raise EnvironmentError(result, kerb_stage)

        # Preemptive auth sends an empty token instead of the server's.
        negotiate_resp_value = '' if is_preemptive else _negotiate_value(response)

        kerb_stage = "authGSSClientStep()"
        if self.cbt_struct:
            result = kerberos.authGSSClientStep(
                self.context[host], negotiate_resp_value,
                channel_bindings=self.cbt_struct)
        else:
            result = kerberos.authGSSClientStep(
                self.context[host], negotiate_resp_value)
        if result < 0:
            raise EnvironmentError(result, kerb_stage)

        kerb_stage = "authGSSClientResponse()"
        gss_response = kerberos.authGSSClientResponse(self.context[host])
        return "Negotiate {0}".format(gss_response)
    except kerberos.GSSError as error:
        log.exception(
            "generate_request_header(): {0} failed:".format(kerb_stage))
        log.exception(error)
        raise KerberosExchangeError(
            "%s failed: %s" % (kerb_stage, str(error.args)))
    except EnvironmentError as error:
        # Only translate the EnvironmentErrors we raised ourselves.
        if error.errno != result:
            raise
        message = "{0} failed, result: {1}".format(kerb_stage, result)
        log.error("generate_request_header(): {0}".format(message))
        raise KerberosExchangeError(message)
Generates the GSSAPI authentication token with kerberos .
15,137
def handle_other(self, response):
    """Handle a non-401 response, enforcing mutual authentication.

    When mutual auth is REQUIRED/OPTIONAL and not yet done, the server's
    Negotiate header is verified; failures raise
    MutualAuthenticationError or, for REQUIRED with sanitization
    enabled, return a SanitizedResponse.
    """
    log.debug("handle_other(): Handling: %d" % response.status_code)
    if self.mutual_authentication not in (REQUIRED, OPTIONAL) or self.auth_done:
        log.debug("handle_other(): returning {0}".format(response))
        return response
    is_http_error = response.status_code >= 400
    if _negotiate_value(response) is not None:
        log.debug("handle_other(): Authenticating the server")
        if not self.authenticate_server(response):
            log.error("handle_other(): Mutual authentication failed")
            raise MutualAuthenticationError(
                "Unable to authenticate {0}".format(response))
        log.debug("handle_other(): returning {0}".format(response))
        self.auth_done = True
        return response
    if is_http_error or self.mutual_authentication == OPTIONAL:
        if not response.ok:
            log.error(
                "handle_other(): Mutual authentication unavailable "
                "on {0} response".format(response.status_code))
        if (self.mutual_authentication == REQUIRED
                and self.sanitize_mutual_error_response):
            return SanitizedResponse(response)
        return response
    log.error("handle_other(): Mutual authentication failed")
    raise MutualAuthenticationError(
        "Unable to authenticate {0}".format(response))
Handles all responses with the exception of 401s .
15,138
def authenticate_server(self, response):
    """Verify the server's final Negotiate token via GSSAPI.

    Returns:
      True when authGSSClientStep() accepts the token found in the
      response's WWW-Authenticate header, False otherwise.
    """
    log.debug(
        "authenticate_server(): Authenticate header: {0}".format(
            _negotiate_value(response)))
    host = urlparse(response.url).hostname
    try:
        if self.cbt_struct:
            result = kerberos.authGSSClientStep(
                self.context[host], _negotiate_value(response),
                channel_bindings=self.cbt_struct)
        else:
            result = kerberos.authGSSClientStep(
                self.context[host], _negotiate_value(response))
    except kerberos.GSSError:
        log.exception("authenticate_server(): authGSSClientStep() failed:")
        return False
    if result < 1:
        log.error(
            "authenticate_server(): authGSSClientStep() failed: "
            "{0}".format(result))
        return False
    log.debug("authenticate_server(): returning {0}".format(response))
    return True
Uses GSSAPI to authenticate the server .
15,139
def handle_response(self, response, **kwargs):
    """Dispatch a response, retrying with Kerberos auth on 401s.

    At most two 401 rounds are attempted; channel-binding data is
    captured once from the first response, and the request body is
    rewound before any retry.
    """
    num_401s = kwargs.pop('num_401s', 0)

    if not self.cbt_binding_tried and self.send_cbt:
        # Extract channel-binding data exactly once.
        cbt_application_data = _get_channel_bindings_application_data(response)
        if cbt_application_data:
            try:
                self.cbt_struct = kerberos.channelBindings(
                    application_data=cbt_application_data)
            except AttributeError:
                # Not supported by this kerberos module build.
                self.cbt_struct = None
        self.cbt_binding_tried = True

    if self.pos is not None:
        # Rewind the body so a retried request resends the same payload.
        response.request.body.seek(self.pos)

    if response.status_code == 401 and num_401s < 2:
        _r = self.handle_401(response, **kwargs)
        log.debug("handle_response(): returning %s", _r)
        log.debug("handle_response() has seen %d 401 responses", num_401s)
        num_401s += 1
        return self.handle_response(_r, num_401s=num_401s, **kwargs)
    if response.status_code == 401:
        # Still 401 after two rounds — give up and hand it back.
        log.debug("handle_response(): returning 401 %s", response)
        return response
    _r = self.handle_other(response)
    log.debug("handle_response(): returning %s", _r)
    return _r
Takes the given response and tries kerberos - auth as needed .
15,140
def _quote_query(query):
    """Serialize a dict into a sorted, percent-encoded query string.

    Values are UTF-8 encoded and quoted with '~' kept safe, as required
    for AWS signature computation.
    """
    encoded_pairs = (
        "%s=%s" % (key,
                   urllib_quote(unicode(query[key]).encode('utf-8'), safe='~'))
        for key in sorted(query))
    return "&".join(encoded_pairs)
Turn a dictionary into a query string in a URL with keys in alphabetical order .
15,141
def api_url(self, **kwargs):
    """Build the signed Product Advertising API request URL.

    Merges the standard query parameters with kwargs, canonicalizes
    them, signs the request with HMAC-SHA256, and returns the full URL.
    """
    query = {
        'Operation': self.Operation,
        'Service': "AWSECommerceService",
        'Timestamp': time.strftime("%Y-%m-%dT%H:%M:%SZ", time.gmtime()),
        'Version': self.Version,
    }
    query.update(kwargs)
    # These must win over any caller-supplied values.
    query['AWSAccessKeyId'] = self.AWSAccessKeyId
    query['Timestamp'] = time.strftime("%Y-%m-%dT%H:%M:%SZ", time.gmtime())
    if self.AssociateTag:
        query['AssociateTag'] = self.AssociateTag

    service_domain = SERVICE_DOMAINS[self.Region][0]
    quoted_strings = _quote_query(query)
    # Canonical string-to-sign per the AWS signature scheme.
    data = "GET\n" + service_domain + "\n/onca/xml\n" + quoted_strings

    if type(self.AWSSecretAccessKey) is unicode:
        self.AWSSecretAccessKey = self.AWSSecretAccessKey.encode('utf-8')
    if type(data) is unicode:
        data = data.encode('utf-8')
    digest = hmac.new(self.AWSSecretAccessKey, data, sha256).digest()
    if sys.version_info[0] == 3:
        signature = urllib.parse.quote(b64encode(digest))
    else:
        signature = urllib.quote(b64encode(digest))
    return ("https://" + service_domain + "/onca/xml?" + quoted_strings +
            "&Signature=%s" % signature)
The URL for making the given query against the API .
15,142
def isSameTypeWith(self, other, matchTags=True, matchConstraints=True):
    """Examine |ASN.1| type for equality with another ASN.1 type.

    Identity short-circuits; otherwise tag sets and/or constraints are
    compared depending on the match* flags.
    """
    if self is other:
        return True
    tags_ok = not matchTags or self.tagSet == other.tagSet
    constraints_ok = (not matchConstraints
                      or self.subtypeSpec == other.subtypeSpec)
    return tags_ok and constraints_ok
Examine |ASN . 1| type for equality with other ASN . 1 type .
15,143
def isSuperTypeOf(self, other, matchTags=True, matchConstraints=True):
    """Examine |ASN.1| type for a subtype relationship with another type.

    Checks that this tag set is a super-tag-set of the other's and that
    these constraints are a supertype of the other's, each gated by its
    match* flag.
    """
    if not matchTags:
        return True
    if not self.tagSet.isSuperTagSetOf(other.tagSet):
        return False
    return (not matchConstraints
            or self.subtypeSpec.isSuperTypeOf(other.subtypeSpec))
Examine |ASN . 1| type for subtype relationship with other ASN . 1 type .
15,144
def clone(self, value=noValue, **kwargs):
    """Create a modified version of an |ASN.1| schema or value object.

    With no arguments at all, returns self unchanged; otherwise builds a
    new instance from the read-only initializers overridden by kwargs.
    """
    if value is noValue:
        if not kwargs:
            return self
        value = self._value
    initializers = self.readOnly.copy()
    initializers.update(kwargs)
    return self.__class__(value, **initializers)
Create a modified version of |ASN . 1| schema or value object .
15,145
def subtype(self, value=noValue, **kwargs):
    """Create a specialization of an |ASN.1| schema or value object.

    implicitTag/explicitTag kwargs re-tag the object; all remaining
    kwargs are merged (with +=) into the inherited initializers.
    """
    if value is noValue:
        if not kwargs:
            return self
        value = self._value
    initializers = self.readOnly.copy()
    implicitTag = kwargs.pop('implicitTag', None)
    if implicitTag is not None:
        initializers['tagSet'] = self.tagSet.tagImplicitly(implicitTag)
    explicitTag = kwargs.pop('explicitTag', None)
    if explicitTag is not None:
        initializers['tagSet'] = self.tagSet.tagExplicitly(explicitTag)
    for arg, option in kwargs.items():
        # Constraint-like initializers accumulate rather than replace.
        initializers[arg] += option
    return self.__class__(value, **initializers)
Create a specialization of |ASN . 1| schema or value object .
15,146
def clone(self, **kwargs):
    """Create a modified version of |ASN.1| schema object.

    If cloneValueFlag is truthy, component values are deep-copied into the
    new object as well.
    """
    cloneValueFlag = kwargs.pop('cloneValueFlag', False)
    params = self.readOnly.copy()
    params.update(kwargs)
    new = self.__class__(**params)
    if cloneValueFlag:
        self._cloneComponentValues(new, cloneValueFlag)
    return new
Create a modified version of |ASN . 1| schema object .
15,147
def subtype(self, **kwargs):
    """Create a specialization of |ASN.1| schema object.

    implicitTag/explicitTag re-tag the type; remaining keyword arguments are
    added to their existing initializer values.  cloneValueFlag additionally
    copies component values.
    """
    params = self.readOnly.copy()
    cloneValueFlag = kwargs.pop('cloneValueFlag', False)
    implicitTag = kwargs.pop('implicitTag', None)
    if implicitTag is not None:
        params['tagSet'] = self.tagSet.tagImplicitly(implicitTag)
    explicitTag = kwargs.pop('explicitTag', None)
    if explicitTag is not None:
        params['tagSet'] = self.tagSet.tagExplicitly(explicitTag)
    for name, option in kwargs.items():
        params[name] += option
    new = self.__class__(**params)
    if cloneValueFlag:
        self._cloneComponentValues(new, cloneValueFlag)
    return new
Create a specialization of |ASN . 1| schema object .
15,148
def getTypeByPosition(self, idx):
    """Return ASN.1 type object by its position in the fields set.

    Raises error.PyAsn1Error on an out-of-range position.
    """
    namedTypes = self.__namedTypes
    try:
        return namedTypes[idx].asn1Object
    except IndexError:
        raise error.PyAsn1Error('Type position out of range')
Return ASN . 1 type object by its position in fields set .
15,149
def getPositionByType(self, tagSet):
    """Return field position by its ASN.1 type (tag set).

    Raises error.PyAsn1Error when the tag set is unknown.
    """
    tagToPos = self.__tagToPosMap
    try:
        return tagToPos[tagSet]
    except KeyError:
        raise error.PyAsn1Error('Type %s not found' % (tagSet,))
Return field position by its ASN . 1 type .
15,150
def getNameByPosition(self, idx):
    """Return field name by its position in the fields set.

    Raises error.PyAsn1Error on an out-of-range position.
    """
    namedTypes = self.__namedTypes
    try:
        return namedTypes[idx].name
    except IndexError:
        raise error.PyAsn1Error('Type position out of range')
Return field name by its position in fields set .
15,151
def getPositionByName(self, name):
    """Return field position by field name.

    Raises error.PyAsn1Error when the name is unknown.
    """
    nameToPos = self.__nameToPosMap
    try:
        return nameToPos[name]
    except KeyError:
        raise error.PyAsn1Error('Name %s not found' % (name,))
Return field position by field name .
15,152
def getTagMapNearPosition(self, idx):
    """Return ASN.1 types that are allowed at or past the given field position.

    Raises error.PyAsn1Error on an out-of-range position.
    """
    ambiguousTypes = self.__ambiguousTypes
    try:
        return ambiguousTypes[idx].tagMap
    except KeyError:
        raise error.PyAsn1Error('Type position out of range')
Return ASN . 1 types that are allowed at or past given field position .
15,153
def getPositionNearType(self, tagSet, idx):
    """Return the closest field position at/after `idx` where the type is allowed.

    Raises error.PyAsn1Error on an out-of-range position.
    """
    try:
        offset = self.__ambiguousTypes[idx].getPositionByType(tagSet)
        return idx + offset
    except KeyError:
        raise error.PyAsn1Error('Type position out of range')
Return the closest field position where given ASN . 1 type is allowed .
15,154
def asBinary(self):
    """Get |ASN.1| value as a text string of bits, left-padded with zeros
    up to the declared bit length."""
    bits = binary.bin(self._value)[2:]
    return bits.zfill(len(self._value))
Get |ASN . 1| value as a text string of bits .
15,155
def fromOctetString(cls, value, internalFormat=False, prepend=None, padding=0):
    """Create an |ASN.1| object initialized from an octet string.

    `padding` low-order bits are dropped; `prepend` bits (if given) are
    placed in front of the result.  With internalFormat=True the raw
    SizedInteger is returned instead of a class instance.
    """
    bitLength = len(value) * 8 - padding
    result = SizedInteger(integer.from_bytes(value) >> padding).setBitLength(bitLength)
    if prepend is not None:
        combined = (SizedInteger(prepend) << len(result)) | result
        result = SizedInteger(combined).setBitLength(len(prepend) + len(result))
    if internalFormat:
        return result
    return cls(result)
Create a |ASN . 1| object initialized from a string .
15,156
def fromBinaryString(value):
    """Create an |ASN.1| octet string initialized from a string of '0'/'1'.

    Bits are packed MSB-first into bytes; a trailing partial byte is
    zero-padded on the right.  Raises error.PyAsn1Error on any other
    character.
    """
    bitsLeft = 8
    acc = 0
    out = []
    for ch in value:
        if bitsLeft:
            bitsLeft -= 1
        else:
            # current byte full -- flush and start a new one
            bitsLeft = 7
            out.append(acc)
            acc = 0
        if ch not in ('0', '1'):
            raise error.PyAsn1Error('Non-binary OCTET STRING initializer %s' % (ch,))
        acc |= int(ch) << bitsLeft
    out.append(acc)
    return octets.ints2octs(out)
Create a |ASN . 1| object initialized from a string of 0 and 1 .
15,157
def isPrefixOf(self, other):
    """Indicate if this |ASN.1| object is a prefix of the other object."""
    size = len(self)
    return size <= len(other) and self._value[:size] == other[:size]
Indicate if this |ASN . 1| object is a prefix of other |ASN . 1| object .
15,158
def getComponentByPosition(self, idx, default=noValue, instantiate=True):
    """Return |ASN.1| type component value by position.

    Missing positions are instantiated on demand unless instantiate=False,
    in which case `default` is returned.  A schema (non-value) component is
    replaced by `default` when one was supplied.
    """
    try:
        component = self._componentValues[idx]
    except IndexError:
        if not instantiate:
            return default
        # materialize a schema-default component at this position
        self.setComponentByPosition(idx)
        component = self._componentValues[idx]
    return component if default is noValue or component.isValue else default
Return |ASN . 1| type component value by position .
15,159
def getComponentByName(self, name, default=noValue, instantiate=True):
    """Return |ASN.1| type component by name.

    The position is resolved through the static component type map when one
    exists, otherwise through the dynamic-names map.
    """
    if self._componentTypeLen:
        position = self.componentType.getPositionByName(name)
    else:
        try:
            position = self._dynamicNames.getPositionByName(name)
        except KeyError:
            raise error.PyAsn1Error('Name %s not found' % (name,))
    return self.getComponentByPosition(position, default=default, instantiate=instantiate)
Returns |ASN . 1| type component by name .
15,160
def setComponentByName(self, name, value=noValue, verifyConstraints=True,
                       matchTags=True, matchConstraints=True):
    """Assign |ASN.1| type component by name.

    The position is resolved through the static component type map when one
    exists, otherwise through the dynamic-names map.
    """
    if self._componentTypeLen:
        position = self.componentType.getPositionByName(name)
    else:
        try:
            position = self._dynamicNames.getPositionByName(name)
        except KeyError:
            raise error.PyAsn1Error('Name %s not found' % (name,))
    return self.setComponentByPosition(position, value, verifyConstraints,
                                       matchTags, matchConstraints)
Assign |ASN . 1| type component by name .
15,161
def prettyPrint(self, scope=0):
    """Return a human-readable, indented representation of this object.

    Only assigned (value) components are printed, one per line, indented by
    the nesting depth.
    """
    scope += 1
    representation = self.__class__.__name__ + ':\n'
    for idx, component in enumerate(self._componentValues):
        if component is not noValue and component.isValue:
            if self.componentType:
                label = self.componentType.getNameByPosition(idx)
            else:
                label = self._dynamicNames.getNameByPosition(idx)
            representation += '%s%s=%s\n' % (' ' * scope, label,
                                             component.prettyPrint(scope))
    return representation
Return an object representation string .
15,162
def getComponentByType(self, tagSet, default=noValue, instantiate=True, innerFlag=False):
    """Return |ASN.1| type component by ASN.1 tag.

    With innerFlag=True, a nested Set component is unwrapped recursively.
    """
    position = self.componentType.getPositionByType(tagSet)
    component = self.getComponentByPosition(position, default=default,
                                            instantiate=instantiate)
    if innerFlag and isinstance(component, Set):
        # dive into the inner-most untagged component
        return component.getComponent(innerFlag=True)
    return component
Returns |ASN . 1| type component by ASN . 1 tag .
15,163
def setComponentByType(self, tagSet, value=noValue, verifyConstraints=True,
                       matchTags=True, matchConstraints=True, innerFlag=False):
    """Assign |ASN.1| type component by ASN.1 tag.

    With innerFlag=True and an untagged component type at the resolved
    position, the assignment recurses into that inner component.
    """
    idx = self.componentType.getPositionByType(tagSet)
    if not innerFlag:
        return self.setComponentByPosition(idx, value, verifyConstraints,
                                           matchTags, matchConstraints)
    componentType = self.componentType.getTypeByPosition(idx)
    if componentType.tagSet:
        return self.setComponentByPosition(idx, value, verifyConstraints,
                                           matchTags, matchConstraints)
    # untagged inner component: recurse into it
    inner = self.getComponentByPosition(idx)
    return inner.setComponentByType(tagSet, value, verifyConstraints,
                                    matchTags, matchConstraints,
                                    innerFlag=innerFlag)
Assign |ASN . 1| type component by ASN . 1 tag .
15,164
def getComponent(self, innerFlag=False):
    """Return the currently chosen component of the |ASN.1| Choice object.

    Raises error.PyAsn1Error when no component has been chosen.  With
    innerFlag=True, nested Choice values are unwrapped recursively.
    """
    if self._currentIdx is None:
        raise error.PyAsn1Error('Component not chosen')
    component = self._componentValues[self._currentIdx]
    if innerFlag and isinstance(component, Choice):
        return component.getComponent(innerFlag)
    return component
Return currently assigned component of the |ASN . 1| object .
15,165
def getName(self, innerFlag=False):
    """Return the name of the currently chosen component of the Choice object.

    Raises error.PyAsn1Error when no component has been chosen.  With
    innerFlag=True, the name of the inner-most nested Choice is returned.
    """
    if self._currentIdx is None:
        raise error.PyAsn1Error('Component not chosen')
    if innerFlag:
        component = self._componentValues[self._currentIdx]
        if isinstance(component, Choice):
            return component.getName(innerFlag)
    return self.componentType.getNameByPosition(self._currentIdx)
Return the name of currently assigned component of the |ASN . 1| object .
15,166
def expand_path(path):
    """Given a compressed path, return a new path with all segments interpolated.

    Each consecutive pair of waypoints is expanded with Bresenham's line
    algorithm, then the final waypoint itself is appended.

    :param path: sequence of waypoints
    :return: list of interpolated points (empty if fewer than 2 waypoints)
    """
    expanded = []
    if len(path) < 2:
        return expanded
    for i in range(len(path) - 1):
        expanded += bresenham(path[i], path[i + 1])
    # Fix: append the last waypoint itself.  The original appended
    # `[path[:-1]]` -- the whole path minus its last point, as one nested
    # list -- instead of the final coordinate.
    expanded += [path[-1]]
    return expanded
Given a compressed path return a new path that has all the segments in it interpolated .
15,167
def apply_heuristic(self, node_a, node_b, heuristic=None):
    """Helper: apply the given (or the grid's default) heuristic to the
    absolute x/y distances between two nodes."""
    fn = heuristic or self.heuristic
    return fn(abs(node_a.x - node_b.x), abs(node_a.y - node_b.y))
helper function to apply heuristic
15,168
def cleanup(self):
    """Reset all calculated values for a fresh pathfinding run."""
    defaults = {
        'h': 0.0,
        'g': 0.0,
        'f': 0.0,
        'opened': 0,
        'closed': False,
        'parent': None,
        'retain_count': 0,
        'tested': False,
    }
    for attr, value in defaults.items():
        setattr(self, attr, value)
reset all calculated values fresh start for pathfinding
15,169
def walkable(self, x, y):
    """True if (x, y) lies inside the grid and that node is marked walkable."""
    in_bounds = self.inside(x, y)
    return in_bounds and self.nodes[y][x].walkable
check if the tile is inside grid and if it is set as walkable
15,170
def grid_str(self, path=None, start=None, end=None, border=True,
             start_chr='s', end_chr='e', path_chr='x', empty_chr=' ',
             block_chr='#', show_weight=False):
    """Create a printable ASCII representation of the grid.

    start/end nodes get their own markers, path nodes are marked with
    path_chr, blocked nodes with block_chr; with show_weight the node weight
    (or '+' for weights >= 10) is printed instead of empty_chr.
    """
    lines = []
    if border:
        horizontal = '+{}+'.format('-' * len(self.nodes[0]))
        lines.append(horizontal)
    for row in self.nodes:
        chars = []
        for node in row:
            if node == start:
                chars.append(start_chr)
            elif node == end:
                chars.append(end_chr)
            elif path and ((node.x, node.y) in path or node in path):
                chars.append(path_chr)
            elif node.walkable:
                if show_weight:
                    chars.append(str(node.weight) if node.weight < 10 else '+')
                else:
                    chars.append(empty_chr)
            else:
                chars.append(block_chr)
        line = ''.join(chars)
        if border:
            line = '|' + line + '|'
        lines.append(line)
    if border:
        lines.append(horizontal)
    return '\n'.join(lines)
create a printable string from the grid using ASCII characters
15,171
def load():
    """Load the libzbar shared library and its dependencies.

    Returns a (libzbar, dependencies) tuple of ctypes library objects.  On
    non-Windows platforms the library is located via ctypes.util; on
    Windows, bundled DLLs are tried from the current directory first, then
    from this package's directory.
    """
    if platform.system() != 'Windows':
        path = find_library('zbar')
        if not path:
            raise ImportError('Unable to find zbar shared library')
        return cdll.LoadLibrary(path), []

    fname, dependencies = _windows_fnames()

    def load_objects(directory):
        # Load dependent DLLs before libzbar itself so the loader can
        # resolve them from the same directory.
        deps = [
            cdll.LoadLibrary(str(directory.joinpath(dep)))
            for dep in dependencies
        ]
        libzbar = cdll.LoadLibrary(str(directory.joinpath(fname)))
        return deps, libzbar

    try:
        deps, libzbar = load_objects(Path(''))
    except OSError:
        deps, libzbar = load_objects(Path(__file__).parent)
    return libzbar, deps
Loads the libzbar shared library and its dependencies .
15,172
def load_libzbar():
    """Load (once) and cache the zbar shared library and its dependencies.

    Populates the module-level LIBZBAR / EXTERNAL_DEPENDENCIES caches on
    first use and returns the cached library thereafter.
    """
    global LIBZBAR
    global EXTERNAL_DEPENDENCIES
    if not LIBZBAR:
        LIBZBAR, dependencies = zbar_library.load()
        EXTERNAL_DEPENDENCIES = [LIBZBAR] + dependencies
    return LIBZBAR
Loads the zbar shared library and its dependencies .
15,173
def zbar_function(fname, restype, *args):
    """Return a foreign function exported by zbar.

    :param fname: exported symbol name
    :param restype: ctypes return type
    :param args: ctypes argument types
    """
    prototype = CFUNCTYPE(restype, *args)
    return prototype((fname, load_libzbar()))
Returns a foreign function exported by zbar .
15,174
def _symbols_for_image(image):
    """Yield each symbol attached to a zbar image, in scan order."""
    current = zbar_image_first_symbol(image)
    while current:
        yield current
        current = zbar_symbol_next(current)
Generator of symbols .
15,175
def _decode_symbols(symbols):
    """Yield a Decoded tuple (data, type, rect, polygon) for every symbol."""
    for symbol in symbols:
        data = string_at(zbar_symbol_get_data(symbol))
        symbol_type = ZBarSymbol(symbol.contents.type).name
        # convex hull of the symbol's location points, in scan order
        polygon = convex_hull(
            (zbar_symbol_get_loc_x(symbol, i), zbar_symbol_get_loc_y(symbol, i))
            for i in _RANGEFN(zbar_symbol_get_loc_size(symbol))
        )
        yield Decoded(
            data=data,
            type=symbol_type,
            rect=bounding_box(polygon),
            polygon=polygon,
        )
Generator of decoded symbol information .
15,176
def render_app_name(context, app, template="/admin_app_name.html"):
    """Render the application name using the default template name.

    If no matching template can be rendered, fall back to the application's
    plain name.
    """
    try:
        template = app['app_label'] + template
        text = render_to_string(template, context)
    except Exception:
        # Was a bare `except:`, which also trapped SystemExit and
        # KeyboardInterrupt; the broad fallback itself is intentional.
        text = app['name']
    return text
Render the application name using the default template name . If it cannot find a template matching the given path fallback to the application name .
15,177
def render_app_label(context, app, fallback=""):
    """Render the application label.

    Returns `fallback` when the label key is missing, and the `app` value
    itself when it is not a mapping at all.
    """
    try:
        return app['app_label']
    except KeyError:
        return fallback
    except TypeError:
        return app
Render the application label .
15,178
def render_app_description(context, app, fallback="",
                           template="/admin_app_description.html"):
    """Render the application description using the default template name.

    If no matching template can be rendered, fall back to the `fallback`
    argument.
    """
    try:
        template = app['app_label'] + template
        text = render_to_string(template, context)
    except Exception:
        # Was a bare `except:`, which also trapped SystemExit and
        # KeyboardInterrupt; the broad fallback itself is intentional.
        text = fallback
    return text
Render the application description using the default template name . If it cannot find a template matching the given path fallback to the fallback argument .
15,179
def custom_field_rendering(context, field, *args, **kwargs):
    """Render a form field via the externally configured renderer, if any.

    CUSTOM_FIELD_RENDERER is a dotted "module.Class" path; when unset (or
    the class resolves falsy) the field is returned unrendered.
    """
    if not CUSTOM_FIELD_RENDERER:
        return field
    mod, cls = CUSTOM_FIELD_RENDERER.rsplit(".", 1)
    field_renderer = getattr(import_module(mod), cls)
    if field_renderer:
        return field_renderer(field, **kwargs).render()
    return field
Wrapper for rendering the field via an external renderer
15,180
def filenames(self):
    """List of file names the data is originally being read from.

    A reader holds the names itself; any other pipeline element delegates
    to its data producer.
    """
    if not self._is_reader:
        return self.data_producer.filenames
    assert self._filenames is not None
    return self._filenames
list of file names the data is originally being read from .
15,181
def _data_flow_chain ( self ) : if self . data_producer is None : return [ ] res = [ ] ds = self . data_producer while not ds . is_reader : res . append ( ds ) ds = ds . data_producer res . append ( ds ) res = res [ : : - 1 ] return res
Get a list of all elements in the data flow graph . The first element is the original source the next one reads from the prior and so on and so forth .
15,182
def number_of_trajectories(self, stride=None):
    """Return the number of trajectories.

    Under a random-access stride (an (itraj, frame) array) only the
    trajectories actually referenced are counted.
    """
    if IteratorState.is_uniform_stride(stride):
        return self.ntraj
    return len(np.unique(stride[:, 0]))
r Returns the number of trajectories .
15,183
def trajectory_length(self, itraj, stride=1, skip=0):
    """Return the length of the trajectory with the requested index,
    accounting for stride and skip (or for a random-access stride array).

    Raises IndexError when itraj exceeds the number of data sets.
    """
    if itraj >= self.ntraj:
        raise IndexError("given index (%s) exceeds number of data sets (%s)."
                         " Zero based indexing!" % (itraj, self.ntraj))
    if not IteratorState.is_uniform_stride(stride):
        # random-access: count the rows referring to this trajectory
        selection = stride[stride[:, 0] == itraj][:, 0]
        return 0 if itraj not in selection else len(selection)
    # uniform stride: frames remaining after skip, sampled every `stride`
    return max((self._lengths[itraj] - skip - 1) // int(stride) + 1, 0)
r Returns the length of trajectory of the requested index .
15,184
def trajectory_lengths(self, stride=1, skip=0):
    """Return the length of each trajectory as an integer array.

    NOTE(review): for random-access strides, `skip` is not forwarded to
    trajectory_length (matching the original behavior).
    """
    n = self.ntraj
    if not IteratorState.is_uniform_stride(stride):
        lengths = (self.trajectory_length(itraj, stride) for itraj in range(n))
    else:
        lengths = (self.trajectory_length(itraj, stride, skip) for itraj in range(n))
    return np.fromiter(lengths, dtype=int, count=n)
r Returns the length of each trajectory .
15,185
def n_frames_total(self, stride=1, skip=0):
    """Return the total number of frames over all trajectories."""
    if not IteratorState.is_uniform_stride(stride):
        # random-access stride: one frame per (itraj, frame) row
        return len(stride)
    return sum(self.trajectory_lengths(stride=stride, skip=skip))
r Returns total number of frames .
15,186
def write_to_csv(self, filename=None, extension='.dat', overwrite=False,
                 stride=1, chunksize=None, **kw):
    """Write all data to one or more text files via numpy.savetxt.

    Parameters
    ----------
    filename : str or None
        Output file name; may contain the placeholders ``{itraj}`` and
        ``{stride}``.  If None, names are derived from self.filenames by
        swapping the extension.
    extension : str
        Extension used when deriving names from self.filenames.
    overwrite : bool
        If False (default), refuse to overwrite existing files.
    stride : int
        Only write every n'th frame.
    chunksize : int or None
        Chunk size for the underlying iterator.
    **kw
        Forwarded to numpy.savetxt.
    """
    import os
    import errno
    if not filename:
        assert hasattr(self, 'filenames')
        filenames = []
        for f in self.filenames:
            base, _ = os.path.splitext(f)
            filenames.append(base + extension)
    elif isinstance(filename, str):
        filename = filename.replace('{stride}', str(stride))
        filenames = [filename.replace('{itraj}', str(itraj))
                     for itraj in range(self.number_of_trajectories())]
    else:
        raise TypeError("filename should be str or None")
    self.logger.debug("write_to_csv, filenames=%s" % filenames)
    # Refuse to clobber existing files unless asked to.  Fix: the original
    # raised `OSError(errno.EEXIST)` with a single argument, which leaves
    # e.errno unset -- so its own except-clause never matched and the
    # `overwrite=True` path re-raised instead of continuing.
    if not overwrite:
        for f in filenames:
            if os.path.exists(f):
                raise OSError(errno.EEXIST, os.strerror(errno.EEXIST), f)
    f = None
    from pyemma._base.progress import ProgressReporter
    pg = ProgressReporter()
    it = self.iterator(stride, chunk=chunksize, return_trajindex=False)
    pg.register(it.n_chunks, "saving to csv")
    with it, pg.context():
        oldtraj = -1
        for X in it:
            if oldtraj != it.current_trajindex:
                # switched to a new trajectory: rotate the output file
                if f is not None:
                    f.close()
                fn = filenames[it.current_trajindex]
                self.logger.debug("opening file %s for writing csv." % fn)
                f = open(fn, 'wb')
                oldtraj = it.current_trajindex
            np.savetxt(f, X, **kw)
            f.flush()
            pg.update(1, 0)
    if f is not None:
        f.close()
write all data to csv with numpy . savetxt
15,187
def n_chunks(self):
    """Rough estimate of how many chunks will be processed by this iterator."""
    source = self._data_source
    return source.n_chunks(self.chunksize, stride=self.stride, skip=self.skip)
rough estimate of how many chunks will be processed
15,188
def _select_file_guard ( datasource_method ) : from functools import wraps @ wraps ( datasource_method ) def wrapper ( self , itraj ) : if itraj == self . _selected_itraj : return datasource_method ( self , itraj ) self . _itraj = self . _selected_itraj = itraj return wrapper
in case we call _select_file multiple times with the same value we do not want to reopen file handles .
15,189
def name(self):
    """The name of this instance, lazily generated and cached on first
    access as "<module>.<class>[<unique id>]"."""
    try:
        return self._name
    except AttributeError:
        self._name = "%s.%s[%i]" % (self.__module__,
                                    self.__class__.__name__,
                                    next(Loggable.__ids))
        return self._name
The name of this instance
15,190
def _get_model_param_names(cls):
    """Get parameter names for the model from set_model_params' signature.

    Returns an empty list when the class defines no set_model_params.
    Raises RuntimeError when set_model_params uses varargs.
    """
    if not hasattr(cls, 'set_model_params'):
        return []
    args, varargs, kw, default = getargspec_no_self(cls.set_model_params)
    if varargs is not None:
        raise RuntimeError("PyEMMA models should always specify their parameters in the signature"
                           " of their set_model_params (no varargs). %s doesn't follow this convention."
                           % (cls, ))
    return args
r Get parameter names for the model
15,191
def update_model_params(self, **params):
    """Update given model parameters.

    A parameter is assigned unless it already exists with a non-None value
    and the new value is None (i.e. None never overwrites real data).
    """
    for name, value in params.items():
        if (not hasattr(self, name)
                or getattr(self, name) is None
                or value is not None):
            setattr(self, name, value)
r Update given model parameter if they are set to specific values
15,192
def get_model_params(self, deep=True):
    """Get parameters for this model.

    Parameters
    ----------
    deep : bool
        If True, also collect parameters of contained sub-objects that
        implement get_params, using the ``<name>__<param>`` convention.

    Returns
    -------
    dict
        Mapping of parameter names to values; attributes whose access
        triggers a deprecation warning are skipped.
    """
    out = dict()
    for key in self._get_model_param_names():
        from pyemma.util.exceptions import PyEMMA_DeprecationWarning
        # Temporarily force deprecation warnings to always fire so that
        # deprecated attributes can be detected (and skipped) below.
        warnings.simplefilter("always", DeprecationWarning)
        warnings.simplefilter("always", PyEMMA_DeprecationWarning)
        try:
            with warnings.catch_warnings(record=True) as w:
                value = getattr(self, key, None)
            if len(w) and w[0].category in (DeprecationWarning,
                                            PyEMMA_DeprecationWarning):
                continue
        finally:
            # remove the two filters pushed above (simplefilter prepends)
            warnings.filters.pop(0)
            warnings.filters.pop(0)
        if deep and hasattr(value, 'get_params'):
            deep_items = list(value.get_params().items())
            out.update((key + '__' + k, val) for k, val in deep_items)
        out[key] = value
    return out
r Get parameters for this model .
15,193
def sample_f(self, f, *args, **kwargs):
    """Evaluate method/attribute `f` on every sampled model and return the
    list of results."""
    self._check_samples_available()
    return [call_member(model, f, *args, **kwargs) for model in self.samples]
r Evaluated method f for all samples
15,194
def sample_mean(self, f, *args, **kwargs):
    """Sample mean of numerical method `f` over all samples."""
    values = self.sample_f(f, *args, **kwargs)
    return _np.mean(values, axis=0)
r Sample mean of numerical method f over all samples
15,195
def sample_std(self, f, *args, **kwargs):
    """Sample standard deviation of numerical method `f` over all samples."""
    values = self.sample_f(f, *args, **kwargs)
    return _np.std(values, axis=0)
r Sample standard deviation of numerical method f over all samples
15,196
def sample_conf(self, f, *args, **kwargs):
    """Sample confidence interval of numerical method `f` over all samples,
    at this object's configured confidence level."""
    values = self.sample_f(f, *args, **kwargs)
    return confidence_interval(values, conf=self.conf)
r Sample confidence interval of numerical method f over all samples
15,197
def describe(self):
    """Return human-readable descriptions, one string per selected feature
    dimension, concatenated over all active features."""
    labels = []
    for feature in self.active_features:
        labels.extend(feature.describe())
    return labels
Returns a list of strings one for each feature selected with human - readable descriptions of the features .
15,198
def add_distances(self, indices, periodic=True, indices2=None):
    """Add the distances between the given atom pairs to the feature list.

    `indices`/`indices2` are parsed into a pairwise atom-index array,
    validated against the topology, and wrapped in a DistanceFeature.
    """
    from .distances import DistanceFeature

    pairs = _parse_pairwise_input(indices, indices2, self.logger,
                                  fname='add_distances()')
    pairs = self._check_indices(pairs)
    feature = DistanceFeature(self.topology, pairs, periodic=periodic)
    self.__add_feature(feature)
r Adds the distances between atoms to the feature list .
15,199
def add_distances_ca(self, periodic=True, excluded_neighbors=2):
    """Add the distances between all C-alpha atom pairs to the feature list,
    excluding pairs of residues closer than `excluded_neighbors`."""
    ca_atoms = self.select_Ca()
    ca_residues = [self.topology.atom(a).residue.index for a in ca_atoms]
    residue_pairs = self.pairs(ca_residues, excluded_neighbors=excluded_neighbors)
    # translate residue pairs back into CA atom-index pairs
    pair_indexes = np.array([
        [self.topology.residue(ri).atom('CA').index,
         self.topology.residue(rj).atom('CA').index]
        for ri, rj in residue_pairs
    ])
    self.add_distances(pair_indexes, periodic=periodic)
Adds the distances between all Ca s to the feature list .