idx
int64
0
63k
question
stringlengths
61
4.03k
target
stringlengths
6
1.23k
24,900
def add_property_orders(query_proto, *orders):
    """Add ordering constraints to the given datastore.Query proto message.

    Each order is a property name; a leading '-' requests descending order.
    """
    for spec in orders:
        order_proto = query_proto.order.add()
        if spec[0] == '-':
            spec = spec[1:]
            order_proto.direction = query_pb2.PropertyOrder.DESCENDING
        else:
            order_proto.direction = query_pb2.PropertyOrder.ASCENDING
        order_proto.property.name = spec
Add ordering constraint for the given datastore . Query proto message .
24,901
def add_projection(query_proto, *projection):
    """Add projection properties to the given datastore.Query proto message."""
    for prop_name in projection:
        query_proto.projection.add().property.name = prop_name
Add projection properties to the given datastore . Query proto message .
24,902
def set_property_filter(filter_proto, name, op, value):
    """Set a property filter constraint in the given datastore.Filter proto.

    Returns the modified filter_proto so calls can be chained.
    """
    filter_proto.Clear()
    prop_filter = filter_proto.property_filter
    prop_filter.property.name = name
    prop_filter.op = op
    set_value(prop_filter.value, value)
    return filter_proto
Set property filter constraint in the given datastore . Filter proto message .
24,903
def set_composite_filter(filter_proto, op, *filters):
    """Set a composite filter constraint in the given datastore.Filter proto.

    Returns the modified filter_proto so calls can be chained.
    """
    filter_proto.Clear()
    composite = filter_proto.composite_filter
    composite.op = op
    for sub_filter in filters:
        composite.filters.add().CopyFrom(sub_filter)
    return filter_proto
Set composite filter constraint in the given datastore . Filter proto message .
24,904
def micros_to_timestamp(micros, timestamp):
    """Convert microseconds from the UTC epoch into a google.protobuf.Timestamp.

    Args:
        micros: integer count of microseconds since the UTC epoch.
        timestamp: a google.protobuf.Timestamp message, populated in place.
    """
    # BUG FIX: the original used the Python 2-only long() builtin, which
    # raises NameError on Python 3.  divmod keeps everything in integer
    # arithmetic and computes quotient and remainder in one step.
    seconds, micro_remainder = divmod(micros, _MICROS_PER_SECOND)
    timestamp.seconds = seconds
    timestamp.nanos = micro_remainder * _NANOS_PER_MICRO
Convert microseconds from utc epoch to google . protobuf . timestamp .
24,905
def to_timestamp(dt, timestamp):
    """Convert a naive UTC datetime into a google.protobuf.Timestamp.

    Raises:
        TypeError: if dt is timezone aware.
    """
    if dt.tzinfo:
        raise TypeError('Cannot store a timezone aware datetime. '
                        'Convert to UTC and store the naive datetime.')
    timestamp.seconds = calendar.timegm(dt.timetuple())
    timestamp.nanos = dt.microsecond * _NANOS_PER_MICRO
Convert datetime to google . protobuf . Timestamp .
24,906
def _extract_params ( self , kwargs , hyperparameters ) : init_params = dict ( ) fit_params = dict ( ) produce_params = dict ( ) for name , param in hyperparameters . get ( 'fixed' , dict ( ) ) . items ( ) : if name in kwargs : value = kwargs . pop ( name ) elif 'default' in param : value = param [ 'default' ] else : raise TypeError ( "{} required argument '{}' not found" . format ( self . name , name ) ) init_params [ name ] = value for name , param in hyperparameters . get ( 'tunable' , dict ( ) ) . items ( ) : if name in kwargs : init_params [ name ] = kwargs . pop ( name ) fit_args = [ arg [ 'name' ] for arg in self . fit_args ] produce_args = [ arg [ 'name' ] for arg in self . produce_args ] for name in list ( kwargs . keys ( ) ) : if name in fit_args : fit_params [ name ] = kwargs . pop ( name ) elif name in produce_args : produce_params [ name ] = kwargs . pop ( name ) if kwargs : error = "Unexpected hyperparameters '{}'" . format ( ', ' . join ( kwargs . keys ( ) ) ) raise TypeError ( error ) return init_params , fit_params , produce_params
Extract init fit and produce params from kwargs .
24,907
def set_hyperparameters(self, hyperparameters):
    """Merge new hyperparameter values into the current set.

    When the primitive is a class, a fresh instance is created so the
    new hyperparameters take effect.
    """
    self._hyperparameters.update(hyperparameters)
    if not self._class:
        return

    LOGGER.debug('Creating a new primitive instance for %s', self.name)
    self.instance = self.primitive(**self._hyperparameters)
Set new hyperparameters .
24,908
def fit(self, **kwargs):
    """Call the fit method of the primitive, if it declares one.

    Stored fit params act as defaults and can be overridden via kwargs.
    """
    if self.fit_method is None:
        return

    fit_kwargs = dict(self._fit_params, **kwargs)
    getattr(self.instance, self.fit_method)(**fit_kwargs)
Call the fit method of the primitive .
24,909
def produce(self, **kwargs):
    """Call the primitive function, or the produce method of the instance."""
    produce_kwargs = dict(self._produce_params, **kwargs)

    if self._class:
        producer = getattr(self.instance, self.produce_method)
        return producer(**produce_kwargs)

    # The primitive is a plain function: hyperparameters are passed along.
    produce_kwargs.update(self._hyperparameters)
    return self.primitive(**produce_kwargs)
Call the primitive function or the predict method of the primitive .
24,910
def get_hyperparameters(self):
    """Return the current hyperparameters of each block, keyed by block name."""
    return {
        block_name: block.get_hyperparameters()
        for block_name, block in self.blocks.items()
    }
Get the current hyperparameters of each block .
24,911
def set_hyperparameters(self, hyperparameters):
    """Propagate new hyperparameter values to the corresponding blocks."""
    for block_name in hyperparameters:
        self.blocks[block_name].set_hyperparameters(hyperparameters[block_name])
Set new hyperparameter values for some blocks .
24,912
def fit(self, X=None, y=None, **kwargs):
    """Fit the blocks of this pipeline.

    Each block is fitted in order; every block except the last is then
    asked to produce its output, which is merged into the shared context
    so it is available to the following blocks.
    """
    context = {'X': X, 'y': y}
    context.update(kwargs)

    last_block_name = list(self.blocks.keys())[-1]
    for block_name, block in self.blocks.items():
        LOGGER.debug("Fitting block %s", block_name)
        try:
            fit_args = self._get_block_args(block_name, block.fit_args, context)
            block.fit(**fit_args)
        except Exception:
            LOGGER.exception("Exception caught fitting MLBlock %s", block_name)
            raise

        # The last block's output is never needed during fit.
        if block_name != last_block_name:
            LOGGER.debug("Producing block %s", block_name)
            try:
                produce_args = self._get_block_args(block_name, block.produce_args, context)
                outputs = block.produce(**produce_args)
                output_dict = self._get_outputs(block_name, outputs, block.produce_output)
                context.update(output_dict)
            except Exception:
                LOGGER.exception("Exception caught producing MLBlock %s", block_name)
                raise
Fit the blocks of this pipeline .
24,913
def predict(self, X=None, **kwargs):
    """Produce predictions using the blocks of this pipeline.

    Every block's produce output except the last one's is merged into the
    shared context for use by later blocks; the last block's raw outputs
    are returned to the caller.
    """
    context = {'X': X}
    context.update(kwargs)

    last_block_name = list(self.blocks.keys())[-1]
    for block_name, block in self.blocks.items():
        LOGGER.debug("Producing block %s", block_name)
        try:
            produce_args = self._get_block_args(block_name, block.produce_args, context)
            outputs = block.produce(**produce_args)
            if block_name != last_block_name:
                output_dict = self._get_outputs(block_name, outputs, block.produce_output)
                context.update(output_dict)
        except Exception:
            LOGGER.exception("Exception caught producing MLBlock %s", block_name)
            raise

    return outputs
Produce predictions using the blocks of this pipeline .
24,914
def to_dict(self):
    """Return all the details of this MLPipeline as a dict."""
    return dict(
        primitives=self.primitives,
        init_params=self.init_params,
        input_names=self.input_names,
        output_names=self.output_names,
        hyperparameters=self.get_hyperparameters(),
        tunable_hyperparameters=self._tunable_hyperparameters,
    )
Return all the details of this MLPipeline in a dict .
24,915
def save(self, path):
    """Serialize this MLPipeline and write it to `path` as indented JSON."""
    spec = self.to_dict()
    with open(path, 'w') as out_file:
        json.dump(spec, out_file, indent=4)
Save the specification of this MLPipeline in a JSON file .
24,916
def from_dict(cls, metadata):
    """Create a new MLPipeline from a dict specification."""
    pipeline = cls(
        metadata['primitives'],
        metadata.get('init_params'),
        metadata.get('input_names'),
        metadata.get('output_names'),
    )

    hyperparameters = metadata.get('hyperparameters')
    if hyperparameters:
        pipeline.set_hyperparameters(hyperparameters)

    tunable = metadata.get('tunable_hyperparameters')
    if tunable is not None:
        pipeline._tunable_hyperparameters = tunable

    return pipeline
Create a new MLPipeline from a dict specification .
24,917
def load(cls, path):
    """Create a new MLPipeline from the JSON specification stored at `path`."""
    with open(path, 'r') as in_file:
        return cls.from_dict(json.load(in_file))
Create a new MLPipeline from a JSON specification .
24,918
def add_primitives_path(path):
    """Add a new path to look for primitives, with highest priority.

    Raises:
        ValueError: if the path is not an existing directory.
    """
    if path in _PRIMITIVES_PATHS:
        return

    if not os.path.isdir(path):
        raise ValueError('Invalid path: {}'.format(path))

    LOGGER.debug('Adding new primitives path %s', path)
    _PRIMITIVES_PATHS.insert(0, os.path.abspath(path))
Add a new path to look for primitives .
24,919
def get_primitives_paths():
    """Return the list of folders where primitives will be looked for.

    Paths advertised through the 'mlprimitives' jsons_path entry point
    are appended after the explicitly configured ones.
    """
    entry_point_paths = [
        entry_point.load()
        for entry_point in pkg_resources.iter_entry_points('mlprimitives')
        if entry_point.name == 'jsons_path'
    ]
    return _PRIMITIVES_PATHS + entry_point_paths
Get the list of folders where the primitives will be looked for .
24,920
def load_primitive(name):
    """Locate and load the JSON annotation of the given primitive.

    The dotted name is interpreted as every possible split between folder
    components and a file name, tried inside each known primitives folder.

    Raises:
        ValueError: if no annotation file could be found.
    """
    parts = name.split('.')
    for base_path in get_primitives_paths():
        for split_point in range(len(parts)):
            folder = os.path.join(base_path, *parts[:split_point])
            filename = '.'.join(parts[split_point:]) + '.json'
            json_path = os.path.join(folder, filename)

            if os.path.isfile(json_path):
                with open(json_path, 'r') as json_file:
                    LOGGER.debug('Loading primitive %s from %s', name, json_path)
                    return json.load(json_file)

    raise ValueError("Unknown primitive: {}".format(name))
Locate and load the JSON annotation of the given primitive .
24,921
def load_usps():
    """USPS Digits Dataset."""
    dataset_path = _load('usps')
    data = _load_csv(dataset_path, 'data')
    images = _load_images(os.path.join(dataset_path, 'images'), data.image)
    return Dataset(load_usps.__doc__, images, data.label.values,
                   accuracy_score, stratify=True)
USPS Digits Dataset .
24,922
def load_handgeometry():
    """Hand Geometry Dataset."""
    dataset_path = _load('handgeometry')
    data = _load_csv(dataset_path, 'data')
    images = _load_images(os.path.join(dataset_path, 'images'), data.image)
    return Dataset(load_handgeometry.__doc__, images, data.target.values, r2_score)
Hand Geometry Dataset .
24,923
def load_personae():
    """Personae Dataset."""
    dataset_path = _load('personae')
    features = _load_csv(dataset_path, 'data')
    labels = features.pop('label').values
    return Dataset(load_personae.__doc__, features, labels,
                   accuracy_score, stratify=True)
Personae Dataset .
24,924
def load_umls():
    """UMLS Dataset."""
    dataset_path = _load('umls')
    features = _load_csv(dataset_path, 'data')
    labels = features.pop('label').values
    graph = nx.Graph(nx.read_gml(os.path.join(dataset_path, 'graph.gml')))
    return Dataset(load_umls.__doc__, features, labels, accuracy_score,
                   stratify=True, graph=graph)
UMLs Dataset .
24,925
def load_dic28():
    """DIC28 Dataset from Pajek."""
    dataset_path = _load('dic28')
    features = _load_csv(dataset_path, 'data')
    labels = features.pop('label').values

    graph1 = nx.Graph(nx.read_gml(os.path.join(dataset_path, 'graph1.gml')))
    graph2 = nx.Graph(nx.read_gml(os.path.join(dataset_path, 'graph2.gml')))

    # Combined graph: both partitions plus the candidate matching edges
    # listed in the data table.
    graph = graph1.copy()
    graph.add_nodes_from(graph2.nodes(data=True))
    graph.add_edges_from(graph2.edges)
    graph.add_edges_from(features[['graph1', 'graph2']].values)

    return Dataset(load_dic28.__doc__, features, labels, accuracy_score,
                   stratify=True, graph=graph,
                   graphs={'graph1': graph1, 'graph2': graph2})
DIC28 Dataset from Pajek .
24,926
def load_amazon():
    """Amazon product co-purchasing network and ground-truth communities."""
    dataset_path = _load('amazon')
    features = _load_csv(dataset_path, 'data')
    labels = features.pop('label').values
    graph = nx.Graph(nx.read_gml(os.path.join(dataset_path, 'graph.gml')))
    return Dataset(load_amazon.__doc__, features, labels,
                   normalized_mutual_info_score, graph=graph)
Amazon product co - purchasing network and ground - truth communities .
24,927
def load_jester():
    """Ratings from the Jester Online Joke Recommender System."""
    dataset_path = _load('jester')
    features = _load_csv(dataset_path, 'data')
    ratings = features.pop('rating').values
    return Dataset(load_jester.__doc__, features, ratings, r2_score)
Ratings from the Jester Online Joke Recommender System .
24,928
def load_wikiqa():
    """A Challenge Dataset for Open-Domain Question Answering."""
    dataset_path = _load('wikiqa')

    data = _load_csv(dataset_path, 'data', set_index=True)
    questions = _load_csv(dataset_path, 'questions', set_index=True)
    sentences = _load_csv(dataset_path, 'sentences', set_index=True)
    vocabulary = _load_csv(dataset_path, 'vocabulary', set_index=True)

    entities = {
        'data': (data, 'd3mIndex', None),
        'questions': (questions, 'qIndex', None),
        'sentences': (sentences, 'sIndex', None),
        'vocabulary': (vocabulary, 'index', None)
    }
    relationships = [
        ('questions', 'qIndex', 'data', 'qIndex'),
        ('sentences', 'sIndex', 'data', 'sIndex')
    ]

    target = data.pop('isAnswer').values

    # BUG FIX: the keyword was misspelled 'startify', so the stratified
    # split was never requested (presumably swallowed by Dataset's
    # **kwargs — confirm against Dataset's signature).
    return Dataset(load_wikiqa.__doc__, data, target, accuracy_score,
                   stratify=True, entities=entities,
                   relationships=relationships)
A Challenge Dataset for Open - Domain Question Answering .
24,929
def load_newsgroups():
    """20 News Groups Dataset."""
    bunch = datasets.fetch_20newsgroups()
    return Dataset(load_newsgroups.__doc__, np.array(bunch.data),
                   bunch.target, accuracy_score, stratify=True)
20 News Groups Dataset .
24,930
def load_iris():
    """Iris Dataset."""
    bunch = datasets.load_iris()
    return Dataset(load_iris.__doc__, bunch.data, bunch.target,
                   accuracy_score, stratify=True)
Iris Dataset .
24,931
def load_boston():
    """Boston House Prices Dataset."""
    bunch = datasets.load_boston()
    return Dataset(load_boston.__doc__, bunch.data, bunch.target, r2_score)
Boston House Prices Dataset .
24,932
def get_splits(self, n_splits=1):
    """Return splits of this dataset ready for Cross Validation.

    With n_splits == 1 a single train_test_split result is returned;
    otherwise a list of (X_train, X_test, y_train, y_test) tuples,
    one per fold.
    """
    if n_splits == 1:
        stratify = self.target if self._stratify else None
        return train_test_split(self.data, self.target,
                                shuffle=self._shuffle, stratify=stratify)

    cv_class = StratifiedKFold if self._stratify else KFold
    cv = cv_class(n_splits=n_splits, shuffle=self._shuffle)

    splits = []
    for train, test in cv.split(self.data, self.target):
        splits.append((
            self._get_split(self.data, train),
            self._get_split(self.data, test),
            self._get_split(self.target, train),
            self._get_split(self.target, test),
        ))

    return splits
Return splits of this dataset ready for Cross Validation .
24,933
def add(key, value):
    """Add context to the currently executing request as a tag."""
    TrackedRequest.instance().tag(key, value)
Adds context to the currently executing request .
24,934
def libc(cls):
    """Detect which libc flavour the system uses.

    Alpine Linux ships the stripped-down musl instead of glibc; shelling
    out to `ldd --version` is the most reliable way to tell them apart.
    Returns "musl" or "gnu" (the default when detection fails).
    """
    try:
        output = subprocess.check_output(
            ["ldd", "--version"], stderr=subprocess.STDOUT)
    except (OSError, subprocess.CalledProcessError):
        return "gnu"
    return "musl" if b"musl" in output else "gnu"
Alpine linux uses a non glibc version of the standard library it uses the stripped down musl instead . The core agent can be built against it but which one is running must be detected . Shelling out to ldd appears to be the most reliable way to do this .
24,935
def run(self):
    """Called by the threading system.

    Main loop of the CoreAgentSocket thread: connect and register, then
    drain the command queue, reconnecting after failed sends, until the
    stop event is set.
    """
    try:
        self._connect()
        self._register()
        while True:
            try:
                body = self.command_queue.get(block=True, timeout=1 * SECOND)
            except queue.Empty:
                body = None

            if body is not None:
                result = self._send(body)
                if result:
                    self.command_queue.task_done()
                else:
                    # Send failed: assume the socket is broken and
                    # re-establish the connection from scratch.
                    self._disconnect()
                    self._connect()
                    self._register()

            if self._stop_event.is_set():
                logger.debug("CoreAgentSocket thread stopping.")
                break
    except Exception:
        logger.debug("CoreAgentSocket thread exception.")
    finally:
        self._started_event.clear()
        self._stop_event.clear()
        self._stopped_event.set()
        logger.debug("CoreAgentSocket thread stopped.")
Called by the threading system
24,936
def set(cls, **kwargs):
    """Set configuration values for the Scout agent.

    Values set here will not override values set in ENV.
    """
    global SCOUT_PYTHON_VALUES
    SCOUT_PYTHON_VALUES.update(kwargs)
Sets a configuration value for the Scout agent . Values set here will not override values set in ENV .
24,937
def text(value, encoding="utf-8", errors="strict"):
    """Convert a value to str on Python 3 and unicode on Python 2."""
    if isinstance(value, text_type):
        return value
    if isinstance(value, bytes):
        return text_type(value, encoding, errors)
    return text_type(value)
Convert a value to str on Python 3 and unicode on Python 2 .
24,938
def install():
    """Install ScoutApm SQL instrumentation.

    Monkey-patches BaseDatabaseWrapper.cursor so every cursor handed out
    by Django is wrapped in a tracing cursor.
    """
    @monkeypatch_method(BaseDatabaseWrapper)
    def cursor(original, self, *args, **kwargs):
        # `original` is the unpatched cursor method, injected by
        # monkeypatch_method.
        result = original(*args, **kwargs)
        return _DetailedTracingCursorWrapper(result, self)

    logger.debug("Monkey patched SQL")
Installs ScoutApm SQL Instrumentation by monkeypatching the cursor method of BaseDatabaseWrapper to return a wrapper that instruments any calls going through it .
24,939
def dispatch_request(self):
    """Modified version of Flask.dispatch_request to call process_view.

    Mirrors the upstream implementation, but wraps the resolved view
    function before invoking it so the request can be traced.
    """
    req = _request_ctx_stack.top.request
    app = current_app

    # Same early exits as Flask's own dispatch_request.
    if req.method == "OPTIONS":
        return app.make_default_options_response()
    if req.routing_exception is not None:
        app.raise_routing_exception(req)

    rule = req.url_rule
    view_func = self.wrap_view_func(
        app, rule, req, app.view_functions[rule.endpoint], req.view_args)
    return view_func(**req.view_args)
Modified version of Flask . dispatch_request to call process_view .
24,940
def wrap_view_func(self, app, rule, req, view_func, view_kwargs):
    """Wrap the flask view just before dispatch_request calls it."""
    operation = view_func.__module__ + "." + view_func.__name__
    span_info = ("Controller", {"path": req.path, "name": operation})
    return self.trace_view_function(view_func, span_info)
This method is called just before the flask view is called . This is done by the dispatch_request method .
24,941
def process_view(self, request, view_func, view_args, view_kwargs):
    """Capture details about the view_func that is about to execute.

    Best-effort: any failure is swallowed so instrumentation can never
    break request handling.
    """
    try:
        if ignore_path(request.path):
            TrackedRequest.instance().tag("ignore_transaction", True)

        view_name = request.resolver_match._func_path
        span = TrackedRequest.instance().current_span()
        if span is not None:
            span.operation = "Controller/" + view_name

        Context.add("path", request.path)
        Context.add("user_ip", RemoteIp.lookup_from_headers(request.META))
        if getattr(request, "user", None) is not None:
            Context.add("username", request.user.get_username())
    except Exception:
        # Deliberately broad: tracing must not affect the application.
        pass
Capture details about the view_func that is about to execute
24,942
def create(hypervisor, identifier, configuration):
    """Create a virtual network according to the given configuration.

    When 'dynamic_address' is configured a fresh subnet address is
    generated on every attempt; creation is retried until libvirt
    accepts the XML or MAX_ATTEMPTS failures have accumulated.

    Raises:
        RuntimeError: if neither 'configuration' nor 'dynamic_address'
            is given, or if all creation attempts failed.
    """
    counter = count()
    xml_config = DEFAULT_NETWORK_XML

    if not {'configuration', 'dynamic_address'} & set(configuration.keys()):
        raise RuntimeError("Either configuration or dynamic_address must be specified")

    if 'configuration' in configuration:
        with open(configuration['configuration']) as xml_file:
            xml_config = xml_file.read()

    while True:
        if 'dynamic_address' in configuration:
            address = generate_address(hypervisor, configuration['dynamic_address'])
            xml_string = network_xml(identifier, xml_config, address=address)
        else:
            xml_string = network_xml(identifier, xml_config)

        try:
            return hypervisor.networkCreateXML(xml_string)
        except libvirt.libvirtError as error:
            # Address collisions are possible: retry with a new address
            # until the attempt budget is exhausted.
            if next(counter) > MAX_ATTEMPTS:
                raise RuntimeError(
                    "Exceeded failed attempts ({}) to get IP address.".format(MAX_ATTEMPTS),
                    "Last error: {}".format(error))
Creates a virtual network according to the given configuration .
24,943
def lookup(domain):
    """Find the virNetwork object associated to the domain.

    Returns None when the domain has no network interface.
    """
    config = etree.fromstring(domain.XMLDesc(0))
    interface = config.find('.//interface[@type="network"]')
    if interface is None:
        return None

    network_name = interface.find('.//source').get('network')
    return domain.connect().networkLookupByName(network_name)
Find the virNetwork object associated to the domain .
24,944
def delete(network):
    """Destroy the given libvirt network.

    Raises:
        RuntimeError: if the network could not be destroyed.
    """
    try:
        network.destroy()
    except libvirt.libvirtError as err:
        raise RuntimeError("Unable to destroy network: {}".format(err))
libvirt network cleanup .
24,945
def set_address(network, address):
    """Attach the given IPv4 subnet to the network XML element.

    The first usable host becomes the gateway IP and hosts [2] through
    [-2] form the DHCP range.

    Raises:
        RuntimeError: if the XML already declares an ip element.
    """
    if network.find('.//ip') is not None:
        raise RuntimeError("Address already specified in XML configuration.")

    ip = etree.SubElement(
        network, 'ip',
        address=str(address[1]), netmask=str(address.netmask))
    dhcp = etree.SubElement(ip, 'dhcp')
    etree.SubElement(
        dhcp, 'range', start=str(address[2]), end=str(address[-2]))
Sets the given address to the network XML element .
24,946
def generate_address(hypervisor, configuration):
    """Generate a valid IP address according to the configuration."""
    subnet = ipaddress.IPv4Network(
        u'/'.join((str(configuration['ipv4']), str(configuration['prefix']))))
    pool = subnet.subnets(new_prefix=configuration['subnet_prefix'])
    return address_lookup(hypervisor, pool)
Generate a valid IP address according to the configuration .
24,947
def address_lookup(hypervisor, address_pool):
    """Pick a random network address which is not already in use.

    Raises:
        RuntimeError: when every address in the pool is taken.
    """
    available = set(address_pool) - set(active_network_addresses(hypervisor))
    try:
        return random.choice(tuple(available))
    except IndexError:
        raise RuntimeError("All IP addresses are in use")
Retrieves a valid and available network IP address .
24,948
def active_network_addresses(hypervisor):
    """Query libvirt for the addresses already reserved by active networks."""
    reserved = []
    for name in hypervisor.listNetworks():
        try:
            xml = hypervisor.networkLookupByName(name).XMLDesc(0)
        except libvirt.libvirtError:
            # The network disappeared between listing and lookup.
            continue

        ip_element = etree.fromstring(xml).find('.//ip')
        reserved.append(ipaddress.IPv4Network(
            u'/'.join((ip_element.get('address'), ip_element.get('netmask'))),
            strict=False))

    return reserved
Query libvirt for the already reserved addresses .
24,949
def interface_lookup(interfaces, hwaddr, address_type):
    """Search the interface list for an address of the given type.

    Args:
        interfaces: mapping of interface name to interface description
            dicts (each with 'hwaddr' and 'addrs' entries).
        hwaddr: MAC address identifying the wanted interface.
        address_type: address type value to filter by.

    Returns:
        The matching address string, or None when not found.
    """
    for interface in interfaces.values():
        if interface.get('hwaddr') != hwaddr:
            continue
        # BUG FIX: guard against interfaces reported without an address
        # list — the original crashed with TypeError when 'addrs' was
        # None or missing.
        for address in interface.get('addrs') or ():
            if address.get('type') == address_type:
                return address.get('addr')
    return None
Search the address within the interface list .
24,950
def mac_address(self):
    """Return the MAC address of the network interface, caching it lazily."""
    cached = self._mac_address
    if cached is None:
        cached = self._get_mac_address()
        self._mac_address = cached
    return cached
Returns the MAC address of the network interface .
24,951
def ip4_address(self):
    """Return the IPv4 address of the network interface, resolved lazily."""
    needs_lookup = self._ip4_address is None and self.network is not None
    if needs_lookup:
        self._ip4_address = self._get_ip_address(libvirt.VIR_IP_ADDR_TYPE_IPV4)
    return self._ip4_address
Returns the IPv4 address of the network interface .
24,952
def ip6_address(self):
    """Return the IPv6 address of the network interface, resolved lazily."""
    needs_lookup = self._ip6_address is None and self.network is not None
    if needs_lookup:
        self._ip6_address = self._get_ip_address(libvirt.VIR_IP_ADDR_TYPE_IPV6)
    return self._ip6_address
Returns the IPv6 address of the network interface .
24,953
def shutdown(self, timeout=None, **kwargs):
    """Shut down the Context.

    Sends an ACPI request to the OS for a clean shutdown, firing the
    'pre_shutdown' and 'post_shutdown' events around the command and
    waiting up to `timeout` for the domain to stop.
    """
    self._assert_transition('shutdown')
    self.trigger('pre_shutdown', **kwargs)

    self._execute_command(self.domain.shutdown)
    self._wait_for_shutdown(timeout)

    self.trigger('post_shutdown', **kwargs)
Shuts down the Context . Sends an ACPI request to the OS for a clean shutdown .
24,954
def _command(self, event, command, *args, **kwargs):
    """Context state controller.

    Validates the transition for `event`, fires the pre-event, runs the
    given command, then fires the post-event.
    """
    self._assert_transition(event)
    self.trigger('pre_%s' % event, **kwargs)
    self._execute_command(command, *args)
    self.trigger('post_%s' % event, **kwargs)
Context state controller .
24,955
def _assert_transition(self, event):
    """Raise RuntimeError if `event` is not valid in the current domain state."""
    current_state = self.domain.state()[0]
    if event not in STATES_MAP[current_state]:
        raise RuntimeError("State transition %s not allowed" % event)
Asserts the state transition validity .
24,956
def _execute_command(self, command, *args):
    """Run the state transition command, wrapping libvirt errors."""
    try:
        command(*args)
    except libvirt.libvirtError as err:
        raise RuntimeError("Unable to execute command. %s" % err)
Execute the state transition command .
24,957
def snapshot_to_checkpoint(volume, snapshot, folder_path):
    """Turn a QEMU internal snapshot into a standalone QCOW file.

    Returns the path of the produced checkpoint file.
    """
    create_folder(folder_path)

    snapshot_name = snapshot.getName()
    checkpoint_path = os.path.join(folder_path, '%s.qcow2' % snapshot_name)

    process = launch_process(
        QEMU_IMG, "convert", "-f", "qcow2",
        "-o", "backing_file=%s" % volume_backing_path(volume),
        "-O", "qcow2", "-s", snapshot_name,
        volume_path(volume), checkpoint_path)
    collect_process_output(process)

    return checkpoint_path
Turns a QEMU internal snapshot into a QCOW file .
24,958
def compare_disks(disk0, disk1, configuration):
    """Compare two disks according to the given configuration."""
    with DiskComparator(disk0, disk1) as comparator:
        results = comparator.compare(
            size=configuration.get('get_file_size', False),
            identify=configuration.get('identify_files', False),
            concurrent=configuration.get('use_concurrency', False))

        if configuration.get('extract_files', False):
            to_extract = results['created_files'] + results['modified_files']
            results.update(comparator.extract(
                1, to_extract, path=configuration['results_folder']))

        if configuration.get('compare_registries', False):
            results['registry'] = comparator.compare_registry(
                concurrent=configuration.get('use_concurrency', False))

    return results
Compares two disks according to the given configuration .
24,959
def start_processing_handler(self, event):
    """Asynchronous handler kicking off the disk comparison."""
    results_path = os.path.join(
        self.configuration['results_folder'], "filesystem.json")

    self.logger.debug("Event %s: start comparing %s with %s.",
                      event, self.checkpoints[0], self.checkpoints[1])
    results = compare_disks(
        self.checkpoints[0], self.checkpoints[1], self.configuration)

    with open(results_path, 'w') as results_file:
        json.dump(results, results_file)

    self.processing_done.set()
Asynchronous handler starting the disk analysis process .
24,960
def lookup_class(fully_qualified_name):
    """Import and return the class with the given fully qualified name.

    Raises:
        TypeError: if the resolved attribute is not a class.
    """
    module_name, class_name = str(fully_qualified_name).rsplit(".", 1)
    module = __import__(module_name, globals(), locals(), [class_name], 0)

    Class = getattr(module, class_name)
    if not inspect.isclass(Class):
        raise TypeError("%s is not of type class: %s" % (class_name, type(Class)))

    return Class
Given its fully qualified name finds the desired class and imports it . Returns the Class object if found .
24,961
def prime_event(event, source, **kwargs):
    """Return the event ready to be triggered, wrapping plain values in Event."""
    if isinstance(event, Event):
        return event
    return Event(event, source=source, **kwargs)
Returns the event ready to be triggered .
24,962
def asynchronous(function, event):
    """Run the function on a daemon thread, with error handling
    delegated to `synchronous`."""
    worker = Thread(target=synchronous, args=(function, event))
    worker.daemon = True
    worker.start()
Runs the function asynchronously taking care of exceptions .
24,963
def synchronous(function, event):
    """Run the function, logging (instead of propagating) any exception."""
    try:
        function(event)
    except Exception as error:
        get_function_logger(function).exception(error)
Runs the function synchronously taking care of exceptions .
24,964
def subscribe(self, event, handler):
    """Subscribe a synchronous Handler for the given Event."""
    self._handlers.sync_handlers[event].append(handler)
Subscribes a Handler for the given Event .
24,965
def subscribe_async(self, event, handler):
    """Subscribe an asynchronous Handler for the given Event."""
    self._handlers.async_handlers[event].append(handler)
Subscribes an asynchronous Handler for the given Event .
24,966
def unsubscribe(self, event, handler):
    """Unsubscribe the Handler from the given Event.

    Both synchronous and asynchronous registrations are removed.  A
    handler registered in neither list lets ValueError propagate.
    """
    try:
        self._handlers.sync_handlers[event].remove(handler)
    except ValueError:
        # Not a sync handler: it must be an async one, otherwise the
        # ValueError reaches the caller.
        self._handlers.async_handlers[event].remove(handler)
    else:
        # Removed from sync handlers; also drop any async registration.
        try:
            self._handlers.async_handlers[event].remove(handler)
        except ValueError:
            pass
Unsubscribes the Handler from the given Event . Both synchronous and asynchronous handlers are removed .
24,967
def trigger(self, event, **kwargs):
    """Trigger an event.

    Handlers run under the trigger mutex: asynchronous ones are
    dispatched first (each on its own thread), then synchronous ones
    run in order.
    """
    with self._handlers.trigger_mutex:
        # Normalise plain values into an Event instance.
        event = prime_event(event, self.__class__.__name__, **kwargs)

        for handler in self._handlers.async_handlers[event]:
            asynchronous(handler, event)

        for handler in self._handlers.sync_handlers[event]:
            synchronous(handler, event)
Triggers an event .
24,968
def provider_image(self):
    """Image path getter, resolving provider-based configurations lazily."""
    if self._image is None:
        disk_image = self.configuration['disk']['image']
        if isinstance(disk_image, dict):
            ProviderClass = lookup_provider_class(disk_image['provider'])
            self._image = ProviderClass(disk_image).image
        else:
            self._image = disk_image
    return self._image
Image path getter .
24,969
def run_command(args, asynchronous=False):
    """Execute a command returning its exit code and output.

    When asynchronous, the call only waits one second and reports a
    placeholder PopenOutput with no exit code.
    """
    logging.info("Executing %s command %s.",
                 asynchronous and 'asynchronous' or 'synchronous', args)

    # NOTE(review): shell=True combined with a list `args` only runs
    # args[0] through the shell on POSIX — confirm callers pass strings.
    process = subprocess.Popen(
        args, shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)

    try:
        # Asynchronous calls get a one-second grace period to fail fast.
        timeout = asynchronous and 1 or None
        output = process.communicate(timeout=timeout)[0].decode('utf8')
    except subprocess.TimeoutExpired:
        pass

    if asynchronous:
        return PopenOutput(None, 'Asynchronous call.')
    else:
        return PopenOutput(process.returncode, output)
Executes a command returning its exit code and output .
24,970
def respond(self, output):
    """Send a JSON-encoded HTTP response describing the command outcome."""
    payload = {'exit_code': output.code, 'command_output': output.log}

    self.send_response(200)
    self.send_header('Content-type', 'application/json')
    self.end_headers()
    self.wfile.write(bytes(json.dumps(payload), "utf8"))
Generates server response .
24,971
def store_file(self, folder, name):
    """Store the uploaded request body as folder/name and return the path."""
    path = os.path.join(folder, name)
    content_length = int(self.headers['content-length'])
    with open(path, 'wb') as sample:
        sample.write(self.rfile.read(content_length))
    return path
Stores the uploaded file in the given path .
24,972
def start_processing_handler(self, event):
    """Asynchronous handler starting the Volatility processes.

    Processes every snapshot sequentially and signals completion through
    the processing_done event.
    """
    self.logger.debug("Event %s: starting Volatility process(es).", event)
    for snapshot in self.snapshots:
        self.process_snapshot(snapshot)
    self.processing_done.set()
Asynchronous handler starting the Volatility processes .
24,973
def disk_xml(identifier, pool_xml, base_volume_xml, cow):
    """Clone the volume XML, updating the fields required for the new disk.

    Returns the new volume XML string; when cow is True a backing store
    element pointing at the base volume is appended.
    """
    pool = etree.fromstring(pool_xml)
    base_volume = etree.fromstring(base_volume_xml)

    target_path = os.path.join(pool.find('.//path').text,
                               '%s.qcow2' % identifier)

    volume = etree.fromstring(VOLUME_DEFAULT_CONFIG.format(identifier, target_path))
    volume.append(base_volume.find(".//capacity"))

    if cow:
        base_path = base_volume.find('.//target/path').text
        backing_store = etree.fromstring(BACKING_STORE_DEFAULT_CONFIG.format(base_path))
        volume.append(backing_store)

    return etree.tostring(volume).decode('utf-8')
Clones volume_xml updating the required fields .
24,974
def pool_create(hypervisor, identifier, pool_path):
    """Create a libvirt storage pool rooted at pool_path/identifier."""
    path = os.path.join(pool_path, identifier)
    if not os.path.exists(path):
        os.makedirs(path)

    return hypervisor.storagePoolCreateXML(
        POOL_DEFAULT_CONFIG.format(identifier, path), 0)
Storage pool creation .
24,975
def pool_lookup(hypervisor, disk_path):
    """Return the storage pool containing disk_path, or None if unknown."""
    try:
        volume = hypervisor.storageVolLookupByPath(disk_path)
    except libvirt.libvirtError:
        return None
    return volume.storagePoolLookupByVolume()
Storage pool lookup .
24,976
def pool_delete(storage_pool, logger):
    """Delete every disk image in the pool, the pool itself and its folder.

    Failures are logged and swallowed so the remaining steps still run.
    """
    pool_path = etree.fromstring(storage_pool.XMLDesc(0)).find('.//path').text

    volumes_delete(storage_pool, logger)

    try:
        storage_pool.destroy()
    except libvirt.libvirtError:
        logger.exception("Unable to delete storage pool.")

    try:
        if os.path.exists(pool_path):
            shutil.rmtree(pool_path)
    except EnvironmentError:
        logger.exception("Unable to delete storage pool folder.")
Storage Pool deletion removes all the created disk images within the pool and the pool itself .
24,977
def volumes_delete(storage_pool, logger):
    """Delete every storage volume contained in the given storage pool."""
    try:
        for vol_name in storage_pool.listVolumes():
            try:
                storage_pool.storageVolLookupByName(vol_name).delete(0)
            except libvirt.libvirtError:
                logger.exception("Unable to delete storage volume %s.", vol_name)
    except libvirt.libvirtError:
        logger.exception("Unable to delete storage volumes.")
Deletes all storage volume disks contained in the given storage pool .
24,978
def disk_clone(hypervisor, identifier, storage_pool, configuration, image, logger):
    """Clone the base disk image into ``storage_pool`` for a new guest.

    If libvirt does not already know the image path, a pool covering the
    image's parent folder is defined and started so the volume can be
    looked up.  Raises RuntimeError when the image file does not exist.
    """
    cow = configuration.get('copy_on_write', False)
    try:
        volume = hypervisor.storageVolLookupByPath(image)
    except libvirt.libvirtError:
        # Image exists on disk but is not managed by libvirt yet:
        # define + autostart + create + refresh a pool over its folder.
        if os.path.exists(image):
            pool_path = os.path.dirname(image)
            logger.info("LibVirt pool does not exist, creating {} pool".format(
                pool_path.replace('/', '_')))
            pool = hypervisor.storagePoolDefineXML(
                BASE_POOL_CONFIG.format(pool_path.replace('/', '_'), pool_path))
            pool.setAutostart(True)
            pool.create()
            pool.refresh()
            volume = hypervisor.storageVolLookupByPath(image)
        else:
            raise RuntimeError("%s disk does not exist." % image)
    xml = disk_xml(identifier, storage_pool.XMLDesc(0), volume.XMLDesc(0), cow)
    if cow:
        # COW clone: the new volume references the base image as backing store.
        storage_pool.createXML(xml, 0)
    else:
        # Full clone: copy the base volume's contents into the new volume.
        storage_pool.createXMLFrom(xml, volume, 0)
Disk image cloning .
24,979
def _clone_disk(self, configuration):
    """Clone the provider image into the storage pool; return the new disk path.

    NOTE(review): assumes the freshly cloned disk is the first (only)
    volume listed in the pool -- confirm pools never hold extra volumes.
    """
    disk_clone(self._hypervisor, self.identifier, self._storage_pool,
               configuration, self.provider_image, self.logger)
    cloned_name = self._storage_pool.listVolumes()[0]
    return self._storage_pool.storageVolLookupByName(cloned_name).path()
Clones the disk and returns the path to the new disk .
24,980
def load_configuration(configuration):
    """Return the configuration as a dictionary.

    Accepts either a dictionary (returned unchanged) or a path to a
    JSON file, which is parsed and returned.
    """
    if not isinstance(configuration, dict):
        with open(configuration) as configfile:
            configuration = json.load(configfile)
    return configuration
Returns a dictionary; accepts either a dictionary or a path to a JSON file.
24,981
def cleanup(logger, *args):
    """Best-effort cleanup of the given objects.

    Calls ``obj.cleanup()`` on every argument that has one.
    NotImplementedError is silently ignored; any other failure is logged
    and swallowed so the remaining objects are still cleaned up.
    """
    for candidate in args:
        if candidate is None or not hasattr(candidate, 'cleanup'):
            continue
        try:
            candidate.cleanup()
        except NotImplementedError:
            pass
        except Exception:
            logger.exception("Unable to cleanup %s object", candidate)
Environment's cleanup routine.
24,982
def allocate(self):
    """Allocate the environment, logging before and after the operation."""
    debug = self.logger.debug
    debug("Allocating environment.")
    self._allocate()
    debug("Environment successfully allocated.")
Builds the context and the Hooks .
24,983
def deallocate(self):
    """Deallocate the environment, logging before and after the operation."""
    debug = self.logger.debug
    debug("Deallocating environment.")
    self._deallocate()
    debug("Environment successfully deallocated.")
Cleans up the context and the Hooks .
24,984
def hooks_factory(identifier, configuration, context):
    """Build a HookManager, load its hooks into ``context`` and return it."""
    hook_manager = HookManager(identifier, configuration)
    hook_manager.load_hooks(context)
    return hook_manager
Returns the initialized hooks .
24,985
def load_hooks(self, context):
    """Initialise the configured hooks and load them into the environment.

    Each entry of ``self.configuration['hooks']`` must carry a 'name' and
    may carry a hook-specific 'configuration' mapping, which is merged
    with the environment-wide ``self.configuration['configuration']``
    (global values win).  Hooks without a name are logged and skipped.
    """
    common_config = self.configuration.get('configuration', {})
    for hook in self.configuration.get('hooks', ()):
        # Copy before merging so the caller's nested dict is not mutated
        # (the original updated hook['configuration'] in place).
        config = dict(hook.get('configuration', {}))
        config.update(common_config)
        try:
            self._load_hook(hook['name'], config, context)
        except KeyError:
            self.logger.exception('Provided hook has no name: %s.', hook)
Initializes the Hooks and loads them within the Environment .
24,986
def get_hash_as_int(*args, group: cmod.PairingGroup = None):
    """Hash the given arguments into a single integer.

    Every argument is serialized (group elements through the pairing
    group, integers through IP2OS); the serialized chunks are sorted,
    fed into SHA-256, and the digest is converted to an int.
    """
    group = group or cmod.PairingGroup(PAIRING_GROUP)
    digest = sha256()
    serialized = [
        group.serialize(arg) if isGroupElement(arg) else cmod.Conversion.IP2OS(arg)
        for arg in args
    ]
    # Sorting makes the hash independent of the argument order.
    for chunk in sorted(serialized):
        digest.update(chunk)
    return bytes_to_int(digest.digest())
Enumerate over the input tuple and generate a hash using the tuple values
24,987
def randomString(size: int = 20,
                 chars: str = string.ascii_letters + string.digits) -> str:
    """Return a random string of ``size`` characters drawn from ``chars``.

    Characters are drawn independently (with replacement), so any size is
    valid and characters may repeat.  The previous implementation used
    ``random.sample``, which never repeats a character and raises
    ValueError whenever size > len(chars).

    NOTE(review): this uses the non-cryptographic ``random`` module; use
    ``secrets`` instead if these strings ever guard anything sensitive.
    """
    # Local import: the module previously only imported `sample` from random.
    from random import choices
    return ''.join(choices(chars, k=size))
Generate a random string of the specified size .
24,988
def genPrime():
    """Return a random LARGE_PRIME-bit prime p such that 2*p + 1 is also prime.

    Repeatedly draws random primes until the safe-prime companion
    (2*p + 1) is prime as well.
    """
    prime = cmod.randomPrime(LARGE_PRIME)
    # Re-draw until 2*prime + 1 is prime too (the original kept an unused
    # iteration counter here; it has been removed).
    while not cmod.isPrime(2 * prime + 1):
        prime = cmod.randomPrime(LARGE_PRIME)
    return prime
Generates a large prime p_prime such that 2 * p_prime + 1 is also prime.
24,989
def encoded(self):
    """Encode the credential attribute values as integers.

    Walks every attribute type declared by the credential type; for each
    attribute present in ``self._vals``, the value is run through
    ``encodeAttr`` when the type has ``encode=True`` and copied through
    unchanged otherwise.

    Returns:
        dict mapping attribute name to its (possibly encoded) value.
    """
    encoded = {}
    for i in range(len(self.credType.names)):
        # The original code also evaluated `self.credType.names[i]` here
        # and discarded the result; that dead statement has been removed.
        for at in self.credType.attrTypes[i]:
            if at.name not in self._vals:
                continue
            if at.encode:
                encoded[at.name] = encodeAttr(self._vals[at.name])
            else:
                encoded[at.name] = self._vals[at.name]
    return encoded
This function will encode all the attributes to 256 bit integers
24,990
async def genSchema(self, name, version, attrNames) -> Schema:
    """Create a Schema for this issuer and submit it to the wallet."""
    new_schema = Schema(name, version, attrNames, self.issuerId)
    return await self.wallet.submitSchema(new_schema)
Generates and submits Schema .
24,991
async def issueAccumulator(self, schemaId: ID, iA, L) -> AccumulatorPublicKey:
    """Issue the accumulator used for non-revocation proofs and submit it.

    The public side (key, accumulator, tails) is submitted to the wallet
    and the secret key is stored separately; the submitted public key is
    returned.
    """
    accum, tails, acc_public, acc_secret = \
        await self._nonRevocationIssuer.issueAccumulator(schemaId, iA, L)
    submitted_pk = await self.wallet.submitAccumPublic(
        schemaId=schemaId, accumPK=acc_public, accum=accum, tails=tails)
    await self.wallet.submitAccumSecret(schemaId=schemaId, accumSK=acc_secret)
    return submitted_pk
Issues and submits an accumulator used for non - revocation proof .
24,992
async def revoke(self, schemaId: ID, i):
    """Revoke claim ``i`` and submit the updated accumulator to the wallet."""
    updated_accum, timestamp_ms = await self._nonRevocationIssuer.revoke(schemaId, i)
    await self.wallet.submitAccumUpdate(
        schemaId=schemaId, accum=updated_accum, timestampMs=timestamp_ms)
Performs revocation of a Claim .
24,993
async def issueClaim(self, schemaId: ID, claimRequest: ClaimRequest, iA=None,
                     i=None) -> (Claims, Dict[str, ClaimAttributeValues]):
    """Issue a claim for the given user and schema.

    :param schemaId: schema the claim is issued against
    :param claimRequest: prover-generated claim request (userId, U, Ur)
    :param iA: accumulator id for the non-revocation part; optional
    :param i: claim index within the accumulator; optional
    :return: tuple of (signature over the attributes, attribute values)
    """
    schemaKey = (await self.wallet.getSchema(schemaId)).getKey()
    attributes = self._attrRepo.getAttributes(schemaKey, claimRequest.userId)
    # Generate the claim context (m2) before issuing either claim part.
    await self._genContxt(schemaId, iA, claimRequest.userId)
    (c1, claim) = await self._issuePrimaryClaim(schemaId, attributes, claimRequest.U)
    # The non-revocation part is only issued when the request carries Ur.
    c2 = await self._issueNonRevocationClaim(schemaId, claimRequest.Ur, iA, i) if claimRequest.Ur else None
    signature = Claims(primaryClaim=c1, nonRevocClaim=c2)
    return (signature, claim)
Issue a claim for the given user and schema .
24,994
async def issueClaims(self, allClaimRequest: Dict[ID, ClaimRequest]) -> Dict[ID, Claims]:
    """Issue one claim per (schema, claim request) pair, keyed by schema id."""
    return {
        schema_id: await self.issueClaim(schema_id, claim_request)
        for schema_id, claim_request in allClaimRequest.items()
    }
Issue claims for the given users and schemas .
24,995
async def verify(self, proofRequest: ProofRequest, proof: FullProof):
    """Verify a proof received from the prover.

    Checks that the revealed attributes and predicates match the request,
    accumulates the tau-list contributions of the non-revocation and
    primary sub-proofs, recomputes the challenge hash and compares it to
    the aggregated proof's hash.

    :return: True when the recomputed hash matches, False otherwise.
    """
    if proofRequest.verifiableAttributes.keys() != proof.requestedProof.revealed_attrs.keys():
        raise ValueError('Received attributes ={} do not correspond to requested={}'.format(
            proof.requestedProof.revealed_attrs.keys(), proofRequest.verifiableAttributes.keys()))
    if proofRequest.predicates.keys() != proof.requestedProof.predicates.keys():
        raise ValueError('Received predicates ={} do not correspond to requested={}'.format(
            proof.requestedProof.predicates.keys(), proofRequest.predicates.keys()))
    TauList = []
    for (uuid, proofItem) in proof.proofs.items():
        # Each sub-proof contributes its tau values to the challenge hash.
        if proofItem.proof.nonRevocProof:
            TauList += await self._nonRevocVerifier.verifyNonRevocation(
                proofRequest, proofItem.schema_seq_no, proof.aggregatedProof.cHash,
                proofItem.proof.nonRevocProof)
        if proofItem.proof.primaryProof:
            TauList += await self._primaryVerifier.verify(
                proofItem.schema_seq_no, proof.aggregatedProof.cHash,
                proofItem.proof.primaryProof)
    # Recompute the Fiat-Shamir challenge and compare with the prover's.
    CHver = self._get_hash(proof.aggregatedProof.CList, self._prepare_collection(TauList),
                           cmod.integer(proofRequest.nonce))
    return CHver == proof.aggregatedProof.cHash
Verifies a proof from the prover .
24,996
async def processClaim(self, schemaId: ID,
                       claimAttributes: Dict[str, ClaimAttributeValues],
                       signature: Claims):
    """Store a received claim: context, attribute values, and both claim parts."""
    primary = signature.primaryClaim
    await self.wallet.submitContextAttr(schemaId, primary.m2)
    await self.wallet.submitClaimAttributes(schemaId, claimAttributes)
    await self._initPrimaryClaim(schemaId, primary)
    non_revoc = signature.nonRevocClaim
    if non_revoc:
        await self._initNonRevocationClaim(schemaId, non_revoc)
Processes and saves a received Claim for the given Schema .
24,997
async def processClaims(self, allClaims: Dict[ID, Claims]):
    """Process and save every received claim, returning the per-claim results.

    NOTE(review): despite the annotation, values are unpacked as
    (signature, attributes) pairs, as produced by issueClaims -- confirm.
    """
    results = []
    for schema_id, (claim_signature, claim_attributes) in allClaims.items():
        results.append(
            await self.processClaim(schema_id, claim_attributes, claim_signature))
    return results
Processes and saves received Claims .
24,998
async def presentProof(self, proofRequest: ProofRequest) -> FullProof:
    """Find matching claims and build a proof satisfying the verifier's request."""
    found_claims, requested_proof = await self._findClaims(proofRequest)
    return await self._prepareProof(found_claims, proofRequest.nonce, requested_proof)
Presents a proof to the verifier .
24,999
def unsigned_hex_to_signed_int(hex_string: str) -> int:
    """Reinterpret a 64-bit unsigned hex string as a signed integer.

    The value is packed as an unsigned 64-bit quantity and unpacked as a
    signed one, so values >= 2**63 come back negative (two's complement).
    """
    unsigned_value = int(hex_string, 16)
    packed = struct.pack('Q', unsigned_value)
    (signed_value,) = struct.unpack('q', packed)
    return signed_value
Converts a 64 - bit hex string to a signed int value .