idx
int64
0
63k
question
stringlengths
61
4.03k
target
stringlengths
6
1.23k
33,200
def _k_modes_iter ( X , centroids , cl_attr_freq , membship , dissim , random_state ) : moves = 0 for ipoint , curpoint in enumerate ( X ) : clust = np . argmin ( dissim ( centroids , curpoint , X = X , membship = membship ) ) if membship [ clust , ipoint ] : continue moves += 1 old_clust = np . argwhere ( membship [ :...
Single iteration of k - modes clustering algorithm
33,201
def k_modes ( X , n_clusters , max_iter , dissim , init , n_init , verbose , random_state , n_jobs ) : random_state = check_random_state ( random_state ) if sparse . issparse ( X ) : raise TypeError ( "k-modes does not support sparse data." ) X = check_array ( X , dtype = None ) X , enc_map = encode_features ( X ) n_po...
k - modes algorithm
33,202
def fit ( self , X , y = None , ** kwargs ) : X = pandas_to_numpy ( X ) random_state = check_random_state ( self . random_state ) self . _enc_cluster_centroids , self . _enc_map , self . labels_ , self . cost_ , self . n_iter_ = k_modes ( X , self . n_clusters , self . max_iter , self . cat_dissim , self . init , self ...
Compute k - modes clustering .
33,203
def fit_predict ( self , X , y = None , ** kwargs ) : return self . fit ( X , ** kwargs ) . predict ( X , ** kwargs )
Compute cluster centroids and predict cluster index for each sample .
33,204
def get_max_value_key ( dic ) : v = np . array ( list ( dic . values ( ) ) ) k = np . array ( list ( dic . keys ( ) ) ) maxima = np . where ( v == np . max ( v ) ) [ 0 ] if len ( maxima ) == 1 : return k [ maxima [ 0 ] ] else : return k [ maxima [ np . argmin ( k [ maxima ] ) ] ]
Gets the key for the maximum value in a dict .
33,205
def decode_centroids ( encoded , mapping ) : decoded = [ ] for ii in range ( encoded . shape [ 1 ] ) : inv_mapping = { v : k for k , v in mapping [ ii ] . items ( ) } decoded . append ( np . vectorize ( inv_mapping . __getitem__ ) ( encoded [ : , ii ] ) ) return np . atleast_2d ( np . array ( decoded ) ) . T
Decodes the encoded centroids array back to the original data labels using a list of mappings .
33,206
def move_point_num ( point , to_clust , from_clust , cl_attr_sum , cl_memb_sum ) : for iattr , curattr in enumerate ( point ) : cl_attr_sum [ to_clust ] [ iattr ] += curattr cl_attr_sum [ from_clust ] [ iattr ] -= curattr cl_memb_sum [ to_clust ] += 1 cl_memb_sum [ from_clust ] -= 1 return cl_attr_sum , cl_memb_sum
Move point between clusters numerical attributes .
33,207
def _split_num_cat ( X , categorical ) : Xnum = np . asanyarray ( X [ : , [ ii for ii in range ( X . shape [ 1 ] ) if ii not in categorical ] ] ) . astype ( np . float64 ) Xcat = np . asanyarray ( X [ : , categorical ] ) return Xnum , Xcat
Extract numerical and categorical columns . Convert to numpy arrays if needed .
33,208
def _labels_cost ( Xnum , Xcat , centroids , num_dissim , cat_dissim , gamma , membship = None ) : n_points = Xnum . shape [ 0 ] Xnum = check_array ( Xnum ) cost = 0. labels = np . empty ( n_points , dtype = np . uint16 ) for ipoint in range ( n_points ) : num_costs = num_dissim ( centroids [ 0 ] , Xnum [ ipoint ] ) ca...
Calculate labels and cost function given a matrix of points and a list of centroids for the k - prototypes algorithm .
33,209
def _k_prototypes_iter ( Xnum , Xcat , centroids , cl_attr_sum , cl_memb_sum , cl_attr_freq , membship , num_dissim , cat_dissim , gamma , random_state ) : moves = 0 for ipoint in range ( Xnum . shape [ 0 ] ) : clust = np . argmin ( num_dissim ( centroids [ 0 ] , Xnum [ ipoint ] ) + gamma * cat_dissim ( centroids [ 1 ]...
Single iteration of the k - prototypes algorithm
33,210
def k_prototypes ( X , categorical , n_clusters , max_iter , num_dissim , cat_dissim , gamma , init , n_init , verbose , random_state , n_jobs ) : random_state = check_random_state ( random_state ) if sparse . issparse ( X ) : raise TypeError ( "k-prototypes does not support sparse data." ) if categorical is None or no...
k - prototypes algorithm
33,211
def fit ( self , X , y = None , categorical = None ) : if categorical is not None : assert isinstance ( categorical , ( int , list , tuple ) ) , "The 'categorical' \ argument needs to be an integer with the index of the categorical \ column in your data, or a list or tuple of several of th...
Compute k - prototypes clustering .
33,212
def euclidean_dissim ( a , b , ** _ ) : if np . isnan ( a ) . any ( ) or np . isnan ( b ) . any ( ) : raise ValueError ( "Missing values detected in numerical columns." ) return np . sum ( ( a - b ) ** 2 , axis = 1 )
Euclidean distance dissimilarity function
33,213
def ng_dissim ( a , b , X = None , membship = None ) : if membship is None : return matching_dissim ( a , b ) def calc_cjr ( b , X , memj , idr ) : xcids = np . where ( memj == 1 ) return float ( ( np . take ( X , xcids , axis = 0 ) [ 0 ] [ : , idr ] == b [ idr ] ) . sum ( 0 ) ) def calc_dissim ( b , X , memj , idr ) :...
Ng et al . s dissimilarity measure as presented in Michael K . Ng Mark Junjie Li Joshua Zhexue Huang and Zengyou He On the Impact of Dissimilarity Measure in k - Modes Clustering Algorithm IEEE Transactions on Pattern Analysis and Machine Intelligence Vol . 29 No . 3 January 2007
33,214
def store_result ( self , message , result : Result , ttl : int ) -> None : message_key = self . build_message_key ( message ) return self . _store ( message_key , result , ttl )
Store a result in the backend .
33,215
def build_message_key ( self , message ) -> str : message_key = "%(namespace)s:%(queue_name)s:%(actor_name)s:%(message_id)s" % { "namespace" : self . namespace , "queue_name" : q_name ( message . queue_name ) , "actor_name" : message . actor_name , "message_id" : message . message_id , } return hashlib . md5 ( message_...
Given a message return its globally - unique key .
33,216
def _store ( self , message_key : str , result : Result , ttl : int ) -> None : raise NotImplementedError ( "%(classname)r does not implement _store()" % { "classname" : type ( self ) . __name__ , } )
Store a result in the backend . Subclasses may implement this method if they want to use the default implementation of set_result .
33,217
def acquire ( self , * , raise_on_failure = True ) : acquired = False try : acquired = self . _acquire ( ) if raise_on_failure and not acquired : raise RateLimitExceeded ( "rate limit exceeded for key %(key)r" % vars ( self ) ) yield acquired finally : if acquired : self . _release ( )
Attempt to acquire a slot under this rate limiter .
33,218
def flock ( path ) : with open ( path , "w+" ) as lf : try : fcntl . flock ( lf , fcntl . LOCK_EX | fcntl . LOCK_NB ) acquired = True yield acquired except OSError : acquired = False yield acquired finally : if acquired : fcntl . flock ( lf , fcntl . LOCK_UN )
Attempt to acquire a POSIX file lock .
33,219
def copy ( self , ** attributes ) : updated_options = attributes . pop ( "options" , { } ) options = self . options . copy ( ) options . update ( updated_options ) return self . _replace ( ** attributes , options = options )
Create a copy of this message .
33,220
def get_result ( self , * , backend = None , block = False , timeout = None ) : if not backend : broker = get_broker ( ) for middleware in broker . middleware : if isinstance ( middleware , Results ) : backend = middleware . backend break else : raise RuntimeError ( "The default broker doesn't have a results backend." ...
Get the result associated with this message from a result backend .
33,221
def compute_backoff ( attempts , * , factor = 5 , jitter = True , max_backoff = 2000 , max_exponent = 32 ) : exponent = min ( attempts , max_exponent ) backoff = min ( factor * 2 ** exponent , max_backoff ) if jitter : backoff /= 2 backoff = int ( backoff + uniform ( 0 , backoff ) ) return attempts + 1 , backoff
Compute an exponential backoff value based on some number of attempts .
33,222
def join_all ( joinables , timeout ) : started , elapsed = current_millis ( ) , 0 for ob in joinables : ob . join ( timeout = timeout / 1000 ) elapsed = current_millis ( ) - started timeout = max ( 0 , timeout - elapsed )
Wait on a list of objects that can be joined with a total timeout represented by timeout .
33,223
def dq_name ( queue_name ) : if queue_name . endswith ( ".DQ" ) : return queue_name if queue_name . endswith ( ".XQ" ) : queue_name = queue_name [ : - 3 ] return queue_name + ".DQ"
Returns the delayed queue name for a given queue . If the given queue name already belongs to a delayed queue then it is returned unchanged .
33,224
def xq_name ( queue_name ) : if queue_name . endswith ( ".XQ" ) : return queue_name if queue_name . endswith ( ".DQ" ) : queue_name = queue_name [ : - 3 ] return queue_name + ".XQ"
Returns the dead letter queue name for a given queue . If the given queue name belongs to a delayed queue the dead letter queue name for the original queue is generated .
33,225
def get_broker ( ) -> "Broker" : global global_broker if global_broker is None : from . brokers . rabbitmq import RabbitmqBroker set_broker ( RabbitmqBroker ( host = "127.0.0.1" , port = 5672 , heartbeat = 5 , connection_attempts = 5 , blocked_connection_timeout = 30 , ) ) return global_broker
Get the global broker instance . If no global broker is set this initializes a RabbitmqBroker and returns it .
33,226
def add_middleware ( self , middleware , * , before = None , after = None ) : assert not ( before and after ) , "provide either 'before' or 'after', but not both" if before or after : for i , m in enumerate ( self . middleware ) : if isinstance ( m , before or after ) : break else : raise ValueError ( "Middleware %r no...
Add a middleware object to this broker . The middleware is appended to the end of the middleware list by default .
33,227
def declare_actor ( self , actor ) : self . emit_before ( "declare_actor" , actor ) self . declare_queue ( actor . queue_name ) self . actors [ actor . actor_name ] = actor self . emit_after ( "declare_actor" , actor )
Declare a new actor on this broker . Declaring an Actor twice replaces the first actor with the second by name .
33,228
def URLRabbitmqBroker ( url , * , middleware = None ) : warnings . warn ( "Use RabbitmqBroker with the 'url' parameter instead of URLRabbitmqBroker." , DeprecationWarning , stacklevel = 2 , ) return RabbitmqBroker ( url = url , middleware = middleware )
Alias for the RabbitMQ broker that takes a connection URL as a positional argument .
33,229
def close ( self ) : logging_filter = _IgnoreScaryLogs ( ) logging . getLogger ( "pika.adapters.base_connection" ) . addFilter ( logging_filter ) logging . getLogger ( "pika.adapters.blocking_connection" ) . addFilter ( logging_filter ) self . logger . debug ( "Closing channels and connections..." ) for channel_or_conn...
Close all open RabbitMQ connections .
33,230
def declare_queue ( self , queue_name ) : attempts = 1 while True : try : if queue_name not in self . queues : self . emit_before ( "declare_queue" , queue_name ) self . _declare_queue ( queue_name ) self . queues . add ( queue_name ) self . emit_after ( "declare_queue" , queue_name ) delayed_name = dq_name ( queue_nam...
Declare a queue . Has no effect if a queue with the given name already exists .
33,231
def get_queue_message_counts ( self , queue_name ) : queue_response = self . _declare_queue ( queue_name ) dq_queue_response = self . _declare_dq_queue ( queue_name ) xq_queue_response = self . _declare_xq_queue ( queue_name ) return ( queue_response . method . message_count , dq_queue_response . method . message_count...
Get the number of messages in a queue . This method is only meant to be used in unit and integration tests .
33,232
def create ( self , parties ) : assert parties > 0 , "parties must be a positive integer." return self . backend . add ( self . key , parties , self . ttl )
Create the barrier for the given number of parties .
33,233
def wait ( self , * , block = True , timeout = None ) : cleared = not self . backend . decr ( self . key , 1 , 1 , self . ttl ) if cleared : self . backend . wait_notify ( self . key_events , self . ttl ) return True if block : return self . backend . wait ( self . key_events , timeout ) return False
Signal that a party has reached the barrier .
33,234
def raise_thread_exception ( thread_id , exception ) : if current_platform == "CPython" : _raise_thread_exception_cpython ( thread_id , exception ) else : message = "Setting thread exceptions (%s) is not supported for your current platform (%r)." exctype = ( exception if inspect . isclass ( exception ) else type ( exce...
Raise an exception in a thread .
33,235
def setup_file_watcher ( path , use_polling = False ) : if use_polling : observer_class = watchdog . observers . polling . PollingObserver else : observer_class = EVENTED_OBSERVER file_event_handler = _SourceChangesHandler ( patterns = [ "*.py" ] ) file_watcher = observer_class ( ) file_watcher . schedule ( file_event_...
Sets up a background thread that watches for source changes and automatically sends SIGHUP to the current process whenever a file changes .
33,236
def declare_queue ( self , queue_name ) : if queue_name not in self . queues : self . emit_before ( "declare_queue" , queue_name ) self . queues [ queue_name ] = Queue ( ) self . emit_after ( "declare_queue" , queue_name ) delayed_name = dq_name ( queue_name ) self . queues [ delayed_name ] = Queue ( ) self . delay_que...
Declare a queue . Has no effect if a queue with the given name has already been declared .
33,237
def flush_all ( self ) : for queue_name in chain ( self . queues , self . delay_queues ) : self . flush ( queue_name )
Drop all messages from all declared queues .
33,238
def run ( self , * , delay = None ) : self . broker . enqueue ( self . messages [ 0 ] , delay = delay ) return self
Run this pipeline .
33,239
def get_result ( self , * , block = False , timeout = None ) : return self . messages [ - 1 ] . get_result ( block = block , timeout = timeout )
Get the result of this pipeline .
33,240
def get_results ( self , * , block = False , timeout = None ) : deadline = None if timeout : deadline = time . monotonic ( ) + timeout / 1000 for message in self . messages : if deadline : timeout = max ( 0 , int ( ( deadline - time . monotonic ( ) ) * 1000 ) ) yield message . get_result ( block = block , timeout = tim...
Get the results of each job in the pipeline .
33,241
def run ( self , * , delay = None ) : for child in self . children : if isinstance ( child , ( group , pipeline ) ) : child . run ( delay = delay ) else : self . broker . enqueue ( child , delay = delay ) return self
Run the actors in this group .
33,242
def get_results ( self , * , block = False , timeout = None ) : deadline = None if timeout : deadline = time . monotonic ( ) + timeout / 1000 for child in self . children : if deadline : timeout = max ( 0 , int ( ( deadline - time . monotonic ( ) ) * 1000 ) ) if isinstance ( child , group ) : yield list ( child . get_r...
Get the results of each job in the group .
33,243
def wait ( self , * , timeout = None ) : for _ in self . get_results ( block = True , timeout = timeout ) : pass
Block until all the jobs in the group have finished or until the timeout expires .
33,244
def actor ( fn = None , * , actor_class = Actor , actor_name = None , queue_name = "default" , priority = 0 , broker = None , ** options ) : def decorator ( fn ) : nonlocal actor_name , broker actor_name = actor_name or fn . __name__ if not _queue_name_re . fullmatch ( queue_name ) : raise ValueError ( "Queue names mus...
Declare an actor .
33,245
def message ( self , * args , ** kwargs ) : return self . message_with_options ( args = args , kwargs = kwargs )
Build a message . This method is useful if you want to compose actors . See the actor composition documentation for details .
33,246
def message_with_options ( self , * , args = None , kwargs = None , ** options ) : for name in [ "on_failure" , "on_success" ] : callback = options . get ( name ) if isinstance ( callback , Actor ) : options [ name ] = callback . actor_name elif not isinstance ( callback , ( type ( None ) , str ) ) : raise TypeError ( ...
Build a message with an arbitray set of processing options . This method is useful if you want to compose actors . See the actor composition documentation for details .
33,247
def send ( self , * args , ** kwargs ) : return self . send_with_options ( args = args , kwargs = kwargs )
Asynchronously send a message to this actor .
33,248
def send_with_options ( self , * , args = None , kwargs = None , delay = None , ** options ) : message = self . message_with_options ( args = args , kwargs = kwargs , ** options ) return self . broker . enqueue ( message , delay = delay )
Asynchronously send a message to this actor along with an arbitrary set of processing options for the broker and middleware .
33,249
def start ( self ) : self . broker . emit_before ( "worker_boot" , self ) worker_middleware = _WorkerMiddleware ( self ) self . broker . add_middleware ( worker_middleware ) for _ in range ( self . worker_threads ) : self . _add_worker ( ) self . broker . emit_after ( "worker_boot" , self )
Initialize the worker boot sequence and start up all the worker threads .
33,250
def pause ( self ) : for child in chain ( self . consumers . values ( ) , self . workers ) : child . pause ( ) for child in chain ( self . consumers . values ( ) , self . workers ) : child . paused_event . wait ( )
Pauses all the worker threads .
33,251
def resume ( self ) : for child in chain ( self . consumers . values ( ) , self . workers ) : child . resume ( )
Resumes all the worker threads .
33,252
def stop ( self , timeout = 600000 ) : self . broker . emit_before ( "worker_shutdown" , self ) self . logger . info ( "Shutting down..." ) self . logger . debug ( "Stopping workers..." ) for thread in self . workers : thread . stop ( ) join_all ( self . workers , timeout ) self . logger . debug ( "Workers stopped." ) ...
Gracefully stop the Worker and all of its consumers and workers .
33,253
def join ( self ) : while True : for consumer in self . consumers . values ( ) : consumer . delay_queue . join ( ) self . work_queue . join ( ) for consumer in self . consumers . values ( ) : if consumer . delay_queue . unfinished_tasks : break else : if self . work_queue . unfinished_tasks : continue return
Wait for this worker to complete its work in progress . This method is useful when testing code .
33,254
def handle_delayed_messages ( self ) : for eta , message in iter_queue ( self . delay_queue ) : if eta > current_millis ( ) : self . delay_queue . put ( ( eta , message ) ) self . delay_queue . task_done ( ) break queue_name = q_name ( message . queue_name ) new_message = message . copy ( queue_name = queue_name ) del ...
Enqueue any delayed messages whose eta has passed .
33,255
def handle_message ( self , message ) : try : if "eta" in message . options : self . logger . debug ( "Pushing message %r onto delay queue." , message . message_id ) self . broker . emit_before ( "delay_message" , message ) self . delay_queue . put ( ( message . options . get ( "eta" , 0 ) , message ) ) else : actor = ...
Handle a message received off of the underlying consumer . If the message has an eta delay it . Otherwise put it on the work queue .
33,256
def post_process_message ( self , message ) : while True : try : if message . failed : self . logger . debug ( "Rejecting message %r." , message . message_id ) self . broker . emit_before ( "nack" , message ) self . consumer . nack ( message ) self . broker . emit_after ( "nack" , message ) else : self . logger . debug...
Called by worker threads whenever they re done processing individual messages signaling that each message is ready to be acked or rejected .
33,257
def close ( self ) : try : if self . consumer : self . requeue_messages ( m for _ , m in iter_queue ( self . delay_queue ) ) self . consumer . close ( ) except ConnectionError : pass
Close this consumer thread and its underlying connection .
33,258
def process_message ( self , message ) : try : self . logger . debug ( "Received message %s with id %r." , message , message . message_id ) self . broker . emit_before ( "process_message" , message ) res = None if not message . failed : actor = self . broker . get_actor ( message . actor_name ) res = actor ( * message ...
Process a message pulled off of the work queue then push it back to its associated consumer for post processing .
33,259
def compare ( self , a , b , zone ) : self . log . info ( 'compare: a=%s, b=%s, zone=%s' , a , b , zone ) try : a = [ self . providers [ source ] for source in a ] b = [ self . providers [ source ] for source in b ] except KeyError as e : raise Exception ( 'Unknown source: {}' . format ( e . args [ 0 ] ) ) sub_zones = ...
Compare zone data between 2 sources .
33,260
def dump ( self , zone , output_dir , lenient , split , source , * sources ) : self . log . info ( 'dump: zone=%s, sources=%s' , zone , sources ) sources = [ source ] + list ( sources ) try : sources = [ self . providers [ s ] for s in sources ] except KeyError as e : raise Exception ( 'Unknown source: {}' . format ( e...
Dump zone data from the specified source
33,261
def flush_zone ( cls , zone_name ) : cls . log . debug ( 'flush_zone: zone_name=%s' , zone_name ) try : del cls . _cache [ zone_name ] except KeyError : pass
Flushes the zone cache if there is one
33,262
def _check_zone ( self , name , create = False ) : self . log . debug ( '_check_zone: name=%s' , name ) try : if name in self . _azure_zones : return name self . _dns_client . zones . get ( self . _resource_group , name ) self . _azure_zones . add ( name ) return name except CloudError as err : msg = 'The Resource \'Mi...
Checks whether a zone specified in a source exist in Azure server .
33,263
def _apply_Create ( self , change ) : ar = _AzureRecord ( self . _resource_group , change . new ) create = self . _dns_client . record_sets . create_or_update create ( resource_group_name = ar . resource_group , zone_name = ar . zone_name , relative_record_set_name = ar . relative_record_set_name , record_type = ar . r...
A record from change must be created .
33,264
def apply ( self , plan ) : if self . apply_disabled : self . log . info ( 'apply: disabled' ) return 0 self . log . info ( 'apply: making changes' ) self . _apply ( plan ) return len ( plan . changes )
Submits actual planned changes to the provider . Returns the number of changes made
33,265
def _is_valid_dkim ( self , value ) : validator_dict = { 'h' : lambda val : val in [ 'sha1' , 'sha256' ] , 's' : lambda val : val in [ '*' , 'email' ] , 't' : lambda val : val in [ 'y' , 's' ] , 'v' : lambda val : val == 'DKIM1' , 'k' : lambda val : val == 'rsa' , 'n' : lambda _ : True , 'g' : lambda _ : True } splitte...
Check if value is a valid DKIM
33,266
def _get_gcloud_records ( self , gcloud_zone , page_token = None ) : gcloud_iterator = gcloud_zone . list_resource_record_sets ( page_token = page_token ) for gcloud_record in gcloud_iterator : yield gcloud_record if gcloud_iterator . next_page_token : for gcloud_record in self . _get_gcloud_records ( gcloud_zone , gcl...
Generator function which yields ResourceRecordSet for the managed gcloud zone until there are no more records to pull .
33,267
def _get_cloud_zones ( self , page_token = None ) : gcloud_zones = self . gcloud_client . list_zones ( page_token = page_token ) for gcloud_zone in gcloud_zones : self . _gcloud_zones [ gcloud_zone . dns_name ] = gcloud_zone if gcloud_zones . next_page_token : self . _get_cloud_zones ( gcloud_zones . next_page_token )
Load all ManagedZones into the self . _gcloud_zones dict which is mapped with the dns_name as key .
33,268
def get_client_key_exchange_record ( cls , robot_payload_enum : RobotPmsPaddingPayloadEnum , tls_version : TlsVersionEnum , modulus : int , exponent : int ) -> TlsRsaClientKeyExchangeRecord : pms_padding = cls . _compute_pms_padding ( modulus ) tls_version_hex = binascii . b2a_hex ( TlsRecordTlsVersionBytes [ tls_versi...
A client key exchange record with a hardcoded pre_master_secret and a valid or invalid padding .
33,269
def get_finished_record_bytes ( cls , tls_version : TlsVersionEnum ) -> bytes : return b'\x16' + TlsRecordTlsVersionBytes [ tls_version . name ] . value + cls . _FINISHED_RECORD
The Finished TLS record corresponding to the hardcoded PMS used in the Client Key Exchange record .
33,270
def compute_result_enum ( self ) -> RobotScanResultEnum : for payload_enum , server_responses in self . _payload_responses . items ( ) : if server_responses [ 0 ] != server_responses [ 1 ] : return RobotScanResultEnum . UNKNOWN_INCONSISTENT_RESULTS if len ( set ( [ server_responses [ 0 ] for server_responses in self . ...
Look at the server s response to each ROBOT payload and return the conclusion of the analysis .
33,271
def is_extended_validation ( self , certificate : Certificate ) -> bool : if not self . ev_oids : raise ValueError ( 'No EV OIDs supplied for {} store - cannot detect EV certificates' . format ( self . name ) ) try : cert_policies_ext = certificate . extensions . get_extension_for_oid ( ExtensionOID . CERTIFICATE_POLIC...
Is the supplied server certificate EV?
33,272
def run_scan_command ( self , server_info : ServerConnectivityInfo , scan_command : PluginScanCommand ) -> PluginScanResult : plugin_class = self . _plugins_repository . get_plugin_class_for_command ( scan_command ) plugin = plugin_class ( ) return plugin . process_task ( server_info , scan_command )
Run a single scan command against a server ; will block until the scan command has been completed .
33,273
def update_default ( cls ) -> 'TrustStoresRepository' : temp_path = mkdtemp ( ) try : archive_path = join ( temp_path , 'trust_stores_as_pem.tar.gz' ) urlretrieve ( cls . _UPDATE_URL , archive_path ) extract_path = join ( temp_path , 'extracted' ) tarfile . open ( archive_path ) . extractall ( extract_path ) shutil . r...
Update the default trust stores used by SSLyze .
33,274
def _get_preferred_cipher_suite ( cls , server_connectivity_info : ServerConnectivityInfo , ssl_version : OpenSslVersionEnum , accepted_cipher_list : List [ 'AcceptedCipherSuite' ] ) -> Optional [ 'AcceptedCipherSuite' ] : if len ( accepted_cipher_list ) < 2 : return None accepted_cipher_names = [ cipher . openssl_name...
Try to detect the server s preferred cipher suite among all cipher suites supported by SSLyze .
33,275
def name ( self ) -> str : return OPENSSL_TO_RFC_NAMES_MAPPING [ self . ssl_version ] . get ( self . openssl_name , self . openssl_name )
OpenSSL uses a different naming convention than the corresponding RFCs .
33,276
def queue_scan_command ( self , server_info : ServerConnectivityInfo , scan_command : PluginScanCommand ) -> None : self . _check_and_create_process ( server_info . hostname ) self . _queued_tasks_nb += 1 if scan_command . is_aggressive : self . _hostname_queues_dict [ server_info . hostname ] . put ( ( server_info , s...
Queue a scan command targeting a specific server .
33,277
def get_results ( self ) -> Iterable [ PluginScanResult ] : for _ in range ( self . _get_current_processes_nb ( ) ) : self . _task_queue . put ( None ) for hostname , hostname_queue in self . _hostname_queues_dict . items ( ) : for i in range ( len ( self . _processes_dict [ hostname ] ) ) : hostname_queue . put ( None...
Return the result of previously queued scan commands ; new commands cannot be queued once this is called .
33,278
def run ( self ) -> None : from sslyze . concurrent_scanner import PluginRaisedExceptionScanResult current_queue_in = self . priority_queue_in while True : task = current_queue_in . get ( ) if task is None : current_queue_in . task_done ( ) if current_queue_in == self . priority_queue_in : current_queue_in = self . que...
The process will first complete tasks it gets from self . queue_in . Once it gets notified that all the tasks have been completed it terminates .
33,279
def connect_socket ( self , sock : socket . socket ) -> None : try : sock . connect ( ( self . _tunnel_host , self . _tunnel_port ) ) except socket . timeout as e : raise ProxyError ( self . ERR_PROXY_OFFLINE . format ( str ( e ) ) ) except socket . error as e : raise ProxyError ( self . ERR_PROXY_OFFLINE . format ( st...
Setup HTTP tunneling with the configured proxy .
33,280
def get_preconfigured_ssl_connection ( self , override_ssl_version : Optional [ OpenSslVersionEnum ] = None , ssl_verify_locations : Optional [ str ] = None , should_use_legacy_openssl : Optional [ bool ] = None , ) -> SslConnection : if override_ssl_version is not None : final_ssl_version = override_ssl_version openss...
Get an SSLConnection instance with the right SSL configuration for successfully connecting to the server .
33,281
def _add_plugin_options ( self , available_plugins : Set [ Type [ Plugin ] ] ) -> None : for plugin_class in available_plugins : group = OptionGroup ( self . _parser , plugin_class . get_title ( ) , plugin_class . get_description ( ) ) for option in plugin_class . get_cli_option_group ( ) : group . add_option ( option ...
Recovers the list of command line options implemented by the available plugins and adds them to the command line parser .
33,282
def get_long_description ( ) : with open ( path . join ( root_path , 'README.md' ) , encoding = 'utf-8' ) as f : long_description = f . read ( ) return long_description
Convert the README file into the long description .
33,283
def get_include_files ( ) : plugin_data_files = [ ] trust_stores_pem_path = path . join ( root_path , 'sslyze' , 'plugins' , 'utils' , 'trust_store' , 'pem_files' ) for file in listdir ( trust_stores_pem_path ) : file = path . join ( trust_stores_pem_path , file ) if path . isfile ( file ) : filename = path . basename ...
Get the list of trust stores so they properly packaged when doing a cx_freeze build .
33,284
def get_dns_subject_alternative_names ( certificate : cryptography . x509 . Certificate ) -> List [ str ] : subj_alt_names : List [ str ] = [ ] try : san_ext = certificate . extensions . get_extension_for_oid ( ExtensionOID . SUBJECT_ALTERNATIVE_NAME ) subj_alt_names = san_ext . value . get_values_for_type ( DNSName ) ...
Retrieve all the DNS entries of the Subject Alternative Name extension .
33,285
def matches_hostname ( cls , certificate : cryptography . x509 . Certificate , hostname : str ) -> None : certificate_names = { 'subject' : ( tuple ( [ ( 'commonName' , name ) for name in cls . get_common_names ( certificate . subject ) ] ) , ) , 'subjectAltName' : tuple ( [ ( 'DNS' , name ) for name in cls . get_dns_s...
Verify that the certificate was issued for the given hostname .
33,286
def get_name_as_short_text ( cls , name_field : cryptography . x509 . Name ) -> str : common_names = cls . get_common_names ( name_field ) if common_names : return common_names [ 0 ] else : return cls . get_name_as_text ( name_field )
Convert a name field returned by the cryptography module to a string suitable for displaying it to the user .
33,287
def has_ocsp_must_staple_extension ( certificate : cryptography . x509 . Certificate ) -> bool : has_ocsp_must_staple = False try : tls_feature_ext = certificate . extensions . get_extension_for_oid ( ExtensionOID . TLS_FEATURE ) for feature_type in tls_feature_ext . value : if feature_type == cryptography . x509 . TLS...
Return True if the certificate has the OCSP Must - Staple extension defined in RFC 6066 .
33,288
def send_request ( self , ssl_client : SslClient ) -> str : try : ssl_client . write ( HttpRequestGenerator . get_request ( self . _hostname ) ) http_response = HttpResponseParser . parse_from_ssl_connection ( ssl_client ) if http_response . version == 9 : result = self . ERR_NOT_HTTP else : redirect = '' if 300 <= htt...
Send an HTTP GET to the server and return the HTTP status code .
33,289
def get_plugin_class_for_command ( self , scan_command : PluginScanCommand ) -> Type [ Plugin ] : return self . _scan_command_classes_to_plugin_classes [ scan_command . __class__ ]
Get the class of the plugin implementing the supplied scan command .
33,290
def _parse ( read_method : Callable ) -> HTTPResponse : response = read_method ( 4096 ) while b'HTTP/' not in response or b'\r\n\r\n' not in response : response += read_method ( 4096 ) fake_sock = _FakeSocket ( response ) response = HTTPResponse ( fake_sock ) response . begin ( ) return response
Trick to standardize the API between sockets and SSLConnection objects .
33,291
def _work_function ( job_q : Queue , result_q : Queue , error_q : Queue ) -> None : while True : job = job_q . get ( ) if isinstance ( job , _ThreadPoolSentinel ) : result_q . put ( _ThreadPoolSentinel ( ) ) error_q . put ( _ThreadPoolSentinel ( ) ) job_q . task_done ( ) break work_function = job [ 0 ] args = job [ 1 ]...
Work function expected to run within threads .
33,292
def _resume_with_session_id ( self , server_info : ServerConnectivityInfo , ssl_version_to_use : OpenSslVersionEnum ) -> bool : session1 = self . _resume_ssl_session ( server_info , ssl_version_to_use ) try : session1_id = self . _extract_session_id ( session1 ) except IndexError : return False if session1_id == '' : r...
Perform one session resumption using Session IDs .
33,293
def _resume_with_session_ticket ( self , server_info : ServerConnectivityInfo , ssl_version_to_use : OpenSslVersionEnum , ) -> TslSessionTicketSupportEnum : try : session1 = self . _resume_ssl_session ( server_info , ssl_version_to_use , should_enable_tls_ticket = True ) except SslHandshakeRejected : if server_info . h...
Perform one session resumption using TLS Session Tickets .
33,294
def _extract_session_id ( ssl_session : nassl . _nassl . SSL_SESSION ) -> str : session_string = ( ( ssl_session . as_text ( ) ) . split ( 'Session-ID:' ) ) [ 1 ] session_id = ( session_string . split ( 'Session-ID-ctx:' ) ) [ 0 ] . strip ( ) return session_id
Extract the SSL session ID from a SSL session object or raises IndexError if the session ID was not set .
33,295
def _extract_tls_session_ticket ( ssl_session : nassl . _nassl . SSL_SESSION ) -> str : session_string = ( ( ssl_session . as_text ( ) ) . split ( 'TLS session ticket:' ) ) [ 1 ] session_tls_ticket = ( session_string . split ( 'Compression:' ) ) [ 0 ] return session_tls_ticket
Extract the TLS session ticket from a SSL session object or raises IndexError if the ticket was not set .
33,296
def _resume_ssl_session ( server_info : ServerConnectivityInfo , ssl_version_to_use : OpenSslVersionEnum , ssl_session : Optional [ nassl . _nassl . SSL_SESSION ] = None , should_enable_tls_ticket : bool = False ) -> nassl . _nassl . SSL_SESSION : ssl_connection = server_info . get_preconfigured_ssl_connection ( overri...
Connect to the server and returns the session object that was assigned for that connection . If ssl_session is given tries to resume that session .
33,297
def _object_to_json_dict ( obj : Any ) -> Union [ bool , int , float , str , Dict [ str , Any ] ] : if isinstance ( obj , Enum ) : result = obj . name elif isinstance ( obj , ObjectIdentifier ) : result = obj . dotted_string elif isinstance ( obj , x509 . _Certificate ) : certificate = obj result = { 'as_pem' : obj . p...
Convert an object to a dictionary suitable for the JSON output .
33,298
def _get_and_verify_certificate_chain ( server_info : ServerConnectivityInfo , trust_store : TrustStore ) -> Tuple [ List [ Certificate ] , str , Optional [ OcspResponse ] ] : ssl_connection = server_info . get_preconfigured_ssl_connection ( ssl_verify_locations = trust_store . path ) ssl_connection . ssl_client . set_...
Connects to the target server and uses the supplied trust store to validate the server s certificate . Returns the server s certificate and OCSP response .
33,299
def get_description ( cls ) -> str : if cls . __doc__ is None : raise ValueError ( 'No docstring found for {}' . format ( cls . __name__ ) ) return cls . __doc__ . strip ( )
The description is expected to be the command class docstring .