idx int64 0 63k | question stringlengths 53 5.28k | target stringlengths 5 805 |
|---|---|---|
30,600 | def on_sigchld ( self , _signum , _unused_frame ) : LOGGER . info ( 'SIGCHLD received from child' ) if not self . active_processes ( False ) : LOGGER . info ( 'Stopping with no active processes and child error' ) signal . setitimer ( signal . ITIMER_REAL , 0 , 0 ) self . set_state ( self . STATE_STOPPED ) | Invoked when a child sends up an SIGCHLD signal . |
30,601 | def on_timer ( self , _signum , _unused_frame ) : if self . is_shutting_down : LOGGER . debug ( 'Polling timer fired while shutting down' ) return if not self . polled : self . poll ( ) self . polled = True self . set_timer ( 5 ) else : self . polled = False self . poll_results_check ( ) self . set_timer ( self . poll_... | Invoked by the Poll timer signal . |
30,602 | def poll ( self ) : self . set_state ( self . STATE_ACTIVE ) if not self . total_process_count : LOGGER . debug ( 'Did not find any active consumers in poll' ) return self . check_process_counts ( ) self . poll_data = { 'timestamp' : time . time ( ) , 'processes' : list ( ) } for proc in list ( self . active_processes ... | Start the poll process by invoking the get_stats method of the consumers . If we hit this after another interval without fully processing note it with a warning . |
30,603 | def poll_results_check ( self ) : if not self . consumers : LOGGER . debug ( 'Skipping poll results check, no consumers' ) return LOGGER . debug ( 'Checking for poll results' ) while True : try : stats = self . stats_queue . get ( False ) except queue . Empty : break try : self . poll_data [ 'processes' ] . remove ( st... | Check the polling results by checking to see if the stats queue is empty . If it is not try and collect stats . If it is set a timer to call ourselves in _POLL_RESULTS_INTERVAL . |
30,604 | def process_spawn_qty ( self , name ) : return self . consumers [ name ] . qty - self . process_count ( name ) | Return the number of processes to spawn for the given consumer name . |
30,605 | def remove_consumer_process ( self , consumer , name ) : my_pid = os . getpid ( ) if name in self . consumers [ consumer ] . processes . keys ( ) : child = self . consumers [ consumer ] . processes [ name ] try : alive = child . is_alive ( ) except AssertionError : LOGGER . debug ( 'Tried to test non-child process (%r ... | Remove all details for the specified consumer and process name . |
30,606 | def run ( self ) : self . set_state ( self . STATE_ACTIVE ) self . setup_consumers ( ) signal . signal ( signal . SIGCHLD , self . on_sigchld ) signal . signal ( signal . SIGALRM , self . on_timer ) signal . setitimer ( signal . ITIMER_REAL , self . poll_interval , 0 ) while self . is_running : if not self . is_sleepin... | When the consumer is ready to start running kick off all of our consumer consumers and then loop while we process messages . |
30,607 | def set_process_name ( ) : proc = multiprocessing . current_process ( ) for offset in range ( 0 , len ( sys . argv ) ) : if sys . argv [ offset ] == '-c' : name = sys . argv [ offset + 1 ] . split ( '/' ) [ - 1 ] proc . name = name . split ( '.' ) [ 0 ] break | Set the process name for the top level process so that it shows up in logs in a more trackable fashion . |
30,608 | def set_timer ( self , duration ) : if self . is_shutting_down : LOGGER . debug ( 'Not sleeping, application is trying to shutdown' ) return signal . setitimer ( signal . ITIMER_REAL , duration , 0 ) | Setup the next alarm to fire and then wait for it to fire . |
30,609 | def setup_consumers ( self ) : if not self . consumer_cfg : LOGGER . warning ( 'No consumers are configured' ) for name in self . consumer_cfg . keys ( ) : self . consumers [ name ] = self . new_consumer ( self . consumer_cfg [ name ] , name ) self . start_processes ( name , self . consumers [ name ] . qty ) | Iterate through each consumer in the configuration and kick off the minimal amount of processes setting up the runtime data as well . |
30,610 | def start_process ( self , name ) : process_name , proc = self . new_process ( name ) LOGGER . info ( 'Spawning %s process for %s' , process_name , name ) self . consumers [ name ] . processes [ process_name ] = proc try : proc . start ( ) except IOError as error : LOGGER . critical ( 'Failed to start %s for %s: %r' , ... | Start a new consumer process for the given consumer name |
30,611 | def start_processes ( self , name , quantity ) : [ self . start_process ( name ) for i in range ( 0 , quantity or 0 ) ] | Start the specified quantity of consumer processes for the given consumer . |
30,612 | def stop_processes ( self ) : self . set_state ( self . STATE_SHUTTING_DOWN ) LOGGER . info ( 'Stopping consumer processes' ) signal . signal ( signal . SIGABRT , signal . SIG_IGN ) signal . signal ( signal . SIGALRM , signal . SIG_IGN ) signal . signal ( signal . SIGCHLD , signal . SIG_IGN ) signal . signal ( signal .... | Iterate through all of the consumer processes shutting them down . |
30,613 | def add_parser_arguments ( ) : argparser = parser . get ( ) argparser . add_argument ( '-P' , '--profile' , action = 'store' , default = None , dest = 'profile' , help = 'Profile the consumer modules, specifying ' 'the output directory.' ) argparser . add_argument ( '-o' , '--only' , action = 'store' , default = None ,... | Add options to the parser |
30,614 | def _master_control_program ( self ) : return mcp . MasterControlProgram ( self . config , consumer = self . args . consumer , profile = self . args . profile , quantity = self . args . quantity ) | Return an instance of the MasterControlProgram . |
30,615 | def setup ( self ) : if self . args . prepend_path : self . _prepend_python_path ( self . args . prepend_path ) | Continue the run process blocking on MasterControlProgram . run |
30,616 | def stop ( self ) : LOGGER . info ( 'Shutting down controller' ) self . set_state ( self . STATE_STOP_REQUESTED ) signal . setitimer ( signal . ITIMER_PROF , 0 , 0 ) self . _mcp . stop_processes ( ) if self . _mcp . is_running : LOGGER . info ( 'Waiting up to 3 seconds for MCP to shut things down' ) signal . setitimer ... | Shutdown the MCP and child processes cleanly |
30,617 | def run ( self ) : self . setup ( ) self . _mcp = self . _master_control_program ( ) try : self . _mcp . run ( ) except KeyboardInterrupt : LOGGER . info ( 'Caught CTRL-C, shutting down' ) except Exception : exc_info = sys . exc_info ( ) kwargs = { 'logger' : 'rejected.controller' } LOGGER . debug ( 'Sending exception ... | Run the rejected Application |
30,618 | def connect ( self ) : self . set_state ( self . STATE_CONNECTING ) self . handle = tornado_connection . TornadoConnection ( self . _connection_parameters , on_open_callback = self . on_open , on_open_error_callback = self . on_open_error , stop_ioloop_on_close = False , custom_ioloop = self . io_loop ) | Create the low - level AMQP connection to RabbitMQ . |
30,619 | def shutdown ( self ) : if self . is_shutting_down : self . logger . debug ( 'Already shutting down' ) return self . set_state ( self . STATE_SHUTTING_DOWN ) self . logger . debug ( 'Shutting down connection' ) if not self . is_active : return self . channel . close ( ) self . logger . debug ( 'Sending a Basic.Cancel t... | Start the connection shutdown process cancelling any active consuming and closing the channel if the connection is not active . |
30,620 | def on_open ( self , _handle ) : self . logger . debug ( 'Connection opened' ) self . handle . add_on_connection_blocked_callback ( self . on_blocked ) self . handle . add_on_connection_unblocked_callback ( self . on_unblocked ) self . handle . add_on_close_callback ( self . on_closed ) self . handle . channel ( self .... | Invoked when the connection is opened |
30,621 | def on_channel_open ( self , channel ) : self . logger . debug ( 'Channel opened' ) self . set_state ( self . STATE_CONNECTED ) self . channel = channel self . channel . add_on_close_callback ( self . on_channel_closed ) self . channel . add_on_cancel_callback ( self . on_consumer_cancelled ) if self . publisher_confir... | This method is invoked by pika when the channel has been opened . It will change the state to CONNECTED add the callbacks and setup the channel to start consuming . |
30,622 | def consume ( self , queue_name , no_ack , prefetch_count ) : if self . state == self . STATE_ACTIVE : self . logger . debug ( '%s already consuming' , self . name ) return self . set_state ( self . STATE_ACTIVE ) self . channel . basic_qos ( self . on_qos_set , 0 , prefetch_count , False ) self . channel . basic_consu... | Consume messages from RabbitMQ changing the state QoS and issuing the RPC to RabbitMQ to start delivering messages . |
30,623 | def on_consumer_cancelled ( self , _frame ) : self . logger . debug ( 'Consumer has been cancelled' ) if self . is_shutting_down : self . channel . close ( ) else : self . set_state ( self . STATE_CONNECTED ) | Invoked by pika when a Basic . Cancel or Basic . CancelOk is received . |
30,624 | def on_confirmation ( self , frame ) : delivered = frame . method . NAME . split ( '.' ) [ 1 ] . lower ( ) == 'ack' self . logger . debug ( 'Received publisher confirmation (Delivered: %s)' , delivered ) if frame . method . multiple : for index in range ( self . last_confirmation + 1 , frame . method . delivery_tag ) :... | Invoked by pika when RabbitMQ responds to a Basic . Publish RPC command passing in either a Basic . Ack or Basic . Nack frame with the delivery tag of the message that was published . The delivery tag is an integer counter indicating the message number that was sent on the channel via Basic . Publish . |
30,625 | def confirm_delivery ( self , delivery_tag , delivered ) : for offset , msg in self . pending_confirmations ( ) : if delivery_tag == msg . delivery_tag : self . published_messages [ offset ] . future . set_result ( delivered ) return for msg in self . published_messages : if msg . delivery_tag == delivery_tag and msg .... | Invoked by RabbitMQ when it is confirming delivery via a Basic . Ack |
30,626 | def on_delivery ( self , channel , method , properties , body ) : self . callbacks . on_delivery ( self . name , channel , method , properties , body ) | Invoked by pika when RabbitMQ delivers a message from a queue . |
30,627 | def on_return ( self , channel , method , properties , body ) : pending = self . pending_confirmations ( ) if not pending : self . logger . warning ( 'RabbitMQ returned message %s and no pending ' 'messages are unconfirmed' , utils . message_info ( method . exchange , method . routing_key , properties ) ) return self .... | Invoked by RabbitMQ when it returns a message that was published . |
30,628 | def pending_confirmations ( self ) : return sorted ( [ ( idx , msg ) for idx , msg in enumerate ( self . published_messages ) if not msg . future . done ( ) ] , key = lambda x : x [ 1 ] . delivery_tag ) | Return all published messages that have yet to be acked nacked or returned . |
30,629 | def _connection_parameters ( self ) : return pika . ConnectionParameters ( self . config . get ( 'host' , 'localhost' ) , self . config . get ( 'port' , 5672 ) , self . config . get ( 'vhost' , '/' ) , pika . PlainCredentials ( self . config . get ( 'user' , 'guest' ) , self . config . get ( 'password' , self . config ... | Return connection parameters for a pika connection . |
30,630 | def publish_message ( self , exchange , routing_key , properties , body , no_serialization = False , no_encoding = False , channel = None , connection = None ) : is_string = ( isinstance ( body , str ) or isinstance ( body , bytes ) or isinstance ( body , unicode ) ) if not no_serialization and not is_string and proper... | Publish a message to RabbitMQ on the same channel the original message was received on . |
30,631 | def body ( self ) : if self . _message_body : return self . _message_body self . _message_body = self . _maybe_decompress_body ( ) self . _message_body = self . _maybe_deserialize_body ( ) return self . _message_body | Return the message body unencoded if needed deserialized if possible . |
30,632 | def _compress ( self , value , module_name ) : self . logger . debug ( 'Decompressing with %s' , module_name ) if not isinstance ( value , bytes ) : value = value . encode ( 'utf-8' ) return self . _maybe_import ( module_name ) . compress ( value ) | Compress the value passed in using the named compression module . |
30,633 | def _dump_csv ( self , rows ) : self . logger . debug ( 'Writing %r' , rows ) csv = self . _maybe_import ( 'csv' ) buff = io . StringIO ( ) if _PYTHON3 else io . BytesIO ( ) writer = csv . DictWriter ( buff , sorted ( set ( [ k for r in rows for k in r . keys ( ) ] ) ) , dialect = 'excel' ) writer . writeheader ( ) wri... | Take a list of dicts and return it as a CSV value . The |
30,634 | def _maybe_decode ( self , value , encoding = 'utf-8' ) : if _PYTHON3 and isinstance ( value , bytes ) : try : return value . decode ( encoding ) except Exception as err : self . logger . exception ( 'Error decoding value: %s' , err ) raise MessageException ( str ( err ) , 'decoding-{}' . format ( encoding ) ) return v... | If a bytes object is passed in in the Python 3 environment decode it using the specified encoding to turn it to a str instance . |
30,635 | def _maybe_decompress_body ( self ) : if self . content_encoding : if self . content_encoding in self . _CODEC_MAP . keys ( ) : module_name = self . _CODEC_MAP [ self . content_encoding ] self . logger . debug ( 'Decompressing with %s' , module_name ) module = self . _maybe_import ( module_name ) return module . decomp... | Attempt to decompress the message body passed in using the named compression module if specified . |
30,636 | def _maybe_deserialize_body ( self ) : if not self . content_type : return self . _message_body ct = headers . parse_content_type ( self . content_type ) key = '{}/{}' . format ( ct . content_type , ct . content_subtype ) if key not in self . _SERIALIZATION_MAP : if key not in self . _IGNORE_TYPES : self . logger . deb... | Attempt to deserialize the message body based upon the content - type . |
30,637 | def _serialize ( self , value , ct ) : key = '{}/{}' . format ( ct . content_type , ct . content_subtype ) if key not in self . _SERIALIZATION_MAP : raise ValueError ( 'Unsupported content-type: {}' . format ( key ) ) elif not self . _SERIALIZATION_MAP [ key ] . get ( 'enabled' , True ) : self . logger . debug ( '%s is... | Auto - serialization of the value based upon the content - type value . |
30,638 | def collection_cycle ( self , value ) : if value is not None : self . _collection_cycle = value self . _cycles_left = min ( self . _cycles_left , self . _collection_cycle ) | Set the number of messages to process before invoking gc . collect |
30,639 | def on_finish ( self , exc = None ) : super ( GarbageCollector , self ) . on_finish ( exc ) self . _cycles_left -= 1 if self . _cycles_left <= 0 : num_collected = gc . collect ( ) self . _cycles_left = self . collection_cycle LOGGER . debug ( 'garbage collection run, %d objects evicted' , num_collected ) | Used to initiate the garbage collection |
30,640 | def rpc_reply ( self , body , properties = None , exchange = None , reply_to = None , connection = None ) : if reply_to is None and self . reply_to is None : raise ValueError ( 'Missing reply_to routing key' ) properties = properties or { } if not properties . get ( 'app_id' ) : properties [ 'app_id' ] = self . name if... | Reply to the message that is currently being processed . |
30,641 | def set_sentry_context ( self , tag , value ) : if self . sentry_client : self . logger . debug ( 'Setting sentry context for %s to %s' , tag , value ) self . sentry_client . tags_context ( { tag : value } ) | Set a context tag in Sentry for the given key and value . |
30,642 | def stats_add_duration ( self , key , duration ) : if not self . _measurement : if not self . IGNORE_OOB_STATS : self . logger . warning ( 'stats_add_timing invoked outside execution' ) return self . _measurement . add_duration ( key , duration ) | Add a duration to the per - message measurements |
30,643 | def stats_incr ( self , key , value = 1 ) : if not self . _measurement : if not self . IGNORE_OOB_STATS : self . logger . warning ( 'stats_incr invoked outside execution' ) return self . _measurement . incr ( key , value ) | Increment the specified key in the per - message measurements |
30,644 | def stats_track_duration ( self , key ) : start_time = time . time ( ) try : yield finally : self . stats_add_duration ( key , max ( start_time , time . time ( ) ) - start_time ) | Time around a context and add to the the per - message measurements |
30,645 | def unset_sentry_context ( self , tag ) : if self . sentry_client : self . sentry_client . tags . pop ( tag , None ) | Remove a context tag from sentry |
30,646 | def yield_to_ioloop ( self ) : try : yield self . _yield_condition . wait ( self . _message . channel . connection . ioloop . time ( ) + 0.001 ) except gen . TimeoutError : pass | Function that will allow Rejected to process IOLoop events while in a tight - loop inside an asynchronous consumer . |
30,647 | def execute ( self , message_in , measurement ) : self . logger . debug ( 'Received: %r' , message_in ) try : self . _preprocess ( message_in , measurement ) except DropMessage : raise gen . Return ( data . MESSAGE_DROP ) except MessageException as exc : raise gen . Return ( self . _on_message_exception ( exc ) ) try :... | Process the message from RabbitMQ . To implement logic for processing a message extend Consumer . _process not this method . |
30,648 | def _clear ( self ) : self . _finished = False self . _measurement = None self . _message = None self . _message_body = None | Resets all assigned data for the current message . |
30,649 | def _handle_exception ( self , exc ) : exc_info = sys . exc_info ( ) self . logger . exception ( '%s while processing message #%s' , exc . __class__ . __name__ , self . _message . delivery_tag , exc_info = exc_info ) self . _measurement . set_tag ( 'exception' , exc . __class__ . __name__ ) if hasattr ( exc , 'metric' ... | Common exception handling behavior across all exceptions . |
30,650 | def _maybe_clear_confirmation_futures ( self ) : for name in self . _connections . keys ( ) : self . _connections [ name ] . clear_confirmation_futures ( ) | Invoked when the message has finished processing ensuring there are no confirmation futures pending . |
30,651 | def _maybe_set_message_age ( self ) : if self . _message . properties . timestamp : message_age = float ( max ( self . _message . properties . timestamp , time . time ( ) ) - self . _message . properties . timestamp ) if message_age > 0 : self . measurement . add_duration ( self . message_age_key ( ) , message_age ) | If timestamp is set and the relative age is > 0 record age of the message coming in |
30,652 | def _preprocess ( self , message_in , measurement ) : self . _clear ( ) self . _message = message_in self . _measurement = measurement self . _maybe_set_message_age ( ) self . _correlation_id = ( self . _message . properties . correlation_id or self . _message . properties . message_id or str ( uuid . uuid4 ( ) ) ) sel... | Invoked at the start of execution setting internal state validating that the message should be processed and not dropped . |
30,653 | def _publisher_confirmation_future ( self , name , exchange , routing_key , properties ) : if self . _connections [ name ] . publisher_confirmations : future = concurrent . Future ( ) self . _connections [ name ] . add_confirmation_future ( exchange , routing_key , properties , future ) return future | Return a future a publisher confirmation result that enables consumers to block on the confirmation of a published message . |
30,654 | def _publish_connection ( self , name = None ) : try : conn = self . _connections [ name or self . _message . connection ] except KeyError : raise ValueError ( 'Channel {} not found' . format ( name ) ) if not conn . is_connected or conn . channel . is_closed : raise RabbitMQException ( conn . name , 599 , 'NOT_CONNECT... | Return the connection to publish . If the name is not specified the connection associated with the current message is returned . |
30,655 | def _republish_dropped_message ( self , reason ) : self . logger . debug ( 'Republishing due to ProcessingException' ) properties = dict ( self . _message . properties ) or { } if 'headers' not in properties or not properties [ 'headers' ] : properties [ 'headers' ] = { } properties [ 'headers' ] [ 'X-Dropped-By' ] = s... | Republish the original message that was received it is being dropped by the consumer . |
30,656 | def _setting ( self , key , default ) : if key not in self . _settings : value = self . _settings_in . get ( key , os . environ . get ( 'STATSD_{}' . format ( key ) . upper ( ) , default ) ) self . _settings [ key ] = value return self . _settings [ key ] | Return the setting checking config then the appropriate environment variable falling back to the default caching the results . |
30,657 | def set_state ( self , new_state ) : if new_state not in self . STATES : raise ValueError ( 'Invalid state value: %r' % new_state ) LOGGER . debug ( 'State changing from %s to %s' , self . STATES [ self . state ] , self . STATES [ new_state ] ) self . state = new_state self . state_start = time . time ( ) | Assign the specified state to this consumer object . |
30,658 | def is_running ( self ) : return self . state in [ self . STATE_IDLE , self . STATE_ACTIVE , self . STATE_SLEEPING ] | Returns a bool determining if the process is in a running state or not |
30,659 | def ack_message ( self , message ) : if message . channel . is_closed : LOGGER . warning ( 'Can not ack message, channel is closed' ) self . counters [ self . CLOSED_ON_COMPLETE ] += 1 return message . channel . basic_ack ( delivery_tag = message . delivery_tag ) self . counters [ self . ACKED ] += 1 self . measurement... | Acknowledge the message on the broker and log the ack |
30,660 | def create_connections ( self ) : self . set_state ( self . STATE_CONNECTING ) for conn in self . consumer_config . get ( 'connections' , [ ] ) : name , confirm , consume = conn , False , True if isinstance ( conn , dict ) : name = conn [ 'name' ] confirm = conn . get ( 'publisher_confirmation' , False ) consume = conn... | Create and start the RabbitMQ connections assigning the connection object to the connections dict . |
30,661 | def get_config ( cfg , number , name , connection_name ) : return { 'connection' : cfg [ 'Connections' ] [ connection_name ] , 'consumer_name' : name , 'process_name' : '%s_%i_tag_%i' % ( name , os . getpid ( ) , number ) } | Initialize a new consumer thread setting defaults and config values |
30,662 | def get_consumer ( self , cfg ) : try : handle , version = utils . import_consumer ( cfg [ 'consumer' ] ) except ImportError as error : LOGGER . exception ( 'Error importing the consumer %s: %s' , cfg [ 'consumer' ] , error ) return if version : LOGGER . info ( 'Creating consumer %s v%s' , cfg [ 'consumer' ] , version ... | Import and create a new instance of the configured message consumer . |
30,663 | def invoke_consumer ( self , message ) : with ( yield self . consumer_lock . acquire ( ) ) : if self . is_idle : if message . channel . is_closed : LOGGER . warning ( 'Channel %s is closed on ' 'connection "%s", discarding ' 'local copy of message %s' , message . channel . channel_number , message . connection , utils ... | Wrap the actual processor processing bits |
30,664 | def maybe_get_next_message ( self ) : if self . pending : self . ioloop . add_callback ( self . invoke_consumer , self . pending . popleft ( ) ) | Pop the next message on the stack adding a callback on the IOLoop to invoke the consumer with the message . This is done so we let the IOLoop perform any pending callbacks before trying to process the next message . |
30,665 | def maybe_submit_measurement ( self ) : if self . statsd : self . submit_statsd_measurements ( ) if self . influxdb : self . submit_influxdb_measurement ( ) | Check for configured instrumentation backends and if found submit the message measurement info . |
30,666 | def on_delivery ( self , name , channel , method , properties , body ) : message = data . Message ( name , channel , method , properties , body ) if self . is_processing : return self . pending . append ( message ) self . invoke_consumer ( message ) | Process a message from Rabbit |
30,667 | def on_processed ( self , message , result , start_time ) : duration = max ( start_time , time . time ( ) ) - start_time self . counters [ self . TIME_SPENT ] += duration self . measurement . add_duration ( self . TIME_SPENT , duration ) if result == data . MESSAGE_DROP : LOGGER . debug ( 'Rejecting message due to drop... | Invoked after a message is processed by the consumer and implements the logic for how to deal with a message based upon the result . |
30,668 | def on_processing_error ( self ) : duration = time . time ( ) - self . last_failure if duration > self . MAX_ERROR_WINDOW : LOGGER . info ( 'Resetting failure window, %i seconds since last' , duration ) self . reset_error_counter ( ) self . counters [ self . ERROR ] += 1 self . last_failure = time . time ( ) if self . ... | Called when message processing failure happens due to a ConsumerException or an unhandled exception . |
30,669 | def on_ready_to_stop ( self ) : self . set_state ( self . STATE_SHUTTING_DOWN ) signal . signal ( signal . SIGABRT , signal . SIG_IGN ) signal . signal ( signal . SIGINT , signal . SIG_IGN ) signal . signal ( signal . SIGPROF , signal . SIG_IGN ) signal . signal ( signal . SIGTERM , signal . SIG_IGN ) if self . consume... | Invoked when the consumer is ready to stop . |
30,670 | def on_sigprof ( self , _unused_signum , _unused_frame ) : self . stats_queue . put ( self . report_stats ( ) , True ) self . last_stats_time = time . time ( ) signal . siginterrupt ( signal . SIGPROF , False ) | Called when SIGPROF is sent to the process will dump the stats in future versions queue them for the master process to get data . |
30,671 | def on_startup_error ( self , error ) : LOGGER . critical ( 'Could not start %s: %s' , self . consumer_name , error ) self . set_state ( self . STATE_STOPPED ) | Invoked when a pre - condition for starting the consumer has failed . Log the error and then exit the process . |
30,672 | def reject ( self , message , requeue = True ) : if self . no_ack : raise RuntimeError ( 'Can not rejected messages when ack is False' ) if message . channel . is_closed : LOGGER . warning ( 'Can not nack message, disconnected from RabbitMQ' ) self . counters [ self . CLOSED_ON_COMPLETE ] += 1 return LOGGER . warning (... | Reject the message on the broker and log it . |
30,673 | def report_stats ( self ) : if not self . previous : self . previous = dict ( ) for key in self . counters : self . previous [ key ] = 0 values = { 'name' : self . name , 'consumer_name' : self . consumer_name , 'counts' : dict ( self . counters ) , 'previous' : dict ( self . previous ) } self . previous = dict ( self ... | Create the dict of stats data for the MCP stats queue |
30,674 | def reset_state ( self ) : self . active_message = None self . measurement = None if self . is_waiting_to_shutdown : self . shutdown_connections ( ) elif self . is_processing : self . set_state ( self . STATE_IDLE ) elif self . is_idle or self . is_connecting or self . is_shutting_down : pass else : LOGGER . critical (... | Reset the runtime state after processing a message to either idle or shutting down based upon the current state . |
30,675 | def run ( self ) : if self . profile_file : LOGGER . info ( 'Profiling to %s' , self . profile_file ) profile . runctx ( 'self._run()' , globals ( ) , locals ( ) , self . profile_file ) else : self . _run ( ) LOGGER . debug ( 'Exiting %s (%i, %i)' , self . name , os . getpid ( ) , os . getppid ( ) ) | Start the consumer |
30,676 | def _run ( self ) : self . set_state ( self . STATE_INITIALIZING ) self . ioloop = ioloop . IOLoop . current ( ) self . consumer_lock = locks . Lock ( ) self . sentry_client = self . setup_sentry ( self . _kwargs [ 'config' ] , self . consumer_name ) try : self . setup ( ) except ( AttributeError , ImportError ) : retu... | Run method that can be profiled |
30,677 | def send_exception_to_sentry ( self , exc_info ) : if not self . sentry_client : LOGGER . debug ( 'No sentry_client, aborting' ) return message = dict ( self . active_message ) try : duration = math . ceil ( time . time ( ) - self . delivery_time ) * 1000 except TypeError : duration = 0 kwargs = { 'extra' : { 'consumer... | Send an exception to Sentry if enabled . |
30,678 | def setup ( self ) : LOGGER . info ( 'Initializing for %s' , self . name ) if 'consumer' not in self . consumer_config : return self . on_startup_error ( '"consumer" not specified in configuration' ) self . consumer = self . get_consumer ( self . consumer_config ) if not self . consumer : return self . on_startup_error... | Initialize the consumer setting up needed attributes and connecting to RabbitMQ . |
30,679 | def setup_influxdb ( self , config ) : base_tags = { 'version' : self . consumer_version } measurement = self . config . get ( 'influxdb_measurement' , os . environ . get ( 'SERVICE' ) ) if measurement != self . consumer_name : base_tags [ 'consumer' ] = self . consumer_name for key in { 'ENVIRONMENT' , 'SERVICE' } : i... | Configure the InfluxDB module for measurement submission . |
30,680 | def setup_sighandlers ( self ) : signal . signal ( signal . SIGINT , signal . SIG_IGN ) signal . signal ( signal . SIGTERM , signal . SIG_IGN ) signal . signal ( signal . SIGPROF , self . on_sigprof ) signal . signal ( signal . SIGABRT , self . stop ) signal . siginterrupt ( signal . SIGPROF , False ) signal . siginter... | Setup the stats and stop signal handlers . |
30,681 | def shutdown_connections ( self ) : if not self . is_shutting_down : self . set_state ( self . STATE_SHUTTING_DOWN ) for name in self . connections : if self . connections [ name ] . is_running : self . connections [ name ] . shutdown ( ) | This method closes the connections to RabbitMQ . |
30,682 | def stop ( self , signum = None , _unused = None ) : LOGGER . debug ( 'Stop called in state: %s' , self . state_description ) if self . is_stopped : LOGGER . warning ( 'Stop requested but consumer is already stopped' ) return elif self . is_shutting_down : LOGGER . warning ( 'Stop requested, consumer is already shuttin... | Stop the consumer from consuming by calling BasicCancel and setting our state . |
30,683 | def stop_consumer ( self ) : try : LOGGER . info ( 'Shutting down the consumer' ) self . consumer . shutdown ( ) except AttributeError : LOGGER . debug ( 'Consumer does not have a shutdown method' ) | Stop the consumer object and allow it to do a clean shutdown if it has the ability to do so . |
30,684 | def submit_influxdb_measurement ( self ) : measurement = influxdb . Measurement ( * self . influxdb ) measurement . set_timestamp ( time . time ( ) ) for key , value in self . measurement . counters . items ( ) : measurement . set_field ( key , value ) for key , value in self . measurement . tags . items ( ) : measurem... | Submit a measurement for a message to InfluxDB |
30,685 | def submit_statsd_measurements ( self ) : for key , value in self . measurement . counters . items ( ) : self . statsd . incr ( key , value ) for key , values in self . measurement . durations . items ( ) : for value in values : self . statsd . add_timing ( key , value ) for key , value in self . measurement . values .... | Submit a measurement for a message to statsd as individual items . |
30,686 | def profile_file ( self ) : if 'profile' in self . _kwargs and self . _kwargs [ 'profile' ] : profile_path = path . normpath ( self . _kwargs [ 'profile' ] ) if os . path . exists ( profile_path ) and os . path . isdir ( profile_path ) : return path . join ( profile_path , '{}-{}.prof' . format ( os . getpid ( ) , self... | Return the full path to write the cProfile data |
30,687 | def get_package_version ( module_obj , value ) : for key in [ 'version' , '__version__' ] : if hasattr ( module_obj , key ) : return getattr ( module_obj , key ) parts = value . split ( '.' ) for index , part in enumerate ( parts ) : try : return pkg_resources . get_distribution ( '.' . join ( parts [ 0 : index + 1 ] )... | Get the version of a package or a module s package . |
def import_consumer(value):
    """Resolve a dotted path such as ``foo.Bar`` or ``foo.bar.baz.Qux``.

    Imports the containing module and returns a tuple of the class
    handle and the package version reported by
    :func:`get_package_version`.
    """
    module_path, _, attribute = value.rpartition('.')
    module_obj = importlib.import_module(module_path)
    return getattr(module_obj, attribute), get_package_version(module_obj, value)
30,689 | def message_info ( exchange , routing_key , properties ) : output = [ ] if properties . message_id : output . append ( properties . message_id ) if properties . correlation_id : output . append ( '[correlation_id="{}"]' . format ( properties . correlation_id ) ) if exchange : output . append ( 'published to "{}"' . for... | Return info about a message using the same conditional constructs |
def add_duration(self, key, value):
    """Record *value* as one duration sample under *key*, creating the
    sample list on first use."""
    self.durations.setdefault(key, []).append(value)
def track_duration(self, key):
    """Measure the wall-clock time spent in the wrapped block and record
    it as a duration sample under *key*.

    NOTE(review): presumably decorated with ``@contextlib.contextmanager``
    at the definition site -- the generator protocol here matches that
    usage.  The ``max`` clamp keeps a sample from going negative if the
    system clock steps backwards during the block.
    """
    samples = self.durations.setdefault(key, [])
    started = time.time()
    try:
        yield
    finally:
        elapsed = max(started, time.time()) - started
        samples.append(elapsed)
def filter(self, record):
    """Decide whether *record* should be logged.

    Returns nonzero when the presence of ``correlation_id`` on the
    record matches the configured ``_exists`` expectation, zero
    otherwise.
    """
    has_correlation_id = getattr(record, 'correlation_id', None) is not None
    if self._exists:
        return int(has_correlation_id)
    return int(not has_correlation_id)
30,693 | def observe_id ( self ) : if self . _observe_id is None : hasher = hashlib . sha256 ( ) hasher . update ( self . viewset_class . __module__ . encode ( 'utf8' ) ) hasher . update ( self . viewset_class . __name__ . encode ( 'utf8' ) ) hasher . update ( self . viewset_method . encode ( 'utf8' ) ) for key in sorted ( self... | Unique identifier that identifies the observer . |
def post(self, request):
    """Handle a query-observer unsubscription request.

    Responds with HTTP 400 when either the ``observer`` or the
    ``subscriber`` query parameter is missing.
    """
    query = request.query_params
    try:
        observer_id, session_id = query['observer'], query['subscriber']
    except KeyError:
        return response.Response(status=400)
    observer.remove_subscriber(session_id, observer_id)
    return response.Response()
30,695 | def notify_observers ( table , kind , primary_key = None ) : if IN_MIGRATIONS : return if not Observer . objects . filter ( dependencies__table = table ) . exists ( ) : return def handler ( ) : try : async_to_sync ( get_channel_layer ( ) . send ) ( CHANNEL_MAIN , { 'type' : TYPE_ORM_NOTIFY , 'table' : table , 'kind' : ... | Transmit ORM table change notification . |
30,696 | def model_post_save ( sender , instance , created = False , ** kwargs ) : if sender . _meta . app_label == 'rest_framework_reactive' : return def notify ( ) : table = sender . _meta . db_table if created : notify_observers ( table , ORM_NOTIFY_KIND_CREATE , instance . pk ) else : notify_observers ( table , ORM_NOTIFY_K... | Signal emitted after any model is saved via Django ORM . |
def model_post_delete(sender, instance, **kwargs):
    """Signal handler invoked after any model is deleted via the Django
    ORM.

    Defers the observer notification until the surrounding transaction
    commits, so observers never see uncommitted deletes.  Deletes of
    this app's own models are not broadcast.
    """
    if sender._meta.app_label == 'rest_framework_reactive':
        return
    transaction.on_commit(
        lambda: notify_observers(
            sender._meta.db_table, ORM_NOTIFY_KIND_DELETE, instance.pk
        )
    )
30,698 | def model_m2m_changed ( sender , instance , action , ** kwargs ) : if sender . _meta . app_label == 'rest_framework_reactive' : return def notify ( ) : table = sender . _meta . db_table if action == 'post_add' : notify_observers ( table , ORM_NOTIFY_KIND_CREATE ) elif action in ( 'post_remove' , 'post_clear' ) : notify... | Signal emitted after any M2M relation changes via Django ORM . |
30,699 | async def observer_orm_notify ( self , message ) : @ database_sync_to_async def get_observers ( table ) : return list ( Observer . objects . filter ( dependencies__table = table , subscribers__isnull = False ) . distinct ( 'pk' ) . values_list ( 'pk' , flat = True ) ) observers_ids = await get_observers ( message [ 'ta... | Process notification from ORM . |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.