idx int64 0 63k | question stringlengths 61 4.03k | target stringlengths 6 1.23k |
|---|---|---|
18,900 | def default ( self ) : try : return self . __resolved_default except AttributeError : resolved_default = super ( EnumField , self ) . default if isinstance ( resolved_default , ( six . string_types , six . integer_types ) ) : resolved_default = self . type ( resolved_default ) self . __resolved_default = resolved_default return self . __resolved_default | Default for enum field . |
18,901 | def MessageToJson ( message , include_fields = None ) : result = _ProtoJsonApiTools . Get ( ) . encode_message ( message ) return _IncludeFields ( result , message , include_fields ) | Convert the given message to JSON . |
18,902 | def DictToAdditionalPropertyMessage ( properties , additional_property_type , sort_items = False ) : items = properties . items ( ) if sort_items : items = sorted ( items ) map_ = [ ] for key , value in items : map_ . append ( additional_property_type . AdditionalProperty ( key = key , value = value ) ) return additional_property_type ( additionalProperties = map_ ) | Convert the given dictionary to an AdditionalProperty message . |
18,903 | def MessageToRepr ( msg , multiline = False , ** kwargs ) : indent = kwargs . get ( 'indent' , 0 ) def IndentKwargs ( kwargs ) : kwargs = dict ( kwargs ) kwargs [ 'indent' ] = kwargs . get ( 'indent' , 0 ) + 4 return kwargs if isinstance ( msg , list ) : s = '[' for item in msg : if multiline : s += '\n' + ' ' * ( indent + 4 ) s += MessageToRepr ( item , multiline = multiline , ** IndentKwargs ( kwargs ) ) + ',' if multiline : s += '\n' + ' ' * indent s += ']' return s if isinstance ( msg , messages . Message ) : s = type ( msg ) . __name__ + '(' if not kwargs . get ( 'no_modules' ) : s = msg . __module__ + '.' + s names = sorted ( [ field . name for field in msg . all_fields ( ) ] ) for name in names : field = msg . field_by_name ( name ) if multiline : s += '\n' + ' ' * ( indent + 4 ) value = getattr ( msg , field . name ) s += field . name + '=' + MessageToRepr ( value , multiline = multiline , ** IndentKwargs ( kwargs ) ) + ',' if multiline : s += '\n' + ' ' * indent s += ')' return s if isinstance ( msg , six . string_types ) : if kwargs . get ( 'shortstrings' ) and len ( msg ) > 100 : msg = msg [ : 100 ] if isinstance ( msg , datetime . datetime ) : class SpecialTZInfo ( datetime . tzinfo ) : def __init__ ( self , offset ) : super ( SpecialTZInfo , self ) . __init__ ( ) self . offset = offset def __repr__ ( self ) : s = 'TimeZoneOffset(' + repr ( self . offset ) + ')' if not kwargs . get ( 'no_modules' ) : s = 'apitools.base.protorpclite.util.' + s return s msg = datetime . datetime ( msg . year , msg . month , msg . day , msg . hour , msg . minute , msg . second , msg . microsecond , SpecialTZInfo ( msg . tzinfo . utcoffset ( 0 ) ) ) return repr ( msg ) | Return a repr - style string for a protorpc message . |
18,904 | def _IncludeFields ( encoded_message , message , include_fields ) : if include_fields is None : return encoded_message result = json . loads ( encoded_message ) for field_name in include_fields : try : value = _GetField ( message , field_name . split ( '.' ) ) nullvalue = None if isinstance ( value , list ) : nullvalue = [ ] except KeyError : raise exceptions . InvalidDataError ( 'No field named %s in message of type %s' % ( field_name , type ( message ) ) ) _SetField ( result , field_name . split ( '.' ) , nullvalue ) return json . dumps ( result ) | Add the requested fields to the encoded message . |
18,905 | def _DecodeUnknownFields ( message , encoded_message ) : destination = _UNRECOGNIZED_FIELD_MAPPINGS . get ( type ( message ) ) if destination is None : return message pair_field = message . field_by_name ( destination ) if not isinstance ( pair_field , messages . MessageField ) : raise exceptions . InvalidDataFromServerError ( 'Unrecognized fields must be mapped to a compound ' 'message type.' ) pair_type = pair_field . message_type if isinstance ( pair_type . value , messages . MessageField ) : new_values = _DecodeUnknownMessages ( message , json . loads ( encoded_message ) , pair_type ) else : new_values = _DecodeUnrecognizedFields ( message , pair_type ) setattr ( message , destination , new_values ) setattr ( message , '_Message__unrecognized_fields' , { } ) return message | Rewrite unknown fields in message into message . destination . |
18,906 | def _DecodeUnknownMessages ( message , encoded_message , pair_type ) : field_type = pair_type . value . type new_values = [ ] all_field_names = [ x . name for x in message . all_fields ( ) ] for name , value_dict in six . iteritems ( encoded_message ) : if name in all_field_names : continue value = PyValueToMessage ( field_type , value_dict ) if pair_type . value . repeated : value = _AsMessageList ( value ) new_pair = pair_type ( key = name , value = value ) new_values . append ( new_pair ) return new_values | Process unknown fields in encoded_message of a message type . |
18,907 | def _DecodeUnrecognizedFields ( message , pair_type ) : new_values = [ ] codec = _ProtoJsonApiTools . Get ( ) for unknown_field in message . all_unrecognized_fields ( ) : value , _ = message . get_unrecognized_field_info ( unknown_field ) value_type = pair_type . field_by_name ( 'value' ) if isinstance ( value_type , messages . MessageField ) : decoded_value = DictToMessage ( value , pair_type . value . message_type ) else : decoded_value = codec . decode_field ( pair_type . value , value ) try : new_pair_key = str ( unknown_field ) except UnicodeEncodeError : new_pair_key = protojson . ProtoJson ( ) . decode_field ( pair_type . key , unknown_field ) new_pair = pair_type ( key = new_pair_key , value = decoded_value ) new_values . append ( new_pair ) return new_values | Process unrecognized fields in message . |
18,908 | def _EncodeUnknownFields ( message ) : source = _UNRECOGNIZED_FIELD_MAPPINGS . get ( type ( message ) ) if source is None : return message result = _CopyProtoMessageVanillaProtoJson ( message ) pairs_field = message . field_by_name ( source ) if not isinstance ( pairs_field , messages . MessageField ) : raise exceptions . InvalidUserInputError ( 'Invalid pairs field %s' % pairs_field ) pairs_type = pairs_field . message_type value_field = pairs_type . field_by_name ( 'value' ) value_variant = value_field . variant pairs = getattr ( message , source ) codec = _ProtoJsonApiTools . Get ( ) for pair in pairs : encoded_value = codec . encode_field ( value_field , pair . value ) result . set_unrecognized_field ( pair . key , encoded_value , value_variant ) setattr ( result , source , [ ] ) return result | Remap unknown fields in message out of message . source . |
18,909 | def _SafeEncodeBytes ( field , value ) : try : if field . repeated : result = [ base64 . urlsafe_b64encode ( byte ) for byte in value ] else : result = base64 . urlsafe_b64encode ( value ) complete = True except TypeError : result = value complete = False return CodecResult ( value = result , complete = complete ) | Encode the bytes in value as urlsafe base64 . |
18,910 | def _SafeDecodeBytes ( unused_field , value ) : try : result = base64 . urlsafe_b64decode ( str ( value ) ) complete = True except TypeError : result = value complete = False return CodecResult ( value = result , complete = complete ) | Decode the urlsafe base64 value into bytes . |
18,911 | def _ProcessUnknownEnums ( message , encoded_message ) : if not encoded_message : return message decoded_message = json . loads ( six . ensure_str ( encoded_message ) ) for field in message . all_fields ( ) : if ( isinstance ( field , messages . EnumField ) and field . name in decoded_message and message . get_assigned_value ( field . name ) is None ) : message . set_unrecognized_field ( field . name , decoded_message [ field . name ] , messages . Variant . ENUM ) return message | Add unknown enum values from encoded_message as unknown fields . |
18,912 | def _ProcessUnknownMessages ( message , encoded_message ) : if not encoded_message : return message decoded_message = json . loads ( six . ensure_str ( encoded_message ) ) message_fields = [ x . name for x in message . all_fields ( ) ] + list ( message . all_unrecognized_fields ( ) ) missing_fields = [ x for x in decoded_message . keys ( ) if x not in message_fields ] for field_name in missing_fields : message . set_unrecognized_field ( field_name , decoded_message [ field_name ] , messages . Variant . STRING ) return message | Store any remaining unknown fields as strings . |
18,913 | def AddCustomJsonEnumMapping ( enum_type , python_name , json_name , package = None ) : if not issubclass ( enum_type , messages . Enum ) : raise exceptions . TypecheckError ( 'Cannot set JSON enum mapping for non-enum "%s"' % enum_type ) if python_name not in enum_type . names ( ) : raise exceptions . InvalidDataError ( 'Enum value %s not a value for type %s' % ( python_name , enum_type ) ) field_mappings = _JSON_ENUM_MAPPINGS . setdefault ( enum_type , { } ) _CheckForExistingMappings ( 'enum' , enum_type , python_name , json_name ) field_mappings [ python_name ] = json_name | Add a custom wire encoding for a given enum value . |
18,914 | def AddCustomJsonFieldMapping ( message_type , python_name , json_name , package = None ) : if not issubclass ( message_type , messages . Message ) : raise exceptions . TypecheckError ( 'Cannot set JSON field mapping for ' 'non-message "%s"' % message_type ) try : _ = message_type . field_by_name ( python_name ) except KeyError : raise exceptions . InvalidDataError ( 'Field %s not recognized for type %s' % ( python_name , message_type ) ) field_mappings = _JSON_FIELD_MAPPINGS . setdefault ( message_type , { } ) _CheckForExistingMappings ( 'field' , message_type , python_name , json_name ) field_mappings [ python_name ] = json_name | Add a custom wire encoding for a given message field . |
18,915 | def GetCustomJsonEnumMapping ( enum_type , python_name = None , json_name = None ) : return _FetchRemapping ( enum_type , 'enum' , python_name = python_name , json_name = json_name , mappings = _JSON_ENUM_MAPPINGS ) | Return the appropriate remapping for the given enum or None . |
18,916 | def GetCustomJsonFieldMapping ( message_type , python_name = None , json_name = None ) : return _FetchRemapping ( message_type , 'field' , python_name = python_name , json_name = json_name , mappings = _JSON_FIELD_MAPPINGS ) | Return the appropriate remapping for the given field or None . |
18,917 | def _FetchRemapping ( type_name , mapping_type , python_name = None , json_name = None , mappings = None ) : if python_name and json_name : raise exceptions . InvalidDataError ( 'Cannot specify both python_name and json_name ' 'for %s remapping' % mapping_type ) if not ( python_name or json_name ) : raise exceptions . InvalidDataError ( 'Must specify either python_name or json_name for %s remapping' % ( mapping_type , ) ) field_remappings = mappings . get ( type_name , { } ) if field_remappings : if python_name : return field_remappings . get ( python_name ) elif json_name : if json_name in list ( field_remappings . values ( ) ) : return [ k for k in field_remappings if field_remappings [ k ] == json_name ] [ 0 ] return None | Common code for fetching a key or value from a remapping dict . |
18,918 | def _CheckForExistingMappings ( mapping_type , message_type , python_name , json_name ) : if mapping_type == 'field' : getter = GetCustomJsonFieldMapping elif mapping_type == 'enum' : getter = GetCustomJsonEnumMapping remapping = getter ( message_type , python_name = python_name ) if remapping is not None and remapping != json_name : raise exceptions . InvalidDataError ( 'Cannot add mapping for %s "%s", already mapped to "%s"' % ( mapping_type , python_name , remapping ) ) remapping = getter ( message_type , json_name = json_name ) if remapping is not None and remapping != python_name : raise exceptions . InvalidDataError ( 'Cannot add mapping for %s "%s", already mapped to "%s"' % ( mapping_type , json_name , remapping ) ) | Validate that no mappings exist for the given values . |
18,919 | def _AsMessageList ( msg ) : from apitools . base . py import extra_types def _IsRepeatedJsonValue ( msg ) : if isinstance ( msg , extra_types . JsonArray ) : return True if isinstance ( msg , extra_types . JsonValue ) and msg . array_value : return True return False if not _IsRepeatedJsonValue ( msg ) : raise ValueError ( 'invalid argument to _AsMessageList' ) if isinstance ( msg , extra_types . JsonValue ) : msg = msg . array_value if isinstance ( msg , extra_types . JsonArray ) : msg = msg . entries return msg | Convert the provided list - as - JsonValue to a list . |
18,920 | def _IsMap ( message , field ) : value = message . get_assigned_value ( field . name ) if not isinstance ( value , messages . Message ) : return False try : additional_properties = value . field_by_name ( 'additionalProperties' ) except KeyError : return False else : return additional_properties . repeated | Returns whether the field is actually a map - type . |
18,921 | def UnrecognizedFieldIter ( message , _edges = ( ) ) : if not isinstance ( message , messages . Message ) : return field_names = message . all_unrecognized_fields ( ) if field_names : yield _edges , field_names return for field in message . all_fields ( ) : value = message . get_assigned_value ( field . name ) if field . repeated : for i , item in enumerate ( value ) : repeated_edge = ProtoEdge ( EdgeType . REPEATED , field . name , i ) iter_ = UnrecognizedFieldIter ( item , _edges + ( repeated_edge , ) ) for ( e , y ) in iter_ : yield e , y elif _IsMap ( message , field ) : for key , item in _MapItems ( message , field ) : map_edge = ProtoEdge ( EdgeType . MAP , field . name , key ) iter_ = UnrecognizedFieldIter ( item , _edges + ( map_edge , ) ) for ( e , y ) in iter_ : yield e , y else : scalar_edge = ProtoEdge ( EdgeType . SCALAR , field . name , None ) iter_ = UnrecognizedFieldIter ( value , _edges + ( scalar_edge , ) ) for ( e , y ) in iter_ : yield e , y | Yields the locations of unrecognized fields within message . |
18,922 | def decode_field ( self , field , value ) : for decoder in _GetFieldCodecs ( field , 'decoder' ) : result = decoder ( field , value ) value = result . value if result . complete : return value if isinstance ( field , messages . MessageField ) : field_value = self . decode_message ( field . message_type , json . dumps ( value ) ) elif isinstance ( field , messages . EnumField ) : value = GetCustomJsonEnumMapping ( field . type , json_name = value ) or value try : field_value = super ( _ProtoJsonApiTools , self ) . decode_field ( field , value ) except messages . DecodeError : if not isinstance ( value , six . string_types ) : raise field_value = None else : field_value = super ( _ProtoJsonApiTools , self ) . decode_field ( field , value ) return field_value | Decode the given JSON value . |
18,923 | def encode_field ( self , field , value ) : for encoder in _GetFieldCodecs ( field , 'encoder' ) : result = encoder ( field , value ) value = result . value if result . complete : return value if isinstance ( field , messages . EnumField ) : if field . repeated : remapped_value = [ GetCustomJsonEnumMapping ( field . type , python_name = e . name ) or e . name for e in value ] else : remapped_value = GetCustomJsonEnumMapping ( field . type , python_name = value . name ) if remapped_value : return remapped_value if ( isinstance ( field , messages . MessageField ) and not isinstance ( field , message_types . DateTimeField ) ) : value = json . loads ( self . encode_message ( value ) ) return super ( _ProtoJsonApiTools , self ) . encode_field ( field , value ) | Encode the given value as JSON . |
18,924 | def load_class_by_path ( taskpath ) : return getattr ( importlib . import_module ( re . sub ( r"\.[^.]+$" , "" , taskpath ) ) , re . sub ( r"^.*\." , "" , taskpath ) ) | Given a taskpath returns the main task class . |
18,925 | def queue_raw_jobs ( queue , params_list , ** kwargs ) : from . queue import Queue queue_obj = Queue ( queue ) queue_obj . enqueue_raw_jobs ( params_list , ** kwargs ) | Queue some jobs on a raw queue |
18,926 | def queue_jobs ( main_task_path , params_list , queue = None , batch_size = 1000 ) : if len ( params_list ) == 0 : return [ ] if queue is None : task_def = context . get_current_config ( ) . get ( "tasks" , { } ) . get ( main_task_path ) or { } queue = task_def . get ( "queue" , "default" ) from . queue import Queue queue_obj = Queue ( queue ) if queue_obj . is_raw : raise Exception ( "Can't queue regular jobs on a raw queue" ) all_ids = [ ] for params_group in group_iter ( params_list , n = batch_size ) : context . metric ( "jobs.status.queued" , len ( params_group ) ) job_ids = Job . insert ( [ { "path" : main_task_path , "params" : params , "queue" : queue , "datequeued" : datetime . datetime . utcnow ( ) , "status" : "queued" } for params in params_group ] , w = 1 , return_jobs = False ) all_ids += job_ids queue_obj . notify ( len ( all_ids ) ) set_queues_size ( { queue : len ( all_ids ) } ) return all_ids | Queue multiple jobs on a regular queue |
18,927 | def fetch ( self , start = False , full_data = True ) : if self . id is None : return self if full_data is True : fields = None elif isinstance ( full_data , dict ) : fields = full_data else : fields = { "_id" : 0 , "path" : 1 , "params" : 1 , "status" : 1 , "retry_count" : 1 , } if start : self . datestarted = datetime . datetime . utcnow ( ) self . set_data ( self . collection . find_and_modify ( { "_id" : self . id , "status" : { "$nin" : [ "cancel" , "abort" , "maxretries" ] } } , { "$set" : { "status" : "started" , "datestarted" : self . datestarted , "worker" : self . worker . id } , "$unset" : { "dateexpires" : 1 } } , projection = fields ) ) context . metric ( "jobs.status.started" ) else : self . set_data ( self . collection . find_one ( { "_id" : self . id } , projection = fields ) ) if self . data is None : context . log . info ( "Job %s not found in MongoDB or status was cancelled!" % self . id ) self . stored = True return self | Get the current job data and possibly flag it as started . |
18,928 | def save ( self ) : if not self . saved and self . data and "progress" in self . data : self . collection . update ( { "_id" : self . id } , { "$set" : { "progress" : self . data [ "progress" ] } } ) self . saved = True | Persists the current job metadata to MongoDB . Will be called at each worker report . |
18,929 | def insert ( cls , jobs_data , queue = None , statuses_no_storage = None , return_jobs = True , w = None , j = None ) : now = datetime . datetime . utcnow ( ) for data in jobs_data : if data [ "status" ] == "started" : data [ "datestarted" ] = now no_storage = ( statuses_no_storage is not None ) and ( "started" in statuses_no_storage ) if no_storage and return_jobs : for data in jobs_data : data [ "_id" ] = ObjectId ( ) else : inserted = context . connections . mongodb_jobs . mrq_jobs . insert ( jobs_data , manipulate = True , w = w , j = j ) if return_jobs : jobs = [ ] for data in jobs_data : job = cls ( data [ "_id" ] , queue = queue ) job . set_data ( data ) job . statuses_no_storage = statuses_no_storage job . stored = ( not no_storage ) if data [ "status" ] == "started" : job . datestarted = data [ "datestarted" ] jobs . append ( job ) return jobs else : return inserted | Insert a job into MongoDB |
18,930 | def _attach_original_exception ( self , exc ) : original_exception = sys . exc_info ( ) if original_exception [ 0 ] is not None : exc . original_exception = original_exception | Often a retry will be raised inside an except block . This Keep track of the first exception for debugging purposes |
18,931 | def retry ( self , queue = None , delay = None , max_retries = None ) : max_retries = max_retries if max_retries is None : max_retries = self . max_retries if self . data . get ( "retry_count" , 0 ) >= max_retries : raise MaxRetriesInterrupt ( ) exc = RetryInterrupt ( ) exc . queue = queue or self . queue or self . data . get ( "queue" ) or "default" exc . retry_count = self . data . get ( "retry_count" , 0 ) + 1 exc . delay = delay if exc . delay is None : exc . delay = self . retry_delay self . _attach_original_exception ( exc ) raise exc | Marks the current job as needing to be retried . Interrupts it . |
18,932 | def requeue ( self , queue = None , retry_count = 0 ) : if not queue : if not self . data or not self . data . get ( "queue" ) : self . fetch ( full_data = { "_id" : 0 , "queue" : 1 , "path" : 1 } ) queue = self . data [ "queue" ] self . _save_status ( "queued" , updates = { "queue" : queue , "datequeued" : datetime . datetime . utcnow ( ) , "retry_count" : retry_count } ) | Requeues the current job . Doesn t interrupt it |
18,933 | def perform ( self ) : if self . data is None : return context . log . debug ( "Starting %s(%s)" % ( self . data [ "path" ] , self . data [ "params" ] ) ) task_class = load_class_by_path ( self . data [ "path" ] ) self . task = task_class ( ) self . task . is_main_task = True if not self . task . max_concurrency : result = self . task . run_wrapped ( self . data [ "params" ] ) else : if self . task . max_concurrency > 1 : raise NotImplementedError ( ) lock = None try : lock = context . connections . redis . lock ( self . redis_max_concurrency_key , timeout = self . timeout + 5 ) if not lock . acquire ( blocking = True , blocking_timeout = 0 ) : raise MaxConcurrencyInterrupt ( ) result = self . task . run_wrapped ( self . data [ "params" ] ) finally : try : if lock : lock . release ( ) except LockError : pass self . save_success ( result ) if context . get_current_config ( ) . get ( "trace_greenlets" ) : gevent . sleep ( 0 ) current_greenlet = gevent . getcurrent ( ) t = ( datetime . datetime . utcnow ( ) - self . datestarted ) . total_seconds ( ) context . log . debug ( "Job %s success: %0.6fs total, %0.6fs in greenlet, %s switches" % ( self . id , t , current_greenlet . _trace_time , current_greenlet . _trace_switches - 1 ) ) else : context . log . debug ( "Job %s success: %0.6fs total" % ( self . id , ( datetime . datetime . utcnow ( ) - self . datestarted ) . total_seconds ( ) ) ) return result | Loads and starts the main task for this job the saves the result . |
18,934 | def wait ( self , poll_interval = 1 , timeout = None , full_data = False ) : end_time = None if timeout : end_time = time . time ( ) + timeout while end_time is None or time . time ( ) < end_time : job_data = self . collection . find_one ( { "_id" : ObjectId ( self . id ) , "status" : { "$nin" : [ "started" , "queued" ] } } , projection = ( { "_id" : 0 , "result" : 1 , "status" : 1 } if not full_data else None ) ) if job_data : return job_data time . sleep ( poll_interval ) raise Exception ( "Waited for job result for %s seconds, timeout." % timeout ) | Wait for this job to finish . |
18,935 | def kill ( self , block = False , reason = "unknown" ) : current_greenletid = id ( gevent . getcurrent ( ) ) trace = "Job killed: %s" % reason for greenlet , job in context . _GLOBAL_CONTEXT [ "greenlets" ] . values ( ) : greenletid = id ( greenlet ) if job and job . id == self . id and greenletid != current_greenletid : greenlet . kill ( block = block ) trace += "\n\n--- Greenlet %s ---\n" % greenletid trace += "" . join ( traceback . format_stack ( greenlet . gr_frame ) ) context . _GLOBAL_CONTEXT [ "greenlets" ] . pop ( greenletid , None ) if reason == "timeout" and self . data [ "status" ] != "timeout" : updates = { "exceptiontype" : "TimeoutInterrupt" , "traceback" : trace } self . _save_status ( "timeout" , updates = updates , exception = False ) | Forcefully kill all greenlets associated with this job |
18,936 | def _save_traceback_history ( self , status , trace , job_exc ) : failure_date = datetime . datetime . utcnow ( ) new_history = { "date" : failure_date , "status" : status , "exceptiontype" : job_exc . __name__ } traces = trace . split ( "---- Original exception: -----" ) if len ( traces ) > 1 : new_history [ "original_traceback" ] = traces [ 1 ] worker = context . get_current_worker ( ) if worker : new_history [ "worker" ] = worker . id new_history [ "traceback" ] = traces [ 0 ] self . collection . update ( { "_id" : self . id } , { "$push" : { "traceback_history" : new_history } } ) | Create traceback history or add a new traceback to history . |
18,937 | def trace_memory_clean_caches ( self ) : urllib . parse . clear_cache ( ) re . purge ( ) linecache . clearcache ( ) copyreg . clear_extension_cache ( ) if hasattr ( fnmatch , "purge" ) : fnmatch . purge ( ) elif hasattr ( fnmatch , "_purge" ) : fnmatch . _purge ( ) if hasattr ( encodings , "_cache" ) and len ( encodings . _cache ) > 0 : encodings . _cache = { } for handler in context . log . handlers : handler . flush ( ) | Avoid polluting results with some builtin python caches |
18,938 | def trace_memory_start ( self ) : self . trace_memory_clean_caches ( ) objgraph . show_growth ( limit = 30 ) gc . collect ( ) self . _memory_start = self . worker . get_memory ( ) [ "total" ] | Starts measuring memory consumption |
18,939 | def trace_memory_stop ( self ) : self . trace_memory_clean_caches ( ) objgraph . show_growth ( limit = 30 ) trace_type = context . get_current_config ( ) [ "trace_memory_type" ] if trace_type : filename = '%s/%s-%s.png' % ( context . get_current_config ( ) [ "trace_memory_output_dir" ] , trace_type , self . id ) chain = objgraph . find_backref_chain ( random . choice ( objgraph . by_type ( trace_type ) ) , objgraph . is_proper_module ) objgraph . show_chain ( chain , filename = filename ) del filename del chain gc . collect ( ) self . _memory_stop = self . worker . get_memory ( ) [ "total" ] diff = self . _memory_stop - self . _memory_start context . log . debug ( "Memory diff for job %s : %s" % ( self . id , diff ) ) self . collection . update ( { "_id" : self . id } , { "$set" : { "memory_diff" : diff } } , w = 1 ) | Stops measuring memory consumption |
18,940 | def list_job_ids ( self , skip = 0 , limit = 20 ) : return [ str ( x [ "_id" ] ) for x in self . collection . find ( { "status" : "queued" } , sort = [ ( "_id" , - 1 if self . is_reverse else 1 ) ] , projection = { "_id" : 1 } ) ] | Returns a list of job ids on a queue |
18,941 | def dequeue_jobs ( self , max_jobs = 1 , job_class = None , worker = None ) : if job_class is None : from . job import Job job_class = Job count = 0 sort_order = [ ( "datequeued" , - 1 if self . is_reverse else 1 ) , ( "_id" , - 1 if self . is_reverse else 1 ) ] for i in range ( max_jobs ) : query = self . base_dequeue_query job_data = self . collection . find_one_and_update ( query , { "$set" : { "status" : "started" , "datestarted" : datetime . datetime . utcnow ( ) , "worker" : worker . id if worker else None } , "$unset" : { "dateexpires" : 1 } } , sort = sort_order , return_document = ReturnDocument . AFTER , projection = { "_id" : 1 , "path" : 1 , "params" : 1 , "status" : 1 , "retry_count" : 1 , "queue" : 1 , "datequeued" : 1 } ) if not job_data : break if worker : worker . status = "spawn" count += 1 context . metric ( "queues.%s.dequeued" % job_data [ "queue" ] , 1 ) job = job_class ( job_data [ "_id" ] , queue = self . id , start = False ) job . set_data ( job_data ) job . datestarted = datetime . datetime . utcnow ( ) context . metric ( "jobs.status.started" ) yield job context . metric ( "queues.all.dequeued" , count ) | Fetch a maximum of max_jobs from this queue |
18,942 | def jsonify ( * args , ** kwargs ) : return Response ( json . dumps ( dict ( * args , ** kwargs ) , cls = MongoJSONEncoder ) , mimetype = 'application/json' ) | jsonify with support for MongoDB ObjectId |
18,943 | def queuestats ( self ) : start_time = time . time ( ) log . debug ( "Starting queue stats..." ) queues = [ Queue ( q ) for q in Queue . all_known ( ) ] new_queues = { queue . id for queue in queues } old_queues = set ( self . queue_etas . keys ( ) ) for deleted_queue in old_queues . difference ( new_queues ) : self . queue_etas . pop ( deleted_queue ) t = time . time ( ) stats = { } for queue in queues : cnt = queue . count_jobs_to_dequeue ( ) eta = self . queue_etas [ queue . id ] . next ( cnt , t = t ) stats [ queue . id ] = "%d %s %d" % ( cnt , eta if eta is not None else "N" , int ( t ) ) with connections . redis . pipeline ( transaction = True ) as pipe : if random . randint ( 0 , 100 ) == 0 or len ( stats ) == 0 : pipe . delete ( self . redis_queuestats_key ) if len ( stats ) > 0 : pipe . hmset ( self . redis_queuestats_key , stats ) pipe . execute ( ) log . debug ( "... done queue stats in %0.4fs" % ( time . time ( ) - start_time ) ) | Compute ETAs for every known queue & subqueue |
18,944 | def subpool_map ( pool_size , func , iterable ) : from . context import get_current_job , set_current_job , log if not pool_size : return [ func ( * args ) for args in iterable ] counter = itertools_count ( ) current_job = get_current_job ( ) def inner_func ( * args ) : next ( counter ) if current_job : set_current_job ( current_job ) try : ret = func ( * args ) except Exception as exc : trace = traceback . format_exc ( ) exc . subpool_traceback = trace raise if current_job : set_current_job ( None ) return ret def inner_iterable ( ) : if current_job : set_current_job ( current_job ) for x in iterable : yield x if current_job : set_current_job ( None ) start_time = time . time ( ) pool = gevent . pool . Pool ( size = pool_size ) ret = pool . map ( inner_func , inner_iterable ( ) ) pool . join ( raise_error = True ) total_time = time . time ( ) - start_time log . debug ( "SubPool ran %s greenlets in %0.6fs" % ( counter , total_time ) ) return ret | Starts a Gevent pool and run a map . Takes care of setting current_job and cleaning up . |
18,945 | def subpool_imap ( pool_size , func , iterable , flatten = False , unordered = False , buffer_size = None ) : from . context import get_current_job , set_current_job , log if not pool_size : for args in iterable : yield func ( * args ) counter = itertools_count ( ) current_job = get_current_job ( ) def inner_func ( * args ) : next ( counter ) if current_job : set_current_job ( current_job ) try : ret = func ( * args ) except Exception as exc : trace = traceback . format_exc ( ) exc . subpool_traceback = trace raise if current_job : set_current_job ( None ) return ret def inner_iterable ( ) : if current_job : set_current_job ( current_job ) for x in iterable : yield x if current_job : set_current_job ( None ) start_time = time . time ( ) pool = gevent . pool . Pool ( size = pool_size ) if unordered : iterator = pool . imap_unordered ( inner_func , inner_iterable ( ) , maxsize = buffer_size or pool_size ) else : iterator = pool . imap ( inner_func , inner_iterable ( ) ) for x in iterator : if flatten : for y in x : yield y else : yield x pool . join ( raise_error = True ) total_time = time . time ( ) - start_time log . debug ( "SubPool ran %s greenlets in %0.6fs" % ( counter , total_time ) ) | Generator version of subpool_map . Should be used with unordered = True for optimal performance |
18,946 | def _hash_task ( task ) : params = task . get ( "params" ) if params : params = json . dumps ( sorted ( list ( task [ "params" ] . items ( ) ) , key = lambda x : x [ 0 ] ) ) full = [ str ( task . get ( x ) ) for x in [ "path" , "interval" , "dailytime" , "weekday" , "monthday" , "queue" ] ] full . extend ( [ str ( params ) ] ) return " " . join ( full ) | Returns a unique hash for identify a task and its params |
18,947 | def check_config_integrity ( self ) : tasks_by_hash = { _hash_task ( t ) : t for t in self . config_tasks } if len ( tasks_by_hash ) != len ( self . config_tasks ) : raise Exception ( "Fatal error: there was a hash duplicate in the scheduled tasks config." ) for h , task in tasks_by_hash . items ( ) : if task . get ( "monthday" ) and not task . get ( "dailytime" ) : raise Exception ( "Fatal error: you can't schedule a task with 'monthday' and without 'dailytime' (%s)" % h ) if task . get ( "weekday" ) and not task . get ( "dailytime" ) : raise Exception ( "Fatal error: you can't schedule a task with 'weekday' and without 'dailytime' (%s)" % h ) if not task . get ( "monthday" ) and not task . get ( "weekday" ) and not task . get ( "dailytime" ) and not task . get ( "interval" ) : raise Exception ( "Fatal error: scheduler must be specified one of monthday,weekday,dailytime,interval. (%s)" % h ) | Make sure the scheduler config is valid |
18,948 | def sync_config_tasks ( self ) : tasks_by_hash = { _hash_task ( t ) : t for t in self . config_tasks } for task in self . all_tasks : if tasks_by_hash . get ( task [ "hash" ] ) : del tasks_by_hash [ task [ "hash" ] ] else : self . collection . remove ( { "_id" : task [ "_id" ] } ) log . debug ( "Scheduler: deleted %s" % task [ "hash" ] ) for h , task in tasks_by_hash . items ( ) : task [ "hash" ] = h task [ "datelastqueued" ] = datetime . datetime . fromtimestamp ( 0 ) if task . get ( "dailytime" ) : task [ "dailytime" ] = datetime . datetime . combine ( datetime . datetime . utcnow ( ) , task [ "dailytime" ] ) task [ "interval" ] = 3600 * 24 if datetime . datetime . utcnow ( ) . time ( ) > task [ "dailytime" ] . time ( ) : task [ "datelastqueued" ] = datetime . datetime . utcnow ( ) self . collection . find_one_and_update ( { "hash" : task [ "hash" ] } , { "$set" : task } , upsert = True ) log . debug ( "Scheduler: added %s" % task [ "hash" ] ) | Performs the first sync of a list of tasks often defined in the config file . |
18,949 | def ratelimit ( key , limit , per = 1 , redis = None ) : if redis is None : redis = connections . redis now = int ( time . time ( ) ) k = "ratelimit:%s:%s" % ( key , now // per ) with redis . pipeline ( transaction = True ) as pipeline : pipeline . incr ( k , 1 ) pipeline . expire ( k , per + 10 ) value = pipeline . execute ( ) current = int ( value [ 0 ] ) - 1 if current >= limit : return 0 else : return limit - current | Returns an integer with the number of available actions for the current period in seconds . If zero rate was already reached . |
18,950 | def greenlet_logs ( self ) : while True : try : self . flush_logs ( ) except Exception as e : self . log . error ( "When flushing logs: %s" % e ) finally : time . sleep ( self . config [ "report_interval" ] ) | This greenlet always runs in background to update current logs in MongoDB every 10 seconds . |
18,951 | def refresh_queues ( self , fatal = False ) : try : queues = [ ] prefixes = [ q for q in self . config [ "queues" ] if q . endswith ( "/" ) ] known_subqueues = Queue . all_known ( prefixes = prefixes ) for q in self . config [ "queues" ] : queues . append ( Queue ( q ) ) if q . endswith ( "/" ) : for subqueue in known_subqueues : if subqueue . startswith ( q ) : queues . append ( Queue ( subqueue ) ) self . queues = queues except Exception as e : self . log . error ( "When refreshing subqueues: %s" , e ) if fatal : raise | Updates the list of currently known queues and subqueues |
18,952 | def get_worker_report ( self , with_memory = False ) : greenlets = [ ] for greenlet in list ( self . gevent_pool ) : g = { } short_stack = [ ] stack = traceback . format_stack ( greenlet . gr_frame ) for s in stack [ 1 : ] : if "/gevent/hub.py" in s : break short_stack . append ( s ) g [ "stack" ] = short_stack job = get_current_job ( id ( greenlet ) ) if job : job . save ( ) if job . data : g [ "path" ] = job . data [ "path" ] g [ "datestarted" ] = job . datestarted g [ "id" ] = str ( job . id ) g [ "time" ] = getattr ( greenlet , "_trace_time" , 0 ) g [ "switches" ] = getattr ( greenlet , "_trace_switches" , None ) if job . _current_io is not None : g [ "io" ] = job . _current_io greenlets . append ( g ) if ( not with_memory ) or ( self . config [ "add_network_latency" ] != "0" and self . config [ "add_network_latency" ] ) : cpu = { "user" : 0 , "system" : 0 , "percent" : 0 } mem = { "rss" : 0 , "swap" : 0 , "total" : 0 } else : cpu_times = self . process . cpu_times ( ) cpu = { "user" : cpu_times . user , "system" : cpu_times . system , "percent" : self . process . cpu_percent ( 0 ) } mem = self . get_memory ( ) whitelisted_config = [ "max_jobs" , "max_memory" "greenlets" , "processes" , "queues" , "dequeue_strategy" , "scheduler" , "name" , "local_ip" , "external_ip" , "agent_id" , "worker_group" ] io = None if self . _traced_io : io = { } for k , v in iteritems ( self . _traced_io ) : if k == "total" : io [ k ] = v else : io [ k ] = sorted ( list ( v . items ( ) ) , reverse = True , key = lambda x : x [ 1 ] ) used_pool_slots = len ( self . gevent_pool ) used_avg = self . pool_usage_average . next ( used_pool_slots ) return { "status" : self . status , "config" : { k : v for k , v in iteritems ( self . config ) if k in whitelisted_config } , "done_jobs" : self . done_jobs , "usage_avg" : used_avg / self . pool_size , "datestarted" : self . datestarted , "datereported" : datetime . datetime . utcnow ( ) , "name" : self . name , "io" : io , "_id" : str ( self . id ) , "process" : { "pid" : self . process . pid , "cpu" : cpu , "mem" : mem } , "jobs" : greenlets } | Returns a dict containing all the data we can about the current status of the worker and its jobs . |
18,953 | def greenlet_timeouts ( self ) : while True : now = datetime . datetime . utcnow ( ) for greenlet in list ( self . gevent_pool ) : job = get_current_job ( id ( greenlet ) ) if job and job . timeout and job . datestarted : expires = job . datestarted + datetime . timedelta ( seconds = job . timeout ) if now > expires : job . kill ( block = False , reason = "timeout" ) time . sleep ( 1 ) | This greenlet kills jobs in other greenlets if they timeout . |
18,954 | def wait_for_idle ( self ) : while True : time . sleep ( 0.01 ) with self . work_lock : if self . status != "wait" : continue if len ( self . gevent_pool ) > 0 : continue self . refresh_queues ( ) outcome , dequeue_jobs = self . work_once ( free_pool_slots = 1 , max_jobs = None ) if outcome is "wait" and dequeue_jobs == 0 : break | Waits until the worker has nothing more to do . Very useful in tests |
18,955 | def work_once ( self , free_pool_slots = 1 , max_jobs = None ) : dequeued_jobs = 0 available_queues = [ queue for queue in self . queues if queue . root_id not in self . paused_queues and queue . id not in self . paused_queues ] for queue_i in range ( len ( available_queues ) ) : queue = available_queues [ ( queue_i + self . queue_offset ) % len ( available_queues ) ] max_jobs_per_queue = free_pool_slots - dequeued_jobs if max_jobs_per_queue <= 0 : queue_i -= 1 break if self . config [ "dequeue_strategy" ] == "parallel" : max_jobs_per_queue = max ( 1 , int ( max_jobs_per_queue / ( len ( available_queues ) - queue_i ) ) ) for job in queue . dequeue_jobs ( max_jobs = max_jobs_per_queue , job_class = self . job_class , worker = self ) : dequeued_jobs += 1 self . gevent_pool . spawn ( self . perform_job , job ) if self . config [ "dequeue_strategy" ] == "parallel" : self . queue_offset = ( self . queue_offset + queue_i + 1 ) % len ( self . queues ) if max_jobs and self . done_jobs >= max_jobs : self . log . info ( "Reached max_jobs=%s" % self . done_jobs ) return "break" , dequeued_jobs if dequeued_jobs == 0 : if self . config [ "dequeue_strategy" ] == "burst" : self . log . info ( "Burst mode: stopping now because queues were empty" ) return "break" , dequeued_jobs return "wait" , dequeued_jobs return None , dequeued_jobs | Does one lookup for new jobs inside the inner work loop |
18,956 | def work_wait ( self ) : if len ( self . queues_with_notify ) > 0 : connections . redis . blpop ( * ( self . queues_with_notify + [ max ( 1 , int ( self . config [ "max_latency" ] ) ) ] ) ) else : gevent . sleep ( self . config [ "max_latency" ] ) | Wait for new jobs to arrive |
18,957 | def serialize_job_ids ( self , job_ids ) : if len ( job_ids ) == 0 or self . use_large_ids : return job_ids elif isinstance ( job_ids [ 0 ] , ObjectId ) : return [ x . binary for x in job_ids ] else : return [ bytes . fromhex ( str ( x ) ) for x in job_ids ] | Returns job_ids serialized for storage in Redis |
18,958 | def unserialize_job_ids ( self , job_ids ) : if len ( job_ids ) == 0 or self . use_large_ids : return job_ids else : return [ binascii . hexlify ( x . encode ( 'utf-8' ) if ( PY3 and isinstance ( x , str ) ) else x ) . decode ( 'ascii' ) for x in job_ids ] | Unserialize job_ids stored in Redis |
18,959 | def all_active ( cls ) : prefix = context . get_current_config ( ) [ "redis_prefix" ] queues = [ ] for key in context . connections . redis . keys ( ) : if key . startswith ( prefix ) : queues . append ( Queue ( key [ len ( prefix ) + 3 : ] ) ) return queues | List active queues based on their lengths in Redis . Warning uses the unscalable KEYS redis command |
18,960 | def all_known ( cls , sources = None , prefixes = None ) : sources = sources or ( "config" , "jobs" , "raw_subqueues" ) queues = set ( ) if "config" in sources and not prefixes : cfg = context . get_current_config ( ) queues_from_config = [ t . get ( "queue" ) for t in ( cfg . get ( "tasks" ) or { } ) . values ( ) if t . get ( "queue" ) ] queues_from_config += Queue . get_queues_config ( ) . keys ( ) queues_from_config += [ t . get ( "retry_queue" ) for t in Queue . get_queues_config ( ) . values ( ) if t . get ( "retry_queue" ) ] queues |= set ( queues_from_config ) if "jobs" in sources : for q in context . connections . mongodb_jobs . mrq_jobs . distinct ( "queue" ) : if prefixes and not any ( q . startswith ( p ) for p in prefixes ) : continue queues . add ( q ) if "raw_subqueues" in sources : for q in Queue . get_queues_config ( ) : if prefixes and not any ( q + "/" == p for p in prefixes ) : continue queue_obj = Queue ( q ) if queue_obj . is_raw and queue_obj . has_subqueues : queues |= queue_obj . get_known_subqueues ( ) return queues | List all currently known queues |
18,961 | def all ( cls ) : queues = { x : 0 for x in Queue . get_queues_config ( ) } stats = list ( context . connections . mongodb_jobs . mrq_jobs . aggregate ( [ { "$match" : { "status" : "queued" } } , { "$group" : { "_id" : "$queue" , "jobs" : { "$sum" : 1 } } } ] ) ) queues . update ( { x [ "_id" ] : x [ "jobs" ] for x in stats } ) return queues | List all queues in MongoDB via aggregation with their queued jobs counts . Might be slow . |
18,962 | def notify ( self , new_jobs_count ) : if not self . use_notify ( ) : return count = min ( new_jobs_count , 100 ) notify_key = redis_key ( "notify" , self ) context . connections . redis . lpush ( notify_key , * ( [ 1 ] * count ) ) context . connections . redis . expire ( notify_key , max ( 1 , int ( context . get_current_config ( ) [ "max_latency" ] * 2 ) ) ) | We just queued new_jobs_count jobs on this queue wake up the workers if needed |
18,963 | def empty ( self ) : with context . connections . redis . pipeline ( transaction = True ) as pipe : pipe . delete ( self . redis_key ) pipe . delete ( self . redis_key_known_subqueues ) pipe . execute ( ) | Empty a queue . |
18,964 | def enqueue_raw_jobs ( self , params_list ) : if len ( params_list ) == 0 : return if self . is_subqueue : context . connections . redis . sadd ( self . redis_key_known_subqueues , self . id ) if self . is_sorted : if not isinstance ( params_list , dict ) and self . is_timed : now = time . time ( ) params_list = { x : now for x in params_list } context . connections . redis . zadd ( self . redis_key , ** params_list ) elif self . is_set : context . connections . redis . sadd ( self . redis_key , * params_list ) else : context . connections . redis . rpush ( self . redis_key , * params_list ) context . metric ( "queues.%s.enqueued" % self . id , len ( params_list ) ) context . metric ( "queues.all.enqueued" , len ( params_list ) ) | Add Jobs to this queue with raw parameters . They are not yet in MongoDB . |
18,965 | def remove_raw_jobs ( self , params_list ) : if len ( params_list ) == 0 : return if self . is_sorted : context . connections . redis . zrem ( self . redis_key , * iter ( params_list ) ) elif self . is_set : context . connections . redis . srem ( self . redis_key , * params_list ) else : for k in params_list : context . connections . redis . lrem ( self . redis_key , 1 , k ) context . metric ( "queues.%s.removed" % self . id , len ( params_list ) ) context . metric ( "queues.all.removed" , len ( params_list ) ) | Remove jobs from a raw queue with their raw params . |
18,966 | def count_jobs_to_dequeue ( self ) : if self . is_timed : return context . connections . redis . zcount ( self . redis_key , "-inf" , time . time ( ) ) else : return self . size ( ) | Returns the number of jobs that can be dequeued right now from the queue . |
18,967 | def get_sorted_graph ( self , start = 0 , stop = 100 , slices = 100 , include_inf = False , exact = False ) : if not self . is_sorted : raise Exception ( "Not a sorted queue" ) with context . connections . redis . pipeline ( transaction = exact ) as pipe : interval = old_div ( float ( stop - start ) , slices ) for i in range ( 0 , slices ) : pipe . zcount ( self . redis_key , ( start + i * interval ) , "(%s" % ( start + ( i + 1 ) * interval ) ) if include_inf : pipe . zcount ( self . redis_key , stop , "+inf" ) pipe . zcount ( self . redis_key , "-inf" , "(%s" % start ) data = pipe . execute ( ) if include_inf : return data [ - 1 : ] + data [ : - 1 ] return data | Returns a graph of the distribution of jobs in a sorted set |
18,968 | def install_signal_handlers ( self ) : self . graceful_stop = False def request_shutdown_now ( ) : self . shutdown_now ( ) def request_shutdown_graceful ( ) : if self . graceful_stop : self . shutdown_now ( ) else : self . graceful_stop = True self . shutdown_graceful ( ) gevent . signal ( signal . SIGINT , request_shutdown_graceful ) gevent . signal ( signal . SIGTERM , request_shutdown_now ) | Handle events like Ctrl - C from the command line . |
18,969 | def set_commands ( self , commands , timeout = None ) : self . desired_commands = commands target_commands = list ( self . desired_commands ) for process in list ( self . processes ) : found = False for i in range ( len ( target_commands ) ) : if process [ "command" ] == target_commands [ i ] : target_commands . pop ( i ) found = True break if not found : self . stop_process ( process , timeout ) for command in target_commands : self . spawn ( command ) | Sets the processes desired commands for this pool and manages diff to reach that state |
18,970 | def spawn ( self , command ) : env = dict ( os . environ ) env [ "MRQ_IS_SUBPROCESS" ] = "1" env . update ( self . extra_env or { } ) parts = shlex . split ( command ) for p in list ( parts ) : if "=" in p : env [ p . split ( "=" ) [ 0 ] ] = p [ len ( p . split ( "=" ) [ 0 ] ) + 1 : ] parts . pop ( 0 ) else : break p = subprocess . Popen ( parts , shell = False , close_fds = True , env = env , cwd = os . getcwd ( ) ) self . processes . append ( { "subprocess" : p , "pid" : p . pid , "command" : command , "psutil" : psutil . Process ( pid = p . pid ) } ) | Spawns a new process and adds it to the pool |
18,971 | def wait ( self ) : while True : if not self . greenlet_watch : break if self . stopping : gevent . sleep ( 0.1 ) else : gevent . sleep ( 1 ) | Waits for the pool to be fully stopped |
18,972 | def watch_processes ( self ) : for process in list ( self . processes ) : self . watch_process ( process ) self . processes = [ p for p in self . processes if not p . get ( "dead" ) ] if self . stopping and len ( self . processes ) == 0 : self . stop_watch ( ) | Manages the status of all the known processes |
18,973 | def watch_process ( self , process ) : status = process [ "psutil" ] . status ( ) if process . get ( "terminate" ) : if status in ( "zombie" , "dead" ) : process [ "dead" ] = True elif process . get ( "terminate_at" ) : if time . time ( ) > ( process [ "terminate_at" ] + 5 ) : log . warning ( "Process %s had to be sent SIGKILL" % ( process [ "pid" ] , ) ) process [ "subprocess" ] . send_signal ( signal . SIGKILL ) elif time . time ( ) > process [ "terminate_at" ] : log . warning ( "Process %s had to be sent SIGTERM" % ( process [ "pid" ] , ) ) process [ "subprocess" ] . send_signal ( signal . SIGTERM ) else : if status in ( "zombie" , "dead" ) : process [ "dead" ] = True self . spawn ( process [ "command" ] ) elif status not in ( "running" , "sleeping" ) : log . warning ( "Process %s was in status %s" % ( process [ "pid" ] , status ) ) | Manages the status of a single process |
18,974 | def stop ( self , timeout = None ) : self . stopping = True for process in list ( self . processes ) : self . stop_process ( process , timeout = timeout ) | Initiates a graceful stop of the processes |
18,975 | def stop_process ( self , process , timeout = None ) : process [ "terminate" ] = True if timeout is not None : process [ "terminate_at" ] = time . time ( ) + timeout process [ "subprocess" ] . send_signal ( signal . SIGINT ) | Initiates a graceful stop of one process |
18,976 | def terminate ( self ) : for process in list ( self . processes ) : process [ "subprocess" ] . send_signal ( signal . SIGTERM ) self . stop_watch ( ) | Terminates the processes right now with a SIGTERM |
18,977 | def kill ( self ) : for process in list ( self . processes ) : process [ "subprocess" ] . send_signal ( signal . SIGKILL ) self . stop_watch ( ) | Kills the processes right now with a SIGKILL |
18,978 | def stop_watch ( self ) : if self . greenlet_watch : self . greenlet_watch . kill ( block = False ) self . greenlet_watch = None | Stops the periodic watch greenlet thus the pool itself |
18,979 | def send ( messages = None , conf = None , parse_mode = None , disable_web_page_preview = False , files = None , images = None , captions = None , locations = None , timeout = 30 ) : conf = expanduser ( conf ) if conf else get_config_path ( ) config = configparser . ConfigParser ( ) if not config . read ( conf ) or not config . has_section ( "telegram" ) : raise ConfigError ( "Config not found" ) missing_options = set ( [ "token" , "chat_id" ] ) - set ( config . options ( "telegram" ) ) if len ( missing_options ) > 0 : raise ConfigError ( "Missing options in config: {}" . format ( ", " . join ( missing_options ) ) ) token = config . get ( "telegram" , "token" ) chat_id = int ( config . get ( "telegram" , "chat_id" ) ) if config . get ( "telegram" , "chat_id" ) . isdigit ( ) else config . get ( "telegram" , "chat_id" ) request = telegram . utils . request . Request ( read_timeout = timeout ) bot = telegram . Bot ( token , request = request ) if parse_mode == "text" : parse_mode = None if messages : def send_message ( message ) : return bot . send_message ( chat_id = chat_id , text = message , parse_mode = parse_mode , disable_web_page_preview = disable_web_page_preview ) for m in messages : if len ( m ) > MAX_MESSAGE_LENGTH : warn ( markup ( "Message longer than MAX_MESSAGE_LENGTH=%d, splitting into smaller messages." % MAX_MESSAGE_LENGTH , "red" ) ) ms = split_message ( m , MAX_MESSAGE_LENGTH ) for m in ms : send_message ( m ) elif len ( m ) == 0 : continue else : send_message ( m ) if files : for f in files : bot . send_document ( chat_id = chat_id , document = f ) if images : if captions : captions += [ None ] * ( len ( images ) - len ( captions ) ) for ( i , c ) in zip ( images , captions ) : bot . send_photo ( chat_id = chat_id , photo = i , caption = c ) else : for i in images : bot . send_photo ( chat_id = chat_id , photo = i ) if locations : it = iter ( locations ) for loc in it : if "," in loc : lat , lon = loc . split ( "," ) else : lat = loc lon = next ( it ) bot . send_location ( chat_id = chat_id , latitude = float ( lat ) , longitude = float ( lon ) ) | Send data over Telegram . All arguments are optional . |
18,980 | def split_message ( message , max_length ) : ms = [ ] while len ( message ) > max_length : ms . append ( message [ : max_length ] ) message = message [ max_length : ] ms . append ( message ) return ms | Split large message into smaller messages each smaller than the max_length . |
18,981 | def handle_message ( self , msg ) : if msg . msg_id not in self . msg_types : self . report_message_type ( msg ) self . msg_types . add ( msg . msg_id ) self . tc . message ( 'inspection' , typeId = msg . msg_id , message = msg . msg , file = os . path . relpath ( msg . abspath ) . replace ( '\\' , '/' ) , line = str ( msg . line ) , SEVERITY = TC_SEVERITY . get ( msg . category ) ) | Issues an inspection service message based on a PyLint message . Registers each message type upon first encounter . |
18,982 | def display_reports ( self , layout ) : try : score = self . linter . stats [ 'global_note' ] except ( AttributeError , KeyError ) : pass else : self . tc . message ( 'buildStatisticValue' , key = 'PyLintScore' , value = str ( score ) ) | Issues the final PyLint score as a TeamCity build statistic value |
18,983 | def set_webhook_handler ( self , scope , callback ) : scope = scope . lower ( ) if scope == 'after_send' : self . _after_send = callback return if scope not in Page . WEBHOOK_ENDPOINTS : raise ValueError ( "The 'scope' argument must be one of {}." . format ( Page . WEBHOOK_ENDPOINTS ) ) self . _webhook_handlers [ scope ] = callback | Allows adding a webhook_handler as an alternative to the decorators |
18,984 | def getWindows ( input ) : with rasterio . open ( input ) as src : return [ [ window , ij ] for ij , window in src . block_windows ( ) ] | Get a source s windows |
18,985 | def read_function ( data , window , ij , g_args ) : output = ( data [ 0 ] > numpy . mean ( data [ 0 ] ) ) . astype ( data [ 0 ] . dtype ) * data [ 0 ] . max ( ) return output | Takes an array and sets any value above the mean to the max the rest to 0 |
18,986 | def tb_capture ( func ) : @ wraps ( func ) def wrapper ( * args , ** kwds ) : try : return func ( * args , ** kwds ) except Exception : raise MuchoChildError ( ) return wrapper | A decorator which captures worker tracebacks . |
18,987 | def init_worker ( inpaths , g_args ) : global global_args global srcs global_args = g_args srcs = [ rasterio . open ( i ) for i in inpaths ] | The multiprocessing worker initializer |
18,988 | def safe_serialize_type ( l ) : if isinstance ( l , str ) : return l elif isinstance ( l , list ) : return '%s_%s_' % ( l [ 0 ] , '' . join ( map ( safe_serialize_type , l [ 1 : ] ) ) ) else : return str ( l ) | serialize only with letters numbers and _ |
18,989 | def method_call ( receiver , message , args , pseudo_type = None ) : if not isinstance ( receiver , Node ) : receiver = local ( receiver ) return Node ( 'method_call' , receiver = receiver , message = message , args = args , pseudo_type = pseudo_type ) | A shortcut for a method call expands a str receiver to a identifier |
18,990 | def call ( function , args , pseudo_type = None ) : if not isinstance ( function , Node ) : function = local ( function ) return Node ( 'call' , function = function , args = args , pseudo_type = pseudo_type ) | A shortcut for a call with an identifier callee |
18,991 | def to_node ( value ) : if isinstance ( value , Node ) : return value elif isinstance ( value , str ) : return Node ( 'string' , value = value , pseudo_type = 'String' ) elif isinstance ( value , int ) : return Node ( 'int' , value = value , pseudo_type = 'Int' ) elif isinstance ( value , bool ) : return Node ( 'boolean' , value = str ( value ) . lower ( ) , pseudo_type = 'Boolean' ) elif isinstance ( value , float ) : return Node ( 'float' , value = value , pseudo_type = 'Float' ) elif value is None : return Node ( 'null' , pseudo_type = 'Void' ) else : 1 / 0 | Expand to a literal node if a basic type otherwise just returns the node |
18,992 | def to_op ( op , reversed = False ) : def transformer ( receiver , param , pseudo_type ) : if not reversed : return Node ( 'binary_op' , op = op , left = receiver , right = param , pseudo_type = pseudo_type ) return Node ( 'binary_op' , op = op , left = param , right = receiver , pseudo_type = pseudo_type ) return transformer | create a function that transforms a method to a binary op |
18,993 | def leaking ( self , z , module , name , node , context , * data ) : args = [ node . receiver ] + node . args if node . type == 'standard_method_call' else node . args z = z ( module , name , args ) if context == 'expression' : if isinstance ( z , NormalLeakingNode ) : leaked_nodes , exp = z . as_expression ( ) else : leaked_nodes , exp = z . as_expression ( ) zz = local ( z . temp_name ( getattr ( z , 'default' , '' ) ) , node . pseudo_type ) leaked_nodes = z . as_assignment ( zz ) exp = local ( zz , node . pseudo_type ) if exp is None or exp . pseudo_type == 'Void' : raise PseudoTypeError ( "pseudo can't handle values with void type in expression: %s?%s" % ( module , name ) ) self . leaked_nodes += leaked_nodes return exp elif context == 'assignment' : if isinstance ( z , NormalLeakingNode ) : leaked_nodes , exp = z . as_expression ( ) if exp is None or exp . pseudo_type == 'Void' : raise PseudoTypeError ( "pseudo can't handle values with void type in expression: %s?%s" % ( module , name ) ) self . leaked_nodes += leaked_nodes return assignment ( data [ 0 ] , exp ) else : self . leaked_nodes += z . as_assignment ( data [ 0 ] ) return None elif context == 'block' : leaked_nodes , exp = z . as_expression ( ) self . leaked_nodes += leaked_nodes return exp | an expression leaking ... |
18,994 | def _expand_api ( self , api , receiver , args , pseudo_type , equivalent ) : if callable ( api ) : if receiver : return api ( receiver , * ( args + [ pseudo_type ] ) ) else : return api ( * ( args + [ pseudo_type ] ) ) elif isinstance ( api , str ) : if '(' in api : call_api , arg_code = api [ : - 1 ] . split ( '(' ) new_args = [ self . _parse_part ( a . strip ( ) , receiver , args , equivalent ) for a in arg_code . split ( ',' ) ] else : call_api , arg_code = api , '' new_args = args if '#' in call_api : a , b = call_api . split ( '#' ) method_receiver = self . _parse_part ( a , receiver , args , equivalent ) if a else receiver return method_call ( method_receiver , b , new_args , pseudo_type = pseudo_type ) elif '.' in call_api : a , b = call_api . split ( '.' ) static_receiver = self . _parse_part ( a , receiver , args , equivalent ) if a else receiver if b [ - 1 ] != '!' : return Node ( 'static_call' , receiver = static_receiver , message = b , args = new_args , pseudo_type = pseudo_type ) else : return Node ( 'attr' , object = static_receiver , attr = b [ : - 1 ] , pseudo_type = pseudo_type ) else : if receiver : return call ( call_api , [ receiver ] + new_args , pseudo_type = pseudo_type ) else : return call ( call_api , new_args , pseudo_type = pseudo_type ) else : raise PseudoDSLError ( '%s not supported by api dsl' % str ( api ) ) | the heart of api translation dsl |
18,995 | def generate_main ( main , language ) : base = { 'type' : 'module' , 'custom_exceptions' : [ ] , 'definitions' : [ ] , 'constants' : [ ] , 'main' : [ ] , 'pseudo_type' : 'Void' } base_node = pseudo . loader . convert_to_syntax_tree ( base ) if isinstance ( main , dict ) : base [ 'main' ] = [ main ] elif isinstance ( main , list ) : if main and isinstance ( main [ 0 ] , dict ) : base [ 'main' ] = main else : base_node . main = main elif isinstance ( main , pseudo . pseudo_tree . Node ) : base_node . main = [ main ] if base [ 'main' ] : q = pseudo . loader . convert_to_syntax_tree ( base ) else : q = base_node return generate ( q , language ) | generate output code for main in language |
18,996 | def ported_string ( raw_data , encoding = 'utf-8' , errors = 'ignore' ) : if not raw_data : return six . text_type ( ) if isinstance ( raw_data , six . text_type ) : return raw_data . strip ( ) if six . PY2 : try : return six . text_type ( raw_data , encoding , errors ) . strip ( ) except LookupError : return six . text_type ( raw_data , "utf-8" , errors ) . strip ( ) if six . PY3 : try : return six . text_type ( raw_data , encoding ) . strip ( ) except ( LookupError , UnicodeDecodeError ) : return six . text_type ( raw_data , "utf-8" , errors ) . strip ( ) | Give as input raw data and output a str in Python 3 and unicode in Python 2 . |
18,997 | def decode_header_part ( header ) : if not header : return six . text_type ( ) output = six . text_type ( ) try : for d , c in decode_header ( header ) : c = c if c else 'utf-8' output += ported_string ( d , c , 'ignore' ) except ( HeaderParseError , UnicodeError ) : log . error ( "Failed decoding header part: {}" . format ( header ) ) output += header return output | Given an raw header returns an decoded header |
18,998 | def fingerprints ( data ) : Hashes = namedtuple ( 'Hashes' , "md5 sha1 sha256 sha512" ) if six . PY2 : if not isinstance ( data , str ) : data = data . encode ( "utf-8" ) elif six . PY3 : if not isinstance ( data , bytes ) : data = data . encode ( "utf-8" ) md5 = hashlib . md5 ( ) md5 . update ( data ) md5 = md5 . hexdigest ( ) sha1 = hashlib . sha1 ( ) sha1 . update ( data ) sha1 = sha1 . hexdigest ( ) sha256 = hashlib . sha256 ( ) sha256 . update ( data ) sha256 = sha256 . hexdigest ( ) sha512 = hashlib . sha512 ( ) sha512 . update ( data ) sha512 = sha512 . hexdigest ( ) return Hashes ( md5 , sha1 , sha256 , sha512 ) | This function return the fingerprints of data . |
18,999 | def msgconvert ( email ) : log . debug ( "Started converting Outlook email" ) temph , temp = tempfile . mkstemp ( prefix = "outlook_" ) command = [ "msgconvert" , "--outfile" , temp , email ] try : if six . PY2 : with open ( os . devnull , "w" ) as devnull : out = subprocess . Popen ( command , stdin = subprocess . PIPE , stdout = subprocess . PIPE , stderr = devnull ) elif six . PY3 : out = subprocess . Popen ( command , stdin = subprocess . PIPE , stdout = subprocess . PIPE , stderr = subprocess . DEVNULL ) except OSError : message = "To use this function you must install 'msgconvert' tool" log . exception ( message ) raise MailParserOSError ( message ) else : stdoutdata , _ = out . communicate ( ) return temp , stdoutdata . decode ( "utf-8" ) . strip ( ) finally : os . close ( temph ) | Exec msgconvert tool to convert msg Outlook mail in eml mail format |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.