idx int64 0 251k | question stringlengths 53 3.53k | target stringlengths 5 1.23k | len_question int64 20 893 | len_target int64 3 238 |
|---|---|---|---|---|
230,800 | def run_worker ( self , queues = None , module = None , exclude_queues = None , max_workers_per_queue = None , store_tracebacks = None ) : try : module_names = module or '' for module_name in module_names . split ( ',' ) : module_name = module_name . strip ( ) if module_name : importlib . import_module ( module_name ) self . log . debug ( 'imported module' , module_name = module_name ) worker = Worker ( self , queues . split ( ',' ) if queues else None , exclude_queues . split ( ',' ) if exclude_queues else None , max_workers_per_queue = max_workers_per_queue , store_tracebacks = store_tracebacks ) worker . run ( ) except Exception : self . log . exception ( 'Unhandled exception' ) raise | Main worker entry point method . | 194 | 6 |
230,801 | def delay ( self , func , args = None , kwargs = None , queue = None , hard_timeout = None , unique = None , lock = None , lock_key = None , when = None , retry = None , retry_on = None , retry_method = None , max_queue_size = None ) : task = Task ( self , func , args = args , kwargs = kwargs , queue = queue , hard_timeout = hard_timeout , unique = unique , lock = lock , lock_key = lock_key , retry = retry , retry_on = retry_on , retry_method = retry_method ) task . delay ( when = when , max_queue_size = max_queue_size ) return task | Queues a task . See README . rst for an explanation of the options . | 169 | 18 |
230,802 | def get_queue_sizes ( self , queue ) : states = [ QUEUED , SCHEDULED , ACTIVE ] pipeline = self . connection . pipeline ( ) for state in states : pipeline . zcard ( self . _key ( state , queue ) ) results = pipeline . execute ( ) return dict ( zip ( states , results ) ) | Get the queue s number of tasks in each state . | 75 | 11 |
230,803 | def get_queue_system_lock ( self , queue ) : key = self . _key ( LOCK_REDIS_KEY , queue ) return Semaphore . get_system_lock ( self . connection , key ) | Get system lock timeout | 48 | 4 |
230,804 | def set_queue_system_lock ( self , queue , timeout ) : key = self . _key ( LOCK_REDIS_KEY , queue ) Semaphore . set_system_lock ( self . connection , key , timeout ) | Set system lock on a queue . | 51 | 7 |
230,805 | def _install_signal_handlers ( self ) : def request_stop ( signum , frame ) : self . _stop_requested = True self . log . info ( 'stop requested, waiting for task to finish' ) signal . signal ( signal . SIGINT , request_stop ) signal . signal ( signal . SIGTERM , request_stop ) | Sets up signal handlers for safely stopping the worker . | 77 | 11 |
230,806 | def _uninstall_signal_handlers ( self ) : signal . signal ( signal . SIGINT , signal . SIG_DFL ) signal . signal ( signal . SIGTERM , signal . SIG_DFL ) | Restores default signal handlers . | 47 | 6 |
230,807 | def _filter_queues ( self , queues ) : def match ( queue ) : """ Returns whether the given queue should be included by checking each part of the queue name. """ for part in reversed_dotted_parts ( queue ) : if part in self . exclude_queues : return False if part in self . only_queues : return True return not self . only_queues return [ q for q in queues if match ( q ) ] | Applies the queue filter to the given list of queues and returns the queues that match . Note that a queue name matches any subqueues starting with the name followed by a date . For example foo will match both foo and foo . bar . | 95 | 49 |
230,808 | def _worker_queue_scheduled_tasks ( self ) : queues = set ( self . _filter_queues ( self . connection . smembers ( self . _key ( SCHEDULED ) ) ) ) now = time . time ( ) for queue in queues : # Move due items from the SCHEDULED queue to the QUEUED queue. If # items were moved, remove the queue from the scheduled set if it # is empty, and add it to the queued set so the task gets picked # up. If any unique tasks are already queued, don't update their # queue time (because the new queue time would be later). result = self . scripts . zpoppush ( self . _key ( SCHEDULED , queue ) , self . _key ( QUEUED , queue ) , self . config [ 'SCHEDULED_TASK_BATCH_SIZE' ] , now , now , if_exists = ( 'noupdate' , ) , on_success = ( 'update_sets' , queue , self . _key ( SCHEDULED ) , self . _key ( QUEUED ) ) , ) self . log . debug ( 'scheduled tasks' , queue = queue , qty = len ( result ) ) # XXX: ideally this would be in the same pipeline, but we only want # to announce if there was a result. if result : self . connection . publish ( self . _key ( 'activity' ) , queue ) self . _did_work = True | Helper method that takes due tasks from the SCHEDULED queue and puts them in the QUEUED queue for execution . This should be called periodically . | 334 | 32 |
230,809 | def _wait_for_new_tasks ( self , timeout = 0 , batch_timeout = 0 ) : new_queue_found = False start_time = batch_exit = time . time ( ) while True : # Check to see if batch_exit has been updated if batch_exit > start_time : pubsub_sleep = batch_exit - time . time ( ) else : pubsub_sleep = start_time + timeout - time . time ( ) message = self . _pubsub . get_message ( timeout = 0 if pubsub_sleep < 0 or self . _did_work else pubsub_sleep ) # Pull remaining messages off of channel while message : if message [ 'type' ] == 'message' : new_queue_found , batch_exit = self . _process_queue_message ( message [ 'data' ] , new_queue_found , batch_exit , start_time , timeout , batch_timeout ) message = self . _pubsub . get_message ( ) if self . _did_work : break # Exit immediately if we did work during the last # execution loop because there might be more work to do elif time . time ( ) >= batch_exit and new_queue_found : break # After finding a new queue we can wait until the # batch timeout expires elif time . time ( ) - start_time > timeout : break | Check activity channel and wait as necessary . | 294 | 8 |
230,810 | def _execute_forked ( self , tasks , log ) : success = False execution = { } assert len ( tasks ) task_func = tasks [ 0 ] . serialized_func assert all ( [ task_func == task . serialized_func for task in tasks [ 1 : ] ] ) execution [ 'time_started' ] = time . time ( ) exc = None exc_info = None try : func = tasks [ 0 ] . func is_batch_func = getattr ( func , '_task_batch' , False ) g [ 'current_task_is_batch' ] = is_batch_func if is_batch_func : # Batch process if the task supports it. params = [ { 'args' : task . args , 'kwargs' : task . kwargs , } for task in tasks ] task_timeouts = [ task . hard_timeout for task in tasks if task . hard_timeout is not None ] hard_timeout = ( ( max ( task_timeouts ) if task_timeouts else None ) or getattr ( func , '_task_hard_timeout' , None ) or self . config [ 'DEFAULT_HARD_TIMEOUT' ] ) g [ 'current_tasks' ] = tasks with UnixSignalDeathPenalty ( hard_timeout ) : func ( params ) else : # Process sequentially. for task in tasks : hard_timeout = ( task . hard_timeout or getattr ( func , '_task_hard_timeout' , None ) or self . config [ 'DEFAULT_HARD_TIMEOUT' ] ) g [ 'current_tasks' ] = [ task ] with UnixSignalDeathPenalty ( hard_timeout ) : func ( * task . args , * * task . kwargs ) except RetryException as exc : execution [ 'retry' ] = True if exc . method : execution [ 'retry_method' ] = serialize_retry_method ( exc . method ) execution [ 'log_error' ] = exc . log_error execution [ 'exception_name' ] = serialize_func_name ( exc . __class__ ) exc_info = exc . exc_info or sys . exc_info ( ) except ( JobTimeoutException , Exception ) as exc : execution [ 'exception_name' ] = serialize_func_name ( exc . __class__ ) exc_info = sys . exc_info ( ) else : success = True if not success : execution [ 'time_failed' ] = time . time ( ) if self . store_tracebacks : # Currently we only log failed task executions to Redis. execution [ 'traceback' ] = '' . join ( traceback . 
format_exception ( * exc_info ) ) execution [ 'success' ] = success execution [ 'host' ] = socket . gethostname ( ) serialized_execution = json . dumps ( execution ) for task in tasks : self . connection . rpush ( self . _key ( 'task' , task . id , 'executions' ) , serialized_execution ) return success | Executes the tasks in the forked process . Multiple tasks can be passed for batch processing . However they must all use the same function and will share the execution entry . | 668 | 34 |
230,811 | def _get_queue_batch_size ( self , queue ) : # Fetch one item unless this is a batch queue. # XXX: It would be more efficient to loop in reverse order and break. batch_queues = self . config [ 'BATCH_QUEUES' ] batch_size = 1 for part in dotted_parts ( queue ) : if part in batch_queues : batch_size = batch_queues [ part ] return batch_size | Get queue batch size . | 99 | 5 |
230,812 | def _get_queue_lock ( self , queue , log ) : max_workers = self . max_workers_per_queue # Check if this is single worker queue for part in dotted_parts ( queue ) : if part in self . single_worker_queues : log . debug ( 'single worker queue' ) max_workers = 1 break # Max worker queues require us to get a queue lock before # moving tasks if max_workers : queue_lock = Semaphore ( self . connection , self . _key ( LOCK_REDIS_KEY , queue ) , self . id , max_locks = max_workers , timeout = self . config [ 'ACTIVE_TASK_UPDATE_TIMEOUT' ] ) acquired , locks = queue_lock . acquire ( ) if not acquired : return None , True log . debug ( 'acquired queue lock' , locks = locks ) else : queue_lock = None return queue_lock , False | Get queue lock for max worker queues . | 202 | 8 |
230,813 | def _heartbeat ( self , queue , task_ids ) : now = time . time ( ) self . connection . zadd ( self . _key ( ACTIVE , queue ) , * * { task_id : now for task_id in task_ids } ) | Updates the heartbeat for the given task IDs to prevent them from timing out and being requeued . | 57 | 21 |
230,814 | def _process_queue_message ( self , message_queue , new_queue_found , batch_exit , start_time , timeout , batch_timeout ) : for queue in self . _filter_queues ( [ message_queue ] ) : if queue not in self . _queue_set : if not new_queue_found : new_queue_found = True batch_exit = time . time ( ) + batch_timeout # Limit batch_exit to max timeout if batch_exit > start_time + timeout : batch_exit = start_time + timeout self . _queue_set . add ( queue ) self . log . debug ( 'new queue' , queue = queue ) return new_queue_found , batch_exit | Process a queue message from activity channel . | 156 | 8 |
230,815 | def _process_queue_tasks ( self , queue , queue_lock , task_ids , now , log ) : processed_count = 0 # Get all tasks serialized_tasks = self . connection . mget ( [ self . _key ( 'task' , task_id ) for task_id in task_ids ] ) # Parse tasks tasks = [ ] for task_id , serialized_task in zip ( task_ids , serialized_tasks ) : if serialized_task : task_data = json . loads ( serialized_task ) else : # In the rare case where we don't find the task which is # queued (see ReliabilityTestCase.test_task_disappears), # we log an error and remove the task below. We need to # at least initialize the Task object with an ID so we can # remove it. task_data = { 'id' : task_id } task = Task ( self . tiger , queue = queue , _data = task_data , _state = ACTIVE , _ts = now ) if not serialized_task : # Remove task as per comment above log . error ( 'not found' , task_id = task_id ) task . _move ( ) elif task . id != task_id : log . error ( 'task ID mismatch' , task_id = task_id ) # Remove task task . _move ( ) else : tasks . append ( task ) # List of task IDs that exist and we will update the heartbeat on. valid_task_ids = set ( task . id for task in tasks ) # Group by task func tasks_by_func = OrderedDict ( ) for task in tasks : func = task . serialized_func if func in tasks_by_func : tasks_by_func [ func ] . append ( task ) else : tasks_by_func [ func ] = [ task ] # Execute tasks for each task func for tasks in tasks_by_func . values ( ) : success , processed_tasks = self . _execute_task_group ( queue , tasks , valid_task_ids , queue_lock ) processed_count = processed_count + len ( processed_tasks ) log . debug ( 'processed' , attempted = len ( tasks ) , processed = processed_count ) for task in processed_tasks : self . _finish_task_processing ( queue , task , success ) return processed_count | Process tasks in queue . | 525 | 5 |
230,816 | def _process_from_queue ( self , queue ) : now = time . time ( ) log = self . log . bind ( queue = queue ) batch_size = self . _get_queue_batch_size ( queue ) queue_lock , failed_to_acquire = self . _get_queue_lock ( queue , log ) if failed_to_acquire : return [ ] , - 1 # Move an item to the active queue, if available. # We need to be careful when moving unique tasks: We currently don't # support concurrent processing of multiple unique tasks. If the task # is already in the ACTIVE queue, we need to execute the queued task # later, i.e. move it to the SCHEDULED queue (prefer the earliest # time if it's already scheduled). We want to make sure that the last # queued instance of the task always gets executed no earlier than it # was queued. later = time . time ( ) + self . config [ 'LOCK_RETRY' ] task_ids = self . scripts . zpoppush ( self . _key ( QUEUED , queue ) , self . _key ( ACTIVE , queue ) , batch_size , None , now , if_exists = ( 'add' , self . _key ( SCHEDULED , queue ) , later , 'min' ) , on_success = ( 'update_sets' , queue , self . _key ( QUEUED ) , self . _key ( ACTIVE ) , self . _key ( SCHEDULED ) ) ) log . debug ( 'moved tasks' , src_queue = QUEUED , dest_queue = ACTIVE , qty = len ( task_ids ) ) processed_count = 0 if task_ids : processed_count = self . _process_queue_tasks ( queue , queue_lock , task_ids , now , log ) if queue_lock : queue_lock . release ( ) log . debug ( 'released swq lock' ) return task_ids , processed_count | Internal method to process a task batch from the given queue . | 443 | 12 |
230,817 | def _execute_task_group ( self , queue , tasks , all_task_ids , queue_lock ) : log = self . log . bind ( queue = queue ) locks = [ ] # Keep track of the acquired locks: If two tasks in the list require # the same lock we only acquire it once. lock_ids = set ( ) ready_tasks = [ ] for task in tasks : if task . lock : if task . lock_key : kwargs = task . kwargs lock_id = gen_unique_id ( task . serialized_func , None , { key : kwargs . get ( key ) for key in task . lock_key } , ) else : lock_id = gen_unique_id ( task . serialized_func , task . args , task . kwargs , ) if lock_id not in lock_ids : lock = Lock ( self . connection , self . _key ( 'lock' , lock_id ) , timeout = self . config [ 'ACTIVE_TASK_UPDATE_TIMEOUT' ] ) acquired = lock . acquire ( blocking = False ) if acquired : lock_ids . add ( lock_id ) locks . append ( lock ) else : log . info ( 'could not acquire lock' , task_id = task . id ) # Reschedule the task (but if the task is already # scheduled in case of a unique task, don't prolong # the schedule date). when = time . time ( ) + self . config [ 'LOCK_RETRY' ] task . _move ( from_state = ACTIVE , to_state = SCHEDULED , when = when , mode = 'min' ) # Make sure to remove it from this list so we don't # re-add to the ACTIVE queue by updating the heartbeat. all_task_ids . remove ( task . id ) continue ready_tasks . append ( task ) if not ready_tasks : return True , [ ] if self . stats_thread : self . stats_thread . report_task_start ( ) success = self . _execute ( queue , ready_tasks , log , locks , queue_lock , all_task_ids ) if self . stats_thread : self . stats_thread . report_task_end ( ) for lock in locks : lock . release ( ) return success , ready_tasks | Executes the given tasks in the queue . Updates the heartbeat for task IDs passed in all_task_ids . This internal method is only meant to be called from within _process_from_queue . | 506 | 41 |
230,818 | def _finish_task_processing ( self , queue , task , success ) : log = self . log . bind ( queue = queue , task_id = task . id ) def _mark_done ( ) : # Remove the task from active queue task . _move ( from_state = ACTIVE ) log . info ( 'done' ) if success : _mark_done ( ) else : should_retry = False should_log_error = True # Get execution info (for logging and retry purposes) execution = self . connection . lindex ( self . _key ( 'task' , task . id , 'executions' ) , - 1 ) if execution : execution = json . loads ( execution ) if execution and execution . get ( 'retry' ) : if 'retry_method' in execution : retry_func , retry_args = execution [ 'retry_method' ] else : # We expect the serialized method here. retry_func , retry_args = serialize_retry_method ( self . config [ 'DEFAULT_RETRY_METHOD' ] ) should_log_error = execution [ 'log_error' ] should_retry = True if task . retry_method and not should_retry : retry_func , retry_args = task . retry_method if task . retry_on : if execution : exception_name = execution . get ( 'exception_name' ) try : exception_class = import_attribute ( exception_name ) except TaskImportError : log . error ( 'could not import exception' , exception_name = exception_name ) else : if task . should_retry_on ( exception_class , logger = log ) : should_retry = True else : should_retry = True state = ERROR when = time . time ( ) log_context = { 'func' : task . serialized_func } if should_retry : retry_num = task . n_executions ( ) log_context [ 'retry_func' ] = retry_func log_context [ 'retry_num' ] = retry_num try : func = import_attribute ( retry_func ) except TaskImportError : log . error ( 'could not import retry function' , func = retry_func ) else : try : retry_delay = func ( retry_num , * retry_args ) log_context [ 'retry_delay' ] = retry_delay when += retry_delay except StopRetry : pass else : state = SCHEDULED if execution : if state == ERROR and should_log_error : log_func = log . error else : log_func = log . 
warning log_context . update ( { 'time_failed' : execution . get ( 'time_failed' ) , 'traceback' : execution . get ( 'traceback' ) , 'exception_name' : execution . get ( 'exception_name' ) , } ) log_func ( 'task error' , * * log_context ) else : log . error ( 'execution not found' , * * log_context ) # Move task to the scheduled queue for retry, or move to error # queue if we don't want to retry. if state == ERROR and not should_log_error : _mark_done ( ) else : task . _move ( from_state = ACTIVE , to_state = state , when = when ) | After a task is executed this method is called and ensures that the task gets properly removed from the ACTIVE queue and in case of an error retried or marked as failed . | 751 | 35 |
230,819 | def run ( self , once = False , force_once = False ) : self . log . info ( 'ready' , id = self . id , queues = sorted ( self . only_queues ) , exclude_queues = sorted ( self . exclude_queues ) , single_worker_queues = sorted ( self . single_worker_queues ) , max_workers = self . max_workers_per_queue ) if not self . scripts . can_replicate_commands : # Older Redis versions may create additional overhead when # executing pipelines. self . log . warn ( 'using old Redis version' ) if self . config [ 'STATS_INTERVAL' ] : self . stats_thread = StatsThread ( self ) self . stats_thread . start ( ) # Queue any periodic tasks that are not queued yet. self . _queue_periodic_tasks ( ) # First scan all the available queues for new items until they're empty. # Then, listen to the activity channel. # XXX: This can get inefficient when having lots of queues. self . _pubsub = self . connection . pubsub ( ) self . _pubsub . subscribe ( self . _key ( 'activity' ) ) self . _queue_set = set ( self . _filter_queues ( self . connection . smembers ( self . _key ( QUEUED ) ) ) ) try : while True : # Update the queue set on every iteration so we don't get stuck # on processing a specific queue. self . _wait_for_new_tasks ( timeout = self . config [ 'SELECT_TIMEOUT' ] , batch_timeout = self . config [ 'SELECT_BATCH_TIMEOUT' ] ) self . _install_signal_handlers ( ) self . _did_work = False self . _worker_run ( ) self . _uninstall_signal_handlers ( ) if once and ( not self . _queue_set or force_once ) : break if self . _stop_requested : raise KeyboardInterrupt ( ) except KeyboardInterrupt : pass except Exception as e : self . log . exception ( event = 'exception' ) raise finally : if self . stats_thread : self . stats_thread . stop ( ) self . stats_thread = None # Free up Redis connection self . _pubsub . reset ( ) self . log . info ( 'done' ) | Main loop of the worker . | 520 | 6 |
230,820 | def can_replicate_commands ( self ) : if not hasattr ( self , '_can_replicate_commands' ) : info = self . redis . info ( 'server' ) version_info = info [ 'redis_version' ] . split ( '.' ) major , minor = int ( version_info [ 0 ] ) , int ( version_info [ 1 ] ) result = major > 3 or major == 3 and minor >= 2 self . _can_replicate_commands = result return self . _can_replicate_commands | Whether Redis supports single command replication . | 122 | 8 |
230,821 | def zpoppush ( self , source , destination , count , score , new_score , client = None , withscores = False , on_success = None , if_exists = None ) : if score is None : score = '+inf' # Include all elements. if withscores : if on_success : raise NotImplementedError ( ) return self . _zpoppush_withscores ( keys = [ source , destination ] , args = [ score , count , new_score ] , client = client ) else : if if_exists and if_exists [ 0 ] == 'add' : _ , if_exists_key , if_exists_score , if_exists_mode = if_exists if if_exists_mode != 'min' : raise NotImplementedError ( ) if not on_success or on_success [ 0 ] != 'update_sets' : raise NotImplementedError ( ) set_value , remove_from_set , add_to_set , add_to_set_if_exists = on_success [ 1 : ] return self . _zpoppush_exists_min_update_sets ( keys = [ source , destination , remove_from_set , add_to_set , add_to_set_if_exists , if_exists_key ] , args = [ score , count , new_score , set_value , if_exists_score ] , ) elif if_exists and if_exists [ 0 ] == 'noupdate' : if not on_success or on_success [ 0 ] != 'update_sets' : raise NotImplementedError ( ) set_value , remove_from_set , add_to_set = on_success [ 1 : ] return self . _zpoppush_exists_ignore_update_sets ( keys = [ source , destination , remove_from_set , add_to_set ] , args = [ score , count , new_score , set_value ] , ) if on_success : if on_success [ 0 ] != 'update_sets' : raise NotImplementedError ( ) else : set_value , remove_from_set , add_to_set = on_success [ 1 : ] return self . _zpoppush_update_sets ( keys = [ source , destination , remove_from_set , add_to_set ] , args = [ score , count , new_score , set_value ] , client = client ) else : return self . _zpoppush ( keys = [ source , destination ] , args = [ score , count , new_score ] , client = client ) | Pops the first count members from the ZSET source and adds them to the ZSET destination with a score of new_score . If score is not None only members up to a score of score are used . 
Returns the members that were moved and if withscores is True their original scores . | 589 | 60 |
230,822 | def execute_pipeline ( self , pipeline , client = None ) : client = client or self . redis executing_pipeline = None try : # Prepare args stack = pipeline . command_stack script_args = [ int ( self . can_replicate_commands ) , len ( stack ) ] for args , options in stack : script_args += [ len ( args ) - 1 ] + list ( args ) # Run the pipeline if self . can_replicate_commands : # Redis 3.2 or higher # Make sure scripts exist if pipeline . scripts : pipeline . load_scripts ( ) raw_results = self . _execute_pipeline ( args = script_args , client = client ) else : executing_pipeline = client . pipeline ( ) # Always load scripts to avoid issues when Redis loads data # from AOF file / when replicating. for s in pipeline . scripts : executing_pipeline . script_load ( s . script ) # Run actual pipeline lua script self . _execute_pipeline ( args = script_args , client = executing_pipeline ) # Always load all scripts and run actual pipeline lua script raw_results = executing_pipeline . execute ( ) [ - 1 ] # Run response callbacks on results. results = [ ] response_callbacks = pipeline . response_callbacks for ( ( args , options ) , result ) in zip ( stack , raw_results ) : command_name = args [ 0 ] if command_name in response_callbacks : result = response_callbacks [ command_name ] ( result , * * options ) results . append ( result ) return results finally : if executing_pipeline : executing_pipeline . reset ( ) pipeline . reset ( ) | Executes the given Redis pipeline as a Lua script . When an error occurs the transaction stops executing and an exception is raised . This differs from Redis transactions where execution continues after an error . On success a list of results is returned . The pipeline is cleared after execution and can no longer be reused . | 378 | 61 |
230,823 | def gen_unique_id ( serialized_name , args , kwargs ) : return hashlib . sha256 ( json . dumps ( { 'func' : serialized_name , 'args' : args , 'kwargs' : kwargs , } , sort_keys = True ) . encode ( 'utf8' ) ) . hexdigest ( ) | Generates and returns a hex - encoded 256 - bit ID for the given task name and args . Used to generate IDs for unique tasks or for task locks . | 79 | 32 |
230,824 | def serialize_func_name ( func ) : if func . __module__ == '__main__' : raise ValueError ( 'Functions from the __main__ module cannot be ' 'processed by workers.' ) try : # This will only work on Python 3.3 or above, but it will allow us to use static/classmethods func_name = func . __qualname__ except AttributeError : func_name = func . __name__ return ':' . join ( [ func . __module__ , func_name ] ) | Returns the dotted serialized path to the passed function . | 116 | 11 |
230,825 | def dotted_parts ( s ) : idx = - 1 while s : idx = s . find ( '.' , idx + 1 ) if idx == - 1 : yield s break yield s [ : idx ] | For a string a . b . c yields a a . b a . b . c . | 48 | 19 |
230,826 | def reversed_dotted_parts ( s ) : idx = - 1 if s : yield s while s : idx = s . rfind ( '.' , 0 , idx ) if idx == - 1 : break yield s [ : idx ] | For a string a . b . c yields a . b . c a . b a . | 55 | 19 |
230,827 | def tasktiger_processor ( logger , method_name , event_dict ) : if g [ 'current_tasks' ] is not None and not g [ 'current_task_is_batch' ] : event_dict [ 'task_id' ] = g [ 'current_tasks' ] [ 0 ] . id return event_dict | TaskTiger structlog processor . | 75 | 7 |
230,828 | def should_retry_on ( self , exception_class , logger = None ) : for n in ( self . retry_on or [ ] ) : try : if issubclass ( exception_class , import_attribute ( n ) ) : return True except TaskImportError : if logger : logger . error ( 'should_retry_on could not import class' , exception_name = n ) return False | Whether this task should be retried when the given exception occurs . | 88 | 13 |
230,829 | def update_scheduled_time ( self , when ) : tiger = self . tiger ts = get_timestamp ( when ) assert ts pipeline = tiger . connection . pipeline ( ) key = tiger . _key ( SCHEDULED , self . queue ) tiger . scripts . zadd ( key , ts , self . id , mode = 'xx' , client = pipeline ) pipeline . zscore ( key , self . id ) _ , score = pipeline . execute ( ) if not score : raise TaskNotFound ( 'Task {} not found in queue "{}" in state "{}".' . format ( self . id , self . queue , SCHEDULED ) ) self . _ts = ts | Updates a scheduled task s date to the given date . If the task is not scheduled a TaskNotFound exception is raised . | 147 | 26 |
230,830 | def n_executions ( self ) : pipeline = self . tiger . connection . pipeline ( ) pipeline . exists ( self . tiger . _key ( 'task' , self . id ) ) pipeline . llen ( self . tiger . _key ( 'task' , self . id , 'executions' ) ) exists , n_executions = pipeline . execute ( ) if not exists : raise TaskNotFound ( 'Task {} not found.' . format ( self . id ) ) return n_executions | Queries and returns the number of past task executions . | 106 | 11 |
230,831 | def set_input ( self , nr = 2 , qd = 1 , b = 0 ) : self . nr = nr self . qd = qd self . b = b | Set inputs after initialization | 41 | 4 |
230,832 | def generateNoise ( self ) : # Fill wfb array with white noise based on given discrete variance wfb = np . zeros ( self . nr * 2 ) wfb [ : self . nr ] = np . random . normal ( 0 , np . sqrt ( self . qd ) , self . nr ) # Generate the hfb coefficients based on the noise type mhb = - self . b / 2.0 hfb = np . zeros ( self . nr * 2 ) hfb = np . zeros ( self . nr * 2 ) hfb [ 0 ] = 1.0 indices = np . arange ( self . nr - 1 ) hfb [ 1 : self . nr ] = ( mhb + indices ) / ( indices + 1.0 ) hfb [ : self . nr ] = np . multiply . accumulate ( hfb [ : self . nr ] ) # Perform discrete Fourier transform of wfb and hfb time series wfb_fft = np . fft . rfft ( wfb ) hfb_fft = np . fft . rfft ( hfb ) # Perform inverse Fourier transform of the product of wfb and hfb FFTs time_series = np . fft . irfft ( wfb_fft * hfb_fft ) [ : self . nr ] self . time_series = time_series | Generate noise time series based on input parameters | 308 | 9 |
230,833 | def adev ( self , tau0 , tau ) : prefactor = self . adev_from_qd ( tau0 = tau0 , tau = tau ) c = self . c_avar ( ) avar = pow ( prefactor , 2 ) * pow ( tau , c ) return np . sqrt ( avar ) | return predicted ADEV of noise - type at given tau | 78 | 12 |
230,834 | def mdev ( self , tau0 , tau ) : prefactor = self . mdev_from_qd ( tau0 = tau0 , tau = tau ) c = self . c_mvar ( ) mvar = pow ( prefactor , 2 ) * pow ( tau , c ) return np . sqrt ( mvar ) | return predicted MDEV of noise - type at given tau | 78 | 12 |
230,835 | def scipy_psd ( x , f_sample = 1.0 , nr_segments = 4 ) : f_axis , psd_of_x = scipy . signal . welch ( x , f_sample , nperseg = len ( x ) / nr_segments ) return f_axis , psd_of_x | PSD routine from scipy we can compare our own numpy result against this one | 79 | 18 |
230,836 | def iterpink ( depth = 20 ) : values = numpy . random . randn ( depth ) smooth = numpy . random . randn ( depth ) source = numpy . random . randn ( depth ) sumvals = values . sum ( ) i = 0 while True : yield sumvals + smooth [ i ] # advance the index by 1. if the index wraps, generate noise to use in # the calculations, but do not update any of the pink noise values. i += 1 if i == depth : i = 0 smooth = numpy . random . randn ( depth ) source = numpy . random . randn ( depth ) continue # count trailing zeros in i c = 0 while not ( i >> c ) & 1 : c += 1 # replace value c with a new source element sumvals += source [ i ] - values [ c ] values [ c ] = source [ i ] | Generate a sequence of samples of pink noise . | 189 | 10 |
230,837 | def plotline ( plt , alpha , taus , style , label = "" ) : y = [ pow ( tt , alpha ) for tt in taus ] plt . loglog ( taus , y , style , label = label ) | plot a line with the slope alpha | 53 | 7 |
230,838 | def b1_noise_id ( x , af , rate ) : ( taus , devs , errs , ns ) = at . adev ( x , taus = [ af * rate ] , data_type = "phase" , rate = rate ) oadev_x = devs [ 0 ] y = np . diff ( x ) y_cut = np . array ( y [ : len ( y ) - ( len ( y ) % af ) ] ) # cut to length assert len ( y_cut ) % af == 0 y_shaped = y_cut . reshape ( ( int ( len ( y_cut ) / af ) , af ) ) y_averaged = np . average ( y_shaped , axis = 1 ) # average var = np . var ( y_averaged , ddof = 1 ) return var / pow ( oadev_x , 2.0 ) | B1 ratio for noise identification ratio of Standard Variace to AVAR | 192 | 14 |
230,839 | def plot ( self , atDataset , errorbars = False , grid = False ) : if errorbars : self . ax . errorbar ( atDataset . out [ "taus" ] , atDataset . out [ "stat" ] , yerr = atDataset . out [ "stat_err" ] , ) else : self . ax . plot ( atDataset . out [ "taus" ] , atDataset . out [ "stat" ] , ) self . ax . set_xlabel ( "Tau" ) self . ax . set_ylabel ( atDataset . out [ "stat_id" ] ) self . ax . grid ( grid , which = "minor" , ls = "-" , color = '0.65' ) self . ax . grid ( grid , which = "major" , ls = "-" , color = '0.25' ) | use matplotlib methods for plotting | 200 | 7 |
230,840 | def greenhall_table2 ( alpha , d ) : row_idx = int ( - alpha + 2 ) # map 2-> row0 and -4-> row6 assert ( row_idx in [ 0 , 1 , 2 , 3 , 4 , 5 ] ) col_idx = int ( d - 1 ) table2 = [ [ ( 3.0 / 2.0 , 1.0 / 2.0 ) , ( 35.0 / 18.0 , 1.0 ) , ( 231.0 / 100.0 , 3.0 / 2.0 ) ] , # alpha=+2 [ ( 78.6 , 25.2 ) , ( 790.0 , 410.0 ) , ( 9950.0 , 6520.0 ) ] , [ ( 2.0 / 3.0 , 1.0 / 6.0 ) , ( 2.0 / 3.0 , 1.0 / 3.0 ) , ( 7.0 / 9.0 , 1.0 / 2.0 ) ] , # alpha=0 [ ( - 1 , - 1 ) , ( 0.852 , 0.375 ) , ( 0.997 , 0.617 ) ] , # -1 [ ( - 1 , - 1 ) , ( 1.079 , 0.368 ) , ( 1.033 , 0.607 ) ] , #-2 [ ( - 1 , - 1 ) , ( - 1 , - 1 ) , ( 1.053 , 0.553 ) ] , #-3 [ ( - 1 , - 1 ) , ( - 1 , - 1 ) , ( 1.302 , 0.535 ) ] , # alpha=-4 ] #print("table2 = ", table2[row_idx][col_idx]) return table2 [ row_idx ] [ col_idx ] | Table 2 from Greenhall 2004 | 392 | 6 |
230,841 | def greenhall_table1 ( alpha , d ) : row_idx = int ( - alpha + 2 ) # map 2-> row0 and -4-> row6 col_idx = int ( d - 1 ) table1 = [ [ ( 2.0 / 3.0 , 1.0 / 3.0 ) , ( 7.0 / 9.0 , 1.0 / 2.0 ) , ( 22.0 / 25.0 , 2.0 / 3.0 ) ] , # alpha=+2 [ ( 0.840 , 0.345 ) , ( 0.997 , 0.616 ) , ( 1.141 , 0.843 ) ] , [ ( 1.079 , 0.368 ) , ( 1.033 , 0.607 ) , ( 1.184 , 0.848 ) ] , [ ( - 1 , - 1 ) , ( 1.048 , 0.534 ) , ( 1.180 , 0.816 ) ] , # -1 [ ( - 1 , - 1 ) , ( 1.302 , 0.535 ) , ( 1.175 , 0.777 ) ] , #-2 [ ( - 1 , - 1 ) , ( - 1 , - 1 ) , ( 1.194 , 0.703 ) ] , #-3 [ ( - 1 , - 1 ) , ( - 1 , - 1 ) , ( 1.489 , 0.702 ) ] , # alpha=-4 ] #print("table1 = ", table1[row_idx][col_idx]) return table1 [ row_idx ] [ col_idx ] | Table 1 from Greenhall 2004 | 346 | 6 |
230,842 | def edf_mtotdev ( N , m , alpha ) : assert ( alpha in [ 2 , 1 , 0 , - 1 , - 2 ] ) NIST_SP1065_table8 = [ ( 1.90 , 2.1 ) , ( 1.20 , 1.40 ) , ( 1.10 , 1.2 ) , ( 0.85 , 0.50 ) , ( 0.75 , 0.31 ) ] #(b, c) = NIST_SP1065_table8[ abs(alpha-2) ] ( b , c ) = NIST_SP1065_table8 [ abs ( alpha - 2 ) ] edf = b * ( float ( N ) / float ( m ) ) - c print ( "mtotdev b,c= " , ( b , c ) , " edf=" , edf ) return edf | Equivalent degrees of freedom for Modified Total Deviation NIST SP1065 page 41 Table 8 | 188 | 19 |
230,843 | def edf_simple ( N , m , alpha ) : N = float ( N ) m = float ( m ) if alpha in [ 2 , 1 , 0 , - 1 , - 2 ] : # NIST SP 1065, Table 5 if alpha == + 2 : edf = ( N + 1 ) * ( N - 2 * m ) / ( 2 * ( N - m ) ) if alpha == 0 : edf = ( ( ( 3 * ( N - 1 ) / ( 2 * m ) ) - ( 2 * ( N - 2 ) / N ) ) * ( ( 4 * pow ( m , 2 ) ) / ( ( 4 * pow ( m , 2 ) ) + 5 ) ) ) if alpha == 1 : a = ( N - 1 ) / ( 2 * m ) b = ( 2 * m + 1 ) * ( N - 1 ) / 4 edf = np . exp ( np . sqrt ( np . log ( a ) * np . log ( b ) ) ) if alpha == - 1 : if m == 1 : edf = 2 * ( N - 2 ) / ( 2.3 * N - 4.9 ) if m >= 2 : edf = 5 * N ** 2 / ( 4 * m * ( N + ( 3 * m ) ) ) if alpha == - 2 : a = ( N - 2 ) / ( m * ( N - 3 ) ** 2 ) b = ( N - 1 ) ** 2 c = 3 * m * ( N - 1 ) d = 4 * m ** 2 edf = a * ( b - c + d ) else : edf = ( N - 1 ) print ( "Noise type not recognized. Defaulting to N - 1 degrees of freedom." ) return edf | Equivalent degrees of freedom . Simple approximate formulae . | 371 | 12 |
230,844 | def example1 ( ) : N = 1000 f = 1 y = np . random . randn ( 1 , N ) [ 0 , : ] x = [ xx for xx in np . linspace ( 1 , len ( y ) , len ( y ) ) ] x_ax , y_ax , ( err_l , err_h ) , ns = allan . gradev ( y , data_type = 'phase' , rate = f , taus = x ) plt . errorbar ( x_ax , y_ax , yerr = [ err_l , err_h ] , label = 'GRADEV, no gaps' ) y [ int ( np . floor ( 0.4 * N ) ) : int ( np . floor ( 0.6 * N ) ) ] = np . NaN # Simulate missing data x_ax , y_ax , ( err_l , err_h ) , ns = allan . gradev ( y , data_type = 'phase' , rate = f , taus = x ) plt . errorbar ( x_ax , y_ax , yerr = [ err_l , err_h ] , label = 'GRADEV, with gaps' ) plt . xscale ( 'log' ) plt . yscale ( 'log' ) plt . grid ( ) plt . legend ( ) plt . xlabel ( 'Tau / s' ) plt . ylabel ( 'Overlapping Allan deviation' ) plt . show ( ) | Compute the GRADEV of a white phase noise . Compares two different scenarios . 1 ) The original data and 2 ) ADEV estimate with gap robust ADEV . | 327 | 35 |
230,845 | def example2 ( ) : N = 1000 # number of samples f = 1 # data samples per second s = 1 + 5 / N * np . arange ( 0 , N ) y = s * np . random . randn ( 1 , N ) [ 0 , : ] x = [ xx for xx in np . linspace ( 1 , len ( y ) , len ( y ) ) ] x_ax , y_ax , ( err_l , err_h ) , ns = allan . gradev ( y , data_type = 'phase' , rate = f , taus = x ) plt . loglog ( x_ax , y_ax , 'b.' , label = "No gaps" ) y [ int ( 0.4 * N ) : int ( 0.6 * N , ) ] = np . NaN # Simulate missing data x_ax , y_ax , ( err_l , err_h ) , ns = allan . gradev ( y , data_type = 'phase' , rate = f , taus = x ) plt . loglog ( x_ax , y_ax , 'g.' , label = "With gaps" ) plt . grid ( ) plt . legend ( ) plt . xlabel ( 'Tau / s' ) plt . ylabel ( 'Overlapping Allan deviation' ) plt . show ( ) | Compute the GRADEV of a nonstationary white phase noise . | 300 | 15 |
230,846 | def tdev ( data , rate = 1.0 , data_type = "phase" , taus = None ) : phase = input_to_phase ( data , rate , data_type ) ( taus , md , mde , ns ) = mdev ( phase , rate = rate , taus = taus ) td = taus * md / np . sqrt ( 3.0 ) tde = td / np . sqrt ( ns ) return taus , td , tde , ns | Time deviation . Based on modified Allan variance . | 107 | 9 |
230,847 | def mdev ( data , rate = 1.0 , data_type = "phase" , taus = None ) : phase = input_to_phase ( data , rate , data_type ) ( phase , ms , taus_used ) = tau_generator ( phase , rate , taus = taus ) data , taus = np . array ( phase ) , np . array ( taus ) md = np . zeros_like ( ms ) mderr = np . zeros_like ( ms ) ns = np . zeros_like ( ms ) # this is a 'loop-unrolled' algorithm following # http://www.leapsecond.com/tools/adev_lib.c for idx , m in enumerate ( ms ) : m = int ( m ) # without this we get: VisibleDeprecationWarning: # using a non-integer number instead of an integer # will result in an error in the future tau = taus_used [ idx ] # First loop sum d0 = phase [ 0 : m ] d1 = phase [ m : 2 * m ] d2 = phase [ 2 * m : 3 * m ] e = min ( len ( d0 ) , len ( d1 ) , len ( d2 ) ) v = np . sum ( d2 [ : e ] - 2 * d1 [ : e ] + d0 [ : e ] ) s = v * v # Second part of sum d3 = phase [ 3 * m : ] d2 = phase [ 2 * m : ] d1 = phase [ 1 * m : ] d0 = phase [ 0 : ] e = min ( len ( d0 ) , len ( d1 ) , len ( d2 ) , len ( d3 ) ) n = e + 1 v_arr = v + np . cumsum ( d3 [ : e ] - 3 * d2 [ : e ] + 3 * d1 [ : e ] - d0 [ : e ] ) s = s + np . sum ( v_arr * v_arr ) s /= 2.0 * m * m * tau * tau * n s = np . sqrt ( s ) md [ idx ] = s mderr [ idx ] = ( s / np . sqrt ( n ) ) ns [ idx ] = n return remove_small_ns ( taus_used , md , mderr , ns ) | Modified Allan deviation . Used to distinguish between White and Flicker Phase Modulation . | 525 | 17 |
230,848 | def adev ( data , rate = 1.0 , data_type = "phase" , taus = None ) : phase = input_to_phase ( data , rate , data_type ) ( phase , m , taus_used ) = tau_generator ( phase , rate , taus ) ad = np . zeros_like ( taus_used ) ade = np . zeros_like ( taus_used ) adn = np . zeros_like ( taus_used ) for idx , mj in enumerate ( m ) : # loop through each tau value m(j) ( ad [ idx ] , ade [ idx ] , adn [ idx ] ) = calc_adev_phase ( phase , rate , mj , mj ) return remove_small_ns ( taus_used , ad , ade , adn ) | Allan deviation . Classic - use only if required - relatively poor confidence . | 193 | 15 |
230,849 | def ohdev ( data , rate = 1.0 , data_type = "phase" , taus = None ) : phase = input_to_phase ( data , rate , data_type ) ( phase , m , taus_used ) = tau_generator ( phase , rate , taus ) hdevs = np . zeros_like ( taus_used ) hdeverrs = np . zeros_like ( taus_used ) ns = np . zeros_like ( taus_used ) for idx , mj in enumerate ( m ) : ( hdevs [ idx ] , hdeverrs [ idx ] , ns [ idx ] ) = calc_hdev_phase ( phase , rate , mj , 1 ) return remove_small_ns ( taus_used , hdevs , hdeverrs , ns ) | Overlapping Hadamard deviation . Better confidence than normal Hadamard . | 190 | 16 |
230,850 | def calc_hdev_phase ( phase , rate , mj , stride ) : tau0 = 1.0 / float ( rate ) mj = int ( mj ) stride = int ( stride ) d3 = phase [ 3 * mj : : stride ] d2 = phase [ 2 * mj : : stride ] d1 = phase [ 1 * mj : : stride ] d0 = phase [ : : stride ] n = min ( len ( d0 ) , len ( d1 ) , len ( d2 ) , len ( d3 ) ) v_arr = d3 [ : n ] - 3 * d2 [ : n ] + 3 * d1 [ : n ] - d0 [ : n ] s = np . sum ( v_arr * v_arr ) if n == 0 : n = 1 h = np . sqrt ( s / 6.0 / float ( n ) ) / float ( tau0 * mj ) e = h / np . sqrt ( n ) return h , e , n | main calculation function for HDEV and OHDEV | 222 | 10
230,851 | def totdev ( data , rate = 1.0 , data_type = "phase" , taus = None ) : phase = input_to_phase ( data , rate , data_type ) ( phase , m , taus_used ) = tau_generator ( phase , rate , taus ) N = len ( phase ) # totdev requires a new dataset # Begin by adding reflected data before dataset x1 = 2.0 * phase [ 0 ] * np . ones ( ( N - 2 , ) ) x1 = x1 - phase [ 1 : - 1 ] x1 = x1 [ : : - 1 ] # Reflected data at end of dataset x2 = 2.0 * phase [ - 1 ] * np . ones ( ( N - 2 , ) ) x2 = x2 - phase [ 1 : - 1 ] [ : : - 1 ] # check length of new dataset assert len ( x1 ) + len ( phase ) + len ( x2 ) == 3 * N - 4 # Combine into a single array x = np . zeros ( ( 3 * N - 4 ) ) x [ 0 : N - 2 ] = x1 x [ N - 2 : 2 * ( N - 2 ) + 2 ] = phase # original data in the middle x [ 2 * ( N - 2 ) + 2 : ] = x2 devs = np . zeros_like ( taus_used ) deverrs = np . zeros_like ( taus_used ) ns = np . zeros_like ( taus_used ) mid = len ( x1 ) for idx , mj in enumerate ( m ) : mj = int ( mj ) d0 = x [ mid + 1 : ] d1 = x [ mid + mj + 1 : ] d1n = x [ mid - mj + 1 : ] e = min ( len ( d0 ) , len ( d1 ) , len ( d1n ) ) v_arr = d1n [ : e ] - 2.0 * d0 [ : e ] + d1 [ : e ] dev = np . sum ( v_arr [ : mid ] * v_arr [ : mid ] ) dev /= float ( 2 * pow ( mj / rate , 2 ) * ( N - 2 ) ) dev = np . sqrt ( dev ) devs [ idx ] = dev deverrs [ idx ] = dev / np . sqrt ( mid ) ns [ idx ] = mid return remove_small_ns ( taus_used , devs , deverrs , ns ) | Total deviation . Better confidence at long averages for Allan . | 552 | 11 |
230,852 | def mtotdev ( data , rate = 1.0 , data_type = "phase" , taus = None ) : phase = input_to_phase ( data , rate , data_type ) ( phase , ms , taus_used ) = tau_generator ( phase , rate , taus , maximum_m = float ( len ( phase ) ) / 3.0 ) devs = np . zeros_like ( taus_used ) deverrs = np . zeros_like ( taus_used ) ns = np . zeros_like ( taus_used ) for idx , mj in enumerate ( ms ) : devs [ idx ] , deverrs [ idx ] , ns [ idx ] = calc_mtotdev_phase ( phase , rate , mj ) return remove_small_ns ( taus_used , devs , deverrs , ns ) | PRELIMINARY - REQUIRES FURTHER TESTING . Modified Total deviation . Better confidence at long averages for modified Allan | 195 | 28 |
230,853 | def htotdev ( data , rate = 1.0 , data_type = "phase" , taus = None ) : if data_type == "phase" : phase = data freq = phase2frequency ( phase , rate ) elif data_type == "freq" : phase = frequency2phase ( data , rate ) freq = data else : raise Exception ( "unknown data_type: " + data_type ) rate = float ( rate ) ( freq , ms , taus_used ) = tau_generator ( freq , rate , taus , maximum_m = float ( len ( freq ) ) / 3.0 ) phase = np . array ( phase ) freq = np . array ( freq ) devs = np . zeros_like ( taus_used ) deverrs = np . zeros_like ( taus_used ) ns = np . zeros_like ( taus_used ) # NOTE at mj==1 we use ohdev(), based on comment from here: # http://www.wriley.com/paper4ht.htm # "For best consistency, the overlapping Hadamard variance is used # instead of the Hadamard total variance at m=1" # FIXME: this uses both freq and phase datasets, which uses double the memory really needed... for idx , mj in enumerate ( ms ) : if int ( mj ) == 1 : ( devs [ idx ] , deverrs [ idx ] , ns [ idx ] ) = calc_hdev_phase ( phase , rate , mj , 1 ) else : ( devs [ idx ] , deverrs [ idx ] , ns [ idx ] ) = calc_htotdev_freq ( freq , mj ) return remove_small_ns ( taus_used , devs , deverrs , ns ) | PRELIMINARY - REQUIRES FURTHER TESTING . Hadamard Total deviation . Better confidence at long averages for Hadamard deviation | 406 | 32 |
230,854 | def theo1 ( data , rate = 1.0 , data_type = "phase" , taus = None ) : phase = input_to_phase ( data , rate , data_type ) tau0 = 1.0 / rate ( phase , ms , taus_used ) = tau_generator ( phase , rate , taus , even = True ) devs = np . zeros_like ( taus_used ) deverrs = np . zeros_like ( taus_used ) ns = np . zeros_like ( taus_used ) N = len ( phase ) for idx , m in enumerate ( ms ) : m = int ( m ) # to avoid: VisibleDeprecationWarning: using a # non-integer number instead of an integer will # result in an error in the future assert m % 2 == 0 # m must be even dev = 0 n = 0 for i in range ( int ( N - m ) ) : s = 0 for d in range ( int ( m / 2 ) ) : # inner sum pre = 1.0 / ( float ( m ) / 2 - float ( d ) ) s += pre * pow ( phase [ i ] - phase [ i - d + int ( m / 2 ) ] + phase [ i + m ] - phase [ i + d + int ( m / 2 ) ] , 2 ) n = n + 1 dev += s assert n == ( N - m ) * m / 2 # N-m outer sums, m/2 inner sums dev = dev / ( 0.75 * ( N - m ) * pow ( m * tau0 , 2 ) ) # factor 0.75 used here? http://tf.nist.gov/general/pdf/1990.pdf # but not here? http://tf.nist.gov/timefreq/general/pdf/2220.pdf page 29 devs [ idx ] = np . sqrt ( dev ) deverrs [ idx ] = devs [ idx ] / np . sqrt ( N - m ) ns [ idx ] = n return remove_small_ns ( taus_used , devs , deverrs , ns ) | PRELIMINARY - REQUIRES FURTHER TESTING . Theo1 is a two - sample variance with improved confidence and extended averaging factor range . | 468 | 33 |
230,855 | def tierms ( data , rate = 1.0 , data_type = "phase" , taus = None ) : phase = input_to_phase ( data , rate , data_type ) ( data , m , taus_used ) = tau_generator ( phase , rate , taus ) count = len ( phase ) devs = np . zeros_like ( taus_used ) deverrs = np . zeros_like ( taus_used ) ns = np . zeros_like ( taus_used ) for idx , mj in enumerate ( m ) : mj = int ( mj ) # This seems like an unusual way to phases = np . column_stack ( ( phase [ : - mj ] , phase [ mj : ] ) ) p_max = np . max ( phases , axis = 1 ) p_min = np . min ( phases , axis = 1 ) phases = p_max - p_min tie = np . sqrt ( np . mean ( phases * phases ) ) ncount = count - mj devs [ idx ] = tie deverrs [ idx ] = 0 / np . sqrt ( ncount ) # TODO! I THINK THIS IS WRONG! ns [ idx ] = ncount return remove_small_ns ( taus_used , devs , deverrs , ns ) | Time Interval Error RMS . | 295 | 7 |
230,856 | def mtie ( data , rate = 1.0 , data_type = "phase" , taus = None ) : phase = input_to_phase ( data , rate , data_type ) ( phase , m , taus_used ) = tau_generator ( phase , rate , taus ) devs = np . zeros_like ( taus_used ) deverrs = np . zeros_like ( taus_used ) ns = np . zeros_like ( taus_used ) for idx , mj in enumerate ( m ) : rw = mtie_rolling_window ( phase , int ( mj + 1 ) ) win_max = np . max ( rw , axis = 1 ) win_min = np . min ( rw , axis = 1 ) tie = win_max - win_min dev = np . max ( tie ) ncount = phase . shape [ 0 ] - mj devs [ idx ] = dev deverrs [ idx ] = dev / np . sqrt ( ncount ) ns [ idx ] = ncount return remove_small_ns ( taus_used , devs , deverrs , ns ) | Maximum Time Interval Error . | 255 | 6 |
230,857 | def mtie_phase_fast ( phase , rate = 1.0 , data_type = "phase" , taus = None ) : rate = float ( rate ) phase = np . asarray ( phase ) k_max = int ( np . floor ( np . log2 ( len ( phase ) ) ) ) phase = phase [ 0 : pow ( 2 , k_max ) ] # truncate data to 2**k_max datapoints assert len ( phase ) == pow ( 2 , k_max ) #k = 1 taus = [ pow ( 2 , k ) for k in range ( k_max ) ] #while k <= k_max: # tau = pow(2, k) # taus.append(tau) #print tau # k += 1 print ( "taus N=" , len ( taus ) , " " , taus ) devs = np . zeros ( len ( taus ) ) deverrs = np . zeros ( len ( taus ) ) ns = np . zeros ( len ( taus ) ) taus_used = np . array ( taus ) # [(1.0/rate)*t for t in taus] # matrices to store results mtie_max = np . zeros ( ( len ( phase ) - 1 , k_max ) ) mtie_min = np . zeros ( ( len ( phase ) - 1 , k_max ) ) for kidx in range ( k_max ) : k = kidx + 1 imax = len ( phase ) - pow ( 2 , k ) + 1 #print k, imax tie = np . zeros ( imax ) ns [ kidx ] = imax #print np.max( tie ) for i in range ( imax ) : if k == 1 : mtie_max [ i , kidx ] = max ( phase [ i ] , phase [ i + 1 ] ) mtie_min [ i , kidx ] = min ( phase [ i ] , phase [ i + 1 ] ) else : p = int ( pow ( 2 , k - 1 ) ) mtie_max [ i , kidx ] = max ( mtie_max [ i , kidx - 1 ] , mtie_max [ i + p , kidx - 1 ] ) mtie_min [ i , kidx ] = min ( mtie_min [ i , kidx - 1 ] , mtie_min [ i + p , kidx - 1 ] ) #for i in range(imax): tie [ i ] = mtie_max [ i , kidx ] - mtie_min [ i , kidx ] #print tie[i] devs [ kidx ] = np . amax ( tie ) # maximum along axis #print "maximum %2.4f" % devs[kidx] #print np.amax( tie ) #for tau in taus: #for devs = np . array ( devs ) print ( "devs N=" , len ( devs ) , " " , devs ) print ( "taus N=" , len ( taus_used ) , " " , taus_used ) return remove_small_ns ( taus_used , devs , deverrs , ns ) | fast binary decomposition algorithm for MTIE | 698 | 8 |
230,858 | def gradev ( data , rate = 1.0 , data_type = "phase" , taus = None , ci = 0.9 , noisetype = 'wp' ) : if ( data_type == "freq" ) : print ( "Warning : phase data is preferred as input to gradev()" ) phase = input_to_phase ( data , rate , data_type ) ( data , m , taus_used ) = tau_generator ( phase , rate , taus ) ad = np . zeros_like ( taus_used ) ade_l = np . zeros_like ( taus_used ) ade_h = np . zeros_like ( taus_used ) adn = np . zeros_like ( taus_used ) for idx , mj in enumerate ( m ) : ( dev , deverr , n ) = calc_gradev_phase ( data , rate , mj , 1 , ci , noisetype ) # stride=1 for overlapping ADEV ad [ idx ] = dev ade_l [ idx ] = deverr [ 0 ] ade_h [ idx ] = deverr [ 1 ] adn [ idx ] = n # Note that errors are split in 2 arrays return remove_small_ns ( taus_used , ad , [ ade_l , ade_h ] , adn ) | gap resistant overlapping Allan deviation | 312 | 5 |
230,859 | def input_to_phase ( data , rate , data_type ) : if data_type == "phase" : return data elif data_type == "freq" : return frequency2phase ( data , rate ) else : raise Exception ( "unknown data_type: " + data_type ) | Take either phase or frequency as input and return phase | 64 | 10 |
230,860 | def trim_data ( x ) : # Find indices for first and last valid data first = 0 while np . isnan ( x [ first ] ) : first += 1 last = len ( x ) while np . isnan ( x [ last - 1 ] ) : last -= 1 return x [ first : last ] | Trim leading and trailing NaNs from dataset This is done by browsing the array from each end and storing the index of the first non - NaN in each case then return the appropriate slice of the array | 65 | 41
230,861 | def three_cornered_hat_phase ( phasedata_ab , phasedata_bc , phasedata_ca , rate , taus , function ) : ( tau_ab , dev_ab , err_ab , ns_ab ) = function ( phasedata_ab , data_type = 'phase' , rate = rate , taus = taus ) ( tau_bc , dev_bc , err_bc , ns_bc ) = function ( phasedata_bc , data_type = 'phase' , rate = rate , taus = taus ) ( tau_ca , dev_ca , err_ca , ns_ca ) = function ( phasedata_ca , data_type = 'phase' , rate = rate , taus = taus ) var_ab = dev_ab * dev_ab var_bc = dev_bc * dev_bc var_ca = dev_ca * dev_ca assert len ( var_ab ) == len ( var_bc ) == len ( var_ca ) var_a = 0.5 * ( var_ab + var_ca - var_bc ) var_a [ var_a < 0 ] = 0 # don't return imaginary deviations (?) dev_a = np . sqrt ( var_a ) err_a = [ d / np . sqrt ( nn ) for ( d , nn ) in zip ( dev_a , ns_ab ) ] return tau_ab , dev_a , err_a , ns_ab | Three Cornered Hat Method | 325 | 5 |
230,862 | def frequency2phase ( freqdata , rate ) : dt = 1.0 / float ( rate ) # Protect against NaN values in input array (issue #60) # Reintroduces data trimming as in commit 503cb82 freqdata = trim_data ( freqdata ) phasedata = np . cumsum ( freqdata ) * dt phasedata = np . insert ( phasedata , 0 , 0 ) # FIXME: why do we do this? # so that phase starts at zero and len(phase)=len(freq)+1 ?? return phasedata | integrate fractional frequency data and output phase data | 124 | 10 |
230,863 | def phase2radians ( phasedata , v0 ) : fi = [ 2 * np . pi * v0 * xx for xx in phasedata ] return fi | Convert phase in seconds to phase in radians | 34 | 10 |
230,864 | def frequency2fractional ( frequency , mean_frequency = - 1 ) : if mean_frequency == - 1 : mu = np . mean ( frequency ) else : mu = mean_frequency y = [ ( x - mu ) / mu for x in frequency ] return y | Convert frequency in Hz to fractional frequency | 57 | 9 |
230,865 | def set_input ( self , data , rate = 1.0 , data_type = "phase" , taus = None ) : self . inp [ "data" ] = data self . inp [ "rate" ] = rate self . inp [ "data_type" ] = data_type self . inp [ "taus" ] = taus | Optional method if you choose not to set inputs on init | 79 | 13
230,866 | def compute ( self , function ) : try : func = getattr ( allantools , function ) except AttributeError : raise AttributeError ( "function must be defined in allantools" ) whitelisted = [ "theo1" , "mtie" , "tierms" ] if function [ - 3 : ] != "dev" and function not in whitelisted : # this should probably raise a custom exception type so # it's easier to distinguish from other bad things raise RuntimeError ( "function must be one of the 'dev' functions" ) result = func ( self . inp [ "data" ] , rate = self . inp [ "rate" ] , data_type = self . inp [ "data_type" ] , taus = self . inp [ "taus" ] ) keys = [ "taus" , "stat" , "stat_err" , "stat_n" ] result = { key : result [ i ] for i , key in enumerate ( keys ) } self . out = result . copy ( ) self . out [ "stat_id" ] = function return result | Evaluate the passed function with the supplied data . | 242 | 11 |
230,867 | def many_psds ( k = 2 , fs = 1.0 , b0 = 1.0 , N = 1024 ) : psd = [ ] for j in range ( k ) : print j x = noise . white ( N = 2 * 4096 , b0 = b0 , fs = fs ) f , tmp = noise . numpy_psd ( x , fs ) if j == 0 : psd = tmp else : psd = psd + tmp return f , psd / k | compute average of many PSDs | 106 | 7 |
230,868 | def list_my ( self ) : org_list = self . call_contract_command ( "Registry" , "listOrganizations" , [ ] ) rez_owner = [ ] rez_member = [ ] for idx , org_id in enumerate ( org_list ) : ( found , org_id , org_name , owner , members , serviceNames , repositoryNames ) = self . call_contract_command ( "Registry" , "getOrganizationById" , [ org_id ] ) if ( not found ) : raise Exception ( "Organization was removed during this call. Please retry." ) if self . ident . address == owner : rez_owner . append ( ( org_name , bytes32_to_str ( org_id ) ) ) if self . ident . address in members : rez_member . append ( ( org_name , bytes32_to_str ( org_id ) ) ) if ( rez_owner ) : self . _printout ( "# Organizations you are the owner of" ) self . _printout ( "# OrgName OrgId" ) for n , i in rez_owner : self . _printout ( "%s %s" % ( n , i ) ) if ( rez_member ) : self . _printout ( "# Organizations you are the member of" ) self . _printout ( "# OrgName OrgId" ) for n , i in rez_member : self . _printout ( "%s %s" % ( n , i ) ) | Find organization that has the current identity as the owner or as the member | 335 | 14 |
230,869 | def add_group ( self , group_name , payment_address ) : if ( self . is_group_name_exists ( group_name ) ) : raise Exception ( "the group \"%s\" is already present" % str ( group_name ) ) group_id_base64 = base64 . b64encode ( secrets . token_bytes ( 32 ) ) self . m [ "groups" ] += [ { "group_name" : group_name , "group_id" : group_id_base64 . decode ( "ascii" ) , "payment_address" : payment_address } ] return group_id_base64 | Return new group_id in base64 | 142 | 8 |
230,870 | def is_group_name_exists ( self , group_name ) : groups = self . m [ "groups" ] for g in groups : if ( g [ "group_name" ] == group_name ) : return True return False | check if a group with given name already exists | 52 | 9
230,871 | def get_group_name_nonetrick ( self , group_name = None ) : groups = self . m [ "groups" ] if ( len ( groups ) == 0 ) : raise Exception ( "Cannot find any groups in metadata" ) if ( not group_name ) : if ( len ( groups ) > 1 ) : raise Exception ( "We have more than one payment group in metadata, so group_name should be specified" ) return groups [ 0 ] [ "group_name" ] return group_name | In all getter function in case of single payment group group_name can be None | 110 | 17 |
230,872 | def get_from_ipfs_and_checkhash ( ipfs_client , ipfs_hash_base58 , validate = True ) : if validate : from snet_cli . resources . proto . unixfs_pb2 import Data from snet_cli . resources . proto . merckledag_pb2 import MerkleNode # No nice Python library to parse ipfs blocks, so do it ourselves. block_data = ipfs_client . block_get ( ipfs_hash_base58 ) mn = MerkleNode ( ) mn . ParseFromString ( block_data ) unixfs_data = Data ( ) unixfs_data . ParseFromString ( mn . Data ) assert unixfs_data . Type == unixfs_data . DataType . Value ( 'File' ) , "IPFS hash must be a file" data = unixfs_data . Data # multihash has a badly registered base58 codec, overwrite it... multihash . CodecReg . register ( 'base58' , base58 . b58encode , base58 . b58decode ) # create a multihash object from our ipfs hash mh = multihash . decode ( ipfs_hash_base58 . encode ( 'ascii' ) , 'base58' ) # Convenience method lets us directly use a multihash to verify data if not mh . verify ( block_data ) : raise Exception ( "IPFS hash mismatch with data" ) else : data = ipfs_client . cat ( ipfs_hash_base58 ) return data | Get file from ipfs We must check the hash because we cannot believe that ipfs_client has not been compromised | 349 | 25
230,873 | def hash_to_bytesuri ( s ) : # TODO: we should pad string with zeros till closest 32 bytes word because of a bug in processReceipt (in snet_cli.contract.process_receipt) s = "ipfs://" + s return s . encode ( "ascii" ) . ljust ( 32 * ( len ( s ) // 32 + 1 ) , b"\0" ) | Convert in and from bytes uri format used in Registry contract | 94 | 13 |
230,874 | def _get_stub_and_request_classes ( self , service_name ) : # Compile protobuf if needed codegen_dir = Path . home ( ) . joinpath ( ".snet" , "mpe_client" , "control_service" ) proto_dir = Path ( __file__ ) . absolute ( ) . parent . joinpath ( "resources" , "proto" ) if ( not codegen_dir . joinpath ( "control_service_pb2.py" ) . is_file ( ) ) : compile_proto ( proto_dir , codegen_dir , proto_file = "control_service.proto" ) stub_class , request_class , _ = import_protobuf_from_dir ( codegen_dir , service_name ) return stub_class , request_class | import protobuf and return stub and request class | 183 | 10 |
230,875 | def _start_claim_channels ( self , grpc_channel , channels_ids ) : unclaimed_payments = self . _call_GetListUnclaimed ( grpc_channel ) unclaimed_payments_dict = { p [ "channel_id" ] : p for p in unclaimed_payments } to_claim = [ ] for channel_id in channels_ids : if ( channel_id not in unclaimed_payments_dict or unclaimed_payments_dict [ channel_id ] [ "amount" ] == 0 ) : self . _printout ( "There is nothing to claim for channel %i, we skip it" % channel_id ) continue blockchain = self . _get_channel_state_from_blockchain ( channel_id ) if ( unclaimed_payments_dict [ channel_id ] [ "nonce" ] != blockchain [ "nonce" ] ) : self . _printout ( "Old payment for channel %i is still in progress. Please run claim for this channel later." % channel_id ) continue to_claim . append ( ( channel_id , blockchain [ "nonce" ] ) ) payments = [ self . _call_StartClaim ( grpc_channel , channel_id , nonce ) for channel_id , nonce in to_claim ] return payments | Safely run StartClaim for given channels | 287 | 8 |
230,876 | def _claim_in_progress_and_claim_channels ( self , grpc_channel , channels ) : # first we get the list of all 'payments in progress' in case we 'lost' some payments. payments = self . _call_GetListInProgress ( grpc_channel ) if ( len ( payments ) > 0 ) : self . _printout ( "There are %i payments in 'progress' (they haven't been claimed in blockchain). We will claim them." % len ( payments ) ) self . _blockchain_claim ( payments ) payments = self . _start_claim_channels ( grpc_channel , channels ) self . _blockchain_claim ( payments ) | Claim all pending payments in progress and after we claim given channels | 150 | 12 |
230,877 | def create_default_config ( self ) : # make config directory with the minimal possible permission self . _config_file . parent . mkdir ( mode = 0o700 , exist_ok = True ) self [ "network.kovan" ] = { "default_eth_rpc_endpoint" : "https://kovan.infura.io" , "default_gas_price" : "medium" } self [ "network.mainnet" ] = { "default_eth_rpc_endpoint" : "https://mainnet.infura.io" , "default_gas_price" : "medium" } self [ "network.ropsten" ] = { "default_eth_rpc_endpoint" : "https://ropsten.infura.io" , "default_gas_price" : "medium" } self [ "network.rinkeby" ] = { "default_eth_rpc_endpoint" : "https://rinkeby.infura.io" , "default_gas_price" : "medium" } self [ "ipfs" ] = { "default_ipfs_endpoint" : "http://ipfs.singularitynet.io:80" } self [ "session" ] = { "network" : "kovan" } self . _persist ( ) print ( "We've created configuration file with default values in: %s\n" % str ( self . _config_file ) ) | Create default configuration if config file does not exist | 320 | 9 |
230,878 | def switch_to_json_payload_encoding ( call_fn , response_class ) : def json_serializer ( * args , * * kwargs ) : return bytes ( json_format . MessageToJson ( args [ 0 ] , True , preserving_proto_field_name = True ) , "utf-8" ) def json_deserializer ( * args , * * kwargs ) : resp = response_class ( ) json_format . Parse ( args [ 0 ] , resp , True ) return resp call_fn . _request_serializer = json_serializer call_fn . _response_deserializer = json_deserializer | Switch payload encoding to JSON for GRPC call | 147 | 9 |
230,879 | def print_agi_and_mpe_balances ( self ) : if ( self . args . account ) : account = self . args . account else : account = self . ident . address eth_wei = self . w3 . eth . getBalance ( account ) agi_cogs = self . call_contract_command ( "SingularityNetToken" , "balanceOf" , [ account ] ) mpe_cogs = self . call_contract_command ( "MultiPartyEscrow" , "balances" , [ account ] ) # we cannot use _pprint here because it doesn't conserve order yet self . _printout ( " account: %s" % account ) self . _printout ( " ETH: %s" % self . w3 . fromWei ( eth_wei , 'ether' ) ) self . _printout ( " AGI: %s" % cogs2stragi ( agi_cogs ) ) self . _printout ( " MPE: %s" % cogs2stragi ( mpe_cogs ) ) | Print balance of ETH AGI and MPE wallet | 232 | 10 |
230,880 | def publish_proto_in_ipfs ( self ) : ipfs_hash_base58 = utils_ipfs . publish_proto_in_ipfs ( self . _get_ipfs_client ( ) , self . args . protodir ) self . _printout ( ipfs_hash_base58 ) | Publish proto files in ipfs and print hash | 72 | 10 |
230,881 | def publish_proto_metadata_update ( self ) : metadata = load_mpe_service_metadata ( self . args . metadata_file ) ipfs_hash_base58 = utils_ipfs . publish_proto_in_ipfs ( self . _get_ipfs_client ( ) , self . args . protodir ) metadata . set_simple_field ( "model_ipfs_hash" , ipfs_hash_base58 ) metadata . save_pretty ( self . args . metadata_file ) | Publish protobuf model in ipfs and update existing metadata file | 115 | 14 |
230,882 | def _get_persistent_mpe_dir ( self ) : mpe_address = self . get_mpe_address ( ) . lower ( ) registry_address = self . get_registry_address ( ) . lower ( ) return Path . home ( ) . joinpath ( ".snet" , "mpe_client" , "%s_%s" % ( mpe_address , registry_address ) ) | get persistent storage for mpe | 92 | 6 |
230,883 | def _check_mpe_address_metadata ( self , metadata ) : mpe_address = self . get_mpe_address ( ) if ( str ( mpe_address ) . lower ( ) != str ( metadata [ "mpe_address" ] ) . lower ( ) ) : raise Exception ( "MultiPartyEscrow contract address from metadata %s do not correspond to current MultiPartyEscrow address %s" % ( metadata [ "mpe_address" ] , mpe_address ) ) | we make sure that MultiPartyEscrow address from metadata is correct | 108 | 13 |
230,884 | def _init_or_update_registered_service_if_needed ( self ) : if ( self . is_service_initialized ( ) ) : old_reg = self . _read_service_info ( self . args . org_id , self . args . service_id ) # metadataURI will be in old_reg only for service which was initilized from registry (not from metadata) # we do nothing for services which were initilized from metadata if ( "metadataURI" not in old_reg ) : return service_registration = self . _get_service_registration ( ) # if metadataURI hasn't been changed we do nothing if ( not self . is_metadataURI_has_changed ( service_registration ) ) : return else : service_registration = self . _get_service_registration ( ) service_metadata = self . _get_service_metadata_from_registry ( ) self . _init_or_update_service_if_needed ( service_metadata , service_registration ) | similar to _init_or_update_service_if_needed but we get service_registration from registry so we can update only registered services | 222 | 32
230,885 | def _smart_get_initialized_channel_for_service ( self , metadata , filter_by , is_try_initailize = True ) : channels = self . _get_initialized_channels_for_service ( self . args . org_id , self . args . service_id ) group_id = metadata . get_group_id ( self . args . group_name ) channels = [ c for c in channels if c [ filter_by ] . lower ( ) == self . ident . address . lower ( ) and c [ "groupId" ] == group_id ] if ( len ( channels ) == 0 and is_try_initailize ) : # this will work only in simple case where signer == sender self . _initialize_already_opened_channel ( metadata , self . ident . address , self . ident . address ) return self . _smart_get_initialized_channel_for_service ( metadata , filter_by , is_try_initailize = False ) if ( len ( channels ) == 0 ) : raise Exception ( "Cannot find initialized channel for service with org_id=%s service_id=%s and signer=%s" % ( self . args . org_id , self . args . service_id , self . ident . address ) ) if ( self . args . channel_id is None ) : if ( len ( channels ) > 1 ) : channel_ids = [ channel [ "channelId" ] for channel in channels ] raise Exception ( "We have several initialized channel: %s. You should use --channel-id to select one" % str ( channel_ids ) ) return channels [ 0 ] for channel in channels : if ( channel [ "channelId" ] == self . args . channel_id ) : return channel raise Exception ( "Channel %i has not been initialized or your are not the sender/signer of it" % self . args . channel_id ) | - filter_by can be sender or signer | 418 | 10 |
230,886 | def _get_all_filtered_channels ( self , topics_without_signature ) : mpe_address = self . get_mpe_address ( ) event_signature = self . ident . w3 . sha3 ( text = "ChannelOpen(uint256,uint256,address,address,address,bytes32,uint256,uint256)" ) . hex ( ) topics = [ event_signature ] + topics_without_signature logs = self . ident . w3 . eth . getLogs ( { "fromBlock" : self . args . from_block , "address" : mpe_address , "topics" : topics } ) abi = get_contract_def ( "MultiPartyEscrow" ) event_abi = abi_get_element_by_name ( abi , "ChannelOpen" ) channels_ids = [ get_event_data ( event_abi , l ) [ "args" ] [ "channelId" ] for l in logs ] return channels_ids | get all filtered channels from blockchain logs | 222 | 9 |
230,887 | def list_repo ( self ) : req = proto . ListRepoRequest ( ) res = self . stub . ListRepo ( req , metadata = self . metadata ) if hasattr ( res , 'repo_info' ) : return res . repo_info return [ ] | Returns info about all Repos . | 60 | 7 |
230,888 | def delete_repo ( self , repo_name = None , force = False , all = False ) : if not all : if repo_name : req = proto . DeleteRepoRequest ( repo = proto . Repo ( name = repo_name ) , force = force ) self . stub . DeleteRepo ( req , metadata = self . metadata ) else : raise ValueError ( "Either a repo_name or all=True needs to be provided" ) else : if not repo_name : req = proto . DeleteRepoRequest ( force = force , all = all ) self . stub . DeleteRepo ( req , metadata = self . metadata ) else : raise ValueError ( "Cannot specify a repo_name if all=True" ) | Deletes a repo and reclaims the storage space it was using . | 158 | 14 |
230,889 | def start_commit ( self , repo_name , branch = None , parent = None , description = None ) : req = proto . StartCommitRequest ( parent = proto . Commit ( repo = proto . Repo ( name = repo_name ) , id = parent ) , branch = branch , description = description ) res = self . stub . StartCommit ( req , metadata = self . metadata ) return res | Begins the process of committing data to a Repo . Once started you can write to the Commit with PutFile and when all the data has been written you must finish the Commit with FinishCommit . NOTE data is not persisted until FinishCommit is called . A Commit object is returned . | 86 | 59 |
230,890 | def finish_commit ( self , commit ) : req = proto . FinishCommitRequest ( commit = commit_from ( commit ) ) res = self . stub . FinishCommit ( req , metadata = self . metadata ) return res | Ends the process of committing data to a Repo and persists the Commit . Once a Commit is finished the data becomes immutable and future attempts to write to it with PutFile will error . | 48 | 38 |
230,891 | def commit ( self , repo_name , branch = None , parent = None , description = None ) : commit = self . start_commit ( repo_name , branch , parent , description ) try : yield commit except Exception as e : print ( "An exception occurred during an open commit. " "Trying to finish it (Currently a commit can't be cancelled)" ) raise e finally : self . finish_commit ( commit ) | A context manager for doing stuff inside a commit . | 89 | 10 |
230,892 | def inspect_commit ( self , commit ) : req = proto . InspectCommitRequest ( commit = commit_from ( commit ) ) return self . stub . InspectCommit ( req , metadata = self . metadata ) | Returns info about a specific Commit . | 45 | 7 |
230,893 | def list_commit ( self , repo_name , to_commit = None , from_commit = None , number = 0 ) : req = proto . ListCommitRequest ( repo = proto . Repo ( name = repo_name ) , number = number ) if to_commit is not None : req . to . CopyFrom ( commit_from ( to_commit ) ) if from_commit is not None : getattr ( req , 'from' ) . CopyFrom ( commit_from ( from_commit ) ) res = self . stub . ListCommit ( req , metadata = self . metadata ) if hasattr ( res , 'commit_info' ) : return res . commit_info return [ ] | Gets a list of CommitInfo objects . | 149 | 9 |
230,894 | def delete_commit ( self , commit ) : req = proto . DeleteCommitRequest ( commit = commit_from ( commit ) ) self . stub . DeleteCommit ( req , metadata = self . metadata ) | Deletes a commit . | 44 | 5 |
230,895 | def flush_commit ( self , commits , repos = tuple ( ) ) : req = proto . FlushCommitRequest ( commit = [ commit_from ( c ) for c in commits ] , to_repo = [ proto . Repo ( name = r ) for r in repos ] ) res = self . stub . FlushCommit ( req , metadata = self . metadata ) return res | Blocks until all of the commits which have a set of commits as provenance have finished . For commits to be considered they must have all of the specified commits as provenance . This in effect waits for all of the jobs that are triggered by a set of commits to complete . It returns an error if any of the commits it s waiting on are cancelled due to one of the jobs encountering an error during runtime . Note that it s never necessary to call FlushCommit to run jobs they ll run no matter what FlushCommit just allows you to wait for them to complete and see their output once they do . This returns an iterator of CommitInfo objects . | 85 | 133 |
230,896 | def subscribe_commit ( self , repo_name , branch , from_commit_id = None ) : repo = proto . Repo ( name = repo_name ) req = proto . SubscribeCommitRequest ( repo = repo , branch = branch ) if from_commit_id is not None : getattr ( req , 'from' ) . CopyFrom ( proto . Commit ( repo = repo , id = from_commit_id ) ) res = self . stub . SubscribeCommit ( req , metadata = self . metadata ) return res | SubscribeCommit is like ListCommit but it keeps listening for commits as they come in . This returns an iterator Commit objects . | 112 | 26 |
230,897 | def list_branch ( self , repo_name ) : req = proto . ListBranchRequest ( repo = proto . Repo ( name = repo_name ) ) res = self . stub . ListBranch ( req , metadata = self . metadata ) if hasattr ( res , 'branch_info' ) : return res . branch_info return [ ] | Lists the active Branch objects on a Repo . | 77 | 11 |
230,898 | def set_branch ( self , commit , branch_name ) : res = proto . SetBranchRequest ( commit = commit_from ( commit ) , branch = branch_name ) self . stub . SetBranch ( res , metadata = self . metadata ) | Sets a commit and its ancestors as a branch . | 55 | 11 |
230,899 | def delete_branch ( self , repo_name , branch_name ) : res = proto . DeleteBranchRequest ( repo = Repo ( name = repo_name ) , branch = branch_name ) self . stub . DeleteBranch ( res , metadata = self . metadata ) | Deletes a branch but leaves the commits themselves intact . In other words those commits can still be accessed via commit IDs and other branches they happen to be on . | 60 | 32 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.