idx
int64
0
63k
question
stringlengths
53
5.28k
target
stringlengths
5
805
44,600
def run_project(
        project_directory: str,
        output_directory: str = None,
        logging_path: str = None,
        reader_path: str = None,
        reload_project_libraries: bool = False,
        **kwargs
) -> ExecutionResult:
    """Runs a project as a single command directly within the current
    Python interpreter.

    Thin delegation to cauldron.cli.batcher.run_project; any extra
    keyword arguments are forwarded as the project's shared data.
    Note the parameter rename: logging_path here maps to the batcher's
    log_path argument.
    """
    # Imported lazily to avoid import cycles at module load time.
    from cauldron.cli import batcher
    return batcher.run_project(
        project_directory=project_directory,
        output_directory=output_directory,
        log_path=logging_path,
        reader_path=reader_path,
        reload_project_libraries=reload_project_libraries,
        shared_data=kwargs
    )
Runs a project as a single command directly within the current Python interpreter .
44,601
def join(self, timeout: float = None) -> bool:
    """Joins the response's worker thread when one exists.

    :param timeout: Maximum number of seconds to wait on the thread.
    :return: True when a join was carried out, False when no thread
        exists to join (a no-op).
    """
    try:
        self.thread.join(timeout)
    except AttributeError:
        # No thread attached to this response; nothing to wait on.
        return False
    return True
Joins on the thread associated with the response if it exists or just returns after a no - op if no thread exists to join .
44,602
def deserialize(serial_data: dict) -> 'Response':
    """Converts a serialized dictionary response to a Response object.

    :param serial_data: Dictionary produced by a previous serialization;
        missing keys fall back to sensible defaults.
    :return: A hydrated Response instance.
    """
    r = Response(serial_data.get('id'))
    r.data.update(serial_data.get('data', {}))
    r.ended = serial_data.get('ended', False)
    # 'success' defaults to True, so an absent key means not failed.
    r.failed = not serial_data.get('success', True)

    def load_messages(message_type: str):
        # Rebuild ResponseMessage objects and append them to whatever
        # list already exists on the response for that message type.
        messages = [
            ResponseMessage(**data)
            for data in serial_data.get(message_type, [])
        ]
        setattr(r, message_type, getattr(r, message_type) + messages)

    load_messages('errors')
    load_messages('warnings')
    load_messages('messages')
    return r
Converts a serialized dictionary response to a Response object
44,603
def abort_thread():
    """Raises ThreadAbortError when the user has flagged the current
    CauldronThread for premature termination.

    Only applies to CauldronThread instances; calls from the main thread
    (or any other thread type) return immediately without effect.

    :raises ThreadAbortError: when the executing thread has been marked
        as aborted.
    """
    thread = threading.current_thread()
    if not isinstance(thread, CauldronThread):
        return
    if thread.is_executing and thread.abort:
        raise ThreadAbortError('User Aborted Execution')
This function checks to see if the user has indicated that they want the currently running execution to stop prematurely by marking the running thread as aborted . It only applies to operations that are run within CauldronThreads and not the main thread .
44,604
def is_running(self) -> bool:
    """Specifies whether or not the thread is considered running.

    NOTE(review): due to and/or precedence this evaluates as
    ``(started and alive) or (never completed) or (completed < 0.5s ago)``.
    The trailing clauses report True for a thread that has not started
    yet (completed_at is None) and for a short grace period after
    completion — confirm both behaviors are intentional.
    """
    return (
        self._has_started and
        self.is_alive() or
        self.completed_at is None or
        (datetime.utcnow() - self.completed_at).total_seconds() < 0.5
    )
Specifies whether or not the thread is running
44,605
def run(self):
    """Executes the Cauldron command on a private asyncio event loop so
    long-running computations do not lock the main Cauldron thread,
    which must remain free to serve and print status information.

    On failure the exception is stored on self.exception and a
    COMMAND_EXECUTION_ERROR failure is written to the context response.
    """
    async def run_command():
        try:
            self.result = self.command(context=self.context, **self.kwargs)
        except Exception as error:
            self.exception = error
            print(error)
            import traceback
            traceback.print_exc()
            # NOTE(review): sys is imported here but never used.
            import sys
            self.context.response.fail(
                code='COMMAND_EXECUTION_ERROR',
                message='Failed to execute command due to internal error',
                error=error
            ).console(whitespace=1)

    self._has_started = True
    # A dedicated loop per run; abort_running() can stop it externally.
    self._loop = asyncio.new_event_loop()
    self._loop.run_until_complete(run_command())
    self._loop.close()
    self._loop = None
    self.completed_at = datetime.utcnow()
Executes the Cauldron command in a thread to prevent long - running computations from locking the main Cauldron thread which is needed to serve and print status information .
44,606
def abort_running(self) -> bool:
    """Hard-aborts the running command by stopping this thread's asyncio
    event loop, which prevents the halted execution from destabilizing
    the Python environment.

    :return: True when the loop was stopped, False when no loop exists
        or stopping it raised an error.
    """
    loop = self._loop
    if not loop:
        return False
    try:
        loop.stop()
    except Exception:
        return False
    else:
        return True
    finally:
        # Mark completion time regardless of whether the stop succeeded.
        self.completed_at = datetime.utcnow()
Executes a hard abort by shutting down the event loop in this thread in which the running command was operating . This is carried out using the asyncio library to prevent the stopped execution from destabilizing the Python environment .
44,607
def from_request(request=None) -> dict:
    """Fetches the arguments for the current Flask application request.

    JSON body arguments take precedence over query/form values; an empty
    dict is returned when neither source is available.

    :param request: Optional explicit request object; defaults to the
        active flask request.
    """
    request = request if request else flask_request
    try:
        json_args = request.get_json(silent=True)
    except Exception:
        json_args = None
    try:
        get_args = request.values
    except Exception:
        get_args = None
    # First non-None source wins; the trailing {} guarantees a result.
    arg_sources = list(filter(
        lambda arg: arg is not None,
        [json_args, get_args, {}]
    ))
    return arg_sources[0]
Fetches the arguments for the current Flask application request
44,608
def rule_str2rule(rule_str, prob):
    """Converts a rule string returned from the C routine into a Rule.

    :param rule_str: Either the literal "default" or a brace-wrapped,
        comma-separated list of "Xn=v" clauses.
    :param prob: The probability associated with the rule.
    :return: A Rule instance built from the parsed clauses.
    """
    if rule_str == "default":
        # The default rule carries no clauses.
        return Rule([], prob)
    clauses = []
    # Strip the surrounding delimiters, then parse each clause.
    for raw_rule in rule_str[1:-1].split(","):
        eq_at = raw_rule.find("=")
        if eq_at == -1:
            raise ValueError("No \"=\" find in the rule!")
        feature = int(raw_rule[1:eq_at])
        category = int(raw_rule[(eq_at + 1):])
        clauses.append(Clause(feature, category))
    return Rule(clauses, prob)
A helper function that converts the resulting string returned from C function to the Rule object
44,609
def put(self, *args, **kwargs) -> 'SharedCache':
    """Adds one or more variables to the cache.

    Positional arguments are consumed as alternating key/value pairs
    (a trailing unpaired argument is ignored). Keyword arguments are
    assigned directly, except a value of None removes the existing
    entry for that key instead of storing None.

    :return: This cache instance for fluent chaining.
    """
    environ.abort_thread()
    index = 0
    while index < (len(args) - 1):
        key = args[index]
        value = args[index + 1]
        self._shared_cache_data[key] = value
        index += 2
    for key, value in kwargs.items():
        if value is None and key in self._shared_cache_data:
            # None acts as a deletion sentinel for keyword assignments.
            del self._shared_cache_data[key]
        else:
            self._shared_cache_data[key] = value
    return self
Adds one or more variables to the cache .
44,610
def grab(self, *keys: str, default_value=None) -> typing.Tuple:
    """Returns a tuple containing the cached values for the given keys.

    Fix: the varargs annotation was ``typing.List[str]``; for ``*args``
    the annotation describes a single element, so it is now ``str``.

    :param keys: One or more cache keys to fetch.
    :param default_value: Value used for any key missing from the cache.
    :return: A tuple of fetched values in the same order as the keys.
    """
    return tuple(self.fetch(key, default_value) for key in keys)
Returns a tuple containing multiple values from the cache specified by the keys arguments
44,611
def fetch(self, key: typing.Union[str, None], default_value=None):
    """Retrieves the value of the specified variable from the cache.

    :param key: Name of the cached variable; when None the entire
        underlying cache dictionary is returned.
    :param default_value: Returned when the key is not present.
    """
    environ.abort_thread()
    if key is None:
        # None is a sentinel for "give me everything".
        return self._shared_cache_data
    return self._shared_cache_data.get(key, default_value)
Retrieves the value of the specified variable from the cache
44,612
def train_sbrl(data_file, label_file, lambda_=20, eta=2, max_iters=300000,
               n_chains=20, alpha=1, seed=None, verbose=0):
    """Basic training function of the scalable bayesian rule list.

    Users are encouraged to use the SBRL wrapper instead. Takes the
    paths of the pre-processed data and label files and returns the
    parameters of the trained rule list via the C extension.

    :param alpha: A single int or a list of ints (prior pseudo-counts).
    :param seed: Random seed; None lets the C routine self-seed.
    :raises ValueError: when alpha is neither int nor List[int].
    :raises FileNotFoundError: when either input file is missing.
    """
    if isinstance(alpha, int):
        alphas = np.array([alpha], dtype=np.int32)
    elif isinstance(alpha, list):
        for a in alpha:
            assert isinstance(a, int)
        alphas = np.array(alpha, dtype=np.int32)
    else:
        raise ValueError('the argument alpha can only be int or List[int]')
    if seed is None:
        # -1 signals the C routine to pick its own seed.
        seed = -1
    # Fix: error messages previously read "does not exists".
    if not os.path.isfile(data_file):
        raise FileNotFoundError('data file %s does not exist!' % data_file)
    if not os.path.isfile(label_file):
        raise FileNotFoundError('label file %s does not exist!' % label_file)
    return _train(data_file, label_file, lambda_, eta, max_iters,
                  n_chains, alphas, seed, verbose)
The basic training function of the scalable bayesian rule list . Users are suggested to use SBRL instead of this function . It takes the paths of the pre - processed data and label files as input and return the parameters of the trained rule list .
44,613
def parse(self, line):
    """Parses a sentence into its first constituency tree.

    :param line: The sentence to parse.
    :return: The root subtree of the first parse returned by the parser.
    """
    parses = list(self.parser.raw_parse(line))
    # First parse, then its root subtree.
    return parses[0][0]
Returns tree objects from a sentence
44,614
def pack_chunk(source_data: bytes) -> str:
    """Packs binary data for non-binary transmission.

    Compresses the bytes with zlib and base64-encodes the result into a
    utf-8 string. Empty (or falsy) input yields an empty string.
    """
    if not source_data:
        return ''
    compressed = zlib.compress(source_data)
    return binascii.b2a_base64(compressed).decode('utf-8')
Packs the specified binary source data by compressing it with the Zlib library and then converting the bytes to a base64 encoded string for non - binary transmission .
44,615
def unpack_chunk(chunk_data: str) -> bytes:
    """Unpacks a previously packed chunk back to its original bytes.

    Reverses pack_chunk: base64-decodes the string and decompresses the
    result with zlib. Empty (or falsy) input yields empty bytes.
    """
    if not chunk_data:
        return b''
    compressed = binascii.a2b_base64(chunk_data.encode('utf-8'))
    return zlib.decompress(compressed)
Unpacks a previously packed chunk data back into the original bytes representation
44,616
def get_file_chunk_count(file_path: str, chunk_size: int = DEFAULT_CHUNK_SIZE) -> int:
    """Determines how many chunks are needed to send the file at the
    given chunk size.

    :return: 0 when the file does not exist; otherwise at least 1, even
        for an empty file (which still needs one empty chunk).
    """
    if not os.path.exists(file_path):
        return 0
    size = os.path.getsize(file_path)
    return max(1, int(math.ceil(size / chunk_size)))
Determines the number of chunks necessary to send the file for the given chunk size
44,617
def read_file_chunks(
        file_path: str,
        chunk_size: int = DEFAULT_CHUNK_SIZE
) -> 'typing.Generator[str, None, None]':
    """Reads the file in chunks, yielding each chunk as a compressed,
    base64-encoded string suitable for sync transmission.

    Fixes: the return annotation claimed ``bytes`` although this is a
    generator that yields str values from pack_chunk, and the missing
    file early-exit returned '' from a generator (the value is always
    discarded), so it is now a bare return.
    """
    chunk_count = get_file_chunk_count(file_path, chunk_size)
    if chunk_count < 1:
        # Missing file: produce an empty generator.
        return
    with open(file_path, mode='rb') as fp:
        for _ in range(chunk_count):
            yield pack_chunk(fp.read(chunk_size))
Reads the specified file in chunks and returns a generator where each returned chunk is a compressed base64 encoded string for sync transmission
44,618
def write_file_chunk(
        file_path: str,
        packed_chunk: str,
        append: bool = True,
        offset: int = -1
):
    """Writes or appends the specified chunk data to the given file path,
    unpacking the chunk before writing.

    If the file does not yet exist it will be created. Set append to
    False to overwrite instead of appending.

    :param packed_chunk: Chunk string produced by pack_chunk.
    :param offset: Position passed through to the writer; -1 is the
        pass-through default (semantics defined by writer.write_file).
    """
    mode = 'ab' if append else 'wb'
    contents = unpack_chunk(packed_chunk)
    writer.write_file(file_path, contents, mode=mode, offset=offset)
Write or append the specified chunk data to the given file path unpacking the chunk before writing . If the file does not yet exist it will be created . Set the append argument to False if you do not want the chunk to be appended to an existing file .
44,619
def add_output_path(path: str = None) -> str:
    """Adds the specified path to the output logging paths if it is not
    already listed.

    :param path: Path to register; defaults to the current working
        directory when omitted.
    :return: The cleaned form of the path that was (or already was)
        registered.
    """
    cleaned = paths.clean(path or os.getcwd())
    if cleaned not in _logging_paths:
        _logging_paths.append(cleaned)
    return cleaned
Adds the specified path to the output logging paths if it is not already in the listed paths .
44,620
def remove_output_path(path: str = None) -> str:
    """Removes the specified path from the output logging paths if it is
    in the listed paths.

    :param path: Path to remove; defaults to the current working
        directory when omitted.
    :return: The cleaned form of the path.
    """
    cleaned = paths.clean(path or os.getcwd())
    if cleaned in _logging_paths:
        # BUG FIX: remove the cleaned path — the form actually stored by
        # add_output_path. Removing the raw argument raised ValueError
        # whenever cleaning altered the path (e.g. relative or None).
        _logging_paths.remove(cleaned)
    return cleaned
Removes the specified path from the output logging paths if it is in the listed paths .
44,621
def log(
        message: typing.Union[str, typing.List[str]],
        whitespace: int = 0,
        whitespace_top: int = 0,
        whitespace_bottom: int = 0,
        indent_by: int = 0,
        trace: bool = True,
        file_path: str = None,
        append_to_file: bool = True,
        **kwargs
) -> str:
    """Logs a message to the console with formatting support beyond a
    simple print or logger statement.

    Extra keyword arguments are appended as "key: value" lines.
    whitespace acts as a shared minimum for both the top and bottom
    padding; whitespace_top/whitespace_bottom override individually.

    :return: The fully formatted message that was logged.
    """
    m = add_to_message(message)
    for key, value in kwargs.items():
        m.append('{key}: {value}'.format(key=key, value=value))
    pre_whitespace = int(max(whitespace, whitespace_top))
    post_whitespace = int(max(whitespace, whitespace_bottom))
    if pre_whitespace:
        # One newline comes free from the join; pad with the remainder.
        m.insert(0, max(0, pre_whitespace - 1) * '\n')
    if post_whitespace:
        m.append(max(0, post_whitespace - 1) * '\n')
    message = indent('\n'.join(m), '  ' * indent_by)
    raw(
        message=message,
        trace=trace,
        file_path=file_path,
        append_to_file=append_to_file
    )
    return message
Logs a message to the console with the formatting support beyond a simple print statement or logger statement .
44,622
def add_to_message(data, indent_level=0) -> list:
    """Recursively flattens message data into a list of indented lines.

    Strings are dedented, stripped and indented to the current level;
    nested non-string iterables are processed one indent level deeper.
    """
    if isinstance(data, str):
        cleaned = dedent(data.strip('\n')).strip()
        return [indent(cleaned, indent_level * '  ')]
    message = []
    for entry in data:
        # Nested (non-string) entries gain an extra level of indentation.
        bump = 0 if isinstance(entry, str) else 1
        message += add_to_message(entry, indent_level + bump)
    return message
Adds data to the message object
44,623
def add(mode_id: str) -> list:
    """Activates the specified mode identifier (idempotently) and
    returns a copy of the currently active modes list."""
    if not has(mode_id):
        _current_modes.append(mode_id)
    return _current_modes.copy()
Adds the specified mode identifier to the list of active modes and returns a copy of the currently active modes list .
44,624
def remove(mode_id: str) -> bool:
    """Deactivates the specified mode identifier.

    :return: True when a removal was carried out, False when the mode
        was not active in the first place.
    """
    if not has(mode_id):
        return False
    _current_modes.remove(mode_id)
    return True
Removes the specified mode identifier from the active modes and returns whether or not a remove operation was carried out . If the mode identifier is not in the currently active modes it does not need to be removed .
44,625
def clone(client):
    """Clones the redis client so it is slowlog-compatible by forcing
    the pure-Python response parser.

    NOTE(review): connection_kwargs is mutated in place on the source
    client's pool — confirm that side effect is acceptable.
    """
    kwargs = client.redis.connection_pool.connection_kwargs
    kwargs['parser_class'] = redis.connection.PythonParser
    pool = redis.connection.ConnectionPool(**kwargs)
    return redis.Redis(connection_pool=pool)
Clone the redis client to be slowlog - compatible
44,626
def pretty(timings, label):
    """Prints a formatted table of timing statistics, sorted by total
    time descending.

    :param timings: Mapping of command name -> list of durations (us).
    :param label: Heading printed above the table.
    """
    rows = sorted(
        ((sum(values), len(values), key) for key, values in timings.items()),
        reverse=True
    )
    print(label)
    print('=' * 65)
    print('%20s => %13s | %8s | %13s' % (
        'Command', 'Average', '# Calls', 'Total time'))
    print('-' * 65)
    for total, calls, key in rows:
        print('%20s => %10.5f us | %8i | %10i us' % (
            key, float(total) / calls, calls, total))
Print timing stats
44,627
def start(self):
    """Gets ready for a profiling run.

    Saves the current slowlog configuration (so stop() can restore it),
    widens the slowlog to capture everything, and clears old entries.
    """
    # Preserve current slowlog settings for restoration in stop().
    self._configs = self._client.config_get('slow-*')
    self._client.config_set('slowlog-max-len', 100000)
    # Threshold 0 logs every command regardless of duration.
    self._client.config_set('slowlog-log-slower-than', 0)
    self._client.execute_command('slowlog', 'reset')
Get ready for a profiling run
44,628
def stop(self):
    """Restores the saved slowlog configuration and aggregates the
    collected slowlog entries.

    Raw per-command durations accumulate in self._timings. While a
    qless lua script ('eval') is in flight, subsequent commands are
    additionally attributed to that script in self._commands.
    """
    for key, value in self._configs.items():
        self._client.config_set(key, value)
    logs = self._client.execute_command('slowlog', 'get', 100000)
    # 'current' tracks the qless script whose commands we are grouping.
    current = {
        'name': None,
        'accumulated': defaultdict(list)
    }
    for _, _, duration, request in logs:
        command = request[0]
        if command == 'slowlog':
            # Skip the profiler's own bookkeeping commands.
            continue
        if 'eval' in command.lower():
            subcommand = request[3]
            self._timings['qless-%s' % subcommand].append(duration)
            if current['name']:
                # Flush the previous script's accumulated sub-commands.
                if current['name'] not in self._commands:
                    self._commands[current['name']] = defaultdict(list)
                for key, values in current['accumulated'].items():
                    self._commands[current['name']][key].extend(values)
            current = {
                'name': subcommand,
                'accumulated': defaultdict(list)
            }
        else:
            self._timings[command].append(duration)
            if current['name']:
                current['accumulated'][command].append(duration)
    # Flush the final script's accumulated sub-commands, if any.
    if current['name']:
        if current['name'] not in self._commands:
            self._commands[current['name']] = defaultdict(list)
        for key, values in current['accumulated'].items():
            self._commands[current['name']][key].extend(values)
Set everything back to normal and collect our data
44,629
def display(self):
    """Prints the profiling results: raw redis commands first, then a
    per-qless-script breakdown."""
    self.pretty(self._timings, 'Raw Redis Commands')
    print()
    for script_name, script_timings in self._commands.items():
        self.pretty(script_timings, 'Qless "%s" Command' % script_name)
        print()
Print the results of this profiling
44,630
def add_shell_action(sub_parser: ArgumentParser) -> ArgumentParser:
    """Populates the sub parser with the shell arguments.

    :return: The same parser, for fluent chaining.
    """
    options = (
        ('-p', '--project', 'project_directory'),
        ('-l', '--log', 'logging_path'),
        ('-o', '--output', 'output_directory'),
        ('-s', '--shared', 'shared_data_path'),
    )
    for short_flag, long_flag, dest in options:
        sub_parser.add_argument(
            short_flag, long_flag,
            dest=dest,
            type=str,
            default=None
        )
    return sub_parser
Populates the sub parser with the shell arguments
44,631
def running(self, offset=0, count=25):
    """Returns a page of the currently-running jobs in this queue.

    :param offset: Pagination start index.
    :param count: Maximum number of jobs to return.
    """
    return self.client('jobs', 'running', self.name, offset, count)
Return all the currently - running jobs
44,632
def stalled(self, offset=0, count=25):
    """Returns a page of the currently-stalled jobs in this queue.

    :param offset: Pagination start index.
    :param count: Maximum number of jobs to return.
    """
    return self.client('jobs', 'stalled', self.name, offset, count)
Return all the currently - stalled jobs
44,633
def scheduled(self, offset=0, count=25):
    """Returns a page of the currently-scheduled jobs in this queue.

    :param offset: Pagination start index.
    :param count: Maximum number of jobs to return.
    """
    return self.client('jobs', 'scheduled', self.name, offset, count)
Return all the currently - scheduled jobs
44,634
def depends(self, offset=0, count=25):
    """Returns a page of the currently-dependent jobs in this queue.

    :param offset: Pagination start index.
    :param count: Maximum number of jobs to return.
    """
    return self.client('jobs', 'depends', self.name, offset, count)
Return all the currently dependent jobs
44,635
def recurring(self, offset=0, count=25):
    """Returns a page of the recurring jobs in this queue.

    :param offset: Pagination start index.
    :param count: Maximum number of jobs to return.
    """
    return self.client('jobs', 'recurring', self.name, offset, count)
Return all the recurring jobs
44,636
def class_string(self, klass):
    """Returns the dotted import-path string for a class; strings are
    passed through unchanged."""
    if isinstance(klass, string_types):
        return klass
    return '{}.{}'.format(klass.__module__, klass.__name__)
Return a string representative of the class
44,637
def put(self, klass, data, priority=None, tags=None, delay=None,
        retries=None, jid=None, depends=None):
    """Creates a new job in this queue (or moves an existing one here).

    If a worker is currently servicing the job, that worker's later
    heartbeat/complete attempts should fail and return false.

    :param jid: Explicit job id; a random uuid hex is generated when
        omitted.
    """
    job_id = jid or uuid.uuid4().hex
    return self.client(
        'put',
        self.worker_name,
        self.name,
        job_id,
        self.class_string(klass),
        json.dumps(data),
        delay or 0,
        'priority', priority or 0,
        'tags', json.dumps(tags or []),
        'retries', retries or 5,
        'depends', json.dumps(depends or [])
    )
Either create a new job in the provided queue with the provided attributes or move that job into that queue . If the job is being serviced by a worker subsequent attempts by that worker to either heartbeat or complete the job should fail and return false .
44,638
def recur(self, klass, data, interval, offset=0, priority=None, tags=None,
          retries=None, jid=None):
    """Places a recurring job in this queue.

    :param interval: Seconds between recurrences.
    :param offset: Delay before the first occurrence.
    :param jid: Explicit job id; a random uuid hex is generated when
        omitted.
    """
    job_id = jid or uuid.uuid4().hex
    return self.client(
        'recur',
        self.name,
        job_id,
        self.class_string(klass),
        json.dumps(data),
        'interval', interval, offset,
        'priority', priority or 0,
        'tags', json.dumps(tags or []),
        'retries', retries or 5
    )
Place a recurring job in this queue
44,639
def pop(self, count=None):
    """Pops job(s) off this queue for the current worker.

    :param count: Number of jobs to pop; when None, a single Job (or
        None when the queue is empty) is returned instead of a list.
    """
    results = [
        Job(self.client, **job)
        for job in json.loads(
            self.client('pop', self.name, self.worker_name, count or 1)
        )
    ]
    if count is None:
        # Scalar mode: one job or None when nothing was available.
        return (len(results) and results[0]) or None
    return results
Passing in the queue from which to pull items the current time when the locks for these returned items should expire and the number of items to be popped off .
44,640
def peek(self, count=None):
    """Like pop, but merely peeks at the next items without locking them.

    :param count: Number of jobs to peek; when None, a single Job (or
        None when the queue is empty) is returned instead of a list.
    """
    results = [
        Job(self.client, **rec)
        for rec in json.loads(self.client('peek', self.name, count or 1))
    ]
    if count is None:
        return (len(results) and results[0]) or None
    return results
Similar to the pop command except that it merely peeks at the next items
44,641
def run ( project : 'projects.Project' , step : 'projects.ProjectStep' ) -> dict : with open ( step . source_path , 'r' ) as f : code = f . read ( ) try : cauldron . display . markdown ( code , ** project . shared . fetch ( None ) ) return { 'success' : True } except Exception as err : return dict ( success = False , html_message = templating . render_template ( 'markdown-error.html' , error = err ) )
Runs the markdown file and renders the contents to the notebook display
44,642
def fetch(reload: bool = False) -> dict:
    """Returns a dictionary of all registered Cauldron commands.

    The registry is cached; unless reload is True the command list is
    only generated on the first call.

    NOTE(review): the cached path returns the COMMANDS dict itself while
    the rebuild path returns a copy — confirm callers never mutate it.
    """
    if len(list(COMMANDS.keys())) > 0 and not reload:
        return COMMANDS
    COMMANDS.clear()
    for key in dir(commands):
        e = getattr(commands, key)
        # A member qualifies as a command when it exposes both a NAME
        # and a DESCRIPTION attribute.
        if e and hasattr(e, 'NAME') and hasattr(e, 'DESCRIPTION'):
            COMMANDS[e.NAME] = e
    return dict(COMMANDS.items())
Returns a dictionary containing all of the available Cauldron commands currently registered . This data is cached for performance . Unless the reload argument is set to True the command list will only be generated the first time this function is called .
44,643
def get_command_from_module(
        command_module,
        remote_connection: environ.RemoteConnection
):
    """Returns the execution callable for the module: execute_remote
    when the remote connection is active and the module supports it,
    otherwise the local execute."""
    use_remote = (
        remote_connection.active and
        hasattr(command_module, 'execute_remote')
    )
    return (
        command_module.execute_remote
        if use_remote else
        command_module.execute
    )
Returns the execution command to use for the specified module which may be different depending upon remote connection
44,644
def _import(klass):
    """Resolves the class at the dotted path, reloading its module when
    the source file changed since it was first loaded.

    1) Get a reference to the module. 2) Check the file the module was
    imported from. 3) If that file has been updated, force a reload of
    the module. Then return the class attribute.
    """
    mod = __import__(klass.rpartition('.')[0])
    # Walk down from the top-level package to the leaf module.
    for segment in klass.split('.')[1:-1]:
        mod = getattr(mod, segment)
    if klass not in BaseJob._loaded:
        BaseJob._loaded[klass] = time.time()
    if hasattr(mod, '__file__'):
        try:
            mtime = os.stat(mod.__file__).st_mtime
            # Reload when the file is newer than our first-load stamp.
            if BaseJob._loaded[klass] < mtime:
                mod = reload_module(mod)
        except OSError:
            logger.warn(
                'Could not check modification time of %s',
                mod.__file__
            )
    return getattr(mod, klass.rpartition('.')[2])
1 ) Get a reference to the module 2 ) Check the file that module is imported from 3 ) If that file has been updated , force a reload of that module and return it
44,645
def process(self):
    """Loads the job's class and runs the appropriate method.

    For a job popped from queue 'testing' this invokes the 'testing'
    staticmethod of the class, falling back to a generic 'process'
    staticmethod. Any error path marks the job failed with a
    descriptive group and message.
    """
    try:
        # Prefer a queue-specific handler; fall back to 'process'.
        method = getattr(
            self.klass,
            self.queue_name,
            getattr(self.klass, 'process', None)
        )
    except Exception as exc:
        logger.exception('Failed to import %s', self.klass_name)
        return self.fail(
            self.queue_name + '-' + exc.__class__.__name__,
            'Failed to import %s' % self.klass_name
        )
    if method:
        # Only a plain function (i.e. a staticmethod) is acceptable.
        if isinstance(method, types.FunctionType):
            try:
                logger.info('Processing %s in %s', self.jid, self.queue_name)
                method(self)
                logger.info('Completed %s in %s', self.jid, self.queue_name)
            except Exception as exc:
                logger.exception(
                    'Failed %s in %s: %s',
                    self.jid, self.queue_name, repr(method)
                )
                self.fail(
                    self.queue_name + '-' + exc.__class__.__name__,
                    traceback.format_exc()
                )
        else:
            logger.error(
                'Failed %s in %s : %s is not static',
                self.jid, self.queue_name, repr(method)
            )
            self.fail(
                self.queue_name + '-method-type',
                repr(method) + ' is not static'
            )
    else:
        logger.error(
            'Failed %s : %s is missing a method "%s" or "process"',
            self.jid, self.klass_name, self.queue_name
        )
        self.fail(
            self.queue_name + '-method-missing',
            self.klass_name + ' is missing a method "' +
            self.queue_name + '" or "process"'
        )
Load the module containing your class and run the appropriate method . For example if this job was popped from the queue testing then this would invoke the testing staticmethod of your class .
44,646
def move(self, queue, delay=0, depends=None):
    """Moves this job out of its existing state and into another queue.

    If a worker has been given this job, that worker's attempts to
    heartbeat the job will fail. Like Queue.put, accepts a delay and
    dependencies.
    """
    logger.info('Moving %s to %s from %s', self.jid, queue, self.queue_name)
    return self.client(
        'put',
        self.worker_name,
        queue,
        self.jid,
        self.klass_name,
        json.dumps(self.data),
        delay,
        'depends', json.dumps(depends or [])
    )
Move this job out of its existing state and into another queue . If a worker has been given this job then that worker s attempts to heartbeat that job will fail . Like Queue . put this accepts a delay and dependencies
44,647
def complete(self, nextq=None, delay=None, depends=None):
    """Turns this job in as complete, optionally advancing it to
    another queue.

    Like Queue.put and move, accepts a delay and dependencies (which
    only apply when advancing to nextq).

    :return: The server response, or False when the call returns falsy.
    """
    if nextq:
        logger.info(
            'Advancing %s to %s from %s',
            self.jid, nextq, self.queue_name
        )
        return self.client(
            'complete',
            self.jid,
            self.client.worker_name,
            self.queue_name,
            json.dumps(self.data),
            'next', nextq,
            'delay', delay or 0,
            'depends', json.dumps(depends or [])
        ) or False
    else:
        logger.info('Completing %s', self.jid)
        return self.client(
            'complete',
            self.jid,
            self.client.worker_name,
            self.queue_name,
            json.dumps(self.data)
        ) or False
Turn this job in as complete optionally advancing it to another queue . Like Queue . put and move it accepts a delay and dependencies
44,648
def heartbeat(self):
    """Renews the lock on this job, updating the job's user data
    server-side.

    :return: The new expiration timestamp.
    :raises LostLockException: when the server reports the lock is gone.
    """
    logger.debug('Heartbeating %s (ttl = %s)', self.jid, self.ttl)
    try:
        self.expires_at = float(self.client(
            'heartbeat',
            self.jid,
            self.client.worker_name,
            json.dumps(self.data)
        ) or 0)
    except QlessException:
        raise LostLockException(self.jid)
    logger.debug('Heartbeated %s (ttl = %s)', self.jid, self.ttl)
    return self.expires_at
Renew the heartbeat if possible and optionally update the job s user data .
44,649
def fail(self, group, message):
    """Marks this job as failed.

    :param group: A categorical failure mode (one of several phrases
        shared by similar failures).
    :param message: A job-specific detail, e.g. a traceback.
    :return: The server response, or False when the call returns falsy.
    """
    logger.warn('Failing %s (%s): %s', self.jid, group, message)
    return self.client(
        'fail',
        self.jid,
        self.client.worker_name,
        group,
        message,
        json.dumps(self.data)
    ) or False
Mark the particular job as failed with the provided type and a more specific message . By type we mean some phrase that might be one of several categorical modes of failure . The message is something more job - specific like perhaps a traceback .
44,650
def retry(self, delay=0, group=None, message=None):
    """Retries this job in its own queue after a short delay — intended
    for transient failures detected by the worker itself.

    :param group: Optional failure group; only sent when message is
        also provided.
    :param message: Optional failure detail; only sent alongside group.
    """
    args = ['retry', self.jid, self.queue_name, self.worker_name, delay]
    if group is not None and message is not None:
        # Group and message travel together; a lone value is dropped.
        args.extend((group, message))
    return self.client(*args)
Retry this job in a little bit in the same queue . This is meant for the times when you detect a transient failure yourself
44,651
def has_extension(file_path: str, *args: str) -> bool:
    """Checks whether the file path ends with any of the specified file
    extensions; a missing leading dot is added automatically.

    Fixes: the varargs annotation was ``typing.Tuple[str]`` (for *args
    the annotation describes one element, so it is now ``str``), and the
    any-over-list-comprehension is replaced with str.endswith's native
    tuple-of-suffixes support.
    """
    dotted = tuple(
        ext if ext.startswith('.') else '.{}'.format(ext)
        for ext in args
    )
    # endswith with an empty tuple returns False, matching any([]).
    return file_path.endswith(dotted)
Checks to see if the given file path ends with any of the specified file extensions . If a file extension does not begin with a . it will be added automatically
44,652
def get_docstring(target) -> str:
    """Returns the target object's docstring with common leading
    whitespace removed, or an empty string when no docstring exists."""
    raw = getattr(target, '__doc__')
    return '' if raw is None else textwrap.dedent(raw)
Retrieves the documentation string from the target object and returns it after removing insignificant whitespace
44,653
def get_doc_entries(target: typing.Callable) -> list:
    """Splits the target's docstring into a list of entries.

    Lines are folded so each returned element is one documentation
    entry: a line starting with ':' begins a new entry, other non-blank
    lines are appended to the previous entry, blank lines are dropped.
    """
    raw = get_docstring(target)
    if not raw:
        return []
    raw_lines = [line.strip() for line in raw.replace('\r', '').split('\n')]

    def compactify(compacted: list, entry: str) -> list:
        # Reducer that folds continuation lines into the prior entry.
        chars = entry.strip()
        if not chars:
            return compacted
        if len(compacted) < 1 or chars.startswith(':'):
            compacted.append(entry.rstrip())
        else:
            compacted[-1] = '{}\n{}'.format(compacted[-1], entry.rstrip())
        return compacted

    return [
        textwrap.dedent(block).strip()
        for block in functools.reduce(compactify, raw_lines, [])
    ]
Gets the lines of documentation from the given target which are formatted so that each line is a documentation entry .
44,654
def parse_function(name: str, target: typing.Callable) -> typing.Union[None, dict]:
    """Parses a function's documentation into a structured dictionary
    with name/doc/params/returns entries.

    NOTE(review): the ``name`` parameter is unused; the function's own
    __name__ is reported instead — confirm whether that is intentional.

    :return: None when target is not a plain function (no __code__).
    """
    if not hasattr(target, '__code__'):
        return None
    lines = get_doc_entries(target)
    # Free-text documentation excludes ':param'/':return' style entries.
    docs = ' '.join(filter(lambda line: not line.startswith(':'), lines))
    params = parse_params(target, lines)
    returns = parse_returns(target, lines)
    return dict(
        name=getattr(target, '__name__'),
        doc=docs,
        params=params,
        returns=returns
    )
Parses the documentation for a function which is specified by the name of the function and the function itself .
44,655
def read_all(self) -> str:
    """Decodes and returns the current contents of the byte buffer.

    Any failure (missing buffer, bad encoding, ...) is reported inline
    as a 'Redirect Buffer Error: ...' string instead of raising.
    """
    try:
        data = self.bytes_buffer.getvalue()
        return '' if data is None else data.decode(self.source_encoding)
    except Exception as err:
        return 'Redirect Buffer Error: {}'.format(err)
Reads the current state of the buffer and returns a string of those contents
44,656
def create_data(step: 'projects.ProjectStep') -> STEP_DATA:
    """Creates the data object that stores the step information in the
    notebook results JavaScript file.

    All content fields (body, data, includes, file_writes) start empty;
    only the step identity, status and cauldron version are populated.
    """
    return STEP_DATA(
        name=step.definition.name,
        status=step.status(),
        has_error=False,
        body=None,
        data=dict(),
        includes=[],
        cauldron_version=list(environ.version_info),
        file_writes=[]
    )
Creates the data object that stores the step information in the notebook results JavaScript file .
44,657
def get_cached_data(step: 'projects.ProjectStep') -> typing.Union[None, STEP_DATA]:
    """Attempts to load the cached step data for the specified step.

    :return: The cached STEP_DATA, or None when no cache file exists or
        the cached contents are corrupt/unreadable.
    """
    cache_path = step.report.results_cache_path
    if not os.path.exists(cache_path):
        return None
    out = create_data(step)
    try:
        with open(cache_path, 'r') as f:
            cached_data = json.load(f)
    except Exception:
        # Treat an unreadable or corrupt cache file as a cache miss.
        return None
    # file_writes need re-hydration into their entry objects.
    file_writes = [
        file_io.entry_from_dict(fw)
        for fw in cached_data['file_writes']
    ]
    return out._replace(**cached_data)._replace(file_writes=file_writes)
Attempts to load and return the cached step data for the specified step . If no cached data exists or the cached data is corrupt a None value is returned instead .
44,658
def initialize_logging_path(path: str = None) -> str:
    """Initializes the logging path for a project run.

    When no path is given the current directory is used. A directory
    path gets a default 'cauldron_run.log' filename appended; an
    existing file at the path is deleted so the run starts fresh, and
    missing parent directories are created.

    :return: The resolved log file path.
    """
    cleaned = environ.paths.clean(path if path else '.')
    if os.path.isdir(cleaned) and os.path.exists(cleaned):
        # A directory was supplied; log to a default file inside it.
        cleaned = os.path.join(cleaned, 'cauldron_run.log')
    elif os.path.exists(cleaned):
        # Remove a pre-existing log file for a clean slate.
        os.remove(cleaned)
    parent = os.path.dirname(cleaned)
    if not os.path.exists(parent):
        os.makedirs(parent)
    return cleaned
Initializes the logging path for running the project . If no logging path is specified the current directory will be used instead .
44,659
def stop(self, sig=signal.SIGINT):
    """Stops all the child workers and then waits for them to exit.

    :param sig: Signal delivered to each child (SIGINT by default).
    """
    for cpid in self.sandboxes:
        logger.warn('Stopping %i...' % cpid)
        try:
            os.kill(cpid, sig)
        except OSError:
            logger.exception('Error stopping %s...' % cpid)
    # Iterate over a copy: entries are popped inside the loop below.
    for cpid in list(self.sandboxes):
        try:
            logger.info('Waiting for %i...' % cpid)
            pid, status = os.waitpid(cpid, 0)
            logger.warn('%i stopped with status %i' % (pid, status >> 8))
        except OSError:
            logger.exception('Error waiting for %i...' % cpid)
        finally:
            self.sandboxes.pop(cpid, None)
Stop all the workers and then wait for them
44,660
def spawn(self, **kwargs):
    """Returns a new worker instance for a child process.

    The base kwargs are copied and overlaid with any per-spawn kwargs.
    A worker class given as a dotted string is resolved (and cached on
    self.klass) on first use.
    """
    copy = dict(self.kwargs)
    copy.update(kwargs)
    if isinstance(self.klass, string_types):
        # Lazily resolve a string class path into the actual class.
        self.klass = util.import_class(self.klass)
    return self.klass(self.queues, self.client, **copy)
Return a new worker for a child process
44,661
def run(self):
    """Runs this master worker: forks `count` sandboxed child workers,
    replaces any child that dies until shutdown is requested, then
    force-stops the remainder.
    """
    self.signals(('TERM', 'INT', 'QUIT'))
    # Split the resumable jobs evenly across the children.
    resume = self.divide(self.resume, self.count)
    for index in range(self.count):
        sandbox = os.path.join(
            os.getcwd(), 'qless-py-workers', 'sandbox-%s' % index
        )
        cpid = os.fork()
        if cpid:
            logger.info('Spawned worker %i' % cpid)
            self.sandboxes[cpid] = sandbox
        else:
            # Child: run a worker inside its sandbox and never return.
            with Worker.sandbox(sandbox):
                os.chdir(sandbox)
                try:
                    self.spawn(resume=resume[index], sandbox=sandbox).run()
                except:
                    logger.exception('Exception in spawned worker')
                finally:
                    os._exit(0)
    try:
        while not self.shutdown:
            # Block until any child exits, then replace it.
            pid, status = os.wait()
            logger.warn(
                'Worker %i died with status %i from signal %i' % (
                    pid, status >> 8, status & 0xff
                )
            )
            sandbox = self.sandboxes.pop(pid)
            cpid = os.fork()
            if cpid:
                logger.info('Spawned replacement worker %i' % cpid)
                self.sandboxes[cpid] = sandbox
            else:
                # Replacement child: same sandboxed run-and-exit flow.
                with Worker.sandbox(sandbox):
                    os.chdir(sandbox)
                    try:
                        self.spawn(sandbox=sandbox).run()
                    except:
                        logger.exception('Exception in spawned worker')
                    finally:
                        os._exit(0)
    finally:
        self.stop(signal.SIGKILL)
Run this worker
44,662
def get_stack_frames(error_stack: bool = True) -> list:
    """Returns the current stack frames, pruned of leading
    Cauldron-internal frames so the trace focuses on the relevant code.

    :param error_stack: When True, frames come from the active
        exception's traceback; otherwise from the live call stack.
    """
    cauldron_path = environ.paths.package()
    resources_path = environ.paths.resources()
    frames = (
        list(traceback.extract_tb(sys.exc_info()[-1]))
        if error_stack else
        traceback.extract_stack()
    ).copy()

    def is_cauldron_code(test_filename: str) -> bool:
        # Cauldron package files are internal — except files under the
        # resources directory, which belong to user-facing content.
        if not test_filename or not test_filename.startswith(cauldron_path):
            return False
        if test_filename.startswith(resources_path):
            return False
        return True

    # Drop internal frames from the top, but always keep at least one.
    while len(frames) > 1 and is_cauldron_code(frames[0].filename):
        frames.pop(0)
    return frames
Returns a list of the current stack frames which are pruned to focus on the Cauldron code where the relevant information resides .
44,663
def format_stack_frame(stack_frame, project: 'projects.Project') -> dict:
    """Formats a raw stack frame into a template-ready dictionary,
    enriched with information from the currently open project.

    Filenames inside the project are made relative to the project's
    source directory; module-level frames get a None location.
    """
    filename = stack_frame.filename
    if filename.startswith(project.source_directory):
        # Show project files relative to the project root.
        filename = filename[len(project.source_directory) + 1:]
    location = None if stack_frame.name == '<module>' else stack_frame.name
    return dict(
        filename=filename,
        location=location,
        line_number=stack_frame.lineno,
        line=stack_frame.line
    )
Formats a raw stack frame into a dictionary formatted for render templating and enriched with information from the currently open project .
44,664
def arg_type_to_string(arg_type) -> str:
    """Converts an annotation type to a human-readable string, joining
    union members with commas (recursively)."""
    # __union_params__ covers older typing releases; __args__ newer ones.
    union_params = (
        getattr(arg_type, '__union_params__', None) or
        getattr(arg_type, '__args__', None)
    )
    if union_params and isinstance(union_params, (list, tuple)):
        return ', '.join(arg_type_to_string(item) for item in union_params)
    try:
        return arg_type.__name__
    except AttributeError:
        # Fall back to the str() of objects without a __name__.
        return '{}'.format(arg_type)
Converts the argument type to a string
44,665
def merge_components(
        *components: typing.List[typing.Union[list, tuple, COMPONENT]]
) -> COMPONENT:
    """
    Merges multiple COMPONENT instances into one by concatenating their
    includes and files lists. Arguments may also be lists or tuples of
    COMPONENT instances, which are flattened first.
    """
    flattened = functools.reduce(flatten_reducer, components, [])

    def _collect(key: str) -> list:
        # Fold the named list attribute of every component into one flat list.
        return functools.reduce(
            functools.partial(combine_lists_reducer, key),
            flattened,
            []
        )

    return COMPONENT(includes=_collect('includes'), files=_collect('files'))
Merges multiple COMPONENT instances into a single one by merging the lists of includes and files . Has support for elements of the components arguments list to be lists or tuples of COMPONENT instances as well .
44,666
def flatten_reducer(
        flattened_list: list,
        entry: typing.Union[list, tuple, COMPONENT]
) -> list:
    """
    Reducer that flattens a sequence containing COMPONENT instances and/or
    lists/tuples of them into a single flat list of COMPONENT instances.
    """
    # Anything exposing both attributes is treated as a single COMPONENT.
    is_component = hasattr(entry, 'includes') and hasattr(entry, 'files')
    if is_component:
        flattened_list.append(entry)
    elif entry:
        # Non-empty list/tuple of components: splice its elements in.
        flattened_list.extend(entry)
    return flattened_list
Flattens a list of COMPONENT instances to remove any lists or tuples of COMPONENTS contained within the list
44,667
def combine_lists_reducer(
        key: str,
        merged_list: list,
        component: COMPONENT
) -> list:
    """
    Reducer that appends the component's list stored under the given
    attribute key onto the running merged list.
    """
    merged_list += getattr(component, key)
    return merged_list
Reducer function to combine the lists for the specified key into a single flat list
44,668
def listen(self):
    """
    Subscribes to the configured channels and yields each pubsub event of
    type 'message' as it arrives. The channel list is cleared when the
    generator finishes or is closed.
    """
    try:
        self._pubsub.subscribe(self._channels)
        for event in self._pubsub.listen():
            if event['type'] != 'message':
                continue
            yield event
    finally:
        self._channels = []
Listen for events as they come in
44,669
def thread(self):
    """
    Runs this listener on a background thread, yielding control back to the
    caller while the listener runs, then unlistening and joining the thread
    on the way out.
    """
    # NOTE(review): this generator yields once and cleans up in finally —
    # presumably it is wrapped as a context manager at the class level;
    # the decorator is not visible in this chunk. Confirm against the class.
    thread = threading.Thread(target=self.listen)
    thread.start()
    try:
        yield self
    finally:
        # Stop the listener and wait for its thread to terminate.
        self.unlisten()
        thread.join()
Run in a thread
44,670
def listen(self):
    """
    Listens for incoming events and dispatches each message payload to the
    callback registered for its channel (with the namespace prefix removed).
    Messages on channels without a registered callback are ignored.
    """
    for message in Listener.listen(self):
        logger.debug('Message: %s', message)
        # Strip the namespace prefix to recover the bare channel name.
        channel = message['channel'][len(self.namespace):]
        handler = self._callbacks.get(channel)
        if handler:
            handler(message['data'])
Listen for events
44,671
def on(self, evt, func):
    """
    Registers a callback for a known pubsub event. Only events already
    present in the callback table may be assigned; anything else raises
    NotImplementedError.
    """
    if evt in self._callbacks:
        self._callbacks[evt] = func
    else:
        raise NotImplementedError('callback "%s"' % evt)
Set a callback handler for a pubsub event
44,672
def get(self, option, default=None):
    """
    Get a particular option, or the default if the stored value is None.

    Fix: the previous implementation used the short-circuit idiom
    ``(val is None and default) or val``, which incorrectly returned the
    stored value's replacement as None whenever the *default* itself was
    falsy (e.g. ``get('missing', 0)`` returned None instead of 0).
    """
    val = self[option]
    return default if val is None else val
Get a particular option, or the default if it's missing.
44,673
def pop(self, option, default=None):
    """
    Just like dict.pop: removes the option and returns its value, or the
    default if the stored value is None.

    Fix: the previous ``(val is None and default) or val`` idiom returned
    None whenever the default itself was falsy (0, '', False); the
    conditional expression handles falsy defaults correctly.
    """
    val = self[option]
    del self[option]
    return default if val is None else val
Just like dict . pop
44,674
def update(self, other=(), **kwargs):
    """
    Just like dict.update: assigns every key/value pair from `other` and
    the keyword arguments through item assignment. Note that entries in
    `other` take precedence over identically-named keyword arguments.
    """
    combined = dict(kwargs)
    combined.update(other)
    for key in combined:
        self[key] = combined[key]
Just like dict . update
44,675
def touch_project():
    """
    Touches the currently open project to trigger refreshing its
    cauldron.json state, failing the response when no project is open.
    """
    response = Response()
    project = cd.project.get_internal_project()
    if project:
        project.refresh()
    else:
        response.fail(code='NO_PROJECT', message='No open project to refresh')
    return (
        response
        .update(sync_time=sync_status.get('time', 0))
        .flask_serialize()
    )
Touches the project to trigger refreshing its cauldron . json state .
44,676
def fetch_synchronize_status():
    """
    Returns the synchronization status for the currently open project,
    including its on-disk definition, or a NO_PROJECT failure when no
    project is open.
    """
    response = Response()
    project = cd.project.get_internal_project()

    if not project:
        response.fail(
            code='NO_PROJECT',
            message='No open project on which to retrieve status'
        )
        return response.flask_serialize()

    # Load the project definition fresh from disk.
    with open(project.source_path, 'r') as f:
        definition = json.load(f)

    response.update(
        sync_time=sync_status.get('time', 0),
        source_directory=project.source_directory,
        remote_source_directory=project.remote_source_directory,
        status=status.of_project(project),
        definition=definition
    )
    return response.flask_serialize()
Returns the synchronization status information for the currently opened project
44,677
def download_file(filename: str):
    """
    Sends the specified file from the project's downloads folder, or an
    empty 204 response when the file or an open project is unavailable.
    """
    project = cd.project.get_internal_project()
    source_directory = project.source_directory if project else None

    if not (filename and project and source_directory):
        return '', 204

    # Downloads live in a sibling folder next to the project source.
    path = os.path.realpath(os.path.join(
        source_directory, '..', '__cauldron_downloads', filename
    ))
    if not os.path.exists(path):
        return '', 204

    mime_type = mimetypes.guess_type(path)[0]
    return flask.send_file(path, mimetype=mime_type)
Downloads the specified project file if it exists.
44,678
def get_project_source_path(path: str) -> str:
    """
    Converts the given path into a project source path pointing at the
    cauldron.json file. Paths already ending in cauldron.json are returned
    (cleaned) without modification.
    """
    cleaned = environ.paths.clean(path)
    if cleaned.endswith('cauldron.json'):
        return cleaned
    return os.path.join(cleaned, 'cauldron.json')
Converts the given path into a project source path to the cauldron . json file . If the path already points to a cauldron . json file the path is returned without modification .
44,679
def load_project_definition(path: str) -> dict:
    """
    Loads the cauldron.json project definition for the given path, which
    may be either the cauldron.json file itself or the directory containing
    it. Raises FileNotFoundError when the file is missing. A missing or
    empty 'id' falls back to the name of the containing folder.
    """
    source_path = get_project_source_path(path)
    if not os.path.exists(source_path):
        raise FileNotFoundError(
            'Missing project file: {}'.format(source_path)
        )

    with open(source_path, 'r') as f:
        definition = json.load(f)

    if not definition.get('id'):
        # Default the project id to the source folder's name.
        definition['id'] = os.path.split(os.path.dirname(source_path))[-1]

    return definition
Load the cauldron . json project definition file for the given path . The path can be either a source path to the cauldron . json file or the source directory where a cauldron . json file resides .
44,680
def simplify_path(path: str, path_prefixes: list = None) -> str:
    """
    Simplifies a path by replacing known prefixes with their short aliases.
    The user's home directory is always tried (as '~') after any supplied
    (alias, prefix) pairs; the first matching prefix wins.
    """
    result = '{}'.format(path if path else '')

    # Copy so the caller's list is never mutated by the '~' addition below.
    prefixes = list(path_prefixes) if path_prefixes else []
    prefixes.append(('~', os.path.expanduser('~')))

    for alias, prefix in prefixes:
        if result.startswith(prefix):
            return '{}{}'.format(alias, result[len(prefix):])

    return result
Simplifies package paths by replacing path prefixes with values specified in the replacements list
44,681
def module_to_package_data(
        name: str,
        entry,
        path_prefixes: list = None
) -> typing.Union[dict, None]:
    """
    Converts a loaded module entry into a package data dictionary
    (name, version, simplified location). Returns None for sub-modules
    (dotted names), modules without a version, and modules residing inside
    the interpreter's exec prefix.
    """
    # Only top-level packages are reported; skip dotted sub-modules.
    if '.' in name:
        return None

    version = getattr(entry, '__version__', None)
    # Some packages expose a version object whose value lives on .version.
    if hasattr(version, 'version'):
        version = version.version

    location = getattr(entry, '__file__', sys.exec_prefix)
    if version is None or location.startswith(sys.exec_prefix):
        return None

    return dict(
        name=name,
        version=version,
        location=simplify_path(location, path_prefixes)
    )
Converts a module entry into a package data dictionary with information about the module, including its version and location on disk.
44,682
def get_system_data() -> typing.Union[None, dict]:
    """
    Returns information about the Python environment Cauldron is running
    in: interpreter details plus the discoverable loaded packages.
    """
    site_packages = get_site_packages()

    # Alias site-packages and interpreter-core paths for compact display.
    path_prefixes = [('[SP]', p) for p in site_packages]
    path_prefixes.append(('[CORE]', sys.exec_prefix))

    packages = []
    for name, entry in list(sys.modules.items()):
        data = module_to_package_data(name, entry, path_prefixes)
        if data is not None:
            packages.append(data)

    python_data = dict(
        version=list(sys.version_info),
        executable=simplify_path(sys.executable),
        directory=simplify_path(sys.exec_prefix),
        site_packages=[simplify_path(sp) for sp in site_packages]
    )

    return dict(python=python_data, packages=packages)
Returns information about the system in which Cauldron is running . If the information cannot be found None is returned instead .
44,683
def remove(path: str, max_retries: int = 3) -> bool:
    """
    Removes the given file or directory (recursively) from the local
    filesystem. Returns True when the path is gone (or was never there),
    False for an empty path or when every removal attempt failed.
    """
    if not path:
        return False
    if not os.path.exists(path):
        return True

    delete = os.remove if os.path.isfile(path) else shutil.rmtree

    for _ in range(max_retries):
        try:
            delete(path)
            return True
        except Exception:
            # Brief pause before retrying; removal can fail transiently.
            time.sleep(0.02)

    return False
Removes the specified path from the local filesystem if it exists. Directories are removed along with all files and folders contained within them.
44,684
def end(code: int):
    """
    Ends the application with the given exit code, printing trailing
    whitespace for console clarity and logging a failure message for any
    non-zero code.
    """
    print('\n')
    if code != 0:
        log('Failed with status code: {}'.format(code), whitespace=1)
    sys.exit(code)
Ends the application with the specified error code, adding whitespace to the end of the console log output for clarity.
44,685
def folder(self) -> typing.Union[str, None]:
    """
    The folder, relative to the project source directory, where the file
    resides. An explicit 'folder' entry in the data dict wins; otherwise
    the project folder (called if it is callable) is used; else None.
    """
    if 'folder' in self.data:
        return self.data.get('folder')

    source = self.project_folder
    if not source:
        return None

    # project_folder may be a plain value or a zero-argument factory.
    return source() if callable(source) else source
The folder relative to the project source_directory where the file resides
44,686
def render_stop_display(step: 'projects.ProjectStep', message: str):
    """
    Renders a stop action to the Cauldron display, attributing the stop to
    the stack frame that invoked it when that frame can be located.
    """
    stack = render_stack.get_formatted_stack_frame(
        project=step.project,
        error_stack=False
    )
    try:
        # Find this module in the stack; the caller is the frame before it.
        filenames = [entry['filename'] for entry in stack]
        position = filenames.index(os.path.realpath(__file__))
        frame = stack[position - 1]
    except Exception:
        frame = {}

    stop_message = (
        '{}'.format(message)
        if message else
        'This step was explicitly stopped prior to its completion'
    )

    dom = templating.render_template(
        'step-stop.html',
        message=stop_message,
        frame=frame
    )
    step.report.append_body(dom)
Renders a stop action to the Cauldron display .
44,687
def id(self) -> typing.Union[str, None]:
    """Identifier for the project, or None when no project is loaded."""
    if not self._project:
        return None
    return self._project.id
Identifier for the project .
44,688
def display(self) -> typing.Union[None, report.Report]:
    """
    The display report for the project's current step, or None when no
    project is loaded or no step is current.
    """
    project = self._project
    if project and project.current_step:
        return project.current_step.report
    return None
The display report for the current project .
44,689
def shared(self) -> typing.Union[None, SharedCache]:
    """
    The shared display object for this project, or None when no project
    is loaded.
    """
    if not self._project:
        return None
    return self._project.shared
The shared display object associated with this project .
44,690
def settings(self) -> typing.Union[None, SharedCache]:
    """
    The settings cache for this project, or None when no project is loaded.
    """
    if not self._project:
        return None
    return self._project.settings
The settings associated with this project .
44,691
def title(self) -> typing.Union[None, str]:
    """The title of this project, or None when no project is loaded."""
    project = self._project
    if not project:
        return None
    return project.title
The title of this project .
44,692
def title(self, value: typing.Union[None, str]):
    """
    Assigns a new title to the loaded project (initially sourced from the
    cauldron.json file). Raises RuntimeError when no project is loaded.
    """
    project = self._project
    if not project:
        raise RuntimeError('Failed to assign title to an unloaded project')
    project.title = value
Modifies the title of the project which is initially loaded from the cauldron . json file .
44,693
def load(self, project: typing.Union[projects.Project, None]):
    """
    Connects this object to the given source project; passing None
    disconnects it.
    """
    self._project = project
Connects this object to the specified source project .
44,694
def path(self, *args: typing.List[str]) -> typing.Union[None, str]:
    """
    Builds an absolute, cleaned path inside the project source directory
    from the given relative components, or None when no project is loaded.
    """
    if not self._project:
        return None
    combined = os.path.join(self._project.source_directory, *args)
    return environ.paths.clean(combined)
Creates an absolute path in the project source directory from the relative path components .
44,695
def stop(self, message: str = None, silent: bool = False):
    """
    Immediately stops execution of the project at the current step without
    raising a visible error, optionally rendering a stop notice to the
    display. A no-op when no project or current step is available.
    """
    project = self.get_internal_project()
    if not project or not project.current_step:
        return
    if not silent:
        render_stop_display(project.current_step, message)
    raise UserAbortError(halt=True)
Stops the execution of the project at the current step immediately without raising an error . Use this to abort running the project in situations where some critical branching action should prevent the project from continuing to run .
44,696
def get_internal_project(
        self,
        timeout: float = 1
) -> typing.Union['projects.Project', None]:
    """
    Returns the internally loaded project, polling every 0.1 seconds until
    it becomes available or the timeout elapses. This guards against race
    conditions when projects are loaded on another thread.
    """
    attempts = int(timeout / 0.1)
    for _ in range(attempts):
        found = self.internal_project
        if found:
            return found
        time.sleep(0.1)
    # One last read after the polling window closes.
    return self.internal_project
Attempts to return the internally loaded project . This function prevents race condition issues where projects are loaded via threads because the internal loop will try to continuously load the internal project until it is available or until the timeout is reached .
44,697
def _step(self) -> typing.Union[None, 'projects.ProjectStep']:
    """
    Internal access to the current source step; returns None whenever the
    project or step cannot be resolved. Not for use outside of Cauldron
    development.
    """
    import cauldron
    try:
        project = cauldron.project.get_internal_project()
        return project.current_step
    except Exception:
        return None
Internal access to the source step . Should not be used outside of Cauldron development .
44,698
def stop(self, message: str = None, silent: bool = False, halt: bool = False):
    """
    Immediately stops execution of the current step without raising a
    visible error, optionally rendering a stop notice to the display.
    A no-op when no current step is available.
    """
    current_step = self._step
    if not current_step:
        return
    if not silent:
        render_stop_display(current_step, message)
    raise UserAbortError(halt=halt)
Stops the execution of the current step immediately without raising an error . Use this to abort the step running process if you want to return early .
44,699
def write_to_console(self, message: str):
    """
    Writes the given message to the console stdout via the step's stdout
    interceptor, bypassing the notebook display. Raises ValueError when no
    step is initialized.
    """
    step = self._step
    if not step:
        raise ValueError(
            'Cannot write to the console stdout on an uninitialized step'
        )
    step.report.stdout_interceptor.write_source('{}'.format(message))
Writes the specified message to the console stdout without including it in the notebook display .