idx
int64
0
251k
question
stringlengths
53
3.53k
target
stringlengths
5
1.23k
len_question
int64
20
893
len_target
int64
3
238
241,400
def extract_pattern(pattern: str, ifiles: List[str]) -> Dict[str, Any]:
    '''Match `pattern` against a list of input files and extract pieces of
    the filenames as lists of values, keyed by the wildcards defined in
    `pattern`.

    A file that does not match the pattern contributes ``None`` for each
    key so result lists stay aligned with `ifiles`.
    '''
    # BUG FIX: the return annotation used the builtin `any` instead of
    # `typing.Any`.
    # Seed the result with one (empty) list per wildcard in the pattern.
    res = glob_wildcards(pattern, [])
    for ifile in ifiles:
        matched = glob_wildcards(pattern, [ifile])
        for key in matched.keys():
            if not matched[key]:
                # filename does not match the pattern; record a placeholder
                res[key].append(None)
            else:
                res[key].extend(matched[key])
    return res
This function matches a pattern against a list of input files and extracts and returns pieces of the filenames as lists of variables with keys defined by the pattern.
127
29
241,401
def expand_pattern(pattern: str) -> List[str]:
    '''Expand `pattern` against variables in the current SoS namespace and
    return the resulting list of filenames.

    Raises:
        ValueError: if a wildcard in the pattern is undefined, or if two
            sequence-valued wildcards have different lengths.
    '''
    ofiles = []
    res = glob_wildcards(pattern, [])
    # BUG FIX: removed a redundant second `sz = None` assignment.
    sz = None
    # one dict of wildcard values per output file
    wildcard = [{}]
    for key in res.keys():
        if key not in env.sos_dict:
            raise ValueError(f'Undefined variable {key} in pattern {pattern}')
        # BUG FIX: `collections.Sequence` was removed in Python 3.10;
        # use collections.abc.Sequence. Strings are excluded explicitly.
        if not isinstance(env.sos_dict[key], str) and isinstance(
                env.sos_dict[key], collections.abc.Sequence):
            if sz is None:
                sz = len(env.sos_dict[key])
                wildcard = [copy.deepcopy(wildcard[0]) for x in range(sz)]
            elif sz != len(env.sos_dict[key]):
                raise ValueError(
                    f'Variables in output pattern should have the same length (other={sz}, len({key})={len(env.sos_dict[key])})'
                )
            for idx, value in enumerate(env.sos_dict[key]):
                wildcard[idx][key] = value
        else:
            # scalar value: the same value is used for every output file
            for v in wildcard:
                v[key] = env.sos_dict[key]
    for card in wildcard:
        ofiles.append(
            apply_wildcards(
                pattern,
                card,
                fill_missing=False,
                fail_dynamic=False,
                dynamic_fill=None,
                keep_dynamic=False))
    return ofiles
This function expands patterns against the current namespace and returns a list of filenames
330
16
241,402
def interpolate(text, global_dict=None, local_dict=None):
    '''Evaluate expressions in `text`.

    The text is first converted to an f-string (quotation marks and the
    ``f`` prefix added) and then evaluated in the given namespaces.

    Raises:
        ValueError: if the text cannot be interpolated.
    '''
    try:
        return eval(as_fstring(text), global_dict, local_dict)
    except Exception as e:
        # chain the original exception so the root cause stays visible
        raise ValueError(f'Failed to interpolate {text}: {e}') from e
Evaluate expressions in text
88
6
241,403
def SoS_eval(expr: str, extra_dict: Optional[dict] = None) -> Any:
    '''Evaluate an expression with the SoS dict as globals.

    BUG FIX: the original used a mutable default argument (``{}``), which
    is shared across calls; ``None`` is used as the sentinel instead.
    '''
    return eval(expr, env.sos_dict.dict(),
                {} if extra_dict is None else extra_dict)
Evaluate an expression with sos dict .
42
10
241,404
def SoS_exec(script: str, _dict: dict = None, return_result: bool = True) -> Any:
    '''Execute a statement in namespace `_dict` (defaults to the SoS dict).

    If `return_result` is True and the last statement of `script` is an
    expression, its value is returned; otherwise None is returned.

    Raises:
        SyntaxError: if the script is not valid Python.
    '''
    # NOTE: the return annotation was corrected from `-> None`: the
    # function does return the value of a trailing expression.
    if _dict is None:
        _dict = env.sos_dict.dict()
    if not return_result:
        exec(compile(script, filename=stmtHash.hash(script), mode='exec'), _dict)
        return None
    try:
        stmts = list(ast.iter_child_nodes(ast.parse(script)))
        if not stmts:
            return None
        if isinstance(stmts[-1], ast.Expr):
            # the last statement is an expression: execute everything
            # before it, then evaluate it so its value can be returned
            if len(stmts) > 1:
                exec(
                    compile(
                        # BUG FIX: type_ignores is a required field of
                        # ast.Module since Python 3.8
                        ast.Module(body=stmts[:-1], type_ignores=[]),
                        filename=stmtHash.hash(script),
                        mode="exec"), _dict)
            res = eval(
                compile(
                    ast.Expression(body=stmts[-1].value),
                    filename=stmtHash.hash(script),
                    mode="eval"), _dict)
        else:
            # no trailing expression: just execute the entire code
            exec(compile(script, filename=stmtHash.hash(script), mode='exec'), _dict)
            res = None
    except SyntaxError as e:
        raise SyntaxError(f"Invalid code {script}: {e}") from e
    return res
Execute a statement .
345
5
241,405
def expand_depends_files(*args, **kwargs):
    '''Handle the `depends:` directive by resolving any dynamic targets
    and building a verified sos_targets object.'''
    resolved_args = [
        arg.resolve() if isinstance(arg, dynamic) else arg for arg in args
    ]
    resolved_kwargs = {
        key: (val.resolve() if isinstance(val, dynamic) else val)
        for key, val in kwargs.items()
    }
    return sos_targets(
        *resolved_args,
        **resolved_kwargs,
        _verify_existence=True,
        _undetermined=False,
        _source=env.sos_dict['step_name'])
handle directive depends
126
3
241,406
def wait_for_subworkflows(self, workflow_results):
    '''Wait (as a generator) for results from pending subworkflows.

    Yields the socket before each receive; exits the process on a None
    reply and re-raises any Exception sent back.
    '''
    pending = sum((result['pending_workflows'] for result in workflow_results), [])
    for _ in pending:
        # NOTE: workflow ids are not matched against individual replies
        yield self.socket
        reply = self.socket.recv_pyobj()
        if reply is None:
            sys.exit(0)
        if isinstance(reply, Exception):
            raise reply
Wait for results from subworkflows
107
7
241,407
def Rmarkdown(script=None,
              input=None,
              output=None,
              args='{input:r}, output_file={output:ar}',
              **kwargs):
    '''Convert an input file to an output file using Rmarkdown.

    Raises:
        RuntimeError: if the rmarkdown R library is missing or the
            rendering command fails.
    '''
    if not R_library('rmarkdown').target_exists():
        raise RuntimeError('Library rmarkdown does not exist')
    input = sos_targets(collect_input(script, input))
    output = sos_targets(output)
    if len(output) == 0:
        # no output specified: render to a temporary html and dump to stdout
        write_to_stdout = True
        output = sos_targets(
            tempfile.NamedTemporaryFile(
                mode='w+t', suffix='.html', delete=False).name)
    else:
        write_to_stdout = False
    # BUG FIX: `ret = 1` was commented out in the original; if Popen (or
    # interpolate) raised, `ret` was unbound and `if ret != 0` below
    # raised NameError instead of reporting the failure.
    ret = 1
    try:
        cmd = interpolate(f'Rscript -e "rmarkdown::render({args})"', {
            'input': input,
            'output': output
        })
        if 'ACTION' in env.config['SOS_DEBUG'] or 'ALL' in env.config['SOS_DEBUG']:
            env.log_to_file('ACTION', f'Running command "{cmd}"')
        if env.config['run_mode'] == 'interactive':
            # capture output and forward to python streams, which will in
            # turn be hijacked by SoS notebook
            p = subprocess.Popen(
                cmd, shell=True, stderr=subprocess.PIPE, stdout=subprocess.PIPE)
            out, err = p.communicate()
            sys.stdout.write(out.decode())
            sys.stderr.write(err.decode())
            ret = p.returncode
        else:
            p = subprocess.Popen(cmd, shell=True)
            ret = p.wait()
    except Exception as e:
        env.logger.error(e)
    if ret != 0:
        # keep a copy of the failing source so the user can re-run by hand
        temp_file = os.path.join('.sos', f'{"Rmarkdown"}_{os.getpid()}.md')
        shutil.copyfile(str(input), temp_file)
        cmd = interpolate(f'Rscript -e "rmarkdown::render({args})"', {
            'input': input,
            'output': sos_targets(temp_file)
        })
        raise RuntimeError(
            f'Failed to execute script. Please use command \n"{cmd}"\nunder {os.getcwd()} to test it.'
        )
    if write_to_stdout:
        with open(str(output[0])) as out:
            sys.stdout.write(out.read())
    else:
        env.logger.info(f'Report saved to {output}')
Convert input file to output using Rmarkdown
713
10
241,408
def total_memory(self, image='ubuntu'):
    '''Get the total RAM of the docker machine, in KB.

    Stores the value in ``self.tot_mem`` (None when it cannot be
    determined) and returns it.
    '''
    try:
        out = subprocess.check_output(
            f'''docker run -t {image} cat /proc/meminfo | grep MemTotal''',
            shell=True,
            stdin=subprocess.DEVNULL)
        # out looks like: "MemTotal:   30208916 kB"
        self.tot_mem = int(out.split()[1])
    except Exception:
        # some systems do not have cat or grep
        self.tot_mem = None
    return self.tot_mem
Get the total RAM of the docker machine in KB
116
11
241,409
def script(script, interpreter='', suffix='', args='', **kwargs):
    '''Execute the specified script using the specified interpreter.

    This action accepts common action arguments such as input, active,
    workdir, docker_image and args; content of files specified by option
    `input` is prepended to the script.
    '''
    executor = SoS_ExecuteScript(script, interpreter, suffix, args)
    return executor.run(**kwargs)
Execute specified script using specified interpreter . This action accepts common action arguments such as input active workdir docker_image and args . In particular content of one or more files specified by option input would be prepended before the specified script .
49
47
241,410
def stop_if(expr, msg='', no_output=False):
    '''Abort the execution of the current step or substep, raising
    StopInputGroup with warning message `msg`, when `expr` is truthy.

    Returns 0 when `expr` is falsy.
    '''
    if not expr:
        return 0
    raise StopInputGroup(msg=msg, keep_output=not no_output)
Abort the execution of the current step or loop and yield a warning message msg if expr is True
41
20
241,411
def download(URLs, dest_dir='.', dest_file=None, decompress=False, max_jobs=5):
    '''Download files from space/tab/newline-separated URLs to `dest_dir`
    (or to `dest_file` when a single URL is given), using up to `max_jobs`
    concurrent download jobs.

    Returns 0 on success; raises RuntimeError/ValueError on failure.
    '''
    if env.config['run_mode'] == 'dryrun':
        print(f'HINT: download\n{URLs}\n')
        return None
    if isinstance(URLs, str):
        urls = [x.strip() for x in URLs.split() if x.strip()]
    else:
        urls = list(URLs)
    if not urls:
        env.logger.debug(f'No download URL specified: {URLs}')
        return
    if dest_file is not None and len(urls) != 1:
        raise RuntimeError(
            'Only one URL is allowed if a destination file is specified.')
    if dest_file is None:
        filenames = []
        for idx, url in enumerate(urls):
            token = urllib.parse.urlparse(url)
            # a URL must provide both a scheme and a netloc
            if not all([
                    getattr(token, qualifying_attr)
                    for qualifying_attr in ('scheme', 'netloc')
            ]):
                raise ValueError(f'Invalid URL {url}')
            filename = os.path.split(token.path)[-1]
            if not filename:
                raise ValueError(f'Cannot determine destination file for {url}')
            filenames.append(os.path.join(dest_dir, filename))
    else:
        token = urllib.parse.urlparse(urls[0])
        if not all([
                getattr(token, qualifying_attr)
                for qualifying_attr in ('scheme', 'netloc')
        ]):
            # BUG FIX: the original referenced the undefined name `url`
            # here (NameError); use urls[0].
            raise ValueError(f'Invalid URL {urls[0]}')
        filenames = [dest_file]
    succ = [(False, None) for x in urls]
    with ProcessPoolExecutor(max_workers=max_jobs) as executor:
        for idx, (url, filename) in enumerate(zip(urls, filenames)):
            succ[idx] = executor.submit(downloadURL, url, filename, decompress, idx)
        succ = [x.result() for x in succ]
    failed = [y for x, y in zip(succ, urls) if not x]
    if failed:
        if len(urls) == 1:
            # BUG FIX: the original string lacked the f-prefix, so the
            # literal text "{urls[0]}" appeared in the error message.
            raise RuntimeError(f'Failed to download {urls[0]}')
        raise RuntimeError(
            f'Failed to download {failed[0]} ({len(failed)} out of {len(urls)})')
    return 0
Download files from specified URL which should be space tab or newline separated URLs . The files will be downloaded to specified destination . If filename . md5 files are downloaded they are used to validate downloaded filename . Unless otherwise specified compressed files are decompressed . If max_jobs is given a maximum of max_jobs concurrent download jobs will be used for each domain . This restriction applies to domain names and will be applied to multiple download instances .
620
87
241,412
def run(script, args='', **kwargs):
    '''Execute the specified script using bash (or directly when running
    on Windows or when the script carries its own shebang line).'''
    if sys.platform == 'win32':
        # on windows the script is placed first and no default args added
        interpreter = ''
    elif script.startswith('#!'):
        # shebang present: execute the script directly
        interpreter = ''
    else:
        interpreter = '/bin/bash'
        if not args:
            args = '-ev {filename:q}'
    return SoS_ExecuteScript(script, interpreter, '', args).run(**kwargs)
Execute specified script using bash . This action accepts common action arguments such as input active workdir docker_image and args . In particular content of one or more files specified by option input would be prepended before the specified script .
144
46
241,413
def get_changed_vars(section: SoS_Step):
    '''Return the set of variables this step shares (option `shared`),
    i.e. variables provided to other steps.'''
    if 'shared' not in section.options:
        return set()
    shared = section.options['shared']
    result = set()
    if isinstance(shared, str):
        result.add(shared)
    elif isinstance(shared, Sequence):
        for entry in shared:
            if isinstance(entry, str):
                result.add(entry)
            elif isinstance(entry, Mapping):
                result |= set(entry.keys())
            else:
                raise ValueError(
                    f'Option shared should be a string, a mapping of expression, or list of string or mappings. {shared} provided'
                )
    elif isinstance(shared, Mapping):
        result |= set(shared.keys())
    else:
        raise ValueError(
            f'Option shared should be a string, a mapping of expression, or list of string or mappings. {shared} provided'
        )
    return result
changed vars are variables that are shared and therefore provided to other steps
241
13
241,414
def get_all_used_vars(section):
    '''Collect variables used by the section's statements (assignments,
    scripts, and directives), plus variables referenced through
    paired_with/group_with/for_each parameters of the input directive,
    the task, and global functions that mention them.'''
    all_used_vars = set()
    for statement in section.statements:
        # statement[0] is a one-character tag: '=' assignment,
        # '!' script/statement, ':' directive
        if statement[0] == '=':
            all_used_vars |= accessed_vars('='.join(statement[1:3]))
        elif statement[0] == '!':
            all_used_vars |= accessed_vars(statement[1])
        elif statement[0] == ':':
            all_used_vars |= accessed_vars(statement[2], mode='eval')
            # only the input directive gets the extra parameter scan below
            if statement[1] != 'input':
                continue
            if 'paired_with' in statement[2]:
                try:
                    pws = get_names_of_param(
                        'paired_with', statement[2], extra_dict=env.sos_dict.dict())
                    all_used_vars |= set(pws)
                except Exception as e:
                    raise ValueError(f'Failed to parse parameter paired_with: {e}')
            if 'group_with' in statement[2]:
                try:
                    pws = get_names_of_param(
                        'group_with', statement[2], extra_dict=env.sos_dict.dict())
                    all_used_vars |= set(pws)
                except Exception as e:
                    raise ValueError(f'Failed to parse parameter group_with: {e}')
            if 'for_each' in statement[2]:
                try:
                    pws = get_names_of_param(
                        'for_each', statement[2], extra_dict=env.sos_dict.dict())
                    # for_each entries can be comma-separated variable names
                    for pw in pws:
                        all_used_vars |= set(pw.split(','))
                except Exception as e:
                    raise ValueError(f'Failed to parse parameter for_each: {e}')
    if section.task:
        all_used_vars |= accessed_vars(section.task)
    # now we have a list of global variables that are actually used in
    # the functions; this is specifically designed to handle the last
    # case in #1225
    func_with_vars = [
        y for x, y in used_in_func(section.global_stmts).items()
        if x in all_used_vars
    ]
    return set.union(all_used_vars, *func_with_vars)
Get variables used by the input statement and the statements before it
534
13
241,415
def get_signature_vars(section):
    '''Get signature variables, i.e. variables that will be saved with
    step signatures.'''
    # parameters defined in the global section are part of the signature (#1155)
    sig_vars = set(
        section.parameters.keys()
        & accessed_vars(strip_param_defs(section.global_stmts)))
    # only statements after the input directive contribute
    input_idx = find_statement(section, 'input')
    start = 0 if input_idx is None else input_idx + 1
    for stmt in section.statements[start:]:
        if stmt[0] == '=':
            sig_vars |= accessed_vars('='.join(stmt[1:3]))
        elif stmt[0] == '!':
            sig_vars |= accessed_vars(stmt[1])
    # finally, tasks..
    if section.task:
        sig_vars |= accessed_vars(section.task)
    # variables starting with __ are internal
    return {name for name in sig_vars if not name.startswith('__')}
Get signature variables which are variables that will be saved with step signatures
224
13
241,416
def get_step_input(section, default_input):
    '''Find the step input by evaluating the section's input directive.

    Returns a (step_input, dynamic_input) tuple. NOTE: dynamic_input is
    initialized to True and never modified in this function.
    '''
    step_input: sos_targets = sos_targets()
    dynamic_input = True
    # look for input statement.
    input_idx = find_statement(section, 'input')
    if input_idx is None:
        return step_input, dynamic_input
    # input statement
    stmt = section.statements[input_idx][2]
    try:
        # temporarily replace these functions with no-op stand-ins so the
        # directive can be evaluated without a full workflow context
        svars = ['output_from', 'named_output', 'sos_step', 'sos_variable']
        old_values = {
            x: env.sos_dict.dict()[x] for x in svars if x in env.sos_dict.dict()
        }
        env.sos_dict.quick_update({
            'output_from': lambda *args, **kwargs: None,
            'named_output': lambda *args, **kwargs: None,
            'traced': lambda *args, **kwargs: sos_targets(*args, **kwargs),
            'sos_step': no_sos_step,
            'sos_variable': no_sos_variable,
        })
        args, kwargs = SoS_eval(
            f'__null_func__({stmt})', extra_dict=env.sos_dict.dict())
        if not args:
            # empty input statement: fall back to the default input
            if default_input is None:
                step_input = sos_targets()
            else:
                step_input = default_input
        elif not any(isinstance(x, (dynamic, remote)) for x in args):
            step_input = sos_targets(*args)
    except SyntaxError:
        raise
    except Exception as e:
        # if anything is not evalutable, keep Undetermined
        env.logger.debug(
            f'Input of step {section.name if section.index is None else f"{section.name}_{section.index}"} is set to Undertermined: {e}'
        )
        # expression ...
        step_input = sos_targets(_undetermined=stmt)
    finally:
        # restore the replaced functions
        [env.sos_dict.dict().pop(x) for x in svars]
        env.sos_dict.quick_update(old_values)
    return step_input, dynamic_input
Find step input
518
3
241,417
def get_step_output ( section , default_output ) : step_output : sos_targets = sos_targets ( ) # if 'provides' in section . options and default_output : step_output = default_output # look for input statement. output_idx = find_statement ( section , 'output' ) if output_idx is None : return step_output # output statement value = section . statements [ output_idx ] [ 2 ] # output, depends, and process can be processed multiple times try : svars = [ 'output_from' , 'named_output' , 'sos_step' , 'sos_variable' ] old_values = { x : env . sos_dict . dict ( ) [ x ] for x in svars if x in env . sos_dict . dict ( ) } env . sos_dict . quick_update ( { 'output_from' : no_output_from , 'named_output' : no_named_output , 'sos_step' : no_sos_step , 'sos_variable' : no_sos_variable , } ) args , kwargs = SoS_eval ( f'__null_func__({value})' , extra_dict = env . sos_dict . dict ( ) ) if not any ( isinstance ( x , ( dynamic , remote ) ) for x in args ) : step_output = sos_targets ( * args , * * { x : y for x , y in kwargs . items ( ) if x not in SOS_TARGETS_OPTIONS } ) except SyntaxError : raise except Exception as e : if 'STEP' in env . config [ 'SOS_DEBUG' ] or 'ALL' in env . config [ 'SOS_DEBUG' ] : env . log_to_file ( 'STEP' , f"Args {value} cannot be determined: {e}" ) finally : [ env . sos_dict . dict ( ) . pop ( x ) for x in svars ] env . sos_dict . quick_update ( old_values ) if 'provides' in section . options and default_output is not None and step_output . valid ( ) : for out in default_output : # 981 if not isinstance ( out , sos_step ) and out not in step_output : raise ValueError ( f'Defined output fail to produce expected output: {step_output} generated, {default_output} expected.' ) return step_output
determine step output
558
5
241,418
def analyze_section(section: SoS_Step,
                    default_input: Optional[sos_targets] = None,
                    default_output: Optional[sos_targets] = None,
                    context={},
                    vars_and_output_only: bool = False) -> Dict[str, Any]:
    '''Analyze a section for how it uses input and output, what variables
    it uses, and its input/output/depends targets.

    Runs the analysis in a fresh environment, restoring the old one
    afterwards. With `vars_and_output_only`, input/depends analysis is
    skipped. NOTE: `context={}` is a mutable default — safe only if
    callers never mutate it; TODO confirm.
    '''
    # (a result cache keyed on section md5 / step name / defaults was
    # previously sketched here but is disabled)
    # use a fresh env for analysis
    new_env, old_env = env.request_new()
    try:
        prepare_env(section.global_def, section.global_vars, context)
        env.sos_dict.set('step_name', section.step_name())
        env.sos_dict.set('__null_func__', __null_func__)
        if 'STEP' in env.config['SOS_DEBUG'] or 'ALL' in env.config['SOS_DEBUG']:
            env.log_to_file(
                'STEP',
                f'Analyzing {section.step_name()} {"(output only)" if vars_and_output_only else ""}'
            )
        res = {
            'step_name': section.step_name(),
            'step_output': get_step_output(section, default_output),
            # variables starting with __ are internals...
            'environ_vars': get_environ_vars(section),
            'signature_vars': get_signature_vars(section),
            'changed_vars': get_changed_vars(section)
        }
        if not vars_and_output_only:
            inps = get_step_input(section, default_input)
            res['step_input'] = inps[0]
            res['dynamic_input'] = inps[1]
            deps = get_step_depends(section)
            res['step_depends'] = deps[0]
            res['dynamic_depends'] = deps[1]
    finally:
        # restore env
        env.restore_to_old(new_env, old_env)
    # #1225
    # The global section can contain a lot of variables, some of which can be large. Here we
    # found all variables that will be used in the step, including ones used in substep (signature_vars)
    # and ones that will be used in input statement etc.
    section.global_vars = {
        x: y for x, y in section.global_vars.items()
        if x in get_all_used_vars(section)
    }
    return res
Analyze a section for how it uses input and output what variables it uses and input output etc .
666
20
241,419
def extract_workflow(notebook):
    '''Extract a SoS workflow from a notebook file (path) or a notebook
    JSON instance.'''
    if isinstance(notebook, str):
        nb = nbformat.read(notebook, nbformat.NO_CONVERT)
    else:
        nb = notebook
    content = '#!/usr/bin/env sos-runner\n#fileformat=SOS1.0\n\n'
    for cell in nb.cells:
        if cell.cell_type != "code":
            continue
        # non-SoS code cells are ignored as well
        if 'kernel' in cell.metadata and cell.metadata['kernel'] not in ('sos', 'SoS', None):
            continue
        lines = cell.source.split('\n')
        valid_cell = False
        for idx, line in enumerate(lines):
            if valid_cell or (line.startswith('%include') or line.startswith('%from')):
                content += line + '\n'
            elif SOS_SECTION_HEADER.match(line):
                valid_cell = True
                # gather the run of comment lines immediately preceding
                # the section header
                comment = ''
                c = idx - 1
                while c >= 0 and lines[c].startswith('#'):
                    comment = lines[c] + '\n' + comment
                    c -= 1
                content += comment + line + '\n'
        if valid_cell:
            content += '\n'
    return content
Extract workflow from a notebook file or notebook JSON instance
303
11
241,420
def vim_ipython_is_open():
    '''Helper function to tell whether the vim-ipython shell is currently
    visible in any window.'''
    return any(
        w.buffer.name is not None and w.buffer.name.endswith("vim-ipython")
        for w in vim.windows)
Helper function to let us know if the vim - ipython shell is currently visible
51
16
241,421
def with_subchannel(f, *args):
    '''Wrap `f` so that, when monitoring is enabled, the subchannel
    messages are refreshed after each call.'''
    def f_with_update(*args):
        try:
            f(*args)
            if monitor_subchannel:
                update_subchannel_msgs(force=True)
        except AttributeError:
            # raised when kc is None, i.e. not connected to a kernel
            echo("not connected to IPython", 'Error')
    return f_with_update
conditionally monitor subchannel
79
5
241,422
def set_pid():
    '''Explicitly ask the IPython kernel for its pid and store it in the
    module-level `pid` variable.

    Returns the pid, or None when the kernel does not reply or the reply
    cannot be parsed.
    '''
    global pid
    lines = '\n'.join(['import os', '_pid = os.getpid()'])
    try:
        msg_id = send(lines, silent=True, user_variables=['_pid'])
    except TypeError:
        # change in IPython 3.0+: user_expressions replaced user_variables
        msg_id = send(lines, silent=True, user_expressions={'_pid': '_pid'})
    # wait to get message back from kernel
    try:
        child = get_child_msg(msg_id)
    except Empty:
        echo("no reply from IPython kernel")
        return
    try:
        pid = int(child['content']['user_variables']['_pid'])
    except TypeError:
        # change in IPython 1.0.dev moved this out
        pid = int(child['content']['user_variables']['_pid']['data']['text/plain'])
    except KeyError:
        # change in IPython 3.0+: value lives under user_expressions.
        # BUG FIX: the original had a second `except KeyError` clause on
        # the same try, which was unreachable; the nested try makes the
        # "could not get PID" fallback actually reachable.
        try:
            pid = int(
                child['content']['user_expressions']['_pid']['data']['text/plain'])
        except KeyError:
            echo("Could not get PID information, kernel not running Python?")
            return
    return pid
Explicitly ask the ipython kernel for its pid
306
11
241,423
def fetchmany(self, size=-1):
    '''Return a sequential set of up to `size` records (default
    `self.arraysize`).

    Fetching is guarded by the cursor lock so no other thread can grab
    records while a set is being fetched.
    '''
    self._cursorLock.acquire()
    try:
        # default value (and guard against a ridiculous size)
        if size < 0 or size > self.MAX_BLOCK_SIZE:
            size = self.arraysize
        return [self.fetchone() for _ in range(size)]
    finally:
        # BUG FIX: the lock is now released even when fetchone() raises;
        # previously an exception left the cursor permanently locked.
        self._cursorLock.release()
return a sequential set of records . This is guaranteed by locking so that no other thread can grab a few records while a set is fetched . this has the side effect that other threads may have to wait for an arbitrary long time for the completion of the current request .
100
54
241,424
def on_number(self, ctx, value):
    '''Handle a number token; both the integer and the double callbacks
    are subsumed by this one.'''
    # BUG FIX: str.isdigit() is False for negative numbers ('-3'), which
    # made the original parse negative integers as floats; trying int()
    # first preserves integer-ness for all integers.
    try:
        value = int(value)
    except ValueError:
        value = float(value)
    top = self._stack[-1]
    if top is JSONCompositeType.OBJECT:
        self.fire(JSONStreamer.VALUE_EVENT, value)
    elif top is JSONCompositeType.ARRAY:
        self.fire(JSONStreamer.ELEMENT_EVENT, value)
    else:
        raise RuntimeError('Invalid json-streamer state')
Since this is defined both integer and double callbacks are useless
111
12
241,425
def close(self):
    '''Close the streamer: fires a DOC_END_EVENT, drops the internal
    state stack, and frees the memory used by the yajl parser.'''
    self.fire(JSONStreamer.DOC_END_EVENT)
    # release the composite-type stack before closing the parser
    self._stack = None
    self._parser.close()
Closes the streamer which causes a DOC_END_EVENT to be fired and frees up memory used by yajl
34
27
241,426
async def minizinc(
        mzn, *dzn_files, args=None, data=None, include=None, stdlib_dir=None,
        globals_dir=None, declare_enums=True, allow_multiple_assignments=False,
        keep=False, output_vars=None, output_base=None, output_mode='dict',
        solver=None, timeout=None, two_pass=None, pre_passes=None,
        output_objective=False, non_unique=False, all_solutions=False,
        num_solutions=None, free_search=False, parallel=None, seed=None,
        rebase_arrays=True, keep_solutions=True, return_enums=False,
        max_queue_size=0, **kwargs):
    '''Coroutine version of the pymzn.minizinc function.

    Prepares the model/data files, solves asynchronously, and returns a
    queue of solutions ('raw' mode) or a parsed solution stream.
    '''
    # normalize model/data inputs into concrete files and types
    mzn_file, dzn_files, data_file, data, keep, _output_mode, types = \
        _minizinc_preliminaries(
            mzn, *dzn_files, args=args, data=data, include=include,
            stdlib_dir=stdlib_dir, globals_dir=globals_dir,
            output_vars=output_vars, keep=keep, output_base=output_base,
            output_mode=output_mode, declare_enums=declare_enums,
            allow_multiple_assignments=allow_multiple_assignments)
    if not solver:
        # fall back to the configured solver (default: gecode)
        solver = config.get('solver', gecode)
    # explicit kwargs are overridden by configured solver_args
    solver_args = {**kwargs, **config.get('solver_args', {})}
    proc = await solve(
        solver, mzn_file, *dzn_files, data=data, include=include,
        stdlib_dir=stdlib_dir, globals_dir=globals_dir,
        output_mode=_output_mode, timeout=timeout, two_pass=two_pass,
        pre_passes=pre_passes, output_objective=output_objective,
        non_unique=non_unique, all_solutions=all_solutions,
        num_solutions=num_solutions, free_search=free_search,
        parallel=parallel, seed=seed,
        allow_multiple_assignments=allow_multiple_assignments,
        **solver_args)
    if output_mode == 'raw':
        # raw mode: forward solver output into a queue without parsing
        solns = asyncio.Queue(maxsize=max_queue_size)
        task = asyncio.create_task(_collect(proc, solns))
    else:
        parser = AsyncSolutionParser(
            solver, output_mode=output_mode, rebase_arrays=rebase_arrays,
            types=types, keep_solutions=keep_solutions,
            return_enums=return_enums, max_queue_size=max_queue_size)
        solns = await parser.parse(proc)
        task = parser.parse_task
    if not keep:
        # remove generated files once the background task completes
        task.add_done_callback(partial(_cleanup_cb, [mzn_file, data_file]))
    return solns
Coroutine version of the pymzn . minizinc function .
687
14
241,427
def parse_value(val, var_type=None, enums=None, rebase_arrays=True):
    '''Parse the value of a dzn statement.'''
    if not var_type:
        # no declared type: try an array first, then a plain value
        arr = _parse_array(
            val, rebase_arrays=rebase_arrays, enums=enums, raise_errors=False)
        return arr if arr is not None else _parse_val(val, enums=enums)
    if 'dims' in var_type:
        return _parse_array(
            val, rebase_arrays=rebase_arrays, var_type=var_type, enums=enums)
    return _parse_val(val, var_type=var_type, enums=enums)
Parses the value of a dzn statement .
160
11
241,428
def dzn2dict(dzn, *, rebase_arrays=True, types=None, return_enums=False):
    '''Parse a dzn string (or .dzn file path) into a dictionary of
    variable assignments.

    `types` optionally maps variable names to type descriptions (string
    or dict); enum types are parsed first so other values can refer to
    them. With `return_enums`, the parsed enums are included in the
    result.
    '''
    # a '.dzn' extension means `dzn` is a path: read the file content
    dzn_ext = os.path.splitext(dzn)[1]
    if dzn_ext == '.dzn':
        with open(dzn) as f:
            dzn = f.read()
    var_types = None
    if types:
        # normalize type descriptions into dicts
        var_types = {}
        for var, var_type in types.items():
            if isinstance(var_type, str):
                var_types[var] = _to_var_type(var, var_type)
            elif isinstance(var_type, dict):
                var_types[var] = var_type
            else:
                err = 'Type of variable {} must be a string or a dict.'
                raise ValueError(err.format(var))
    enum_types = None
    if var_types:
        # variables whose enum_type is themselves are enum declarations
        enum_types = []
        for var, var_type in var_types.items():
            if 'enum_type' in var_type and var_type['enum_type'] == var:
                enum_types.append(var)
    var_list = []
    # strip comments, then split into assignment statements
    dzn = _comm_p.sub('\n', dzn)
    stmts = _stmt_p.findall(dzn)
    for stmt in stmts:
        var_m = _var_p.match(stmt)
        if var_m:
            var = var_m.group('var')
            val = var_m.group('val')
            var_list.append((var, val))
        else:
            raise ValueError(
                'Unsupported parsing for statement:\n{}'.format(repr(stmt)))
    enums = None
    if enum_types:
        # first pass: build IntEnum classes for declared enum types
        enums = {}
        remaining = []
        while len(var_list) > 0:
            var, val = var_list.pop(0)
            if var in enum_types:
                enum = None
                enum_m = _enum_p.match(val)
                if enum_m:
                    vals = enum_m.group('vals').strip()
                    if vals:
                        enum_vals = _parse_enum_vals(vals.split(','))
                        # enum members are numbered from 1
                        enum = IntEnum(
                            var, {v: i + 1 for i, v in enumerate(enum_vals)})
                if enum is None:
                    raise ValueError(
                        'Cannot parse enum type \'{} = {}\'.'.format(var, val))
                enums[var] = enum
            else:
                remaining.append((var, val))
        var_list = remaining
    # second pass: parse the remaining (non-enum) assignments
    assign = {}
    for var, val in var_list:
        var_type = None
        if var_types:
            var_type = var_types.get(var, None)
        assign[var] = parse_value(
            val, var_type=var_type, enums=enums, rebase_arrays=rebase_arrays)
    if return_enums and enums:
        assign.update(enums)
    return assign
Parses a dzn string or file into a dictionary of variable assignments .
662
16
241,429
def args(self, all_solutions=False, num_solutions=None, free_search=False,
         parallel=None, seed=None, **kwargs):
    '''Return the list of command line arguments for the given options.'''
    cli = ['-s', '-v']
    if all_solutions:
        cli.append('-a')
    if num_solutions is not None:
        cli.extend(['-n', num_solutions])
    if free_search:
        cli.append('-f')
    if parallel is not None:
        cli.extend(['-p', parallel])
    if seed is not None:
        cli.extend(['-r', seed])
    return cli
Returns a list of command line arguments for the specified options .
138
12
241,430
def debug(dbg=True):
    '''Enable or disable debugging messages on the standard output.'''
    global _debug_handler
    if dbg:
        if _debug_handler is not None:
            return  # already enabled
        _debug_handler = logging.StreamHandler()
        logger.addHandler(_debug_handler)
        logger.setLevel(logging.DEBUG)
    else:
        if _debug_handler is None:
            return  # already disabled
        logger.removeHandler(_debug_handler)
        _debug_handler = None
        logger.setLevel(logging.WARNING)
Enables or disables debugging messages on the standard output .
94
12
241,431
def minizinc_version():
    '''Return the version of the found minizinc executable.

    Raises:
        RuntimeError: if the executable is not found.
    '''
    vs = _run_minizinc('--version')
    # BUG FIX: raw string for the regex; '\d' in a plain literal is an
    # invalid escape sequence (DeprecationWarning, error in newer Pythons)
    m = re.findall(r'version ([\d\.]+)', vs)
    if not m:
        raise RuntimeError('MiniZinc executable not found.')
    return m[0]
Returns the version of the found minizinc executable .
64
11
241,432
def preprocess_model(model, rewrap=True, **kwargs):
    '''Preprocess a MiniZinc model: apply template arguments and
    optionally rewrap the model text.'''
    template_args = {**kwargs, **config.get('args', {})}
    processed = _process_template(model, **template_args)
    return rewrap_model(processed) if rewrap else processed
Preprocess a MiniZinc model .
71
8
241,433
def save_model(model, output_file=None, output_dir=None, output_prefix='pymzn'):
    '''Save a model to file and return the path of the written file.

    Writes to `output_file` when given, otherwise to a uniquely named
    .mzn file in `output_dir`.
    '''
    if output_file:
        mzn_file = output_file
        fobj = open(output_file, 'w+', buffering=1)
    else:
        fobj = NamedTemporaryFile(
            dir=output_dir, prefix=output_prefix + '_', suffix='.mzn',
            delete=False, mode='w+', buffering=1)
        mzn_file = fobj.name
    fobj.write(model)
    fobj.close()
    logger.info('Generated file {}'.format(mzn_file))
    return mzn_file
Save a model to file .
165
6
241,434
def check_instance(mzn, *dzn_files, data=None, include=None, stdlib_dir=None,
                   globals_dir=None, allow_multiple_assignments=False):
    '''Perform instance checking on a model + data.

    Raises:
        MiniZincError: if the checker reports anything on stderr.
    '''
    args = ['--instance-check-only']
    args += _flattening_args(
        mzn, *dzn_files, data=data, include=include, stdlib_dir=stdlib_dir,
        globals_dir=globals_dir,
        allow_multiple_assignments=allow_multiple_assignments)
    # a trailing '-' means the model is passed to minizinc via stdin
    mzn_input = mzn if args[-1] == '-' else None
    proc = _run_minizinc_proc(*args, input=mzn_input)
    if proc.stderr_data:
        raise MiniZincError(
            mzn if mzn_input is None else '\n' + mzn + '\n', args,
            proc.stderr_data)
Perform instance checking on a model + data .
199
10
241,435
def check_model(mzn, *, include=None, stdlib_dir=None, globals_dir=None):
    '''Perform model checking on a given model.

    Raises:
        MiniZincError: if the checker reports anything on stderr.
    '''
    args = ['--model-check-only']
    args += _flattening_args(
        mzn, include=include, stdlib_dir=stdlib_dir, globals_dir=globals_dir)
    # a trailing '-' means the model is passed to minizinc via stdin
    mzn_input = mzn if args[-1] == '-' else None
    proc = _run_minizinc_proc(*args, input=mzn_input)
    if proc.stderr_data:
        raise MiniZincError(
            mzn if mzn_input is None else '\n' + mzn + '\n', args,
            proc.stderr_data)
Perform model checking on a given model .
155
9
241,436
def minizinc ( mzn , * dzn_files , args = None , data = None , include = None , stdlib_dir = None , globals_dir = None , declare_enums = True , allow_multiple_assignments = False , keep = False , output_vars = None , output_base = None , output_mode = 'dict' , solver = None , timeout = None , two_pass = None , pre_passes = None , output_objective = False , non_unique = False , all_solutions = False , num_solutions = None , free_search = False , parallel = None , seed = None , rebase_arrays = True , keep_solutions = True , return_enums = False , * * kwargs ) : mzn_file , dzn_files , data_file , data , keep , _output_mode , types = _minizinc_preliminaries ( mzn , * dzn_files , args = args , data = data , include = include , stdlib_dir = stdlib_dir , globals_dir = globals_dir , output_vars = output_vars , keep = keep , output_base = output_base , output_mode = output_mode , declare_enums = declare_enums , allow_multiple_assignments = allow_multiple_assignments ) if not solver : solver = config . get ( 'solver' , gecode ) solver_args = { * * kwargs , * * config . get ( 'solver_args' , { } ) } proc = solve ( solver , mzn_file , * dzn_files , data = data , include = include , stdlib_dir = stdlib_dir , globals_dir = globals_dir , output_mode = _output_mode , timeout = timeout , two_pass = two_pass , pre_passes = pre_passes , output_objective = output_objective , non_unique = non_unique , all_solutions = all_solutions , num_solutions = num_solutions , free_search = free_search , parallel = parallel , seed = seed , allow_multiple_assignments = allow_multiple_assignments , * * solver_args ) if not keep : _cleanup ( [ mzn_file , data_file ] ) if output_mode == 'raw' : return proc . stdout_data parser = SolutionParser ( solver , output_mode = output_mode , rebase_arrays = rebase_arrays , types = types , keep_solutions = keep_solutions , return_enums = return_enums ) solns = parser . parse ( proc ) return solns
Implements the workflow for solving a CSP problem encoded with MiniZinc .
610
17
241,437
def solve ( solver , mzn , * dzn_files , data = None , include = None , stdlib_dir = None , globals_dir = None , allow_multiple_assignments = False , output_mode = 'item' , timeout = None , two_pass = None , pre_passes = None , output_objective = False , non_unique = False , all_solutions = False , num_solutions = None , free_search = False , parallel = None , seed = None , * * kwargs ) : args = _solve_args ( solver , timeout = timeout , two_pass = two_pass , pre_passes = pre_passes , output_objective = output_objective , non_unique = non_unique , all_solutions = all_solutions , num_solutions = num_solutions , free_search = free_search , parallel = parallel , seed = seed , * * kwargs ) args += _flattening_args ( mzn , * dzn_files , data = data , stdlib_dir = stdlib_dir , globals_dir = globals_dir , output_mode = output_mode , include = include , allow_multiple_assignments = allow_multiple_assignments ) input = mzn if args [ - 1 ] == '-' else None t0 = _time ( ) try : proc = _run_minizinc_proc ( * args , input = input ) except RuntimeError as err : raise MiniZincError ( mzn_file , args ) from err solve_time = _time ( ) - t0 logger . info ( 'Solving completed in {:>3.2f} sec' . format ( solve_time ) ) return proc
Flatten and solve a MiniZinc program .
385
10
241,438
def mzn2fzn ( mzn , * dzn_files , args = None , data = None , include = None , stdlib_dir = None , globals_dir = None , declare_enums = True , allow_multiple_assignments = False , keep = False , output_vars = None , output_base = None , output_mode = 'item' , no_ozn = False ) : mzn_file , dzn_files , data_file , data , keep , _output_mode , types = _minizinc_preliminaries ( mzn , * dzn_files , args = args , data = data , include = include , stdlib_dir = stdlib_dir , globals_dir = globals_dir , output_vars = output_vars , keep = keep , output_base = output_base , output_mode = output_mode , declare_enums = declare_enums , allow_multiple_assignments = allow_multiple_assignments ) args = [ '--compile' ] args += _flattening_args ( mzn_file , * dzn_files , data = data , stdlib_dir = stdlib_dir , globals_dir = globals_dir , output_mode = output_mode , include = include , no_ozn = no_ozn , output_base = output_base , allow_multiple_assignments = allow_multiple_assignments ) t0 = _time ( ) _run_minizinc ( * args ) flattening_time = _time ( ) - t0 logger . info ( 'Flattening completed in {:>3.2f} sec' . format ( flattening_time ) ) if not keep : with contextlib . suppress ( FileNotFoundError ) : if data_file : os . remove ( data_file ) logger . info ( 'Deleted file: {}' . format ( data_file ) ) if output_base : mzn_base = output_base else : mzn_base = os . path . splitext ( mzn_file ) [ 0 ] fzn_file = '.' . join ( [ mzn_base , 'fzn' ] ) fzn_file = fzn_file if os . path . isfile ( fzn_file ) else None ozn_file = '.' . join ( [ mzn_base , 'ozn' ] ) ozn_file = ozn_file if os . path . isfile ( ozn_file ) else None if fzn_file : logger . info ( 'Generated file: {}' . format ( fzn_file ) ) if ozn_file : logger . info ( 'Generated file: {}' . format ( ozn_file ) ) return fzn_file , ozn_file
Flatten a MiniZinc model into a FlatZinc one .
619
14
241,439
def print ( self , output_file = sys . stdout , log = False ) : for soln in iter ( self ) : print ( soln , file = output_file ) print ( SOLN_SEP , file = output_file ) if self . status == 0 : print ( SEARCH_COMPLETE , file = output_file ) if ( self . status == 1 and self . _n_solns == 0 ) or self . status >= 2 : print ( { Status . INCOMPLETE : ERROR , Status . UNKNOWN : UNKNOWN , Status . UNSATISFIABLE : UNSATISFIABLE , Status . UNBOUNDED : UNBOUNDED , Status . UNSATorUNBOUNDED : UNSATorUNBOUNDED , Status . ERROR : ERROR } [ self . status ] , file = output_file ) if self . stderr : print ( self . stderr . strip ( ) , file = sys . stderr ) elif log : print ( str ( self . log ) , file = output_file )
Print the solution stream
232
4
241,440
def dump ( self ) : try : import yaml cfg_file = self . _cfg_file ( ) cfg_dir , __ = os . path . split ( cfg_file ) os . makedirs ( cfg_dir , exist_ok = True ) with open ( cfg_file , 'w' ) as f : yaml . dump ( self , f ) except ImportError as err : raise RuntimeError ( 'Cannot dump the configuration settings to file. You need to ' 'install the necessary dependencies (pyyaml, appdirs).' ) from err
Writes the changes to the configuration file .
125
9
241,441
def discretize ( value , factor = 100 ) : if not isinstance ( value , Iterable ) : return int ( value * factor ) int_value = list ( deepcopy ( value ) ) for i in range ( len ( int_value ) ) : int_value [ i ] = int ( int_value [ i ] * factor ) return int_value
Discretize the given value pre - multiplying by the given factor
77
13
241,442
def from_string ( source , args = None ) : if _has_jinja : logger . info ( 'Precompiling model with arguments: {}' . format ( args ) ) return _jenv . from_string ( source ) . render ( args or { } ) if args : raise RuntimeError ( _except_text ) return source
Renders a template string
72
5
241,443
def add_package ( package_name , package_path = 'templates' , encoding = 'utf-8' ) : if not _has_jinja : raise RuntimeError ( _except_text ) _jload . add_loader ( PackageLoader ( package_name , package_path , encoding ) )
Adds the given package to the template search routine
66
9
241,444
def add_path ( searchpath , encoding = 'utf-8' , followlinks = False ) : if not _has_jinja : raise RuntimeError ( _except_text ) _jload . add_loader ( FileSystemLoader ( searchpath , encoding , followlinks ) )
Adds the given path to the template search routine
60
9
241,445
def val2dzn ( val , wrap = True ) : if _is_value ( val ) : dzn_val = _dzn_val ( val ) elif _is_set ( val ) : dzn_val = _dzn_set ( val ) elif _is_array_type ( val ) : dzn_val = _dzn_array_nd ( val ) else : raise TypeError ( 'Unsupported serialization of value: {}' . format ( repr ( val ) ) ) if wrap : wrapper = _get_wrapper ( ) dzn_val = wrapper . fill ( dzn_val ) return dzn_val
Serializes a value into its dzn representation .
140
10
241,446
def stmt2dzn ( name , val , declare = True , assign = True , wrap = True ) : if not ( declare or assign ) : raise ValueError ( 'The statement must be a declaration or an assignment.' ) stmt = [ ] if declare : val_type = _dzn_type ( val ) stmt . append ( '{}: ' . format ( val_type ) ) stmt . append ( name ) if assign : val_str = val2dzn ( val , wrap = wrap ) stmt . append ( ' = {}' . format ( val_str ) ) stmt . append ( ';' ) return '' . join ( stmt )
Returns a dzn statement declaring and assigning the given value .
144
12
241,447
def stmt2enum ( enum_type , declare = True , assign = True , wrap = True ) : if not ( declare or assign ) : raise ValueError ( 'The statement must be a declaration or an assignment.' ) stmt = [ ] if declare : stmt . append ( 'enum ' ) stmt . append ( enum_type . __name__ ) if assign : val_str = [ ] for v in list ( enum_type ) : val_str . append ( v . name ) val_str = '' . join ( [ '{' , ',' . join ( val_str ) , '}' ] ) if wrap : wrapper = _get_wrapper ( ) val_str = wrapper . fill ( val_str ) stmt . append ( ' = {}' . format ( val_str ) ) stmt . append ( ';' ) return '' . join ( stmt )
Returns a dzn enum declaration from an enum type .
191
11
241,448
def dict2dzn ( objs , declare = False , assign = True , declare_enums = True , wrap = True , fout = None ) : log = logging . getLogger ( __name__ ) vals = [ ] enums = set ( ) for key , val in objs . items ( ) : if _is_enum ( val ) and declare_enums : enum_type = type ( val ) enum_name = enum_type . __name__ if enum_name not in enums : enum_stmt = stmt2enum ( enum_type , declare = declare , assign = assign , wrap = wrap ) vals . append ( enum_stmt ) enums . add ( enum_name ) stmt = stmt2dzn ( key , val , declare = declare , assign = assign , wrap = wrap ) vals . append ( stmt ) if fout : log . debug ( 'Writing file: {}' . format ( fout ) ) with open ( fout , 'w' ) as f : for val in vals : f . write ( '{}\n\n' . format ( val ) ) return vals
Serializes the objects in input and produces a list of strings encoding them into dzn format . Optionally the produced dzn is written on a file .
249
31
241,449
def async_or_eager ( self , * * options ) : args = options . pop ( "args" , None ) kwargs = options . pop ( "kwargs" , None ) possible_broker_errors = self . _get_possible_broker_errors_tuple ( ) try : return self . apply_async ( args , kwargs , * * options ) except possible_broker_errors : return self . apply ( args , kwargs , * * options )
Attempt to call self . apply_async or if that fails because of a problem with the broker run the task eagerly and return an EagerResult .
109
31
241,450
def async_or_fail ( self , * * options ) : args = options . pop ( "args" , None ) kwargs = options . pop ( "kwargs" , None ) possible_broker_errors = self . _get_possible_broker_errors_tuple ( ) try : return self . apply_async ( args , kwargs , * * options ) except possible_broker_errors as e : return self . simulate_async_error ( e )
Attempt to call self . apply_async but if that fails with an exception we fake the task completion using the exception as the result . This allows us to seamlessly handle errors on task creation the same way we handle errors when a task runs simplifying the user interface .
107
54
241,451
def delay_or_eager ( self , * args , * * kwargs ) : return self . async_or_eager ( args = args , kwargs = kwargs )
Wrap async_or_eager with a convenience signiture like delay
42
15
241,452
def delay_or_run ( self , * args , * * kwargs ) : warnings . warn ( "delay_or_run is deprecated. Please use delay_or_eager" , DeprecationWarning , ) possible_broker_errors = self . _get_possible_broker_errors_tuple ( ) try : result = self . apply_async ( args = args , kwargs = kwargs ) required_fallback = False except possible_broker_errors : result = self ( ) . run ( * args , * * kwargs ) required_fallback = True return result , required_fallback
Attempt to call self . delay or if that fails call self . run .
139
15
241,453
def delay_or_fail ( self , * args , * * kwargs ) : return self . async_or_fail ( args = args , kwargs = kwargs )
Wrap async_or_fail with a convenience signiture like delay
40
14
241,454
def simulate_async_error ( self , exception ) : task_id = gen_unique_id ( ) async_result = self . AsyncResult ( task_id ) einfo = ExceptionInfo ( sys . exc_info ( ) ) async_result . backend . mark_as_failure ( task_id , exception , traceback = einfo . traceback , ) return async_result
Take this exception and store it as an error in the result backend . This unifies the handling of broker - connection errors with any other type of error that might occur when running the task . So the same error - handling that might retry a task or display a useful message to the user can also handle this error .
85
64
241,455
def calc_progress ( self , completed_count , total_count ) : self . logger . debug ( "calc_progress(%s, %s)" , completed_count , total_count , ) current_time = time . time ( ) time_spent = current_time - self . start_time self . logger . debug ( "Progress time spent: %s" , time_spent ) if total_count == 0 : return 100 , 1 completion_fraction = completed_count / total_count if completion_fraction == 0 : completion_fraction = 1 total_time = 0 total_time = time_spent / completion_fraction time_remaining = total_time - time_spent completion_display = completion_fraction * 100 if completion_display == 100 : return 100 , 1 # 1 second to finish up return completion_display , time_remaining
Calculate the percentage progress and estimated remaining time based on the current number of items completed of the total .
191
22
241,456
def update_progress ( self , completed_count , total_count , update_frequency = 1 , ) : if completed_count - self . _last_update_count < update_frequency : # We've updated the progress too recently. Don't stress out the # result backend return # Store progress for display progress_percent , time_remaining = self . calc_progress ( completed_count , total_count ) self . logger . debug ( "Updating progress: %s percent, %s remaining" , progress_percent , time_remaining ) if self . request . id : self . _last_update_count = completed_count self . update_state ( None , PROGRESS , { "progress_percent" : progress_percent , "time_remaining" : time_remaining , } )
Update the task backend with both an estimated percentage complete and number of seconds remaining until completion .
172
18
241,457
def _validate_required_class_vars ( self ) : required_members = ( 'significant_kwargs' , 'herd_avoidance_timeout' , ) for required_member in required_members : if not hasattr ( self , required_member ) : raise Exception ( "JobtasticTask's must define a %s" % required_member )
Ensure that this subclass has defined all of the required class variables .
79
14
241,458
def on_success ( self , retval , task_id , args , kwargs ) : if self . request . is_eager : # Store the result because celery wouldn't otherwise self . update_state ( task_id , SUCCESS , retval )
Store results in the backend even if we re always eager . This ensures the delay_or_run calls always at least have results .
58
27
241,459
def _get_cache ( self ) : if not self . _cache : self . _cache = get_cache ( self . app ) return self . _cache
Return the cache to use for thundering herd protection etc .
34
12
241,460
def _get_cache_key ( self , * * kwargs ) : m = md5 ( ) for significant_kwarg in self . significant_kwargs : key , to_str = significant_kwarg try : m . update ( to_str ( kwargs [ key ] ) ) except ( TypeError , UnicodeEncodeError ) : # Python 3.x strings aren't accepted by hash.update(). # String should be byte-encoded first. m . update ( to_str ( kwargs [ key ] ) . encode ( 'utf-8' ) ) if hasattr ( self , 'cache_prefix' ) : cache_prefix = self . cache_prefix else : cache_prefix = '%s.%s' % ( self . __module__ , self . __name__ ) return '%s:%s' % ( cache_prefix , m . hexdigest ( ) )
Take this task s configured significant_kwargs and build a hash that all equivalent task calls will match .
195
21
241,461
def get_cache ( app ) : jobtastic_cache_setting = app . conf . get ( 'JOBTASTIC_CACHE' ) if isinstance ( jobtastic_cache_setting , BaseCache ) : return jobtastic_cache_setting if 'Django' in CACHES : if jobtastic_cache_setting : try : return WrappedCache ( get_django_cache ( jobtastic_cache_setting ) ) except InvalidCacheBackendError : pass else : return WrappedCache ( get_django_cache ( 'default' ) ) if 'Werkzeug' in CACHES : if jobtastic_cache_setting : backend , url = get_backend_by_url ( jobtastic_cache_setting ) backend = backend ( app = app , url = url ) else : backend = app . backend if isinstance ( backend , CacheBackend ) : return WrappedCache ( MemcachedCache ( backend . client ) ) elif isinstance ( backend , RedisBackend ) : return WrappedCache ( RedisCache ( backend . client ) ) # Give up raise RuntimeError ( 'Cannot find a suitable cache for Jobtastic' )
Attempt to find a valid cache from the Celery configuration
262
11
241,462
def select ( * args ) : def select_columns ( df , args ) : columns = [ column . _name for column in args ] if df . _grouped_on : for col in df . _grouped_on [ : : - 1 ] : if col not in columns : columns . insert ( 0 , col ) return columns return lambda df : df [ select_columns ( df , args ) ]
Select specific columns from DataFrame .
88
7
241,463
def arrange ( * args ) : names = [ column . _name for column in args ] def f ( df ) : sortby_df = df >> mutate ( * args ) index = sortby_df . sort_values ( [ str ( arg ) for arg in args ] ) . index return df . loc [ index ] return f
Sort DataFrame by the input column arguments .
71
9
241,464
def rename ( * * kwargs ) : def rename_columns ( df ) : column_assignments = { old_name_later . _name : new_name for new_name , old_name_later in kwargs . items ( ) } return df . rename ( columns = column_assignments ) return rename_columns
Rename one or more columns leaving other columns unchanged
75
10
241,465
def transmute ( * args , * * kwargs ) : mutate_dateframe_fn = mutate ( * args , * * dict ( kwargs ) ) column_names_args = [ str ( arg ) for arg in args ] column_names_kwargs = [ name for name , _ in _dict_to_possibly_ordered_tuples ( kwargs ) ] column_names = column_names_args + column_names_kwargs return lambda df : mutate_dateframe_fn ( df ) [ column_names ]
Similar to select but allows mutation in column definitions .
119
10
241,466
def get_join_cols ( by_entry ) : left_cols = [ ] right_cols = [ ] for col in by_entry : if isinstance ( col , str ) : left_cols . append ( col ) right_cols . append ( col ) else : left_cols . append ( col [ 0 ] ) right_cols . append ( col [ 1 ] ) return left_cols , right_cols
helper function used for joins builds left and right join list for join function
97
15
241,467
def mutating_join ( * args , * * kwargs ) : # candidate for improvement left = args [ 0 ] right = args [ 1 ] if 'by' in kwargs : left_cols , right_cols = get_join_cols ( kwargs [ 'by' ] ) else : left_cols , right_cols = None , None if 'suffixes' in kwargs : dsuffixes = kwargs [ 'suffixes' ] else : dsuffixes = ( '_x' , '_y' ) if left . _grouped_on : outDf = ( DplyFrame ( ( left >> ungroup ( ) ) . merge ( right , how = kwargs [ 'how' ] , left_on = left_cols , right_on = right_cols , suffixes = dsuffixes ) ) . regroup ( left . _grouped_on ) ) else : outDf = DplyFrame ( left . merge ( right , how = kwargs [ 'how' ] , left_on = left_cols , right_on = right_cols , suffixes = dsuffixes ) ) return outDf
generic function for mutating dplyr - style joins
266
11
241,468
def _chart_support ( self , name , data , caller , * * kwargs ) : id = 'chart-%s' % next ( self . id ) name = self . _chart_class_name ( name ) options = dict ( self . environment . options ) options . update ( name = name , id = id ) # jinja2 prepends 'l_' or 'l_{{ n }}'(ver>=2.9) to keys if jinja2 . __version__ >= '2.9' : kwargs = dict ( ( k [ 4 : ] , v ) for ( k , v ) in kwargs . items ( ) ) else : kwargs = dict ( ( k [ 2 : ] , v ) for ( k , v ) in kwargs . items ( ) ) if self . _library is None : self . _library = self . load_library ( ) id = kwargs . get ( 'id' , '' ) library = self . _library . get ( id , { } ) # apply options from a tag library . update ( kwargs . get ( 'library' , { } ) ) # apply options from chartkick.json kwargs . update ( library = library ) options . update ( kwargs ) return CHART_HTML . format ( data = data , options = json . dumps ( kwargs ) , * * options )
template chart support function
303
4
241,469
def load_library ( self ) : try : filename = self . environment . get_template ( 'chartkick.json' ) . filename except TemplateNotFound : return { } else : options = Options ( ) options . load ( filename ) return options
loads configuration options
52
3
241,470
def js ( ) : return os . path . join ( os . path . dirname ( os . path . abspath ( __file__ ) ) , 'js' )
returns home directory of js
36
6
241,471
def parse_options ( source ) : options = { } tokens = [ t . strip ( ) for t in source . split ( '=' ) ] name = tokens [ 0 ] for token in tokens [ 1 : - 1 ] : value , next_name = token . rsplit ( ' ' , 1 ) options [ name . strip ( ) ] = value name = next_name options [ name . strip ( ) ] = tokens [ - 1 ] . strip ( ) return options
parses chart tag options
99
6
241,472
def copy ( self ) : return RigidTransform ( np . copy ( self . rotation ) , np . copy ( self . translation ) , self . from_frame , self . to_frame )
Returns a copy of the RigidTransform .
41
9
241,473
def _check_valid_rotation ( self , rotation ) : if not isinstance ( rotation , np . ndarray ) or not np . issubdtype ( rotation . dtype , np . number ) : raise ValueError ( 'Rotation must be specified as numeric numpy array' ) if len ( rotation . shape ) != 2 or rotation . shape [ 0 ] != 3 or rotation . shape [ 1 ] != 3 : raise ValueError ( 'Rotation must be specified as a 3x3 ndarray' ) if np . abs ( np . linalg . det ( rotation ) - 1.0 ) > 1e-3 : raise ValueError ( 'Illegal rotation. Must have determinant == 1.0' )
Checks that the given rotation matrix is valid .
156
10
241,474
def _check_valid_translation ( self , translation ) : if not isinstance ( translation , np . ndarray ) or not np . issubdtype ( translation . dtype , np . number ) : raise ValueError ( 'Translation must be specified as numeric numpy array' ) t = translation . squeeze ( ) if len ( t . shape ) != 1 or t . shape [ 0 ] != 3 : raise ValueError ( 'Translation must be specified as a 3-vector, 3x1 ndarray, or 1x3 ndarray' )
Checks that the translation vector is valid .
118
9
241,475
def interpolate_with ( self , other_tf , t ) : if t < 0 or t > 1 : raise ValueError ( 'Must interpolate between 0 and 1' ) interp_translation = ( 1.0 - t ) * self . translation + t * other_tf . translation interp_rotation = transformations . quaternion_slerp ( self . quaternion , other_tf . quaternion , t ) interp_tf = RigidTransform ( rotation = interp_rotation , translation = interp_translation , from_frame = self . from_frame , to_frame = self . to_frame ) return interp_tf
Interpolate with another rigid transformation .
143
8
241,476
def linear_trajectory_to ( self , target_tf , traj_len ) : if traj_len < 0 : raise ValueError ( 'Traj len must at least 0' ) delta_t = 1.0 / ( traj_len + 1 ) t = 0.0 traj = [ ] while t < 1.0 : traj . append ( self . interpolate_with ( target_tf , t ) ) t += delta_t traj . append ( target_tf ) return traj
Creates a trajectory of poses linearly interpolated from this tf to a target tf .
112
18
241,477
def apply ( self , points ) : if not isinstance ( points , BagOfPoints ) : raise ValueError ( 'Rigid transformations can only be applied to bags of points' ) if points . dim != 3 : raise ValueError ( 'Rigid transformations can only be applied to 3-dimensional points' ) if points . frame != self . _from_frame : raise ValueError ( 'Cannot transform points in frame %s with rigid transformation from frame %s to frame %s' % ( points . frame , self . _from_frame , self . _to_frame ) ) if isinstance ( points , BagOfVectors ) : # rotation only x = points . data x_tf = self . rotation . dot ( x ) else : # extract numpy data, homogenize, and transform x = points . data if len ( x . shape ) == 1 : x = x [ : , np . newaxis ] x_homog = np . r_ [ x , np . ones ( [ 1 , points . num_points ] ) ] x_homog_tf = self . matrix . dot ( x_homog ) x_tf = x_homog_tf [ 0 : 3 , : ] # output in BagOfPoints format if isinstance ( points , PointCloud ) : return PointCloud ( x_tf , frame = self . _to_frame ) elif isinstance ( points , Point ) : return Point ( x_tf , frame = self . _to_frame ) elif isinstance ( points , Direction ) : return Direction ( x_tf , frame = self . _to_frame ) elif isinstance ( points , NormalCloud ) : return NormalCloud ( x_tf , frame = self . _to_frame ) raise ValueError ( 'Type %s not yet supported' % ( type ( points ) ) )
Applies the rigid transformation to a set of 3D objects .
393
13
241,478
def dot ( self , other_tf ) : if other_tf . to_frame != self . from_frame : raise ValueError ( 'To frame of right hand side ({0}) must match from frame of left hand side ({1})' . format ( other_tf . to_frame , self . from_frame ) ) pose_tf = self . matrix . dot ( other_tf . matrix ) rotation , translation = RigidTransform . rotation_and_translation_from_matrix ( pose_tf ) if isinstance ( other_tf , SimilarityTransform ) : return SimilarityTransform ( self . rotation , self . translation , scale = 1.0 , from_frame = self . from_frame , to_frame = self . to_frame ) * other_tf return RigidTransform ( rotation , translation , from_frame = other_tf . from_frame , to_frame = self . to_frame )
Compose this rigid transform with another .
195
8
241,479
def inverse ( self ) : inv_rotation = self . rotation . T inv_translation = np . dot ( - self . rotation . T , self . translation ) return RigidTransform ( inv_rotation , inv_translation , from_frame = self . _to_frame , to_frame = self . _from_frame )
Take the inverse of the rigid transform .
71
8
241,480
def save ( self , filename ) : file_root , file_ext = os . path . splitext ( filename ) if file_ext . lower ( ) != TF_EXTENSION : raise ValueError ( 'Extension %s not supported for RigidTransform. Must be stored with extension %s' % ( file_ext , TF_EXTENSION ) ) f = open ( filename , 'w' ) f . write ( '%s\n' % ( self . _from_frame ) ) f . write ( '%s\n' % ( self . _to_frame ) ) f . write ( '%f %f %f\n' % ( self . _translation [ 0 ] , self . _translation [ 1 ] , self . _translation [ 2 ] ) ) f . write ( '%f %f %f\n' % ( self . _rotation [ 0 , 0 ] , self . _rotation [ 0 , 1 ] , self . _rotation [ 0 , 2 ] ) ) f . write ( '%f %f %f\n' % ( self . _rotation [ 1 , 0 ] , self . _rotation [ 1 , 1 ] , self . _rotation [ 1 , 2 ] ) ) f . write ( '%f %f %f\n' % ( self . _rotation [ 2 , 0 ] , self . _rotation [ 2 , 1 ] , self . _rotation [ 2 , 2 ] ) ) f . close ( )
Save the RigidTransform to a file .
324
9
241,481
def as_frames ( self , from_frame , to_frame = 'world' ) : return RigidTransform ( self . rotation , self . translation , from_frame , to_frame )
Return a shallow copy of this rigid transform with just the frames changed .
41
14
241,482
def rotation_from_quaternion ( q_wxyz ) : q_xyzw = np . array ( [ q_wxyz [ 1 ] , q_wxyz [ 2 ] , q_wxyz [ 3 ] , q_wxyz [ 0 ] ] ) R = transformations . quaternion_matrix ( q_xyzw ) [ : 3 , : 3 ] return R
Convert quaternion array to rotation matrix .
90
10
241,483
def quaternion_from_axis_angle ( v ) : theta = np . linalg . norm ( v ) if theta > 0 : v = v / np . linalg . norm ( v ) ax , ay , az = v qx = ax * np . sin ( 0.5 * theta ) qy = ay * np . sin ( 0.5 * theta ) qz = az * np . sin ( 0.5 * theta ) qw = np . cos ( 0.5 * theta ) q = np . array ( [ qw , qx , qy , qz ] ) return q
Convert axis - angle representation to a quaternion vector .
138
13
241,484
def transform_from_dual_quaternion ( dq , from_frame = 'unassigned' , to_frame = 'world' ) : quaternion = dq . qr translation = 2 * dq . qd [ 1 : ] return RigidTransform ( rotation = quaternion , translation = translation , from_frame = from_frame , to_frame = to_frame )
Create a RigidTransform from a DualQuaternion .
87
12
241,485
def rotation_and_translation_from_matrix ( matrix ) : if not isinstance ( matrix , np . ndarray ) or matrix . shape [ 0 ] != 4 or matrix . shape [ 1 ] != 4 : raise ValueError ( 'Matrix must be specified as a 4x4 ndarray' ) rotation = matrix [ : 3 , : 3 ] translation = matrix [ : 3 , 3 ] return rotation , translation
Helper to convert 4x4 matrix to rotation matrix and translation vector .
89
14
241,486
def rotation_from_axis_and_origin ( axis , origin , angle , to_frame = 'world' ) : axis_hat = np . array ( [ [ 0 , - axis [ 2 ] , axis [ 1 ] ] , [ axis [ 2 ] , 0 , - axis [ 0 ] ] , [ - axis [ 1 ] , axis [ 0 ] , 0 ] ] ) # Rodrigues Formula R = RigidTransform ( np . eye ( 3 ) + np . sin ( angle ) * axis_hat + ( 1 - np . cos ( angle ) ) * axis_hat . dot ( axis_hat ) , from_frame = to_frame , to_frame = to_frame ) return RigidTransform ( translation = origin , from_frame = to_frame , to_frame = to_frame ) . dot ( R ) . dot ( RigidTransform ( translation = - origin , from_frame = to_frame , to_frame = to_frame ) )
Returns a rotation matrix around some arbitrary axis about the point origin using Rodrigues Formula
206
16
241,487
def x_axis_rotation ( theta ) : R = np . array ( [ [ 1 , 0 , 0 , ] , [ 0 , np . cos ( theta ) , - np . sin ( theta ) ] , [ 0 , np . sin ( theta ) , np . cos ( theta ) ] ] ) return R
Generates a 3x3 rotation matrix for a rotation of angle theta about the x axis .
72
20
241,488
def y_axis_rotation ( theta ) : R = np . array ( [ [ np . cos ( theta ) , 0 , np . sin ( theta ) ] , [ 0 , 1 , 0 ] , [ - np . sin ( theta ) , 0 , np . cos ( theta ) ] ] ) return R
Generates a 3x3 rotation matrix for a rotation of angle theta about the y axis .
71
20
241,489
def z_axis_rotation ( theta ) : R = np . array ( [ [ np . cos ( theta ) , - np . sin ( theta ) , 0 ] , [ np . sin ( theta ) , np . cos ( theta ) , 0 ] , [ 0 , 0 , 1 ] ] ) return R
Generates a 3x3 rotation matrix for a rotation of angle theta about the z axis .
71
20
241,490
def random_rotation ( ) : rand_seed = np . random . rand ( 3 , 3 ) U , S , V = np . linalg . svd ( rand_seed ) return U
Generates a random 3x3 rotation matrix with SVD .
43
13
241,491
def rotation_from_axes ( x_axis , y_axis , z_axis ) : return np . hstack ( ( x_axis [ : , np . newaxis ] , y_axis [ : , np . newaxis ] , z_axis [ : , np . newaxis ] ) )
Convert specification of axis in target frame to a rotation matrix from source to target frame .
65
18
241,492
def interpolate ( T0 , T1 , t ) : if T0 . to_frame != T1 . to_frame : raise ValueError ( 'Cannot interpolate between 2 transforms with different to frames! Got T1 {0} and T2 {1}' . format ( T0 . to_frame , T1 . to_frame ) ) dq0 = T0 . dual_quaternion dq1 = T1 . dual_quaternion dqt = DualQuaternion . interpolate ( dq0 , dq1 , t ) from_frame = "{0}_{1}_{2}" . format ( T0 . from_frame , T1 . from_frame , t ) return RigidTransform . transform_from_dual_quaternion ( dqt , from_frame , T0 . to_frame )
Return an interpolation of two RigidTransforms .
183
11
241,493
def load(filename):
    """Load a RigidTransform from a text file.

    Expected file layout, one field per line: from_frame, to_frame, a
    whitespace-separated translation row, then three whitespace-separated
    rotation-matrix rows.

    Parameters
    ----------
    filename : :obj:`str`
        Path to the file; its extension must be TF_EXTENSION.

    Returns
    -------
    :obj:`RigidTransform`

    Raises
    ------
    ValueError
        If the file extension is not TF_EXTENSION.

    Notes
    -----
    Improvements over the previous version: the file is opened with a
    context manager so it is closed even if parsing raises, and the
    repetitive per-element parsing is replaced by token-wise parsing
    (which also tolerates a missing trailing newline on the last line,
    where the old `line[:-1]` chopped a digit).
    """
    file_root, file_ext = os.path.splitext(filename)
    if file_ext.lower() != TF_EXTENSION:
        raise ValueError('Extension %s not supported for RigidTransform. Can only load extension %s' % (file_ext, TF_EXTENSION))
    with open(filename, 'r') as f:
        lines = list(f)
    # str.strip via split() removes the trailing newline safely.
    from_frame = lines[0].rstrip('\n')
    to_frame = lines[1].rstrip('\n')
    t = np.array([float(tok) for tok in lines[2].split()[:3]])
    R = np.array([[float(tok) for tok in lines[3 + i].split()[:3]] for i in range(3)])
    return RigidTransform(rotation=R, translation=t, from_frame=from_frame, to_frame=to_frame)
Load a RigidTransform from a file .
459
9
241,494
def dot(self, other_tf):
    """Compose this similarity transform with another transform.

    Parameters
    ----------
    other_tf : :obj:`RigidTransform` or :obj:`SimilarityTransform`
        The transform to pre-compose; its `to_frame` must equal this
        transform's `from_frame`.

    Returns
    -------
    :obj:`SimilarityTransform`
        The composition, mapping `other_tf.from_frame` to `self.to_frame`.

    Raises
    ------
    ValueError
        If `other_tf` is not a RigidTransform (or subclass), or if the
        frames do not line up.
    """
    # Fix: validate the type BEFORE reading other_tf's attributes, so a
    # non-transform argument raises the intended ValueError rather than
    # an AttributeError on `.to_frame`.
    if not isinstance(other_tf, RigidTransform):
        raise ValueError('Can only compose with other RigidTransform classes')
    if other_tf.to_frame != self.from_frame:
        raise ValueError('To frame of right hand side ({0}) must match from frame of left hand side ({1})'.format(other_tf.to_frame, self.from_frame))
    # Plain rigid transforms carry an implicit scale of 1.
    other_scale = 1.0
    if isinstance(other_tf, SimilarityTransform):
        other_scale = other_tf.scale
    rotation = self.rotation.dot(other_tf.rotation)
    translation = self.translation + self.scale * self.rotation.dot(other_tf.translation)
    scale = self.scale * other_scale
    return SimilarityTransform(rotation, translation, scale, from_frame=other_tf.from_frame, to_frame=self.to_frame)
Compose this similarity transform with another .
206
10
241,495
def inverse(self):
    """Return the inverse of this similarity transform.

    The inverse of x -> s R x + t is x -> (1/s) R^-1 (x - t), with the
    from/to frames swapped.
    """
    rot_inv = np.linalg.inv(self.rotation)
    scale_inv = 1.0 / self.scale
    trans_inv = -scale_inv * rot_inv.dot(self.translation)
    return SimilarityTransform(rot_inv, trans_inv, scale_inv,
                               from_frame=self._to_frame,
                               to_frame=self._from_frame)
Take the inverse of the similarity transform .
91
8
241,496
def save(self, filename):
    """Save the collection's raw data array to `filename`.

    Only '.npy' (uncompressed) and '.npz' (compressed, stored under the
    default key 'arr_0') extensions are supported.

    Raises
    ------
    ValueError
        If the file extension is neither '.npy' nor '.npz'.
    """
    _, file_ext = os.path.splitext(filename)
    if file_ext == '.npy':
        np.save(filename, self._data)
        return
    if file_ext == '.npz':
        np.savez_compressed(filename, self._data)
        return
    raise ValueError('Extension %s not supported for point saves.' % (file_ext))
Saves the collection to a file .
97
8
241,497
def load_data(filename):
    """Load a raw data array previously written by `save`.

    '.npy' files load directly; '.npz' archives are read from the default
    key 'arr_0'.

    Raises
    ------
    ValueError
        If the file extension is neither '.npy' nor '.npz'.
    """
    _, file_ext = os.path.splitext(filename)
    if file_ext == '.npy':
        return np.load(filename)
    if file_ext == '.npz':
        return np.load(filename)['arr_0']
    raise ValueError('Extension %s not supported for point reads' % (file_ext))
Loads data from a file .
99
7
241,498
def open(filename, frame='unspecified'):
    """Create a Point from data saved in a file.

    Parameters
    ----------
    filename : :obj:`str`
        Path to a .npy/.npz data file readable by BagOfPoints.load_data.
    frame : :obj:`str`
        Reference frame to tag the new Point with.

    Returns
    -------
    :obj:`Point`
    """
    loaded = BagOfPoints.load_data(filename)
    return Point(loaded, frame)
Create a Point from data saved in a file .
32
10
241,499
def _check_valid_data ( self , data ) : if len ( data . shape ) == 2 and data . shape [ 1 ] != 1 : raise ValueError ( 'Can only initialize Direction from a single Nx1 array' ) if np . abs ( np . linalg . norm ( data ) - 1.0 ) > 1e-4 : raise ValueError ( 'Direction data must have norm=1.0' )
Checks that the incoming data is an Nx1 ndarray with unit norm .
93
15