idx
int64
0
251k
question
stringlengths
53
3.53k
target
stringlengths
5
1.23k
len_question
int64
20
893
len_target
int64
3
238
21,300
def get_trainer(name):
    """Return the unique id for a trainer, derived from the md5 sum of the name.

    The name is lowercased first, so lookup is case-insensitive; the id is
    the digest reduced to 8 decimal digits.
    """
    normalized = name.lower()
    digest = hashlib.md5(normalized.encode('utf-8')).hexdigest()
    return int(digest, 16) % 10 ** 8
return the unique id for a trainer determined by the md5 sum
49
13
21,301
def scale_image(image, new_width):
    """Resize an image preserving the aspect ratio.

    The output is stretched to twice the computed width because text
    characters render taller than they are wide.
    """
    original_width, original_height = image.size
    aspect = original_height / float(original_width)
    new_height = int(aspect * new_width)
    # This scales it wider than tall, since characters are biased.
    return image.resize((new_width * 2, new_height))
Resizes an image preserving the aspect ratio .
91
9
21,302
def map_pixels_to_ascii_chars(image, range_width=25):
    """Map each pixel to an ascii char based on the range in which it lies.

    Pixel values 0-255 are divided into buckets of ``range_width`` values
    each; each bucket selects one character from ASCII_CHARS.

    Bug fix: the original used ``/``, which produces a float on Python 3 and
    raises TypeError when used as a list index; integer division ``//`` is
    required.
    """
    pixels_in_image = list(image.getdata())
    pixels_to_chars = [ASCII_CHARS[pixel_value // range_width]
                       for pixel_value in pixels_in_image]
    return "".join(pixels_to_chars)
Maps each pixel to an ascii char based on the range in which it lies .
83
18
21,303
def load_steps(working_dir=None, steps_dir=None, step_file=None, step_list=None):
    # Return a dictionary mapping step name -> Step, read from .cwl files.
    # Sources are checked in precedence order: steps_dir, step_file,
    # step_list; with none given the result is empty.
    if steps_dir is not None:
        step_files = glob.glob(os.path.join(steps_dir, '*.cwl'))
    elif step_file is not None:
        step_files = [step_file]
    elif step_list is not None:
        step_files = []
        for path in step_list:
            if os.path.isdir(path):
                # Directories contribute every .cwl file they contain.
                step_files += glob.glob(os.path.join(path, '*.cwl'))
            else:
                step_files.append(path)
    else:
        step_files = []
    if working_dir is not None:
        # Load tools before (sub)workflows so dependencies resolve first.
        step_files = sort_loading_order(step_files)
    steps = {}
    for f in step_files:
        if working_dir is not None:
            # Copy file to working_dir
            if not working_dir == os.path.dirname(f) and not is_url(f):
                copied_file = os.path.join(working_dir, os.path.basename(f))
                shutil.copy2(f, copied_file)
                f = copied_file
        # Create steps; files that fail cwltool validation are skipped
        # with a warning rather than aborting the whole load.
        try:
            s = Step(f)
            steps[s.name] = s
        except (NotImplementedError, ValidationException,
                PackedWorkflowException) as e:
            logger.warning(e)
    return steps
Return a dictionary containing Steps read from file .
316
9
21,304
def load_yaml(filename):
    """Return the object parsed from the given yaml file.

    On Windows, backslashes in the content are normalized to forward
    slashes so embedded file paths remain usable.

    Bug fix: the original check was ``"win" in sys.platform``, which also
    matches macOS (``sys.platform == "darwin"``) and would corrupt file
    contents there; ``startswith('win')`` matches only Windows platforms.
    """
    with open(filename) as myfile:
        content = myfile.read()
    if sys.platform.startswith('win'):
        content = content.replace("\\", "/")
    return yaml.safe_load(content)
Return object in yaml file .
58
7
21,305
def sort_loading_order(step_files):
    """Sort step files into correct loading order.

    The order is: command line tools first, then workflows without
    subworkflows, then workflows containing subworkflows.
    """
    tools = []
    workflows = []
    workflows_with_subworkflows = []
    subworkflow_req = {'class': 'SubworkflowFeatureRequirement'}
    for f in step_files:
        # assume that urls are tools
        if f.startswith(('http://', 'https://')):
            tools.append(f)
            continue
        obj = load_yaml(f)
        if obj.get('class', '') != 'Workflow':
            tools.append(f)
        elif 'requirements' in obj.keys() and subworkflow_req in obj['requirements']:
            workflows_with_subworkflows.append(f)
        else:
            workflows.append(f)
    return tools + workflows + workflows_with_subworkflows
Sort step files into correct loading order .
199
8
21,306
def load_cwl ( fname ) : logger . debug ( 'Loading CWL file "{}"' . format ( fname ) ) # Fetching, preprocessing and validating cwl # Older versions of cwltool if legacy_cwltool : try : ( document_loader , workflowobj , uri ) = fetch_document ( fname ) ( document_loader , _ , processobj , metadata , uri ) = validate_document ( document_loader , workflowobj , uri ) except TypeError : from cwltool . context import LoadingContext , getdefault from cwltool import workflow from cwltool . resolver import tool_resolver from cwltool . load_tool import resolve_tool_uri loadingContext = LoadingContext ( ) loadingContext . construct_tool_object = getdefault ( loadingContext . construct_tool_object , workflow . default_make_tool ) loadingContext . resolver = getdefault ( loadingContext . resolver , tool_resolver ) uri , tool_file_uri = resolve_tool_uri ( fname , resolver = loadingContext . resolver , fetcher_constructor = loadingContext . fetcher_constructor ) document_loader , workflowobj , uri = fetch_document ( uri , resolver = loadingContext . resolver , fetcher_constructor = loadingContext . fetcher_constructor ) document_loader , avsc_names , processobj , metadata , uri = validate_document ( document_loader , workflowobj , uri , loadingContext . overrides_list , { } , enable_dev = loadingContext . enable_dev , strict = loadingContext . strict , preprocess_only = False , fetcher_constructor = loadingContext . fetcher_constructor , skip_schemas = False , do_validate = loadingContext . do_validate ) # Recent versions of cwltool else : ( loading_context , workflowobj , uri ) = fetch_document ( fname ) loading_context , uri = resolve_and_validate_document ( loading_context , workflowobj , uri ) document_loader = loading_context . loader processobj = workflowobj metadata = loading_context . metadata return document_loader , processobj , metadata , uri
Load and validate CWL file using cwltool
490
11
21,307
def set_input(self, p_name, value):
    """Set a Step's input variable to a certain value.

    p_name is the public (CWL) input name; it is translated to the
    python-safe name before being stored in step_inputs.
    """
    name = self.python_names.get(p_name)
    valid = p_name is not None and name in self.get_input_names()
    if not valid:
        raise ValueError('Invalid input "{}"'.format(p_name))
    self.step_inputs[name] = value
Set a Step's input variable to a certain value.
77
11
21,308
def output_reference(self, name):
    """Return a reference to the given output, for use in an input of a
    next Step."""
    if name in self.output_names:
        return Reference(step_name=self.name_in_workflow, output_name=name)
    raise ValueError('Invalid output "{}"'.format(name))
Return a reference to the given output for use in an input of a next Step .
58
17
21,309
def _input_optional ( inp ) : if 'default' in inp . keys ( ) : return True typ = inp . get ( 'type' ) if isinstance ( typ , six . string_types ) : return typ . endswith ( '?' ) elif isinstance ( typ , dict ) : # TODO: handle case where iput type is dict return False elif isinstance ( typ , list ) : # The cwltool validation expands optional arguments to # [u'null', <type>] return bool ( u'null' in typ ) else : raise ValueError ( 'Invalid input "{}"' . format ( inp . get [ 'id' ] ) )
Returns True if a step input parameter is optional .
148
10
21,310
def to_obj(self, wd=False, pack=False, relpath=None):
    """Return the step as a dict that can be written to a yaml file."""
    obj = CommentedMap()
    # Decide how the 'run' field should point at the step definition.
    if pack:
        run = self.orig
    elif relpath is not None:
        run = self.run if self.from_url else os.path.relpath(self.run, relpath)
    elif wd:
        run = self.run if self.from_url else os.path.basename(self.run)
    else:
        run = self.run
    obj['run'] = run
    obj['in'] = self.step_inputs
    obj['out'] = self.output_names
    if self.is_scattered:
        obj['scatter'] = self.scattered_inputs
        # scatter_method is optional when scattering over a single variable
        if self.scatter_method is not None:
            obj['scatterMethod'] = self.scatter_method
    return obj
Return the step as a dict that can be written to a yaml file.
229
16
21,311
def list_inputs(self):
    """Return a string listing all the Step's input names and their types."""
    def fmt(typ):
        # Quote plain string type names; leave complex types untouched.
        return "'{}'".format(typ) if isinstance(typ, six.string_types) else typ
    return '\n'.join('{}: {}'.format(inp, fmt(typ))
                     for inp, typ in self.input_types.items())
Return a string listing all the Step s input names and their types .
81
14
21,312
def load(self, steps_dir=None, step_file=None, step_list=None):
    """Load CWL steps into the WorkflowGenerator's steps library."""
    self._closed()
    # Delegate to the steps library, forwarding all source selectors.
    self.steps_library.load(steps_dir=steps_dir,
                            step_file=step_file,
                            step_list=step_list)
Load CWL steps into the WorkflowGenerator s steps library .
62
14
21,313
def _has_requirements ( self ) : self . _closed ( ) return any ( [ self . has_workflow_step , self . has_scatter_requirement , self . has_multiple_inputs ] )
Returns True if the workflow needs a requirements section .
49
10
21,314
def inputs(self, name):
    """List input names and types of a step in the steps library."""
    self._closed()
    return self._get_step(name, make_copy=False).list_inputs()
List input names and types of a step in the steps library .
40
13
21,315
def _add_step ( self , step ) : self . _closed ( ) self . has_workflow_step = self . has_workflow_step or step . is_workflow self . wf_steps [ step . name_in_workflow ] = step
Add a step to the workflow .
59
7
21,316
def add_input(self, **kwargs):
    # Add a workflow input.
    # Expects exactly one name=type pair in kwargs, plus the optional
    # keywords 'default', 'label' and (for enum types) 'symbols'.
    # Returns a Reference to the new input.
    self._closed()

    def _get_item(args):
        """Get a single item from args."""
        if not args:
            raise ValueError("No parameter specified.")
        item = args.popitem()
        if args:
            raise ValueError("Too many parameters, not clear what to do "
                             "with {}".format(kwargs))
        return item

    symbols = None
    input_dict = CommentedMap()
    # Pop the optional keywords first, so only the name=type pair remains.
    if 'default' in kwargs:
        input_dict['default'] = kwargs.pop('default')
    if 'label' in kwargs:
        input_dict['label'] = kwargs.pop('label')
    if 'symbols' in kwargs:
        symbols = kwargs.pop('symbols')
    name, input_type = _get_item(kwargs)
    if input_type == 'enum':
        typ = CommentedMap()
        typ['type'] = 'enum'
        # make sure symbols is set
        if symbols is None:
            raise ValueError("Please specify the enum's symbols.")
        # make sure symbols is not empty
        if symbols == []:
            raise ValueError("The enum's symbols cannot be empty.")
        # make sure the symbols are a list
        if type(symbols) != list:
            raise ValueError('Symbols should be a list.')
        # make sure symbols is a list of strings
        symbols = [str(s) for s in symbols]
        typ['symbols'] = symbols
        input_dict['type'] = typ
    else:
        # Set the 'type' if we can't use simple notation (because there is
        # a default value or a label)
        if bool(input_dict):
            input_dict['type'] = input_type
    msg = '"{}" is already used as a workflow input. Please use a ' + \
          'different name.'
    if name in self.wf_inputs:
        raise ValueError(msg.format(name))
    # Add 'type' for complex input types, so the user doesn't have to do it
    if isinstance(input_type, dict):
        input_dict['type'] = input_type
    # Make sure we can use the notation without 'type' if the input allows
    # it.
    if bool(input_dict):
        self.wf_inputs[name] = input_dict
    else:
        self.wf_inputs[name] = input_type
    return Reference(input_name=name)
Add workflow input .
548
4
21,317
def add_outputs(self, **kwargs):
    """Add workflow outputs.

    Each keyword maps an output name to the output source it exposes;
    the output type is looked up from that source.
    """
    self._closed()
    for name, source_name in kwargs.items():
        self.wf_outputs[name] = {
            'outputSource': source_name,
            'type': self.step_output_types[source_name],
        }
Add workflow outputs .
82
4
21,318
def _get_step ( self , name , make_copy = True ) : self . _closed ( ) s = self . steps_library . get_step ( name ) if s is None : msg = '"{}" not found in steps library. Please check your ' 'spelling or load additional steps' raise ValueError ( msg . format ( name ) ) if make_copy : s = copy . deepcopy ( s ) return s
Return step from steps library .
93
6
21,319
def to_obj(self, wd=False, pack=False, relpath=None):
    # Return the created workflow as a dict (CWL document structure).
    self._closed()
    obj = CommentedMap()
    obj['cwlVersion'] = 'v1.0'
    obj['class'] = 'Workflow'
    try:
        obj['doc'] = self.documentation
    except (AttributeError, ValueError):
        pass  # documentation is optional
    try:
        obj['label'] = self.label
    except (AttributeError, ValueError):
        pass  # label is optional
    if self._has_requirements():
        obj['requirements'] = []
        if self.has_workflow_step:
            obj['requirements'].append(
                {'class': 'SubworkflowFeatureRequirement'})
        if self.has_scatter_requirement:
            obj['requirements'].append(
                {'class': 'ScatterFeatureRequirement'})
        if self.has_multiple_inputs:
            obj['requirements'].append(
                {'class': 'MultipleInputFeatureRequirement'})
    obj['inputs'] = self.wf_inputs
    obj['outputs'] = self.wf_outputs
    steps_obj = CommentedMap()
    for key in self.wf_steps:
        # Serialize each step using the requested save mode.
        steps_obj[key] = self.wf_steps[key].to_obj(relpath=relpath,
                                                   pack=pack,
                                                   wd=wd)
    obj['steps'] = steps_obj
    return obj
Return the created workflow as a dict .
330
8
21,320
def to_script(self, wf_name='wf'):
    # Generate and return the scriptcwl script for the current workflow.
    self._closed()
    script = []
    # Workflow documentation
    # if self.documentation:
    #     if is_multiline(self.documentation):
    #         print('doc = """')
    #         print(self.documentation)
    #         print('"""')
    #         print('{}.set_documentation(doc)'.format(wf_name))
    #     else:
    #         print('{}.set_documentation(\'{}\')'.format(wf_name,
    #               self.documentation))
    # Workflow inputs
    params = []
    returns = []
    for name, typ in self.wf_inputs.items():
        params.append('{}=\'{}\''.format(name, typ))
        returns.append(name)
    script.append('{} = {}.add_inputs({})'.format(
        ', '.join(returns), wf_name, ', '.join(params)))
    # Workflow steps
    returns = []
    for name, step in self.wf_steps.items():
        pyname = step.python_name
        # One generated variable per step output.
        returns = ['{}_{}'.format(pyname, o) for o in step['out']]
        params = ['{}={}'.format(name, python_name(param))
                  for name, param in step['in'].items()]
        script.append('{} = {}.{}({})'.format(
            ', '.join(returns), wf_name, pyname, ', '.join(params)))
    # Workflow outputs
    params = []
    for name, details in self.wf_outputs.items():
        params.append('{}={}'.format(
            name, python_name(details['outputSource'])))
    script.append('{}.add_outputs({})'.format(wf_name, ', '.join(params)))
    return '\n'.join(script)
Generate and print the scriptcwl script for the current workflow.
457
16
21,321
def _types_match(type1, type2):
    """Return False only if it can be shown that no value of type1 can
    possibly match type2.

    Non-string (complex) types are conservatively assumed to match.
    """
    both_strings = (isinstance(type1, six.string_types)
                    and isinstance(type2, six.string_types))
    if both_strings:
        # Strip the optional marker before comparing base type names.
        if type1.rstrip('?') != type2.rstrip('?'):
            return False
    return True
Returns False only if it can show that no value of type1 can possibly match type2 .
75
19
21,322
def validate(self):
    """Validate the workflow object by saving it to a temporary file and
    re-loading it through cwltool."""
    # define tmpfile
    handle, tmpfile = tempfile.mkstemp()
    os.close(handle)
    try:
        # Save with validate=False to avoid recursing back into validate().
        self.save(tmpfile, mode='abs', validate=False)
        # Loading runs cwltool's fetch/validate machinery.
        document_loader, processobj, metadata, uri = load_cwl(tmpfile)
    finally:
        # cleanup tmpfile
        os.remove(tmpfile)
Validate workflow object .
108
5
21,323
def save(self, fname, mode=None, validate=True, encoding='utf-8',
         wd=False, inline=False, relative=False, pack=False):
    # Save the workflow to file, in one of several path-rewriting modes:
    # 'rel' (relative run paths), 'abs' (absolute), 'wd' (working dir),
    # 'pack' (single packed document); 'inline' is deprecated -> 'pack'.
    self._closed()
    if mode is None:
        # Legacy boolean flags (pack/wd/relative) are mapped onto the
        # newer 'mode' argument, with a deprecation warning.
        mode = 'abs'
        if pack:
            mode = 'pack'
        elif wd:
            mode = 'wd'
        elif relative:
            mode = 'rel'
        msg = 'Using deprecated save method. Please save the workflow ' \
              'with: wf.save(\'{}\', mode=\'{}\'). Redirecting to new ' \
              'save method.'.format(fname, mode)
        warnings.warn(msg, DeprecationWarning)
    modes = ('rel', 'abs', 'wd', 'inline', 'pack')
    if mode not in modes:
        msg = 'Illegal mode "{}". Choose one of ({}).'.format(
            mode, ','.join(modes))
        raise ValueError(msg)
    if validate:
        self.validate()
    dirname = os.path.dirname(os.path.abspath(fname))
    if not os.path.exists(dirname):
        os.makedirs(dirname)
    if mode == 'inline':
        msg = ('Inline saving is deprecated. Please save the workflow '
               'using mode=\'pack\'. Setting mode to pack.')
        warnings.warn(msg, DeprecationWarning)
        mode = 'pack'
    if mode == 'rel':
        relpath = dirname
        save_yaml(fname=fname, wf=self, pack=False, relpath=relpath,
                  wd=False)
    if mode == 'abs':
        save_yaml(fname=fname, wf=self, pack=False, relpath=None,
                  wd=False)
    if mode == 'pack':
        self._pack(fname, encoding)
    if mode == 'wd':
        if self.get_working_dir() is None:
            raise ValueError('Working directory not set.')
        else:
            # save in working_dir
            bn = os.path.basename(fname)
            wd_file = os.path.join(self.working_dir, bn)
            save_yaml(fname=wd_file, wf=self, pack=False, relpath=None,
                      wd=True)
            # and copy workflow file to other location (as though all steps
            # are in the same directory as the workflow)
            try:
                shutil.copy2(wd_file, fname)
            except shutil.Error:
                # Source and destination are the same file; nothing to copy.
                pass
Save the workflow to file .
558
6
21,324
def str_presenter(dmpr, data):
    """yaml representer that writes multi-line strings with block (|)
    style and everything else with the default scalar style."""
    if not is_multiline(data):
        return dmpr.represent_scalar('tag:yaml.org,2002:str', data)
    return dmpr.represent_scalar('tag:yaml.org,2002:str', data, style='|')
Return correct str_presenter to write multiple lines to a yaml field .
81
16
21,325
def build_grad_matrices(V, points):
    # Build the sparse m-by-n matrices that map a coefficient set for a
    # function in V to the values of dx and dy at a number m of points.
    # See <https://www.allanswered.com/post/lkbkm/#zxqgk>
    mesh = V.mesh()
    bbt = BoundingBoxTree()
    bbt.build(mesh)
    dofmap = V.dofmap()
    el = V.element()
    rows = []
    cols = []
    datax = []
    datay = []
    for i, xy in enumerate(points):
        # Locate the mesh cell containing this evaluation point.
        cell_id = bbt.compute_first_entity_collision(Point(*xy))
        cell = Cell(mesh, cell_id)
        coordinate_dofs = cell.get_vertex_coordinates()
        rows.append([i, i, i])
        cols.append(dofmap.cell_dofs(cell_id))
        # First derivatives of all basis functions at xy.
        # NOTE(review): reshape(3, 2) assumes P1 elements on triangles
        # (3 dofs, 2 spatial dims) — confirm for other element types.
        v = el.evaluate_basis_derivatives_all(1, xy, coordinate_dofs,
                                              cell_id)
        v = v.reshape(3, 2)
        datax.append(v[:, 0])
        datay.append(v[:, 1])
    rows = numpy.concatenate(rows)
    cols = numpy.concatenate(cols)
    datax = numpy.concatenate(datax)
    datay = numpy.concatenate(datay)
    m = len(points)
    n = V.dim()
    dx_matrix = sparse.csr_matrix((datax, (rows, cols)), shape=(m, n))
    dy_matrix = sparse.csr_matrix((datay, (rows, cols)), shape=(m, n))
    return dx_matrix, dy_matrix
Build the sparse m - by - n matrices that map a coefficient set for a function in V to the values of dx and dy at a number m of points .
394
34
21,326
def apply_M(self, ax, ay):
    """Linear operator that converts (ax, ay) to the (a, b, c, d)
    parametrization used for the 2x2 singular-value computation."""
    jac = numpy.array([
        [self.dx.dot(ax), self.dy.dot(ax)],
        [self.dx.dot(ay), self.dy.dot(ay)],
    ])
    # jac and self.J have shape (2, 2, k); contract the middle index to
    # get the k 2x2 products M = jac . J in one einsum call.
    M = numpy.einsum("ijl,jkl->ikl", jac, self.J)
    # Singular values of the 2x2 blocks follow from these four quantities,
    # cf. <https://scicomp.stackexchange.com/a/14103/3980>; this is faster
    # and more explicit than calling numpy.linalg.svd per block.
    a = (M[0, 0] + M[1, 1]) / 2
    b = (M[0, 0] - M[1, 1]) / 2
    c = (M[1, 0] + M[0, 1]) / 2
    d = (M[1, 0] - M[0, 1]) / 2
    return a, b, c, d
Linear operator that converts ax ay to abcd .
454
11
21,327
def cost_min2(self, alpha):
    """Residual formulation; the Hessian is a low-rank update of the
    identity.

    alpha stacks the coefficient vectors: alpha = [ax, ay].
    Returns the summed squared-residual cost.

    Cleanup: large blocks of commented-out solver experiments (pyamg /
    spsolve) were removed; they were dead code.
    """
    n = self.V.dim()
    ax = alpha[:n]
    ay = alpha[n:]
    q2, r2 = self.get_q2_r2(ax, ay)
    Lax = self.L * ax
    Lay = self.L * ay
    # Four residual contributions: smoothness in x and y, the q^2 == 1
    # constraint, and the r^2 == 0 constraint.
    out = [
        0.5 * numpy.dot(Lax, Lax),
        0.5 * numpy.dot(Lay, Lay),
        0.5 * numpy.dot(q2 - 1, q2 - 1),
        0.5 * numpy.dot(r2, r2),
    ]
    # Progress output, throttled to every 10000th evaluation.
    if self.num_f_eval % 10000 == 0:
        print("{:7d} {:e} {:e} {:e} {:e}".format(self.num_f_eval, *out))
    self.num_f_eval += 1
    return numpy.sum(out)
Residual formulation Hessian is a low - rank update of the identity .
382
16
21,328
def delta(a, b):
    """Compute the distances between two colors or color sets.

    The shapes of a and b must be equal; the first axis is the color
    component axis, summed over as squared differences.
    """
    d = a - b
    return numpy.einsum("i...,i...->...", d, d)
Computes the distances between two colors or color sets . The shape of a and b must be equal .
34
21
21,329
def plot_flat_gamut(
    xy_to_2d=lambda xy: xy,
    axes_labels=("x", "y"),
    plot_rgb_triangle=True,
    fill_horseshoe=True,
    plot_planckian_locus=True,
):
    """Show a flat color gamut, by default xy.

    The monochromatic horseshoe is always drawn; the RGB triangle and the
    Planckian locus are optional overlays. An arbitrary 2d projection of
    the xy coordinates can be supplied via xy_to_2d.
    """
    observer = observers.cie_1931_2()
    # observer = observers.cie_1964_10()
    _plot_monochromatic(observer, xy_to_2d, fill_horseshoe=fill_horseshoe)
    if plot_rgb_triangle:
        _plot_rgb_triangle(xy_to_2d)
    if plot_planckian_locus:
        _plot_planckian_locus(observer, xy_to_2d)
    plt.gca().set_aspect("equal")
    plt.xlabel(axes_labels[0])
    plt.ylabel(axes_labels[1])
    return
Show a flat color gamut by default xy . There exists a chroma gamut for all color models which transform lines in XYZ to lines and hence have a natural decomposition into lightness and chroma components . Also the flat gamut is the same for every lightness value . Examples for color models with this property are CIELUV and IPT examples for color models without are CIELAB and CIECAM02 .
242
88
21,330
def _get_xy_tree ( xy , degree ) : x , y = xy tree = [ numpy . array ( [ numpy . ones ( x . shape , dtype = int ) ] ) ] for d in range ( degree ) : tree . append ( numpy . concatenate ( [ tree [ - 1 ] * x , [ tree [ - 1 ] [ - 1 ] * y ] ] ) ) return tree
Evaluates the entire tree of 2d mononomials .
92
13
21,331
def spectrum_to_xyz100(spectrum, observer):
    """Compute the tristimulus values XYZ from a given spectrum for a
    given observer via midpoint-rule integration over the union of the
    two wavelength grids."""
    lambda_o, data_o = observer
    lambda_s, data_s = spectrum
    # Form the union of the two wavelength grids.
    lmbda = numpy.sort(numpy.unique(numpy.concatenate([lambda_o, lambda_s])))
    # The technical document prescribes that the integration cover the
    # entire visible spectrum, 360 nm to 830 nm.
    assert lmbda[0] < 361e-9
    assert lmbda[-1] > 829e-9
    # Linearly interpolate both data sets onto the common grid. (The
    # technical report suggests higher-order interpolation schemes, but we
    # only use the midpoint rule for integration anyway.)
    idata_o = numpy.array([numpy.interp(lmbda, lambda_o, dt)
                           for dt in data_o])
    idata_s = numpy.interp(lmbda, lambda_s, data_s)
    # Midpoint-rule weights: half the distance to each neighbor.
    delta = numpy.zeros(len(lmbda))
    diff = lmbda[1:] - lmbda[:-1]
    delta[1:] += diff
    delta[:-1] += diff
    delta /= 2
    values = numpy.dot(idata_o, idata_s * delta)
    return values * 100
Computes the tristimulus values XYZ from a given spectrum for a given observer via
443
19
21,332
def d(nominal_temperature):
    # CIE D-series illuminants.
    #
    # From CIE 15:2004. Colorimetry, 3rd edition, 2004 (page 69, note 5):
    #
    # The method required to calculate the values for the relative spectral
    # power distributions of illuminants D50, D55, D65, and D75, in Table T.1
    # is as follows
    #   1. Multiply the nominal correlated colour temperature (5000 K, 5500 K,
    #      6500 K or 7500 K) by 1,4388/1,4380.
    #   2. Calculate XD and YD using the equations given in the text.
    #   3. Calculate M1 and M2 using the equations given in the text.
    #   4. Round M1 and M2 to three decimal places.
    #   5. Calculate S(lambda) every 10 nm by
    #        S(lambda) = S0(lambda) + M1 S1(lambda) + M2 S2(lambda)
    #      using values of S0(lambda), S1(lambda) and S2(lambda) from
    #      Table T.2.
    #   6. Interpolate the 10 nm values of S(lambda) linearly to obtain values
    #      at intermediate wavelengths.
    #
    # Step 1: scale the nominal temperature.
    tcp = 1.4388e-2 / 1.4380e-2 * nominal_temperature
    # Step 2: chromaticity (xd, yd) of the daylight locus at tcp.
    if 4000 <= tcp <= 7000:
        xd = ((-4.6070e9 / tcp + 2.9678e6) / tcp + 0.09911e3) / tcp + 0.244063
    else:
        assert 7000 < tcp <= 25000
        xd = ((-2.0064e9 / tcp + 1.9018e6) / tcp + 0.24748e3) / tcp + 0.237040
    yd = (-3.000 * xd + 2.870) * xd - 0.275
    # Steps 3-4: mixing coefficients, rounded to three decimals.
    m1 = (-1.3515 - 1.7703 * xd + 5.9114 * yd) / \
        (0.0241 + 0.2562 * xd - 0.7341 * yd)
    m2 = (+0.0300 - 31.4424 * xd + 30.0717 * yd) / \
        (0.0241 + 0.2562 * xd - 0.7341 * yd)
    m1 = numpy.around(m1, decimals=3)
    m2 = numpy.around(m2, decimals=3)
    # Load the S0/S1/S2 basis functions shipped with the package.
    dir_path = os.path.dirname(os.path.realpath(__file__))
    with open(os.path.join(dir_path, "data/illuminants/d.yaml")) as f:
        data = yaml.safe_load(f)
    data = numpy.array(data).T
    lmbda = data[0]
    s = data[1:]
    # Step 5: S(lambda) = S0 + M1*S1 + M2*S2.
    return lmbda, s[0] + m1 * s[1] + m2 * s[2]
CIE D - series illuminants .
644
9
21,333
def e():
    """Return CIE illuminant E, a hypothetical reference radiator.

    All wavelengths (300-830 nm, 1 nm steps) are weighted equally with a
    relative spectral power of 100.0.
    """
    lmbda = 1.0e-9 * numpy.arange(300, 831)
    return lmbda, numpy.full(lmbda.shape, 100.0)
This is a hypothetical reference radiator . All wavelengths in CIE illuminant E are weighted equally with a relative spectral power of 100 . 0 .
50
29
21,334
def dot(a, b):
    """Take arrays a and b and form the dot product between the last axis
    of a and the first axis of b, preserving the remaining axes."""
    b = numpy.asarray(b)
    # Flatten b's trailing axes, contract, then restore them.
    flat = numpy.dot(a, b.reshape(b.shape[0], -1))
    return flat.reshape(a.shape[:-1] + b.shape[1:])
Take arrays a and b and form the dot product between the last axis of a and the first of b .
63
22
21,335
def get_nlcd_mask(nlcd_ds, filter='not_forest', out_fn=None):
    """Generate raster mask for specified NLCD LULC filter.

    NLCD class codes used below: 11 open water (includes rivers), 12 ice,
    31 rock, 41/42/43 forest classes, 52 shrub.
    """
    print("Loading NLCD LULC")
    l = nlcd_ds.GetRasterBand(1).ReadAsArray()
    print("Filtering NLCD LULC with: %s" % filter)
    # Using 'rock+ice+water' preserves the most pixels, although it can be
    # problematic over areas with lakes.
    if filter == 'rock':
        mask = (l == 31)
    elif filter == 'rock+ice':
        mask = np.isin(l, (31, 12))
    elif filter == 'rock+ice+water':
        mask = np.isin(l, (31, 12, 11))
    elif filter == 'not_forest':
        mask = ~np.isin(l, (41, 42, 43))
    elif filter == 'not_forest+not_water':
        mask = ~np.isin(l, (41, 42, 43, 11))
    else:
        print("Invalid mask type")
        mask = None
    # Write out original data
    if out_fn is not None:
        print("Writing out %s" % out_fn)
        iolib.writeGTiff(l, out_fn, nlcd_ds)
    l = None
    return mask
Generate raster mask for specified NLCD LULC filter
426
13
21,336
def get_bareground_mask(bareground_ds, bareground_thresh=60, out_fn=None):
    """Generate raster mask for exposed bare ground from global bareground
    data: True where bare ground percentage exceeds the threshold."""
    print("Loading bareground")
    l = bareground_ds.GetRasterBand(1).ReadAsArray()
    print("Masking pixels with <%0.1f%% bare ground" % bareground_thresh)
    if not 0.0 <= bareground_thresh <= 100.0:
        sys.exit("Invalid bare ground percentage")
    mask = (l > bareground_thresh)
    # Write out original data
    if out_fn is not None:
        print("Writing out %s" % out_fn)
        iolib.writeGTiff(l, out_fn, bareground_ds)
    l = None
    return mask
Generate raster mask for exposed bare ground from global bareground data
176
14
21,337
def get_snodas_ds(dem_dt, code=1036):
    # Fetch and process SNODAS snow depth products for the input datetime.
    # code 1036 selects snow depth; other codes select other SNODAS
    # variables. Returns a GDAL dataset, or None if no data is available.
    import tarfile
    import gzip
    snodas_ds = None
    snodas_url_str = None
    outdir = os.path.join(datadir, 'snodas')
    if not os.path.exists(outdir):
        os.makedirs(outdir)
    # Note: unmasked products (beyond CONUS) are only available from 2010-present
    if dem_dt >= datetime(2003, 9, 30) and dem_dt < datetime(2010, 1, 1):
        snodas_url_str = 'ftp://sidads.colorado.edu/DATASETS/NOAA/G02158/masked/%Y/%m_%b/SNODAS_%Y%m%d.tar'
        tar_subfn_str_fmt = 'us_ssmv1%itS__T0001TTNATS%%Y%%m%%d05HP001.%s.gz'
    elif dem_dt >= datetime(2010, 1, 1):
        snodas_url_str = 'ftp://sidads.colorado.edu/DATASETS/NOAA/G02158/unmasked/%Y/%m_%b/SNODAS_unmasked_%Y%m%d.tar'
        tar_subfn_str_fmt = './zz_ssmv1%itS__T0001TTNATS%%Y%%m%%d05HP001.%s.gz'
    else:
        print("No SNODAS data available for input date")
    if snodas_url_str is not None:
        snodas_url = dem_dt.strftime(snodas_url_str)
        snodas_tar_fn = iolib.getfile(snodas_url, outdir=outdir)
        print("Unpacking")
        tar = tarfile.open(snodas_tar_fn)
        # gunzip to extract both dat and Hdr files, tar.gz
        for ext in ('dat', 'Hdr'):
            tar_subfn_str = tar_subfn_str_fmt % (code, ext)
            tar_subfn_gz = dem_dt.strftime(tar_subfn_str)
            tar_subfn = os.path.splitext(tar_subfn_gz)[0]
            print(tar_subfn)
            if outdir is not None:
                tar_subfn = os.path.join(outdir, tar_subfn)
            if not os.path.exists(tar_subfn):
                # Should be able to do this without writing intermediate gz to disk
                tar.extract(tar_subfn_gz)
                with gzip.open(tar_subfn_gz, 'rb') as f:
                    outf = open(tar_subfn, 'wb')
                    outf.write(f.read())
                    outf.close()
                os.remove(tar_subfn_gz)
        # Need to delete 'Created by module comment' line from Hdr, can
        # contain too many characters
        bad_str = 'Created by module comment'
        # NOTE(review): tar_subfn here is the Hdr file (last loop value) —
        # relied upon below; confirm intent.
        snodas_fn = tar_subfn
        f = open(snodas_fn)
        output = []
        for line in f:
            if not bad_str in line:
                output.append(line)
        f.close()
        f = open(snodas_fn, 'w')
        f.writelines(output)
        f.close()
        # Return GDAL dataset for extracted product
        snodas_ds = gdal.Open(snodas_fn)
    return snodas_ds
Function to fetch and process SNODAS snow depth products for input datetime
810
15
21,338
def get_modis_tile_list(ds):
    """Identify the MODIS tiles that intersect the input dataset's
    geometry."""
    from demcoreg import modis_grid
    # Build OGR geometries for every MODIS tile outline.
    modis_tiles = {}
    for key in modis_grid.modis_dict:
        modis_tiles[key] = ogr.CreateGeometryFromWkt(
            modis_grid.modis_dict[key])
    geom = geolib.ds_geom(ds)
    geom_dup = geolib.geom_dup(geom)
    # Reproject the dataset geometry to WGS84 to match the tile outlines.
    ct = osr.CoordinateTransformation(geom_dup.GetSpatialReference(),
                                      geolib.wgs_srs)
    geom_dup.Transform(ct)
    return [key for key, tile_geom in list(modis_tiles.items())
            if geom_dup.Intersects(tile_geom)]
Helper function to identify MODIS tiles that intersect input geometry
203
11
21,339
def proc_modscag(fn_list, extent=None, t_srs=None):
    # Process the MODSCAG products for the full date range: create
    # count/min/max/median composites, write them to GeoTiff, reproject.
    # Use cubic spline here for improve upsampling
    ds_list = warplib.memwarp_multi_fn(fn_list, res='min', extent=extent,
                                       t_srs=t_srs, r='cubicspline')
    stack_fn = os.path.splitext(fn_list[0])[0] + '_' + \
        os.path.splitext(os.path.split(fn_list[-1])[1])[0] + \
        '_stack_%i' % len(fn_list)
    # Create stack here - no need for most of mastack machinery, just make 3D array
    # Mask values greater than 100% (clouds, bad pixels, etc)
    ma_stack = np.ma.array([np.ma.masked_greater(iolib.ds_getma(ds), 100)
                            for ds in np.array(ds_list)], dtype=np.uint8)
    # Per-pixel count of valid observations; 0-count pixels stay masked.
    stack_count = np.ma.masked_equal(ma_stack.count(axis=0),
                                     0).astype(np.uint8)
    stack_count.set_fill_value(0)
    stack_min = ma_stack.min(axis=0).astype(np.uint8)
    stack_min.set_fill_value(0)
    stack_max = ma_stack.max(axis=0).astype(np.uint8)
    stack_max.set_fill_value(0)
    stack_med = np.ma.median(ma_stack, axis=0).astype(np.uint8)
    stack_med.set_fill_value(0)
    out_fn = stack_fn + '_count.tif'
    iolib.writeGTiff(stack_count, out_fn, ds_list[0])
    out_fn = stack_fn + '_max.tif'
    iolib.writeGTiff(stack_max, out_fn, ds_list[0])
    out_fn = stack_fn + '_min.tif'
    iolib.writeGTiff(stack_min, out_fn, ds_list[0])
    out_fn = stack_fn + '_med.tif'
    iolib.writeGTiff(stack_med, out_fn, ds_list[0])
    # NOTE(review): out_fn is reused, so the returned dataset is the last
    # file written (_med) — confirm this is intended.
    ds = gdal.Open(out_fn)
    return ds
Process the MODSCAG products for full date range create composites and reproject
572
16
21,340
def _value_length ( self , value , t ) : if isinstance ( value , int ) : fmt = '<%s' % ( type_codes [ t ] ) output = struct . pack ( fmt , value ) return len ( output ) elif isinstance ( value , str ) : return len ( value ) + 1 # Account for final 0 len_accum = 0 for x in value : len_accum += self . _value_length ( x , t ) return len_accum
Given an integer or list of them convert it to an array of bytes .
107
15
21,341
def _parse_line ( self , line_no , line ) : try : matched = statement . parseString ( line ) except ParseException as exc : raise DataError ( "Error parsing line in TileBus file" , line_number = line_no , column = exc . col , contents = line ) if 'symbol' in matched : self . _parse_cmd ( matched ) elif 'filename' in matched : self . _parse_include ( matched ) elif 'variable' in matched : self . _parse_assignment ( matched ) elif 'configvar' in matched : self . _parse_configvar ( matched )
Parse a line in a TileBus file
136
9
21,342
def _validate_information ( self ) : needed_variables = [ "ModuleName" , "ModuleVersion" , "APIVersion" ] for var in needed_variables : if var not in self . variables : raise DataError ( "Needed variable was not defined in mib file." , variable = var ) # Make sure ModuleName is <= 6 characters if len ( self . variables [ "ModuleName" ] ) > 6 : raise DataError ( "ModuleName too long, must be 6 or fewer characters." , module_name = self . variables [ "ModuleName" ] ) if not isinstance ( self . variables [ "ModuleVersion" ] , str ) : raise ValueError ( "ModuleVersion ('%s') must be a string of the form X.Y.Z" % str ( self . variables [ 'ModuleVersion' ] ) ) if not isinstance ( self . variables [ "APIVersion" ] , str ) : raise ValueError ( "APIVersion ('%s') must be a string of the form X.Y" % str ( self . variables [ 'APIVersion' ] ) ) self . variables [ 'ModuleVersion' ] = self . _convert_module_version ( self . variables [ "ModuleVersion" ] ) self . variables [ 'APIVersion' ] = self . _convert_api_version ( self . variables [ "APIVersion" ] ) self . variables [ "ModuleName" ] = self . variables [ "ModuleName" ] . ljust ( 6 ) self . valid = True
Validate that all information has been filled in
331
9
21,343
def get_block ( self , config_only = False ) : mib = TBBlock ( ) for cid , config in self . configs . items ( ) : mib . add_config ( cid , config ) if not config_only : for key , val in self . commands . items ( ) : mib . add_command ( key , val ) if not self . valid : self . _validate_information ( ) mib . set_api_version ( * self . variables [ "APIVersion" ] ) mib . set_module_version ( * self . variables [ "ModuleVersion" ] ) mib . set_name ( self . variables [ "ModuleName" ] ) return mib
Create a TileBus Block based on the information in this descriptor
154
12
21,344
def add_adapter ( self , adapter ) : if self . _started : raise InternalError ( "New adapters cannot be added after start() is called" ) if isinstance ( adapter , DeviceAdapter ) : self . _logger . warning ( "Wrapping legacy device adapter %s in async wrapper" , adapter ) adapter = AsynchronousModernWrapper ( adapter , loop = self . _loop ) self . adapters . append ( adapter ) adapter_callback = functools . partial ( self . handle_adapter_event , len ( self . adapters ) - 1 ) events = [ 'device_seen' , 'broadcast' , 'report' , 'connection' , 'disconnection' , 'trace' , 'progress' ] adapter . register_monitor ( [ None ] , events , adapter_callback )
Add a device adapter to this aggregating adapter .
171
10
21,345
def get_config ( self , name , default = _MISSING ) : val = self . _config . get ( name , default ) if val is _MISSING : raise ArgumentError ( "DeviceAdapter config {} did not exist and no default" . format ( name ) ) return val
Get a configuration setting from this DeviceAdapter .
62
9
21,346
async def start ( self ) : successful = 0 try : for adapter in self . adapters : await adapter . start ( ) successful += 1 self . _started = True except : for adapter in self . adapters [ : successful ] : await adapter . stop ( ) raise
Start all adapters managed by this device adapter .
55
9
21,347
def visible_devices ( self ) : devs = { } for device_id , adapters in self . _devices . items ( ) : dev = None max_signal = None best_adapter = None for adapter_id , devinfo in adapters . items ( ) : connstring = "adapter/{0}/{1}" . format ( adapter_id , devinfo [ 'connection_string' ] ) if dev is None : dev = copy . deepcopy ( devinfo ) del dev [ 'connection_string' ] if 'adapters' not in dev : dev [ 'adapters' ] = [ ] best_adapter = adapter_id dev [ 'adapters' ] . append ( ( adapter_id , devinfo [ 'signal_strength' ] , connstring ) ) if max_signal is None : max_signal = devinfo [ 'signal_strength' ] elif devinfo [ 'signal_strength' ] > max_signal : max_signal = devinfo [ 'signal_strength' ] best_adapter = adapter_id # If device has been seen in no adapters, it will get expired # don't return it if dev is None : continue dev [ 'connection_string' ] = "device/%x" % dev [ 'uuid' ] dev [ 'adapters' ] = sorted ( dev [ 'adapters' ] , key = lambda x : x [ 1 ] , reverse = True ) dev [ 'best_adapter' ] = best_adapter dev [ 'signal_strength' ] = max_signal devs [ device_id ] = dev return devs
Unify all visible devices across all connected adapters
351
9
21,348
async def probe ( self ) : for adapter in self . adapters : if adapter . get_config ( 'probe_supported' , False ) : await adapter . probe ( )
Probe for devices .
38
5
21,349
async def send_script ( self , conn_id , data ) : adapter_id = self . _get_property ( conn_id , 'adapter' ) return await self . adapters [ adapter_id ] . send_script ( conn_id , data )
Send a script to a device .
57
7
21,350
async def handle_adapter_event ( self , adapter_id , conn_string , conn_id , name , event ) : if name == 'device_seen' : self . _track_device_seen ( adapter_id , conn_string , event ) event = self . _translate_device_seen ( adapter_id , conn_string , event ) conn_string = self . _translate_conn_string ( adapter_id , conn_string ) elif conn_id is not None and self . _get_property ( conn_id , 'translate' ) : conn_string = self . _translate_conn_string ( adapter_id , conn_string ) else : conn_string = "adapter/%d/%s" % ( adapter_id , conn_string ) await self . notify_event ( conn_string , name , event )
Handle an event received from an adapter .
190
8
21,351
def _device_expiry_callback ( self ) : expired = 0 for adapters in self . _devices . values ( ) : to_remove = [ ] now = monotonic ( ) for adapter_id , dev in adapters . items ( ) : if 'expires' not in dev : continue if now > dev [ 'expires' ] : to_remove . append ( adapter_id ) local_conn = "adapter/%d/%s" % ( adapter_id , dev [ 'connection_string' ] ) if local_conn in self . _conn_strings : del self . _conn_strings [ local_conn ] for entry in to_remove : del adapters [ entry ] expired += 1 if expired > 0 : self . _logger . info ( 'Expired %d devices' , expired )
Periodic callback to remove expired devices from visible_devices .
176
13
21,352
def PathIsDir ( self , key , val , env ) : if not os . path . isdir ( val ) : if os . path . isfile ( val ) : m = 'Directory path for option %s is a file: %s' else : m = 'Directory path for option %s does not exist: %s' raise SCons . Errors . UserError ( m % ( key , val ) )
Validator to check if Path is a directory .
88
10
21,353
def PathExists ( self , key , val , env ) : if not os . path . exists ( val ) : m = 'Path for option %s does not exist: %s' raise SCons . Errors . UserError ( m % ( key , val ) )
Validator to check if Path exists
57
7
21,354
async def _reset_vector ( self ) : self . _logger . debug ( "sensor_graph subsystem task starting" ) # If there is a persistent sgf loaded, send reset information. self . initialized . set ( ) while True : stream , reading = await self . _inputs . get ( ) try : await process_graph_input ( self . graph , stream , reading , self . _executor ) self . process_streamers ( ) except : #pylint:disable=bare-except;This is a background task that should not die self . _logger . exception ( "Unhandled exception processing sensor_graph input (stream=%s), reading=%s" , stream , reading ) finally : self . _inputs . task_done ( )
Background task to initialize this system in the event loop .
167
11
21,355
def process_input ( self , encoded_stream , value ) : if not self . enabled : return if isinstance ( encoded_stream , str ) : stream = DataStream . FromString ( encoded_stream ) encoded_stream = stream . encode ( ) elif isinstance ( encoded_stream , DataStream ) : stream = encoded_stream encoded_stream = stream . encode ( ) else : stream = DataStream . FromEncoded ( encoded_stream ) reading = IOTileReading ( self . get_timestamp ( ) , encoded_stream , value ) self . _inputs . put_nowait ( ( stream , reading ) )
Process or drop a graph input .
134
7
21,356
def _seek_streamer ( self , index , value ) : highest_id = self . _rsl . highest_stored_id ( ) streamer = self . graph . streamers [ index ] if not streamer . walker . buffered : return _pack_sgerror ( SensorLogError . CANNOT_USE_UNBUFFERED_STREAM ) find_type = None try : exact = streamer . walker . seek ( value , target = 'id' ) if exact : find_type = 'exact' else : find_type = 'other_stream' except UnresolvedIdentifierError : if value > highest_id : find_type = 'too_high' else : find_type = 'too_low' # If we found an exact match, move one beyond it if find_type == 'exact' : try : streamer . walker . pop ( ) except StreamEmptyError : pass error = Error . NO_ERROR elif find_type == 'too_high' : streamer . walker . skip_all ( ) error = _pack_sgerror ( SensorLogError . NO_MORE_READINGS ) elif find_type == 'too_low' : streamer . walker . seek ( 0 , target = 'offset' ) error = _pack_sgerror ( SensorLogError . NO_MORE_READINGS ) else : error = _pack_sgerror ( SensorLogError . ID_FOUND_FOR_ANOTHER_STREAM ) return error
Complex logic for actually seeking a streamer to a reading_id .
329
15
21,357
def acknowledge_streamer ( self , index , ack , force ) : if index >= len ( self . graph . streamers ) : return _pack_sgerror ( SensorGraphError . STREAMER_NOT_ALLOCATED ) old_ack = self . streamer_acks . get ( index , 0 ) if ack != 0 : if ack <= old_ack and not force : return _pack_sgerror ( SensorGraphError . OLD_ACKNOWLEDGE_UPDATE ) self . streamer_acks [ index ] = ack current_ack = self . streamer_acks . get ( index , 0 ) return self . _seek_streamer ( index , current_ack )
Acknowledge a streamer value as received from the remote side .
151
14
21,358
def _handle_streamer_finished ( self , index , succeeded , highest_ack ) : self . _logger . debug ( "Rolling back streamer %d after streaming, highest ack from streaming subsystem was %d" , index , highest_ack ) self . acknowledge_streamer ( index , highest_ack , False )
Callback when a streamer finishes processing .
71
8
21,359
def process_streamers ( self ) : # Check for any triggered streamers and pass them to stream manager in_progress = self . _stream_manager . in_progress ( ) triggered = self . graph . check_streamers ( blacklist = in_progress ) for streamer in triggered : self . _stream_manager . process_streamer ( streamer , callback = self . _handle_streamer_finished )
Check if any streamers should be handed to the stream manager .
88
13
21,360
def trigger_streamer ( self , index ) : self . _logger . debug ( "trigger_streamer RPC called on streamer %d" , index ) if index >= len ( self . graph . streamers ) : return _pack_sgerror ( SensorGraphError . STREAMER_NOT_ALLOCATED ) if index in self . _stream_manager . in_progress ( ) : return _pack_sgerror ( SensorGraphError . STREAM_ALREADY_IN_PROGRESS ) streamer = self . graph . streamers [ index ] if not streamer . triggered ( manual = True ) : return _pack_sgerror ( SensorGraphError . STREAMER_HAS_NO_NEW_DATA ) self . _logger . debug ( "calling mark_streamer on streamer %d from trigger_streamer RPC" , index ) self . graph . mark_streamer ( index ) self . process_streamers ( ) return Error . NO_ERROR
Pass a streamer to the stream manager if it has data .
214
13
21,361
def persist ( self ) : self . persisted_nodes = self . graph . dump_nodes ( ) self . persisted_streamers = self . graph . dump_streamers ( ) self . persisted_exists = True self . persisted_constants = self . _sensor_log . dump_constants ( )
Trigger saving the current sensorgraph to persistent storage .
69
11
21,362
def reset ( self ) : self . persisted_exists = False self . persisted_nodes = [ ] self . persisted_streamers = [ ] self . persisted_constants = [ ] self . graph . clear ( ) self . streamer_status = { }
Clear the sensorgraph from RAM and flash .
57
10
21,363
def add_node ( self , binary_descriptor ) : try : node_string = parse_binary_descriptor ( binary_descriptor ) except : self . _logger . exception ( "Error parsing binary node descriptor: %s" , binary_descriptor ) return _pack_sgerror ( SensorGraphError . INVALID_NODE_STREAM ) # FIXME: Actually provide the correct error codes here try : self . graph . add_node ( node_string ) except NodeConnectionError : return _pack_sgerror ( SensorGraphError . STREAM_NOT_IN_USE ) except ProcessingFunctionError : return _pack_sgerror ( SensorGraphError . INVALID_PROCESSING_FUNCTION ) except ResourceUsageError : return _pack_sgerror ( SensorGraphError . NO_NODE_SPACE_AVAILABLE ) return Error . NO_ERROR
Add a node to the sensor_graph using a binary node descriptor .
200
14
21,364
def add_streamer ( self , binary_descriptor ) : streamer = streamer_descriptor . parse_binary_descriptor ( binary_descriptor ) try : self . graph . add_streamer ( streamer ) self . streamer_status [ len ( self . graph . streamers ) - 1 ] = StreamerStatus ( ) return Error . NO_ERROR except ResourceUsageError : return _pack_sgerror ( SensorGraphError . NO_MORE_STREAMER_RESOURCES )
Add a streamer to the sensor_graph using a binary streamer descriptor .
114
16
21,365
def inspect_streamer ( self , index ) : if index >= len ( self . graph . streamers ) : return [ _pack_sgerror ( SensorGraphError . STREAMER_NOT_ALLOCATED ) , b'\0' * 14 ] return [ Error . NO_ERROR , streamer_descriptor . create_binary_descriptor ( self . graph . streamers [ index ] ) ]
Inspect the streamer at the given index .
90
10
21,366
def inspect_node ( self , index ) : if index >= len ( self . graph . nodes ) : raise RPCErrorCode ( 6 ) #FIXME: use actual error code here for UNKNOWN_ERROR status return create_binary_descriptor ( str ( self . graph . nodes [ index ] ) )
Inspect the graph node at the given index .
67
10
21,367
def query_streamer ( self , index ) : if index >= len ( self . graph . streamers ) : return None info = self . streamer_status [ index ] highest_ack = self . streamer_acks . get ( index , 0 ) return [ info . last_attempt_time , info . last_success_time , info . last_error , highest_ack , info . last_status , info . attempt_number , info . comm_status ]
Query the status of the streamer at the given index .
101
12
21,368
def sg_graph_input ( self , value , stream_id ) : self . sensor_graph . process_input ( stream_id , value ) return [ Error . NO_ERROR ]
Present a graph input to the sensor_graph subsystem .
41
11
21,369
def sg_add_streamer ( self , desc ) : if len ( desc ) == 13 : desc += b'\0' err = self . sensor_graph . add_streamer ( desc ) return [ err ]
Add a graph streamer using a binary descriptor .
48
10
21,370
def sg_seek_streamer ( self , index , force , value ) : force = bool ( force ) err = self . sensor_graph . acknowledge_streamer ( index , value , force ) return [ err ]
Ackowledge a streamer .
47
8
21,371
def sg_query_streamer ( self , index ) : resp = self . sensor_graph . query_streamer ( index ) if resp is None : return [ struct . pack ( "<L" , _pack_sgerror ( SensorGraphError . STREAMER_NOT_ALLOCATED ) ) ] return [ struct . pack ( "<LLLLBBBx" , * resp ) ]
Query the current status of a streamer .
85
9
21,372
def dispatch ( self , value , callback = None ) : done = None if callback is None : done = threading . Event ( ) shared_data = [ None , None ] def _callback ( exc_info , return_value ) : shared_data [ 0 ] = exc_info shared_data [ 1 ] = return_value done . set ( ) callback = _callback workitem = WorkItem ( value , callback ) self . _work_queue . put ( workitem ) if done is None : return None done . wait ( ) exc_info , return_value = shared_data if exc_info is not None : self . future_raise ( * exc_info ) return return_value
Dispatch an item to the workqueue and optionally wait .
147
11
21,373
def future_raise ( self , tp , value = None , tb = None ) : if value is not None and isinstance ( tp , Exception ) : raise TypeError ( "instance exception may not have a separate value" ) if value is not None : exc = tp ( value ) else : exc = tp if exc . __traceback__ is not tb : raise exc . with_traceback ( tb ) raise exc
raise_ implementation from future . utils
94
8
21,374
def flush ( self ) : done = threading . Event ( ) def _callback ( ) : done . set ( ) self . defer ( _callback ) done . wait ( )
Synchronously wait until this work item is processed .
37
11
21,375
def direct_dispatch ( self , arg , callback ) : try : self . _current_callbacks . appendleft ( callback ) exc_info = None retval = None retval = self . _routine ( arg ) except : # pylint:disable=bare-except;We need to capture the exception and feed it back to the caller exc_info = sys . exc_info ( ) finally : self . _current_callbacks . popleft ( ) if callback is not None and retval is not self . STILL_PENDING : callback ( exc_info , retval ) return retval , exc_info
Directly dispatch a work item .
135
7
21,376
def stop ( self , timeout = None , force = False ) : self . signal_stop ( ) self . wait_stopped ( timeout , force )
Stop the worker thread and synchronously wait for it to finish .
32
13
21,377
def wait_stopped ( self , timeout = None , force = False ) : self . join ( timeout ) if self . is_alive ( ) and force is False : raise TimeoutExpiredError ( "Error waiting for background thread to exit" , timeout = timeout )
Wait for the thread to stop .
58
7
21,378
def generate ( env ) : if not exists ( env ) : return env [ 'WIXCANDLEFLAGS' ] = [ '-nologo' ] env [ 'WIXCANDLEINCLUDE' ] = [ ] env [ 'WIXCANDLECOM' ] = '$WIXCANDLE $WIXCANDLEFLAGS -I $WIXCANDLEINCLUDE -o ${TARGET} ${SOURCE}' env [ 'WIXLIGHTFLAGS' ] . append ( '-nologo' ) env [ 'WIXLIGHTCOM' ] = "$WIXLIGHT $WIXLIGHTFLAGS -out ${TARGET} ${SOURCES}" env [ 'WIXSRCSUF' ] = '.wxs' env [ 'WIXOBJSUF' ] = '.wixobj' object_builder = SCons . Builder . Builder ( action = '$WIXCANDLECOM' , suffix = '$WIXOBJSUF' , src_suffix = '$WIXSRCSUF' ) linker_builder = SCons . Builder . Builder ( action = '$WIXLIGHTCOM' , src_suffix = '$WIXOBJSUF' , src_builder = object_builder ) env [ 'BUILDERS' ] [ 'WiX' ] = linker_builder
Add Builders and construction variables for WiX to an Environment .
307
13
21,379
def FromString ( cls , desc ) : if language . stream is None : language . get_language ( ) parse_exp = Optional ( time_interval ( 'time' ) - Literal ( ':' ) . suppress ( ) ) - language . stream ( 'stream' ) - Literal ( '=' ) . suppress ( ) - number ( 'value' ) try : data = parse_exp . parseString ( desc ) time = 0 if 'time' in data : time = data [ 'time' ] [ 0 ] return SimulationStimulus ( time , data [ 'stream' ] [ 0 ] , data [ 'value' ] ) except ( ParseException , ParseSyntaxException ) : raise ArgumentError ( "Could not parse stimulus descriptor" , descriptor = desc )
Create a new stimulus from a description string .
166
9
21,380
def get_connection_id ( self , conn_or_int_id ) : key = conn_or_int_id if isinstance ( key , str ) : table = self . _int_connections elif isinstance ( key , int ) : table = self . _connections else : raise ArgumentError ( "You must supply either an int connection id or a string internal id to _get_connection_state" , id = key ) try : data = table [ key ] except KeyError : raise ArgumentError ( "Could not find connection by id" , id = key ) return data [ 'conn_id' ]
Get the connection id .
133
5
21,381
def _get_connection ( self , conn_or_int_id ) : key = conn_or_int_id if isinstance ( key , str ) : table = self . _int_connections elif isinstance ( key , int ) : table = self . _connections else : return None try : data = table [ key ] except KeyError : return None return data
Get the data for a connection by either conn_id or internal_id
81
15
21,382
def _get_connection_state ( self , conn_or_int_id ) : key = conn_or_int_id if isinstance ( key , str ) : table = self . _int_connections elif isinstance ( key , int ) : table = self . _connections else : raise ArgumentError ( "You must supply either an int connection id or a string internal id to _get_connection_state" , id = key ) if key not in table : return self . Disconnected data = table [ key ] return data [ 'state' ]
Get a connection s state by either conn_id or internal_id
120
14
21,383
def _check_timeouts ( self ) : for conn_id , data in self . _connections . items ( ) : if 'timeout' in data and data [ 'timeout' ] . expired : if data [ 'state' ] == self . Connecting : self . finish_connection ( conn_id , False , 'Connection attempt timed out' ) elif data [ 'state' ] == self . Disconnecting : self . finish_disconnection ( conn_id , False , 'Disconnection attempt timed out' ) elif data [ 'state' ] == self . InProgress : if data [ 'microstate' ] == 'rpc' : self . finish_operation ( conn_id , False , 'RPC timed out without response' , None , None ) elif data [ 'microstate' ] == 'open_interface' : self . finish_operation ( conn_id , False , 'Open interface request timed out' )
Check if any operations in progress need to be timed out
200
11
21,384
def unexpected_disconnect ( self , conn_or_internal_id ) : data = { 'id' : conn_or_internal_id } action = ConnectionAction ( 'force_disconnect' , data , sync = False ) self . _actions . put ( action )
Notify that there was an unexpected disconnection of the device .
59
13
21,385
def finish_operation ( self , conn_or_internal_id , success , * args ) : data = { 'id' : conn_or_internal_id , 'success' : success , 'callback_args' : args } action = ConnectionAction ( 'finish_operation' , data , sync = False ) self . _actions . put ( action )
Finish an operation on a connection .
77
7
21,386
def _finish_operation_action ( self , action ) : success = action . data [ 'success' ] conn_key = action . data [ 'id' ] if self . _get_connection_state ( conn_key ) != self . InProgress : self . _logger . error ( "Invalid finish_operation action on a connection whose state is not InProgress, conn_key=%s" , str ( conn_key ) ) return # Cannot be None since we checked above to make sure it exists data = self . _get_connection ( conn_key ) callback = data [ 'callback' ] conn_id = data [ 'conn_id' ] args = action . data [ 'callback_args' ] data [ 'state' ] = self . Idle data [ 'microstate' ] = None callback ( conn_id , self . id , success , * args )
Finish an attempted operation .
188
5
21,387
def canonical_text ( self , text ) : out = [ ] line_continues_a_comment = False for line in text . splitlines ( ) : line , comment = self . comment_re . findall ( line ) [ 0 ] if line_continues_a_comment == True : out [ - 1 ] = out [ - 1 ] + line . lstrip ( ) else : out . append ( line ) line_continues_a_comment = len ( comment ) > 0 return '\n' . join ( out ) . rstrip ( ) + '\n'
Standardize an input TeX - file contents .
125
10
21,388
def scan_recurse ( self , node , path = ( ) ) : path_dict = dict ( list ( path ) ) queue = [ ] queue . extend ( self . scan ( node ) ) seen = { } # This is a hand-coded DSU (decorate-sort-undecorate, or # Schwartzian transform) pattern. The sort key is the raw name # of the file as specifed on the \include, \input, etc. line. # TODO: what about the comment in the original Classic scanner: # """which lets # us keep the sort order constant regardless of whether the file # is actually found in a Repository or locally.""" nodes = [ ] source_dir = node . get_dir ( ) #for include in includes: while queue : include = queue . pop ( ) inc_type , inc_subdir , inc_filename = include try : if seen [ inc_filename ] == 1 : continue except KeyError : seen [ inc_filename ] = 1 # # Handle multiple filenames in include[1] # n , i = self . find_include ( include , source_dir , path_dict ) if n is None : # Do not bother with 'usepackage' warnings, as they most # likely refer to system-level files if inc_type != 'usepackage' : SCons . Warnings . warn ( SCons . Warnings . DependencyWarning , "No dependency generated for file: %s (included from: %s) -- file not found" % ( i , node ) ) else : sortkey = self . sort_key ( n ) nodes . append ( ( sortkey , n ) ) # recurse down queue . extend ( self . scan ( n , inc_subdir ) ) return [ pair [ 1 ] for pair in sorted ( nodes ) ]
do a recursive scan of the top level target file This lets us search for included files based on the directory of the main file just as latex does
387
29
21,389
def caller_trace ( back = 0 ) : global caller_bases , caller_dicts import traceback tb = traceback . extract_stack ( limit = 3 + back ) tb . reverse ( ) callee = tb [ 1 ] [ : 3 ] caller_bases [ callee ] = caller_bases . get ( callee , 0 ) + 1 for caller in tb [ 2 : ] : caller = callee + caller [ : 3 ] try : entry = caller_dicts [ callee ] except KeyError : caller_dicts [ callee ] = entry = { } entry [ caller ] = entry . get ( caller , 0 ) + 1 callee = caller
Trace caller stack and save info into global dicts which are printed automatically at the end of SCons execution .
148
23
21,390
def diff_dumps ( ih1 , ih2 , tofile = None , name1 = "a" , name2 = "b" , n_context = 3 ) : def prepare_lines ( ih ) : sio = StringIO ( ) ih . dump ( sio ) dump = sio . getvalue ( ) lines = dump . splitlines ( ) return lines a = prepare_lines ( ih1 ) b = prepare_lines ( ih2 ) import difflib result = list ( difflib . unified_diff ( a , b , fromfile = name1 , tofile = name2 , n = n_context , lineterm = '' ) ) if tofile is None : tofile = sys . stdout output = '\n' . join ( result ) + '\n' tofile . write ( output )
Diff 2 IntelHex objects and produce unified diff output for their hex dumps .
182
16
21,391
def _decode_record ( self , s , line = 0 ) : s = s . rstrip ( '\r\n' ) if not s : return # empty line if s [ 0 ] == ':' : try : bin = array ( 'B' , unhexlify ( asbytes ( s [ 1 : ] ) ) ) except ( TypeError , ValueError ) : # this might be raised by unhexlify when odd hexascii digits raise HexRecordError ( line = line ) length = len ( bin ) if length < 5 : raise HexRecordError ( line = line ) else : raise HexRecordError ( line = line ) record_length = bin [ 0 ] if length != ( 5 + record_length ) : raise RecordLengthError ( line = line ) addr = bin [ 1 ] * 256 + bin [ 2 ] record_type = bin [ 3 ] if not ( 0 <= record_type <= 5 ) : raise RecordTypeError ( line = line ) crc = sum ( bin ) crc &= 0x0FF if crc != 0 : raise RecordChecksumError ( line = line ) if record_type == 0 : # data record addr += self . _offset for i in range_g ( 4 , 4 + record_length ) : if not self . _buf . get ( addr , None ) is None : raise AddressOverlapError ( address = addr , line = line ) self . _buf [ addr ] = bin [ i ] addr += 1 # FIXME: addr should be wrapped # BUT after 02 record (at 64K boundary) # and after 04 record (at 4G boundary) elif record_type == 1 : # end of file record if record_length != 0 : raise EOFRecordError ( line = line ) raise _EndOfFile elif record_type == 2 : # Extended 8086 Segment Record if record_length != 2 or addr != 0 : raise ExtendedSegmentAddressRecordError ( line = line ) self . _offset = ( bin [ 4 ] * 256 + bin [ 5 ] ) * 16 elif record_type == 4 : # Extended Linear Address Record if record_length != 2 or addr != 0 : raise ExtendedLinearAddressRecordError ( line = line ) self . _offset = ( bin [ 4 ] * 256 + bin [ 5 ] ) * 65536 elif record_type == 3 : # Start Segment Address Record if record_length != 4 or addr != 0 : raise StartSegmentAddressRecordError ( line = line ) if self . start_addr : raise DuplicateStartAddressRecordError ( line = line ) self . 
start_addr = { 'CS' : bin [ 4 ] * 256 + bin [ 5 ] , 'IP' : bin [ 6 ] * 256 + bin [ 7 ] , } elif record_type == 5 : # Start Linear Address Record if record_length != 4 or addr != 0 : raise StartLinearAddressRecordError ( line = line ) if self . start_addr : raise DuplicateStartAddressRecordError ( line = line ) self . start_addr = { 'EIP' : ( bin [ 4 ] * 16777216 + bin [ 5 ] * 65536 + bin [ 6 ] * 256 + bin [ 7 ] ) , }
Decode one record of HEX file .
694
9
21,392
def loadhex ( self , fobj ) : if getattr ( fobj , "read" , None ) is None : fobj = open ( fobj , "r" ) fclose = fobj . close else : fclose = None self . _offset = 0 line = 0 try : decode = self . _decode_record try : for s in fobj : line += 1 decode ( s , line ) except _EndOfFile : pass finally : if fclose : fclose ( )
Load hex file into internal buffer . This is not necessary if object was initialized with source set . This will overwrite addresses if object was already initialized .
105
29
21,393
def loadbin ( self , fobj , offset = 0 ) : fread = getattr ( fobj , "read" , None ) if fread is None : f = open ( fobj , "rb" ) fread = f . read fclose = f . close else : fclose = None try : self . frombytes ( array ( 'B' , asbytes ( fread ( ) ) ) , offset = offset ) finally : if fclose : fclose ( )
Load bin file into internal buffer . Not needed if source set in constructor . This will overwrite addresses without warning if object was already initialized .
101
27
21,394
def loadfile ( self , fobj , format ) : if format == "hex" : self . loadhex ( fobj ) elif format == "bin" : self . loadbin ( fobj ) else : raise ValueError ( 'format should be either "hex" or "bin";' ' got %r instead' % format )
Load data file into internal buffer . Preferred wrapper over loadbin or loadhex .
71
16
21,395
def _get_start_end ( self , start = None , end = None , size = None ) : if ( start , end ) == ( None , None ) and self . _buf == { } : raise EmptyIntelHexError if size is not None : if None not in ( start , end ) : raise ValueError ( "tobinarray: you can't use start,end and size" " arguments in the same time" ) if ( start , end ) == ( None , None ) : start = self . minaddr ( ) if start is not None : end = start + size - 1 else : start = end - size + 1 if start < 0 : raise ValueError ( "tobinarray: invalid size (%d) " "for given end address (%d)" % ( size , end ) ) else : if start is None : start = self . minaddr ( ) if end is None : end = self . maxaddr ( ) if start > end : start , end = end , start return start , end
Return default values for start and end if they are None . If this IntelHex object is empty then it s error to invoke this method with both start and end as None .
215
36
21,396
def tobinarray ( self , start = None , end = None , pad = _DEPRECATED , size = None ) : if not isinstance ( pad , _DeprecatedParam ) : print ( "IntelHex.tobinarray: 'pad' parameter is deprecated." ) if pad is not None : print ( "Please, use IntelHex.padding attribute instead." ) else : print ( "Please, don't pass it explicitly." ) print ( "Use syntax like this: ih.tobinarray(start=xxx, end=yyy, size=zzz)" ) else : pad = None return self . _tobinarray_really ( start , end , pad , size )
Convert this object to binary form as array . If start and end unspecified they will be inferred from the data .
147
23
21,397
def _tobinarray_really ( self , start , end , pad , size ) : if pad is None : pad = self . padding bin = array ( 'B' ) if self . _buf == { } and None in ( start , end ) : return bin if size is not None and size <= 0 : raise ValueError ( "tobinarray: wrong value for size" ) start , end = self . _get_start_end ( start , end , size ) for i in range_g ( start , end + 1 ) : bin . append ( self . _buf . get ( i , pad ) ) return bin
Return binary array .
132
4
21,398
def tobinstr ( self , start = None , end = None , pad = _DEPRECATED , size = None ) : if not isinstance ( pad , _DeprecatedParam ) : print ( "IntelHex.tobinstr: 'pad' parameter is deprecated." ) if pad is not None : print ( "Please, use IntelHex.padding attribute instead." ) else : print ( "Please, don't pass it explicitly." ) print ( "Use syntax like this: ih.tobinstr(start=xxx, end=yyy, size=zzz)" ) else : pad = None return self . _tobinstr_really ( start , end , pad , size )
Convert to binary form and return as binary string .
147
11
21,399
def tobinfile ( self , fobj , start = None , end = None , pad = _DEPRECATED , size = None ) : if not isinstance ( pad , _DeprecatedParam ) : print ( "IntelHex.tobinfile: 'pad' parameter is deprecated." ) if pad is not None : print ( "Please, use IntelHex.padding attribute instead." ) else : print ( "Please, don't pass it explicitly." ) print ( "Use syntax like this: ih.tobinfile(start=xxx, end=yyy, size=zzz)" ) else : pad = None if getattr ( fobj , "write" , None ) is None : fobj = open ( fobj , "wb" ) close_fd = True else : close_fd = False fobj . write ( self . _tobinstr_really ( start , end , pad , size ) ) if close_fd : fobj . close ( )
Convert to binary and write to file .
206
9