idx
int64
0
63k
question
stringlengths
53
5.28k
target
stringlengths
5
805
46,200
def gen(skipdirhtml=False):
    """Generate html and dirhtml output.

    Converts CHANGELOG.md to docs/changelog.rst with pandoc, then runs the
    Sphinx builds.

    Parameters
    ----------
    skipdirhtml : bool
        If True, skip the dirhtml build and only build plain html.
    """
    docs_changelog = 'docs/changelog.rst'
    # Refuse to clobber uncommitted user edits to the generated file.
    check_git_unchanged(docs_changelog)
    # Convert the markdown changelog into reStructuredText for Sphinx.
    pandoc('--from=markdown', '--to=rst', '--output=' + docs_changelog, 'CHANGELOG.md')
    # plumbum-style invocation: `& FG` runs the command in the foreground.
    if not skipdirhtml:
        sphinx_build['-b', 'dirhtml', '-W', '-E', 'docs', 'docs/_build/dirhtml'] & FG
    sphinx_build['-b', 'html', '-W', '-E', 'docs', 'docs/_build/html'] & FG
Generate html and dirhtml output .
46,201
def __reset_crosshair(self):
    """Redraw the cross-hair on the horizontal slice plot by moving the
    marker lines to the currently stored (x, y) coordinate."""
    horizontal, vertical = self.lhor, self.lver
    vertical.set_xdata(self.x_coord)
    horizontal.set_ydata(self.y_coord)
redraw the cross - hair on the horizontal slice plot
46,202
def __init_vertical_plot(self):
    """Set up the vertical profile plot on axis ax2: clear any previous
    lines, label the axes, set the title/limits, and draw the slider
    marker line."""
    axis = self.ax2
    if len(axis.lines) > 0:
        axis.cla()
    axis.set_ylabel(self.datalabel, fontsize=self.fontsize)
    axis.set_xlabel(self.spectrumlabel, fontsize=self.fontsize)
    axis.set_title('vertical point profiles', fontsize=self.fontsize)
    axis.set_xlim([1, self.bands])
    self.vline = axis.axvline(self.slider.value, color='black')
set up the vertical profile plot
46,203
def PythonPercentFormat(format_str):
    """Use Python % format strings as template format specifiers.

    A spec like ``'printf %5.2f'`` yields a formatter applying ``'%5.2f'``
    to the value; any other spec returns None so other handlers may claim it.
    """
    prefix = 'printf '
    if not format_str.startswith(prefix):
        return None
    fmt = format_str[len(prefix):]

    def _apply(value):
        return fmt % value

    return _apply
Use Python % format strings as template format specifiers .
46,204
def Plural(format_str):
    """Formatter factory for 'plural?&lt;sep&gt;&lt;plural&gt;&lt;sep&gt;&lt;singular&gt;' specs.

    Returns a formatter that picks the plural form for ints > 1 or lists of
    length > 1, and the singular form otherwise; returns None when
    format_str is not a plural? spec.

    Raises Error when the spec does not carry exactly 2 arguments.
    """
    if not format_str.startswith('plural?'):
        return None
    i = len('plural?')
    try:
        splitchar = format_str[i]
        _, plural_val, singular_val = format_str.split(splitchar)
    except (IndexError, ValueError):
        # IndexError: no separator character after 'plural?'.
        # ValueError: wrong number of separated parts (bug fix -- this case
        # previously escaped as an unhandled ValueError from the unpacking
        # instead of producing the intended Error message).
        raise Error('plural? must have exactly 2 arguments')

    def Formatter(value):
        plural = False
        if isinstance(value, int) and value > 1:
            plural = True
        if isinstance(value, list) and len(value) > 1:
            plural = True
        return plural_val if plural else singular_val

    return Formatter
Returns whether the value should be considered a plural value .
46,205
def _correct_qualimap_genome_results(samples):
    """Fix Qualimap genome_results files so they can be parsed downstream.

    Qualimap writes thousands separators (e.g. "6,082.49") into the metrics
    section, which makes java.lang.Double.parseDouble fail later; strip the
    commas from every line after the ">> Reference" marker.
    """
    for s in samples:
        if verify_file(s.qualimap_genome_results_fpath):
            correction_is_needed = False
            with open(s.qualimap_genome_results_fpath, 'r') as f:
                content = f.readlines()
                metrics_started = False
                for line in content:
                    if ">> Reference" in line:
                        metrics_started = True
                    if metrics_started:
                        if line.find(',') != -1:
                            correction_is_needed = True
                            break
            # Rewrite the file only when a comma was actually found.
            if correction_is_needed:
                with open(s.qualimap_genome_results_fpath, 'w') as f:
                    metrics_started = False
                    for line in content:
                        if ">> Reference" in line:
                            metrics_started = True
                        if metrics_started:
                            if line.find(',') != -1:
                                line = line.replace(',', '')
                        f.write(line)
Fix the java.lang.Double.parseDouble error on entries like "6,082.49".
46,206
def _correct_qualimap_insert_size_histogram(work_dir, samples):
    """Replace the Qualimap insert-size histogram with the Picard one.

    Also migrates old 'raw_data_qualimapReport' directories to the newer
    'raw_data' layout.
    """
    for s in samples:
        qualimap1_dirname = dirname(s.qualimap_ins_size_hist_fpath).replace('raw_data_qualimapReport', 'raw_data')
        qualimap2_dirname = dirname(s.qualimap_ins_size_hist_fpath)
        # Old layout found: rename it to the new layout, or drop it if the
        # new layout already exists.
        if exists(qualimap1_dirname):
            if not exists(qualimap2_dirname):
                shutil.move(qualimap1_dirname, qualimap2_dirname)
            else:
                shutil.rmtree(qualimap1_dirname)
        elif not exists(qualimap2_dirname):
            continue  # no histogram data at all for this sample
        if verify_file(s.qualimap_ins_size_hist_fpath, silent=True) and tc.reuse_intermediate:
            pass  # already converted earlier; keep the existing file
        else:
            if verify_file(s.picard_ins_size_hist_txt_fpath):
                with open(s.picard_ins_size_hist_txt_fpath, 'r') as picard_f:
                    # Skip the Picard header: stop one line after '## HISTOGRAM'
                    # so the file iterator is positioned at the data rows.
                    one_line_to_stop = False
                    for line in picard_f:
                        if one_line_to_stop:
                            break
                        if line.startswith('## HISTOGRAM'):
                            one_line_to_stop = True
                    # Copy the remaining histogram rows into the Qualimap file.
                    with file_transaction(work_dir, s.qualimap_ins_size_hist_fpath) as tx:
                        with open(tx, 'w') as qualimap_f:
                            for line in picard_f:
                                qualimap_f.write(line)
replacing Qualimap insert size histogram with Picard one .
46,207
def norm_vec(vector):
    """Normalize the length of a vector to one.

    Parameters
    ----------
    vector : sequence of exactly three numeric components.

    Returns
    -------
    ndarray of shape (3,) with unit Euclidean length.
    """
    assert len(vector) == 3
    arr = np.array(vector)
    length = np.sqrt((arr * arr).sum())
    return arr / length
Normalize the length of a vector to one
46,208
def sphere_points_from_angles_and_tilt(angles, tilted_axis):
    """For a given tilt of the rotational axis `tilted_axis`, compute the
    points on a unit sphere that correspond to the distribution `angles`
    along the great circle about this axis.

    Parameters
    ----------
    angles : 1d ndarray
        rotation angles (radians)
    tilted_axis : length-3 sequence
        direction of the tilted rotation axis (normalized internally)

    Returns
    -------
    (N, 3) ndarray of points; the first point is anchored at (0, 0, 1).
    """
    assert len(angles.shape) == 1
    tilted_axis = norm_vec(tilted_axis)
    [u, v, w] = tilted_axis
    # Start with points on the great circle in the x-z plane, measured
    # relative to the first angle.
    newang = np.zeros((angles.shape[0], 3), dtype=float)
    newang[:, 0] = np.sin(angles - angles[0])
    newang[:, 2] = np.cos(angles - angles[0])
    # theta: tilt of the axis away from the y axis; phi: azimuth in x-z.
    theta = np.arccos(v)
    if np.allclose(u, 0) and np.allclose(w, 0):
        phi = 0  # axis parallel to y: azimuth is arbitrary, pick 0
    else:
        phi = np.arctan2(u, w)
    # shrink the circle radius according to the tilt
    rtilt = np.cos(theta)
    newang *= rtilt
    # rotate every point about the x axis by theta
    Rx = np.array([[1, 0, 0],
                   [0, np.cos(theta), -np.sin(theta)],
                   [0, np.sin(theta), np.cos(theta)]])
    for ii in range(newang.shape[0]):
        newang[ii] = np.dot(Rx, newang[ii])
    # translate so that the first point sits exactly at (0, 0, 1)
    newang = newang - (newang[0] - np.array([0, 0, 1])).reshape(1, 3)
    # rotate every point about the y axis by phi
    Ry = np.array([[+np.cos(phi), 0, np.sin(phi)],
                   [0, 1, 0],
                   [-np.sin(phi), 0, np.cos(phi)]])
    for jj in range(newang.shape[0]):
        newang[jj] = np.dot(Ry, newang[jj])
    return newang
For a given tilt of the rotational axis tilted_axis compute the points on a unit sphere that correspond to the distribution angles along the great circle about this axis .
46,209
def check_git_unchanged(filename, yes=False):
    """Check git to avoid overwriting user changes.

    Prompts the user (unless ``yes`` is True) when staged or unstaged
    changes to *filename* exist; raises RuntimeError when the user declines.
    Note: confirming the staged prompt skips the unstaged check entirely.
    """
    if check_staged(filename):
        prompt = 'There are staged changes in {}, overwrite? [y/n] '.format(filename)
        if not (yes or input(prompt) in ('y', 'yes')):
            raise RuntimeError('There are staged changes in '
                               '{}, aborting.'.format(filename))
        return
    if check_unstaged(filename):
        prompt = 'There are unstaged changes in {}, overwrite? [y/n] '.format(filename)
        if not (yes or input(prompt) in ('y', 'yes')):
            raise RuntimeError('There are unstaged changes in '
                               '{}, aborting.'.format(filename))
Check git to avoid overwriting user changes .
46,210
def check_staged(filename=None):
    """Check if there are changes to be committed in the index.

    Returns True/False based on `git diff-index --cached`; any other exit
    code is treated as a git failure and raised.
    """
    retcode, _, stdout = git['diff-index', '--quiet', '--cached',
                             'HEAD', filename].run(retcode=None)
    if retcode == 0:
        return False
    if retcode == 1:
        return True
    raise RuntimeError(stdout)
Check if there are changes to be committed in the index .
46,211
def run_process(analysis, action_name, message='__nomessagetoken__'):
    """Executes an action in the analysis with the given message.

    Tornado-style generator coroutine: yields futures for emits and
    handler invocations. '__nomessagetoken__' is a sentinel meaning
    "no message supplied".
    """
    if analysis is None:
        return
    # A '__process_id' key in a dict message marks a tracked process; emit
    # start/end status events around the handler calls.
    process_id = None
    if isinstance(message, dict) and '__process_id' in message:
        process_id = message['__process_id']
        del message['__process_id']
    if process_id:
        yield analysis.emit('__process', {'id': process_id, 'status': 'start'})
    # Handlers registered for this action, plus wildcard '*' handlers.
    fns = [functools.partial(handler, analysis)
           for handler in (analysis._action_handlers.get(action_name, []) +
                           analysis._action_handlers.get('*', []))]
    if fns:
        # Map the message onto positional or keyword arguments.
        args, kwargs = [], {}
        if isinstance(message, list):
            args = message
        elif isinstance(message, dict):
            kwargs = message
        elif message == '__nomessagetoken__':
            pass  # call handlers with no arguments
        else:
            args = [message]
        for fn in fns:
            log.debug('calling {}'.format(fn))
            try:
                yield tornado.gen.maybe_future(fn(*args, **kwargs))
            except Exception as e:
                # notify the frontend, then propagate
                yield analysis.emit('error', 'an Exception occured')
                raise e
    else:
        yield analysis.emit('warn', 'no handler for {}'.format(action_name))
    if process_id:
        yield analysis.emit('__process', {'id': process_id, 'status': 'end'})
Executes an action in the analysis with the given message .
46,212
def dl_file(url, dest, chunk_size=6553):
    """Download *url* to *dest* (a pathlib.Path), streaming in chunks of
    ``chunk_size`` bytes."""
    import urllib3
    pool = urllib3.PoolManager()
    response = pool.request('GET', url, preload_content=False)
    with dest.open('wb') as sink:
        while True:
            chunk = response.read(chunk_size)
            if chunk is None or len(chunk) == 0:
                break
            sink.write(chunk)
    response.release_conn()
Download url to dest
46,213
def extract_lzma(path):
    """Decompress the lzma file at *path* into a temporary ``.tar`` file
    and return the temporary file's name (the caller must remove it)."""
    source = pathlib.Path(path)
    with source.open("rb") as compressed:
        payload = lzma.decompress(compressed.read())
    handle, tmpname = tempfile.mkstemp(prefix="odt_ex_", suffix=".tar")
    with open(handle, "wb") as sink:
        sink.write(payload)
    return tmpname
Extract an lzma file and return the temporary file name
46,214
def get_file(fname, datapath=datapath):
    """Return path of an example data file.

    Downloads the file from the module-level ``webloc`` URL into
    ``datapath`` if it is not already present.  On download failure a
    warning is issued and the original exception re-raised.
    """
    datapath = pathlib.Path(datapath)
    datapath.mkdir(parents=True, exist_ok=True)
    dlfile = datapath / fname
    if not dlfile.exists():
        print("Attempting to download file {} from {} to {}.".format(
            fname, webloc, datapath))
        try:
            dl_file(url=webloc + fname, dest=dlfile)
        except BaseException:
            warnings.warn("Download failed: {}".format(fname))
            raise
    return dlfile
Return path of an example data file
46,215
def load_data(fname, **kwargs):
    """Load example data, dispatching on the file suffix.

    ``.lzma`` files go through load_tar_lzma_data, ``.zip`` files through
    load_zip_data (which receives **kwargs); any other suffix yields None.
    """
    path = get_file(fname)
    suffix = path.suffix
    if suffix == ".lzma":
        return load_tar_lzma_data(path)
    if suffix == ".zip":
        return load_zip_data(path, **kwargs)
Load example data
46,216
def load_tar_lzma_data(tlfile):
    """Load example sinogram data from a .tar.lzma file.

    Returns (sino, angles, phantom, parms): the complex sinogram, the
    assumed equally-spaced projection angles, the phantom stack, and the
    simulation parameters parsed from 'fdtd_info' files.
    """
    tmpname = extract_lzma(tlfile)
    fields_real = []
    fields_imag = []
    phantom = []
    parms = {}
    with tarfile.open(tmpname, "r") as t:
        members = t.getmembers()
        # sort by name so real/imag field files align index-by-index
        members.sort(key=lambda x: x.name)
        for m in members:
            n = m.name
            f = t.extractfile(m)
            if n.startswith("fdtd_info"):
                # simulation parameters: lines of the form "key = value"
                for ln in f.readlines():
                    ln = ln.decode()
                    if ln.count("=") == 1:
                        key, val = ln.split("=")
                        parms[key.strip()] = float(val.strip())
            elif n.startswith("phantom"):
                phantom.append(np.loadtxt(f))
            elif n.startswith("field"):
                if n.endswith("imag.txt"):
                    fields_imag.append(np.loadtxt(f))
                elif n.endswith("real.txt"):
                    fields_real.append(np.loadtxt(f))
    # best-effort cleanup of the temporary tar file
    try:
        os.remove(tmpname)
    except OSError:
        pass
    phantom = np.array(phantom)
    sino = np.array(fields_real) + 1j * np.array(fields_imag)
    # assume projections equally distributed over the full circle
    angles = np.linspace(0, 2 * np.pi, sino.shape[0], endpoint=False)
    return sino, angles, phantom, parms
Load example sinogram data from a . tar . lzma file
46,217
def load_zip_data(zipname, f_sino_real, f_sino_imag,
                  f_angles=None, f_phantom=None, f_info=None):
    """Load example sinogram data from a .zip file.

    Always loads the complex sinogram from the real/imag text members;
    angles, phantom, and an info dict (lines of the form "key = value")
    are appended to the returned list when their member names are given.
    """
    results = []
    with zipfile.ZipFile(str(zipname)) as arc:
        real_part = np.loadtxt(arc.open(f_sino_real))
        imag_part = np.loadtxt(arc.open(f_sino_imag))
        results.append(real_part + 1j * imag_part)
        if f_angles:
            results.append(np.loadtxt(arc.open(f_angles)))
        if f_phantom:
            results.append(np.loadtxt(arc.open(f_phantom)))
        if f_info:
            cfg = {}
            with arc.open(f_info) as info:
                for raw in info.readlines():
                    text = raw.decode()
                    if text.count("=") == 1:
                        key, val = text.split("=")
                        cfg[key.strip()] = float(val.strip())
            results.append(cfg)
    return results
Load example sinogram data from a . zip file
46,218
def transform_to(ext):
    """Decorator to derive an output filename from an input filename by
    swapping its suffix for `ext`.

    If 'out_file' is not supplied, it is built from 'in_file' (or the first
    positional argument), optionally inside 'out_dir'.  The wrapped
    function is skipped (memoized) when the output file already exists.
    """
    def decor(f):
        @functools.wraps(f)
        def wrapper(*args, **kwargs):
            out_file = kwargs.get("out_file", None)
            if not out_file:
                in_path = kwargs.get("in_file", args[0])
                out_dir = kwargs.get("out_dir", os.path.dirname(in_path))
                safe_mkdir(out_dir)
                out_name = replace_suffix(os.path.basename(in_path), ext)
                out_file = os.path.join(out_dir, out_name)
            kwargs["out_file"] = out_file
            # memoization: only run f when the output is missing
            if not file_exists(out_file):
                out_file = f(*args, **kwargs)
            return out_file
        return wrapper
    return decor
Decorator to create an output filename from an input filename with the specified extension. The extension is changed: in_file is transformed to a new type.
46,219
def filter_to(word):
    """Decorator to derive an output filename from an input filename by
    appending `word` to the stem (extension preserved).

    Use this over transform_to when the incoming file's extension is not
    known.  The wrapped function is skipped (memoized) when the output file
    already exists.
    """
    def decor(f):
        @functools.wraps(f)
        def wrapper(*args, **kwargs):
            out_file = kwargs.get("out_file", None)
            if not out_file:
                in_path = kwargs.get("in_file", args[0])
                out_dir = kwargs.get("out_dir", os.path.dirname(in_path))
                safe_mkdir(out_dir)
                out_name = append_stem(os.path.basename(in_path), word)
                out_file = os.path.join(out_dir, out_name)
            kwargs["out_file"] = out_file
            # memoization: only run f when the output is missing
            if not file_exists(out_file):
                out_file = f(*args, **kwargs)
            return out_file
        return wrapper
    return decor
Decorator to create an output filename from an input filename by adding a word onto the stem. in_file is filtered by the function and the results are written to out_file. You would want to use this over transform_to if you don't know the extension of the file going in. This also memoizes the output file.
46,220
def get_in(d, t, default=None):
    """Look up a tuple of keys in a nested dictionary, each item in the
    tuple addressing one layer deeper.

    Returns `default` when any key along the path is missing.  Bug fix:
    the previous implementation tested the final value for truthiness, so
    stored falsy values (0, '', [], None) were indistinguishable from a
    missing key; now only a genuinely absent path yields the default.
    """
    result = d
    for key in t:
        try:
            result = result[key]
        except (KeyError, IndexError, TypeError):
            return default
    return result
look up if you can get a tuple of values from a nested dictionary each item in the tuple a deeper layer
46,221
def which(program):
    """Return the path to an executable, or None if it can't be found.

    A program name containing a directory component is checked directly;
    a bare name is searched for along os.environ['PATH'].
    """
    def runnable(candidate):
        return os.path.isfile(candidate) and os.access(candidate, os.X_OK)

    head, _ = os.path.split(program)
    if head:
        return program if runnable(program) else None
    for directory in os.environ["PATH"].split(os.pathsep):
        candidate = os.path.join(directory, program)
        if runnable(candidate):
            return candidate
    return None
Returns the path to an executable, or None if it can't be found.
46,222
def expanduser(path):
    """Expand ~ and ~user constructs.

    Resolves the home directory from HOME, then USERPROFILE, then
    HOMEDRIVE+HOMEPATH (Windows); returns the path unchanged when no home
    can be determined or the path does not start with '~'.
    """
    if not path.startswith('~'):
        return path
    # locate the end of the optional user name (first slash or backslash)
    end, length = 1, len(path)
    while end < length and path[end] not in '/\\':
        end += 1
    env = os.environ
    if 'HOME' in env:
        userhome = env['HOME']
    elif 'USERPROFILE' in env:
        userhome = env['USERPROFILE']
    elif 'HOMEPATH' not in env:
        return path
    else:
        userhome = join(env.get('HOMEDRIVE', ''), env['HOMEPATH'])
    if end != 1:
        # '~user' form: replace the last path component of the home dir
        userhome = join(dirname(userhome), path[1:end])
    return userhome + path[end:]
Expand ~ and ~user constructs .
46,223
def dots_to_empty_cells(config, tsv_fpath):
    """Put dots instead of empty cells so the TSV can be viewed with
    ``column -t``; returns the path of the rewritten file."""
    def _fill(line, _index):
        filled = line
        while '\t\t' in filled:
            filled = filled.replace('\t\t', '\t.\t')
        return filled
    return iterate_file(config, tsv_fpath, _fill, suffix='dots')
Put dots instead of empty cells in order to view the TSV with `column -t`.
46,224
def trigger_all_callbacks(self, callbacks=None):
    """Trigger callbacks for all keys on all or a subset of subscribers.

    Bug fix: the ``callbacks`` argument is now forwarded to
    ``trigger_callbacks`` -- previously the inner call hard-coded
    ``callbacks=None``, silently ignoring the parameter.
    """
    return [ret
            for key in self
            for ret in self.trigger_callbacks(key, callbacks=callbacks)]
Trigger callbacks for all keys on all or a subset of subscribers .
46,225
def set_state(self, updater=None, **kwargs):
    """Update the datastore.

    ``updater`` may be a callable producing a change dict from self, a
    plain dict of changes, or omitted (then **kwargs is the change set).
    Returns the concatenated callback results from each self.set call.
    """
    if callable(updater):
        changes = updater(self)
    elif updater is not None:
        changes = updater
    else:
        changes = kwargs
    results = []
    for key, value in changes.items():
        results.extend(self.set(key, value))
    return results
Update the datastore .
46,226
def run_multisample_qualimap(output_dir, work_dir, samples, targqc_full_report):
    """Build the multi-sample Qualimap report plots.

    1. Generates Qualimap2 multi-bamqc plots into ``<output_dir>/plots``.
    2. Appends the relative plot paths to ``targqc_full_report.plots``.

    Returns None on failure paths.
    """
    plots_dirpath = join(output_dir, 'plots')
    individual_report_fpaths = [s.qualimap_html_fpath for s in samples]
    # Reuse previously generated plots when every one of them is still
    # up to date with respect to the per-sample reports.
    if isdir(plots_dirpath) and not any(
            not can_reuse(join(plots_dirpath, f), individual_report_fpaths)
            for f in listdir(plots_dirpath)
            if not f.startswith('.')):
        debug('Qualimap miltisample plots exist - ' + plots_dirpath + ', reusing...')
    else:
        # Only attempt the multi-sample run when at least one per-sample
        # Qualimap HTML report exists and the qualimap binary is available.
        if len([s.qualimap_html_fpath for s in samples if s.qualimap_html_fpath]) > 0:
            if find_executable() is not None:
                qualimap_output_dir = join(work_dir, 'qualimap_multi_bamqc')
                # Work around Qualimap number-formatting quirks before
                # feeding the per-sample results to multi-bamqc.
                _correct_qualimap_genome_results(samples)
                # NOTE(review): _correct_qualimap_insert_size_histogram is
                # defined elsewhere with signature (work_dir, samples); this
                # one-argument call looks inconsistent -- verify.
                _correct_qualimap_insert_size_histogram(samples)
                safe_mkdir(qualimap_output_dir)
                # sample-name -> report-path table required by multi-bamqc
                rows = []
                for sample in samples:
                    if sample.qualimap_html_fpath:
                        rows += [[sample.name, sample.qualimap_html_fpath]]
                data_fpath = write_tsv_rows(([], rows),
                    join(qualimap_output_dir, 'qualimap_results_by_sample.tsv'))
                qualimap_plots_dirpath = join(qualimap_output_dir, 'images_multisampleBamQcReport')
                cmdline = find_executable() + ' multi-bamqc --data {data_fpath} -outdir {qualimap_output_dir}'.format(**locals())
                # DISPLAY=None keeps Qualimap from opening an X display.
                run(cmdline, env_vars=dict(DISPLAY=None),
                    checks=[lambda _1, _2: verify_dir(qualimap_output_dir)],
                    reuse=cfg.reuse_intermediate)
                if not verify_dir(qualimap_plots_dirpath):
                    warn('Warning: Qualimap for multi-sample analysis failed to finish. TargQC will not contain plots.')
                    return None
                else:
                    # replace any stale plots directory with the fresh one
                    if exists(plots_dirpath):
                        shutil.rmtree(plots_dirpath)
                    shutil.move(qualimap_plots_dirpath, plots_dirpath)
        else:
            warn('Warning: Qualimap for multi-sample analysis was not found. TargQC will not contain plots.')
            return None
    # register every verified .png plot relative to the output directory
    targqc_full_report.plots = []
    for plot_fpath in listdir(plots_dirpath):
        plot_fpath = join(plots_dirpath, plot_fpath)
        if verify_file(plot_fpath) and plot_fpath.endswith('.png'):
            targqc_full_report.plots.append(relpath(plot_fpath, output_dir))
1 . Generates Qualimap2 plots and put into plots_dirpath 2 . Adds records to targqc_full_report . plots
46,227
def odt_to_ri(f, res, nm):
    r"""Convert the ODT object function to refractive index.

    Parameters: f -- object function (complex ndarray); res -- wavelength
    in pixels; nm -- medium refractive index.
    """
    km = (2 * np.pi * nm) / res
    ri = nm * np.sqrt(f / km ** 2 + 1)
    # Where the square root picked the negative real branch, flip the sign.
    ri[np.where(ri.real < 0)] *= -1
    return ri
r Convert the ODT object function to refractive index
46,228
def opt_to_ri(f, res, nm):
    r"""Convert the OPT object function to refractive index.

    Parameters: f -- object function; res -- wavelength in pixels;
    nm -- medium refractive index.
    """
    scale = res / (2 * np.pi)
    return nm + scale * f
r Convert the OPT object function to refractive index
46,229
def rasterize(vectorobject, reference, outname=None, burn_values=1, expressions=None, nodata=0, append=False):
    """Rasterize a vector object onto the grid of a reference Raster.

    Each attribute-filter expression is burned with its matching burn
    value.  With ``outname`` the result is written as GTiff (or an existing
    file updated when ``append``); otherwise an in-memory Raster is
    returned.
    """
    if expressions is None:
        expressions = ['']
    if isinstance(burn_values, (int, float)):
        burn_values = [burn_values]
    if len(expressions) != len(burn_values):
        raise RuntimeError('expressions and burn_values of different length')
    # validate every attribute filter up front so that nothing is burned
    # before all expressions are known to be valid
    failed = []
    for exp in expressions:
        try:
            vectorobject.layer.SetAttributeFilter(exp)
        except RuntimeError:
            failed.append(exp)
    if len(failed) > 0:
        raise RuntimeError('failed to set the following attribute filter(s): ["{}"]'.format('", '.join(failed)))
    # pick the target dataset: update an existing file, or create a new
    # GTiff/in-memory dataset shaped like the reference raster
    if append and outname is not None and os.path.isfile(outname):
        target_ds = gdal.Open(outname, GA_Update)
    else:
        if not isinstance(reference, Raster):
            raise RuntimeError("parameter 'reference' must be of type Raster")
        if outname is not None:
            target_ds = gdal.GetDriverByName('GTiff').Create(outname, reference.cols, reference.rows, 1, gdal.GDT_Byte)
        else:
            target_ds = gdal.GetDriverByName('MEM').Create('', reference.cols, reference.rows, 1, gdal.GDT_Byte)
        target_ds.SetGeoTransform(reference.raster.GetGeoTransform())
        target_ds.SetProjection(reference.raster.GetProjection())
        band = target_ds.GetRasterBand(1)
        band.SetNoDataValue(nodata)
        band.FlushCache()
        band = None
    # burn each expression's features with its value
    for expression, value in zip(expressions, burn_values):
        vectorobject.layer.SetAttributeFilter(expression)
        gdal.RasterizeLayer(target_ds, [1], vectorobject.layer, burn_values=[value])
    vectorobject.layer.SetAttributeFilter('')
    # return an in-memory Raster, or close the file-backed dataset
    if outname is None:
        return Raster(target_ds)
    else:
        target_ds = None
rasterize a vector object
46,230
def reproject(rasterobject, reference, outname, targetres=None, resampling='bilinear', format='GTiff'):
    """Reproject a raster file.

    The target CRS (and, when available, resolution) is taken from
    ``reference``, which may be a Raster/Vector object or a CRS
    description (EPSG int, WKT/proj4 str, osr.SpatialReference).
    """
    if isinstance(rasterobject, str):
        rasterobject = Raster(rasterobject)
    if not isinstance(rasterobject, Raster):
        raise RuntimeError('rasterobject must be of type Raster or str')
    if isinstance(reference, (Raster, Vector)):
        projection = reference.projection
        # resolution: explicit targetres wins, else the reference's own
        if targetres is not None:
            xres, yres = targetres
        elif hasattr(reference, 'res'):
            xres, yres = reference.res
        else:
            raise RuntimeError('parameter targetres is missing and cannot be read from the reference')
    elif isinstance(reference, (int, str, osr.SpatialReference)):
        try:
            projection = crsConvert(reference, 'proj4')
        except TypeError:
            raise RuntimeError('reference projection cannot be read')
        # a bare CRS carries no resolution: targetres is then mandatory
        if targetres is None:
            raise RuntimeError('parameter targetres is missing and cannot be read from the reference')
        else:
            xres, yres = targetres
    else:
        raise TypeError('reference must be of type Raster, Vector, osr.SpatialReference, str or int')
    options = {'format': format,
               'resampleAlg': resampling,
               'xRes': xres, 'yRes': yres,
               'srcNodata': rasterobject.nodata,
               'dstNodata': rasterobject.nodata,
               'dstSRS': projection}
    gdalwarp(rasterobject, outname, options)
reproject a raster file
46,231
def allstats(self, approximate=False):
    """Compute basic raster statistics (min, max, mean, sdev) per band.

    Parameters
    ----------
    approximate : bool
        allow GDAL to approximate the statistics?

    Returns
    -------
    list
        one dict per band, or None for bands whose statistics computation
        failed.
    """
    statcollect = []
    for x in self.layers():
        try:
            stats = x.ComputeStatistics(approximate)
        except RuntimeError:
            stats = None
        # Bug fix: previously dict(zip(keys, None)) raised a TypeError when
        # ComputeStatistics failed; a failed band now yields None instead.
        if stats is not None:
            stats = dict(zip(['min', 'max', 'mean', 'sdev'], stats))
        statcollect.append(stats)
    return statcollect
Compute some basic raster statistics
46,232
def array(self):
    """Read all raster bands into a numpy ndarray.

    Single-band rasters are delegated to self.matrix(); multi-band data is
    returned as (rows, cols, bands) with nodata values replaced by NaN.
    NOTE(review): assigning np.nan assumes a float dtype; integer rasters
    would need a cast first -- verify upstream guarantees.
    """
    if self.bands == 1:
        return self.matrix()
    else:
        # GDAL delivers (bands, rows, cols); move bands to the last axis
        arr = self.raster.ReadAsArray().transpose(1, 2, 0)
        if isinstance(self.nodata, list):
            # per-band nodata values
            for i in range(0, self.bands):
                arr[:, :, i][arr[:, :, i] == self.nodata[i]] = np.nan
        else:
            arr[arr == self.nodata] = np.nan
        return arr
read all raster bands into a numpy ndarray
46,233
def bandnames(self, names):
    """Set the names of the raster bands.

    Raises TypeError when ``names`` is not a list and ValueError when its
    length does not match the number of bands.
    """
    if not isinstance(names, list):
        raise TypeError('the names to be set must be of type list')
    expected = self.bands
    if len(names) != expected:
        message = 'length mismatch of names to be set ({}) and number of bands ({})'
        raise ValueError(message.format(len(names), expected))
    self.__bandnames = names
set the names of the raster bands
46,234
def extract(self, px, py, radius=1, nodata=None):
    """Extract a weighted average of the pixels intersecting a window of
    ``radius`` pixels around the georeferenced point (px, py).

    Returns ``nodata`` when no valid pixel falls inside the window.
    NOTE(review): the weight used is the distance itself, so pixels
    FARTHER from the point weigh more; inverse-distance weighting would
    use 1/distance -- confirm the intended behaviour.
    """
    if not self.geo['xmin'] <= px <= self.geo['xmax']:
        raise RuntimeError('px is out of bounds')
    if not self.geo['ymin'] <= py <= self.geo['ymax']:
        raise RuntimeError('py is out of bounds')
    if nodata is None:
        nodata = self.nodata
    xres, yres = self.res
    hx = xres / 2.0  # half a pixel in x
    hy = yres / 2.0  # half a pixel in y
    xlim = float(xres * radius)
    ylim = float(yres * radius)
    # pixel window [xmin:xmax, ymin:ymax], clipped to the raster extent
    xmin = int(floor((px - self.geo['xmin'] - xlim) / xres))
    ymin = int(floor((self.geo['ymax'] - py - ylim) / yres))
    xmin = xmin if xmin >= 0 else 0
    ymin = ymin if ymin >= 0 else 0
    xmax = int(ceil((px - self.geo['xmin'] + xlim) / xres))
    ymax = int(ceil((self.geo['ymax'] - py + ylim) / yres))
    xmax = xmax if xmax <= self.cols else self.cols
    ymax = ymax if ymax <= self.rows else self.rows
    # prefer the cached in-memory band when available
    if self.__data[0] is not None:
        array = self.__data[0][ymin:ymax, xmin:xmax]
    else:
        array = self.raster.GetRasterBand(1).ReadAsArray(xmin, ymin, xmax - xmin, ymax - ymin)
    sum = 0
    counter = 0
    weightsum = 0
    for x in range(xmin, xmax):
        for y in range(ymin, ymax):
            val = array[y - ymin, x - xmin]
            if val != nodata:
                # pixel center coordinates in georeferenced space
                xc = x * xres + hx + self.geo['xmin']
                # NOTE(review): '+ hy' places the center half a pixel above
                # the row's top edge; '- hy' would be the usual convention.
                yc = self.geo['ymax'] - y * yres + hy
                dx = abs(xc - px)
                dy = abs(yc - py)
                weight = sqrt(dx ** 2 + dy ** 2)
                sum += val * weight
                weightsum += weight
                counter += 1
    array = None
    if counter > 0:
        return sum / weightsum
    else:
        return nodata
extract weighted average of pixels intersecting with a defined radius to a point .
46,235
def geo(self):
    """General image geo information.

    Returns a dict with the six GDAL geotransform entries (xmin, xres,
    rotation_x, ymax, rotation_y, yres) plus the derived xmax and ymin.
    """
    keys = ['xmin', 'xres', 'rotation_x', 'ymax', 'rotation_y', 'yres']
    out = dict(zip(keys, self.raster.GetGeoTransform()))
    # derive the remaining corners from resolution and image size
    out['xmax'] = out['xmin'] + out['xres'] * self.cols
    out['ymin'] = out['ymax'] + out['yres'] * self.rows
    return out
General image geo information .
46,236
def res(self):
    """Return the raster resolution in x and y direction as a tuple of
    absolute float values."""
    xres = abs(float(self.geo['xres']))
    yres = abs(float(self.geo['yres']))
    return (xres, yres)
the raster resolution in x and y direction
46,237
def rescale(self, fun):
    """Apply the custom function *fun* to the single band matrix and
    assign the result back to this raster object in memory.

    Raises ValueError for multi-band images.
    """
    if self.bands != 1:
        raise ValueError('only single band images are currently supported')
    self.assign(fun(self.matrix()), band=0)
perform raster computations with custom functions and assign them to the existing raster object in memory
46,238
def write(self, outname, dtype='default', format='ENVI', nodata='default', compress_tif=False, overwrite=False):
    """Write the raster object to a file.

    Parameters: outname -- target file name; dtype/nodata -- 'default'
    keeps the object's own values; format -- a GDAL driver name;
    compress_tif -- DEFLATE-compress GTiff output; overwrite -- replace an
    existing file.
    """
    if os.path.isfile(outname) and not overwrite:
        raise RuntimeError('target file already exists')
    # ensure a .tif extension for GTiff output
    if format == 'GTiff' and not re.search(r'\.tif[f]*$', outname):
        outname += '.tif'
    dtype = Dtype(self.dtype if dtype == 'default' else dtype).gdalint
    nodata = self.nodata if nodata == 'default' else nodata
    options = []
    if format == 'GTiff' and compress_tif:
        options += ['COMPRESS=DEFLATE', 'PREDICTOR=2']
    driver = gdal.GetDriverByName(format)
    outDataset = driver.Create(outname, self.cols, self.rows, self.bands, dtype, options)
    driver = None
    # carry over metadata, geotransform and projection
    outDataset.SetMetadata(self.raster.GetMetadata())
    outDataset.SetGeoTransform([self.geo[x] for x in ['xmin', 'xres', 'rotation_x', 'ymax', 'rotation_y', 'yres']])
    if self.projection is not None:
        outDataset.SetProjection(self.projection)
    # write the bands one by one (GDAL band indices start at 1)
    for i in range(1, self.bands + 1):
        outband = outDataset.GetRasterBand(i)
        if nodata is not None:
            outband.SetNoDataValue(nodata)
        mat = self.matrix(band=i)
        dtype_mat = str(mat.dtype)
        dtype_ras = Dtype(dtype).numpystr
        # warn (but proceed) when the in-memory dtype does not safely cast
        # to the requested output dtype
        if not np.can_cast(dtype_mat, dtype_ras):
            warnings.warn("writing band {}: unsafe casting from type {} to {}".format(i, dtype_mat, dtype_ras))
        outband.WriteArray(mat)
        del mat
        outband.FlushCache()
        outband = None
    if format == 'GTiff':
        outDataset.SetMetadataItem('TIFFTAG_DATETIME', strftime('%Y:%m:%d %H:%M:%S', gmtime()))
    outDataset = None
    # ENVI needs band names written into the companion header file
    if format == 'ENVI':
        hdrfile = os.path.splitext(outname)[0] + '.hdr'
        with HDRobject(hdrfile) as hdr:
            hdr.band_names = self.bandnames
            hdr.write()
write the raster object to a file .
46,239
def numpy2gdalint(self):
    """Create a dictionary mapping numpy dtype names to GDAL type codes.

    The table is computed once and cached on the instance.  Bug fix: the
    previous check ``hasattr(self, '__numpy2gdalint')`` never matched the
    name-mangled attribute set inside a class body, so the table was
    rebuilt on every access; a single-underscore attribute avoids the
    mangling mismatch.
    """
    if not hasattr(self, '_numpy2gdalint'):
        tmap = {}
        # NOTE(review): np.sctypes was removed in NumPy 2.0 -- confirm the
        # pinned NumPy version or migrate before upgrading.
        for group in ['int', 'uint', 'float', 'complex']:
            for dtype in np.sctypes[group]:
                code = gdal_array.NumericTypeCodeToGDALTypeCode(dtype)
                if code is not None:
                    tmap[dtype().dtype.name] = code
        self._numpy2gdalint = tmap
    return self._numpy2gdalint
create a dictionary for mapping numpy data types to GDAL data type codes
46,240
def static_parser(static):
    """Parse object describing static routes.

    Generator: dict groups are flattened into their (key, value) items;
    anything else is yielded unchanged.  ``None`` yields nothing.
    """
    if static is None:
        return
    entries = static.items() if isinstance(static, dict) else static
    for group in entries:
        if isinstance(group, dict):
            for item in group.items():
                yield item
        else:
            yield group
Parse object describing static routes .
46,241
def analyses_info(self):
    """Add analyses metadata from the analyses folder's index.yaml.

    Also appends a random suffix to the version in debug mode (cache
    busting) and fills the description from the README when index.yaml
    does not provide one.
    """
    f_config = os.path.join(self.analyses_path, 'index.yaml')
    # reload the server automatically when the config file changes
    tornado.autoreload.watch(f_config)
    with io.open(f_config, 'r', encoding='utf8') as f:
        config = yaml.safe_load(f)
    self.info.update(config)
    if self.debug:
        self.info['version'] += '.debug-{:04X}'.format(int(random.random() * 0xffff))
    readme = Readme(self.analyses_path)
    if self.info['description'] is None:
        self.info['description'] = readme.text.strip()
    self.info['description_html'] = readme.html
Add analyses from the analyses folder .
46,242
def build(self):
    """Run the build command(s) specified in index.yaml.

    Each command is executed through the shell from within the analyses
    folder; return codes are not checked.
    """
    workdir = self.analyses_path
    for command in self.build_cmds:
        log.info('building command: {}'.format(command))
        shell_command = 'cd {}; {}'.format(workdir, command)
        log.debug('full command: {}'.format(shell_command))
        # shell=True is required for the `cd ...;` compound command
        subprocess.call(shell_command, shell=True)
    log.info('build done')
Run the build command specified in index . yaml .
46,243
def get(self):
    """Render the List-of-Analyses overview page."""
    context = dict(self.info)
    return self.render('index.html',
                       databench_version=DATABENCH_VERSION,
                       meta_infos=self.meta_infos(),
                       **context)
Render the List - of - Analyses overview page .
46,244
def _init_worker(X, X_shape, X_dtype):
    """Pool initializer for _mprotate: publish the shared array and its
    shape/dtype metadata in the worker's module-level dict."""
    mprotate_dict.update(X=X, X_shape=X_shape, X_dtype=X_dtype)
Initializer for pool for _mprotate
46,245
def _mprotate(ang, lny, pool, order):
    """Use multiprocessing to wrap around _rotate.

    Splits the lny rows into ncores contiguous chunks and maps _rotate
    over (ymin, ymax, ang, order) tuples using the given pool.
    """
    targ_args = list()
    # int(...) instead of np.int(...): np.int was deprecated in NumPy 1.20
    # and removed in 1.24, where it raises AttributeError.
    slsize = int(np.floor(lny / ncores))
    for t in range(ncores):
        ymin = t * slsize
        # the last chunk absorbs the remainder rows
        ymax = lny if t == ncores - 1 else (t + 1) * slsize
        targ_args.append((ymin, ymax, ang, order))
    pool.map(_rotate, targ_args)
Uses multiprocessing to wrap around _rotate
46,246
def write_byte(self, address, value):
    """Write *value* to the unaddressed register of the device at
    *address*, delegating to the underlying driver."""
    device, payload = hex(address), bin(value)
    LOGGER.debug("Writing byte %s to device %s!", payload, device)
    return self.driver.write_byte(address, value)
Writes the byte to unaddressed register in a device .
46,247
def read_byte(self, address):
    """Read an unaddressed byte from the device at *address*, delegating
    to the underlying driver."""
    device = hex(address)
    LOGGER.debug("Reading byte from device %s!", device)
    return self.driver.read_byte(address)
Reads unadressed byte from a device .
46,248
def write_byte_data(self, address, register, value):
    """Write a byte *value* to *register* on the device at *address*,
    delegating to the underlying driver."""
    payload, reg, device = bin(value), hex(register), hex(address)
    LOGGER.debug("Writing byte data %s to register %s on device %s",
                 payload, reg, device)
    return self.driver.write_byte_data(address, register, value)
Write a byte value to a device s register .
46,249
async def _raise_for_status(response):
    """Raise an appropriate error for a given response.

    Re-raises aiohttp's ClientResponseError with the reason enriched by
    any Space-Track error message found in the JSON or text body.
    """
    try:
        response.raise_for_status()
    except aiohttp.ClientResponseError as exc:
        reason = response.reason
        spacetrack_error_msg = None
        # prefer a structured {'error': ...} JSON body when present
        try:
            json = await response.json()
            if isinstance(json, Mapping):
                spacetrack_error_msg = json['error']
        except (ValueError, KeyError, aiohttp.ClientResponseError):
            pass
        # fall back to the raw response text
        if not spacetrack_error_msg:
            spacetrack_error_msg = await response.text()
        if spacetrack_error_msg:
            reason += '\nSpace-Track response:\n' + spacetrack_error_msg
        payload = dict(
            code=response.status,
            message=reason,
            headers=response.headers,
        )
        # 'history' is not available on all aiohttp versions
        try:
            payload['history'] = exc.history
        except AttributeError:
            pass
        raise aiohttp.ClientResponseError(**payload)
Raise an appropriate error for a given response .
46,250
async def generic_request ( self , class_ , iter_lines = False , iter_content = False , controller = None , parse_types = False , ** kwargs ) : if iter_lines and iter_content : raise ValueError ( 'iter_lines and iter_content cannot both be True' ) if 'format' in kwargs and parse_types : raise ValueError ( 'parse_types can only be used if format is unset.' ) if controller is None : controller = self . _find_controller ( class_ ) else : classes = self . request_controllers . get ( controller , None ) if classes is None : raise ValueError ( 'Unknown request controller {!r}' . format ( controller ) ) if class_ not in classes : raise ValueError ( 'Unknown request class {!r} for controller {!r}' . format ( class_ , controller ) ) decode = ( class_ != 'download' ) if not decode and iter_lines : error = ( 'iter_lines disabled for binary data, since CRLF newlines ' 'split over chunk boundaries would yield extra blank lines. ' 'Use iter_content=True instead.' ) raise ValueError ( error ) await self . authenticate ( ) url = ( '{0}{1}/query/class/{2}' . format ( self . base_url , controller , class_ ) ) offline_check = ( class_ , controller ) in self . offline_predicates valid_fields = { p . name for p in self . rest_predicates } predicates = None if not offline_check : predicates = await self . get_predicates ( class_ ) predicate_fields = { p . name for p in predicates } valid_fields = predicate_fields | { p . name for p in self . rest_predicates } else : valid_fields |= self . offline_predicates [ ( class_ , controller ) ] for key , value in kwargs . items ( ) : if key not in valid_fields : raise TypeError ( "'{class_}' got an unexpected argument '{key}'" . format ( class_ = class_ , key = key ) ) value = _stringify_predicate_value ( value ) url += '/{key}/{value}' . format ( key = key , value = value ) logger . debug ( url ) resp = await self . 
_ratelimited_get ( url ) await _raise_for_status ( resp ) if iter_lines : return _AsyncLineIterator ( resp , decode_unicode = decode ) elif iter_content : return _AsyncChunkIterator ( resp , decode_unicode = decode ) else : if 'format' in kwargs : if decode : data = await resp . text ( ) data = data . replace ( '\r' , '' ) else : data = await resp . read ( ) return data else : data = await resp . json ( ) if predicates is None or not parse_types : return data else : return self . _parse_types ( data , predicates )
Generic Space - Track query coroutine .
46,251
def get_numeric_value ( string_value ) : num_chars = [ '.' , '+' , '-' ] number = '' for c in string_value : if c . isdigit ( ) or c in num_chars : number += c return number
parses string_value and returns only number - like part
46,252
def run ( analysis , path = None , name = None , info = None , ** kwargs ) : kwargs . update ( { 'analysis' : analysis , 'path' : path , 'name' : name , 'info' : info , } ) main ( ** kwargs )
Run a single analysis .
46,253
def _stringify_predicate_value ( value ) : if isinstance ( value , bool ) : return str ( value ) . lower ( ) elif isinstance ( value , Sequence ) and not isinstance ( value , six . string_types ) : return ',' . join ( _stringify_predicate_value ( x ) for x in value ) elif isinstance ( value , datetime . datetime ) : return value . isoformat ( sep = ' ' ) elif isinstance ( value , datetime . date ) : return value . isoformat ( ) elif value is None : return 'null-val' else : return str ( value )
Convert Python objects to Space - Track compatible strings
46,254
def args_repr ( * args , ** kwargs ) : items = [ repr ( a ) for a in args ] items += [ "%s = %r" % ( k , v ) for k , v in iter ( kwargs . items ( ) ) ] return ", " . join ( items )
Returns human - readable string representation of both positional and keyword arguments passed to the function .
46,255
def obj_repr ( obj , * args , ** kwargs ) : cls_name = type ( obj ) . __name__ return "%s(%s)" % ( cls_name , args_repr ( * args , ** kwargs ) , )
Returns human - readable string representation of an object given that it has been created by calling constructor with the specified positional and keyword arguments .
46,256
def dictmerge ( x , y ) : z = x . copy ( ) z . update ( y ) return z
merge two dictionaries
46,257
def parse_literal ( x ) : if isinstance ( x , list ) : return [ parse_literal ( y ) for y in x ] elif isinstance ( x , ( bytes , str ) ) : try : return int ( x ) except ValueError : try : return float ( x ) except ValueError : return x else : raise TypeError ( 'input must be a string or a list of strings' )
return the smallest possible data type for a string or list of strings
46,258
def urlQueryParser ( url , querydict ) : address_parse = urlparse ( url ) return urlunparse ( address_parse . _replace ( query = urlencode ( querydict ) ) )
parse a url query
46,259
def push ( self , x ) : if isinstance ( x , list ) : for item in x : self . stack . append ( item ) else : self . stack . append ( x )
append items to the stack ; input can be a single value or a list
46,260
def pop ( self ) : if not self . empty ( ) : val = self . stack [ - 1 ] del self . stack [ - 1 ] return val
return the last stack element and delete it from the list
46,261
def axes ( self , offset = False ) : reg , self . _scale = self . SCALES [ self . _gauss ] x = self . bus . read_int16_data ( self . address , self . HMC5883L_DXRA ) if x == - 4096 : x = OVERFLOW y = self . bus . read_int16_data ( self . address , self . HMC5883L_DYRA ) if y == - 4096 : y = OVERFLOW z = self . bus . read_int16_data ( self . address , self . HMC5883L_DZRA ) if z == - 4096 : z = OVERFLOW x *= self . _scale y *= self . _scale z *= self . _scale if offset : ( x , y , z ) = self . __offset ( ( x , y , z ) ) return ( x , y , z )
returns measured value in miligauss
46,262
def _ToString ( x ) : if x is None : return 'null' if isinstance ( x , six . string_types ) : return x return pprint . pformat ( x )
The default default formatter! .
46,263
def _Pairs ( data ) : keys = sorted ( data ) return [ { '@key' : k , '@value' : data [ k ] } for k in keys ]
dictionary - > list of pairs
46,264
def _Pluralize ( value , unused_context , args ) : if len ( args ) == 0 : s , p = '' , 's' elif len ( args ) == 1 : s , p = '' , args [ 0 ] elif len ( args ) == 2 : s , p = args else : raise AssertionError if value > 1 : return p else : return s
Formatter to pluralize words .
46,265
def _TemplateExists ( unused_value , context , args ) : try : name = args [ 0 ] except IndexError : raise EvaluationError ( 'The "template" predicate requires an argument.' ) return context . HasTemplate ( name )
Returns whether the given name is in the current Template s template group .
46,266
def SplitMeta ( meta ) : n = len ( meta ) if n % 2 == 1 : raise ConfigurationError ( '%r has an odd number of metacharacters' % meta ) return meta [ : n // 2 ] , meta [ n // 2 : ]
Split and validate metacharacters .
46,267
def _MatchDirective ( token ) : if token . startswith ( '.' ) : token = token [ 1 : ] else : return None , None if token == 'end' : return END_TOKEN , None if token == 'alternates with' : return ALTERNATES_TOKEN , token if token . startswith ( 'or' ) : if token . strip ( ) == 'or' : return OR_TOKEN , None else : pred_str = token [ 2 : ] . strip ( ) return OR_TOKEN , pred_str match = _SECTION_RE . match ( token ) if match : repeated , section_name = match . groups ( ) if repeated : return REPEATED_SECTION_TOKEN , section_name else : return SECTION_TOKEN , section_name if token . startswith ( 'template ' ) : return SUBST_TEMPLATE_TOKEN , token [ 9 : ] . strip ( ) if token . startswith ( 'define ' ) : return DEF_TOKEN , token [ 7 : ] . strip ( ) if token . startswith ( 'if ' ) : return IF_TOKEN , token [ 3 : ] . strip ( ) if token . endswith ( '?' ) : return PREDICATE_TOKEN , token return None , None
Helper function for matching certain directives .
46,268
def _CompileTemplate ( template_str , builder , meta = '{}' , format_char = '|' , default_formatter = 'str' , whitespace = 'smart' ) : meta_left , meta_right = SplitMeta ( meta ) if format_char not in ( ':' , '|' ) : raise ConfigurationError ( 'Only format characters : and | are accepted (got %r)' % format_char ) if whitespace not in ( 'smart' , 'strip-line' ) : raise ConfigurationError ( 'Invalid whitespace mode %r' % whitespace ) balance_counter = 0 comment_counter = 0 has_defines = False for token_type , token in _Tokenize ( template_str , meta_left , meta_right , whitespace ) : if token_type == COMMENT_BEGIN_TOKEN : comment_counter += 1 continue if token_type == COMMENT_END_TOKEN : comment_counter -= 1 if comment_counter < 0 : raise CompilationError ( 'Got too many ##END markers' ) continue if comment_counter > 0 : continue if token_type in ( LITERAL_TOKEN , META_LITERAL_TOKEN ) : if token : builder . Append ( token ) continue if token_type in ( SECTION_TOKEN , REPEATED_SECTION_TOKEN , DEF_TOKEN ) : parts = [ p . strip ( ) for p in token . split ( format_char ) ] if len ( parts ) == 1 : name = parts [ 0 ] formatters = [ ] else : name = parts [ 0 ] formatters = parts [ 1 : ] builder . NewSection ( token_type , name , formatters ) balance_counter += 1 if token_type == DEF_TOKEN : has_defines = True continue if token_type == PREDICATE_TOKEN : builder . NewPredicateSection ( token , test_attr = True ) balance_counter += 1 continue if token_type == IF_TOKEN : builder . NewPredicateSection ( token , test_attr = False ) balance_counter += 1 continue if token_type == OR_TOKEN : builder . NewOrClause ( token ) continue if token_type == ALTERNATES_TOKEN : builder . AlternatesWith ( ) continue if token_type == END_TOKEN : balance_counter -= 1 if balance_counter < 0 : raise TemplateSyntaxError ( 'Got too many %send%s statements. You may have mistyped an ' "earlier 'section' or 'repeated section' directive." % ( meta_left , meta_right ) ) builder . 
EndSection ( ) continue if token_type == SUBST_TOKEN : parts = [ p . strip ( ) for p in token . split ( format_char ) ] if len ( parts ) == 1 : if default_formatter is None : raise MissingFormatter ( 'This template requires explicit formatters.' ) name = token formatters = [ default_formatter ] else : name = parts [ 0 ] formatters = parts [ 1 : ] builder . AppendSubstitution ( name , formatters ) continue if token_type == SUBST_TEMPLATE_TOKEN : builder . AppendTemplateSubstitution ( token ) continue if balance_counter != 0 : raise TemplateSyntaxError ( 'Got too few %send%s statements' % ( meta_left , meta_right ) ) if comment_counter != 0 : raise CompilationError ( 'Got %d more {##BEGIN}s than {##END}s' % comment_counter ) return builder . Root ( ) , has_defines
Compile the template string calling methods on the program builder .
46,269
def FromString ( s , ** kwargs ) : f = StringIO . StringIO ( s ) return FromFile ( f , ** kwargs )
Like FromFile but takes a string .
46,270
def FromFile ( f , more_formatters = lambda x : None , more_predicates = lambda x : None , _constructor = None ) : _constructor = _constructor or Template options = { } while 1 : line = f . readline ( ) match = _OPTION_RE . match ( line ) if match : name , value = match . group ( 1 ) , match . group ( 2 ) name = name . lower ( ) name = name . encode ( 'utf-8' ) if name in _OPTION_NAMES : name = name . replace ( '-' , '_' ) value = value . strip ( ) if name == 'default_formatter' and value . lower ( ) == 'none' : value = None options [ name ] = value else : break else : break if options : if line . strip ( ) : raise CompilationError ( 'Must be one blank line between template options and body (got %r)' % line ) body = f . read ( ) else : body = line + f . read ( ) return _constructor ( body , more_formatters = more_formatters , more_predicates = more_predicates , ** options )
Parse a template from a file using a simple file format .
46,271
def _Execute ( statements , context , callback , trace ) : if trace : trace . exec_depth += 1 for i , statement in enumerate ( statements ) : if isinstance ( statement , six . string_types ) : callback ( statement ) else : try : func , args = statement func ( args , context , callback , trace ) except UndefinedVariable as e : start = max ( 0 , i - 3 ) end = i + 3 e . near = statements [ start : end ] e . trace = trace raise
Execute a bunch of template statements in a ScopedContext .
46,272
def expand ( template_str , dictionary , ** kwargs ) : t = Template ( template_str , ** kwargs ) return t . expand ( dictionary )
Free function to expands a template string with a data dictionary .
46,273
def _FlattenToCallback ( tokens , callback ) : for t in tokens : if isinstance ( t , six . string_types ) : callback ( t ) else : _FlattenToCallback ( t , callback )
Takes a nested list structure and flattens it .
46,274
def execute_with_style_LEGACY ( template , style , data , callback , body_subtree = 'body' ) : try : body_data = data [ body_subtree ] except KeyError : raise EvaluationError ( 'Data dictionary has no subtree %r' % body_subtree ) tokens_body = [ ] template . execute ( body_data , tokens_body . append ) data [ body_subtree ] = tokens_body tokens = [ ] style . execute ( data , tokens . append ) _FlattenToCallback ( tokens , callback )
OBSOLETE old API .
46,275
def expand_with_style ( template , style , data , body_subtree = 'body' ) : if template . has_defines : return template . expand ( data , style = style ) else : tokens = [ ] execute_with_style_LEGACY ( template , style , data , tokens . append , body_subtree = body_subtree ) return JoinTokens ( tokens )
Expand a data dictionary with a template AND a style .
46,276
def _GetFormatter ( self , format_str ) : formatter , args , func_type = self . formatters . LookupWithType ( format_str ) if formatter : return formatter , args , func_type else : raise BadFormatter ( '%r is not a valid formatter' % format_str )
The user s formatters are consulted first then the default formatters .
46,277
def _GetPredicate ( self , pred_str , test_attr = False ) : predicate , args , func_type = self . predicates . LookupWithType ( pred_str ) if predicate : pred = predicate , args , func_type else : if test_attr : assert pred_str . endswith ( '?' ) pred = ( _TestAttribute , ( pred_str [ : - 1 ] , ) , ENHANCED_FUNC ) else : raise BadPredicate ( '%r is not a valid predicate' % pred_str ) return pred
The user s predicates are consulted first then the default predicates .
46,278
def NewSection ( self , token_type , section_name , pre_formatters ) : pre_formatters = [ self . _GetFormatter ( f ) for f in pre_formatters ] if token_type == REPEATED_SECTION_TOKEN : new_block = _RepeatedSection ( section_name , pre_formatters ) func = _DoRepeatedSection elif token_type == SECTION_TOKEN : new_block = _Section ( section_name , pre_formatters ) func = _DoSection elif token_type == DEF_TOKEN : new_block = _Section ( section_name , [ ] ) func = _DoDef else : raise AssertionError ( 'Invalid token type %s' % token_type ) self . _NewSection ( func , new_block )
For sections or repeated sections .
46,279
def NewPredicateSection ( self , pred_str , test_attr = False ) : pred = self . _GetPredicate ( pred_str , test_attr = test_attr ) block = _PredicateSection ( ) block . NewOrClause ( pred ) self . _NewSection ( _DoPredicates , block )
For chains of predicate clauses .
46,280
def PushSection ( self , name , pre_formatters ) : if name == '@' : value = self . stack [ - 1 ] . context else : value = self . stack [ - 1 ] . context . get ( name ) for i , ( f , args , formatter_type ) in enumerate ( pre_formatters ) : if formatter_type == ENHANCED_FUNC : value = f ( value , self , args ) elif formatter_type == SIMPLE_FUNC : value = f ( value ) else : assert False , 'Invalid formatter type %r' % formatter_type self . stack . append ( _Frame ( value ) ) return value
Given a section name push it on the top of the stack .
46,281
def Next ( self ) : stacktop = self . stack [ - 1 ] if stacktop . index == - 1 : stacktop = _Frame ( None , index = 0 ) self . stack . append ( stacktop ) context_array = self . stack [ - 2 ] . context if stacktop . index == len ( context_array ) : self . stack . pop ( ) raise StopIteration stacktop . context = context_array [ stacktop . index ] stacktop . index += 1 return True
Advance to the next item in a repeated section .
46,282
def _LookUpStack ( self , name ) : i = len ( self . stack ) - 1 while 1 : frame = self . stack [ i ] if name == '@index' : if frame . index != - 1 : return frame . index else : context = frame . context if hasattr ( context , 'get' ) : try : return context [ name ] except KeyError : pass i -= 1 if i <= - 1 : return self . _Undefined ( name )
Look up the stack for the given name .
46,283
def Lookup ( self , name ) : if name == '@' : return self . stack [ - 1 ] . context parts = name . split ( '.' ) value = self . _LookUpStack ( parts [ 0 ] ) for part in parts [ 1 : ] : try : value = value [ part ] except ( KeyError , TypeError ) : return self . _Undefined ( part ) return value
Get the value associated with a name in the current context .
46,284
def execute ( self , data_dict , callback , group = None , trace = None ) : group = group or self . group context = _ScopedContext ( data_dict , self . undefined_str , group = group ) _Execute ( self . _program . Statements ( ) , context , callback , trace )
Low level method to expand the template piece by piece .
46,285
def expand ( self , * args , ** kwargs ) : if args : if len ( args ) == 1 : data_dict = args [ 0 ] trace = kwargs . get ( 'trace' ) style = kwargs . get ( 'style' ) else : raise TypeError ( 'expand() only takes 1 positional argument (got %s)' % args ) else : data_dict = kwargs trace = None style = None tokens = [ ] group = _MakeGroupFromRootSection ( self . _program , self . undefined_str ) if style : style . execute ( data_dict , tokens . append , group = group , trace = trace ) else : self . execute ( data_dict , tokens . append , group = group , trace = trace ) return JoinTokens ( tokens )
Expands the template with the given data dictionary returning a string .
46,286
def tokenstream ( self , data_dict ) : tokens = [ ] self . execute ( data_dict , tokens . append ) for token in tokens : yield token
Yields a list of tokens resulting from expansion .
46,287
def align_unwrapped ( sino ) : samples = [ ] if len ( sino . shape ) == 2 : samples . append ( sino [ : , 0 ] ) samples . append ( sino [ : , 1 ] ) samples . append ( sino [ : , 2 ] ) samples . append ( sino [ : , - 1 ] ) samples . append ( sino [ : , - 2 ] ) elif len ( sino . shape ) == 3 : samples . append ( sino [ : , 0 , 0 ] ) samples . append ( sino [ : , 0 , - 1 ] ) samples . append ( sino [ : , - 1 , 0 ] ) samples . append ( sino [ : , - 1 , - 1 ] ) samples . append ( sino [ : , 0 , 1 ] ) steps = np . zeros ( ( len ( samples ) , samples [ 0 ] . shape [ 0 ] ) ) for i in range ( len ( samples ) ) : t = np . unwrap ( samples [ i ] ) steps [ i ] = samples [ i ] - t remove = mode ( steps , axis = 0 ) [ 0 ] [ 0 ] twopi = 2 * np . pi minimum = divmod_neg ( np . min ( sino ) , twopi ) [ 0 ] remove += minimum * twopi for i in range ( len ( sino ) ) : sino [ i ] -= remove [ i ]
Align an unwrapped phase array to zero - phase
46,288
def divmod_neg ( a , b ) : q , r = divmod ( a , b ) sr = np . sign ( r ) if np . abs ( r ) > b / 2 : q += sr r -= b * sr return q , r
Return divmod with closest result to zero
46,289
def sinogram_as_radon ( uSin , align = True ) : r ndims = len ( uSin . shape ) if ndims == 2 : phiR = np . unwrap ( np . angle ( uSin ) , axis = - 1 ) else : phiR = np . angle ( uSin ) for ii in range ( len ( phiR ) ) : phiR [ ii ] = unwrap_phase ( phiR [ ii ] , seed = 47 ) if align : align_unwrapped ( phiR ) return phiR
r Compute the phase from a complex wave field sinogram
46,290
def sinogram_as_rytov ( uSin , u0 = 1 , align = True ) : r ndims = len ( uSin . shape ) phiR = np . angle ( uSin / u0 ) lna = np . log ( np . absolute ( uSin / u0 ) ) if ndims == 2 : phiR [ : ] = np . unwrap ( phiR , axis = - 1 ) else : for ii in range ( len ( phiR ) ) : phiR [ ii ] = unwrap_phase ( phiR [ ii ] , seed = 47 ) if align : align_unwrapped ( phiR ) rytovSin = 1j * phiR + lna return u0 * rytovSin
r Convert the complex wave field sinogram to the Rytov phase
46,291
def json_encoder_default ( obj ) : if np is not None and hasattr ( obj , 'size' ) and hasattr ( obj , 'dtype' ) : if obj . size == 1 : if np . issubdtype ( obj . dtype , np . integer ) : return int ( obj ) elif np . issubdtype ( obj . dtype , np . floating ) : return float ( obj ) if isinstance ( obj , set ) : return list ( obj ) elif hasattr ( obj , 'to_native' ) : return obj . to_native ( ) elif hasattr ( obj , 'tolist' ) and hasattr ( obj , '__iter__' ) : return obj . tolist ( ) return obj
Handle more data types than the default JSON encoder .
46,292
def fig_to_src ( figure , image_format = 'png' , dpi = 80 ) : if image_format == 'png' : f = io . BytesIO ( ) figure . savefig ( f , format = image_format , dpi = dpi ) f . seek ( 0 ) return png_to_src ( f . read ( ) ) elif image_format == 'svg' : f = io . StringIO ( ) figure . savefig ( f , format = image_format , dpi = dpi ) f . seek ( 0 ) return svg_to_src ( f . read ( ) )
Convert a matplotlib figure to an inline HTML image .
46,293
def ReleaseSW ( self ) : ' Go away from Limit Switch ' while self . ReadStatusBit ( 2 ) == 1 : spi . SPI_write ( self . CS , [ 0x92 , 0x92 ] | ( ~ self . Dir & 1 ) ) while self . IsBusy ( ) : pass self . MoveWait ( 10 )
Go away from Limit Switch
46,294
def feature2vector ( feature , ref , layername = None ) : features = feature if isinstance ( feature , list ) else [ feature ] layername = layername if layername is not None else ref . layername vec = Vector ( driver = 'Memory' ) vec . addlayer ( layername , ref . srs , ref . geomType ) feat_def = features [ 0 ] . GetDefnRef ( ) fields = [ feat_def . GetFieldDefn ( x ) for x in range ( 0 , feat_def . GetFieldCount ( ) ) ] vec . layer . CreateFields ( fields ) for feat in features : vec . layer . CreateFeature ( feat ) vec . init_features ( ) return vec
create a Vector object from ogr features
46,295
def intersect ( obj1 , obj2 ) : if not isinstance ( obj1 , Vector ) or not isinstance ( obj2 , Vector ) : raise RuntimeError ( 'both objects must be of type Vector' ) obj1 = obj1 . clone ( ) obj2 = obj2 . clone ( ) obj1 . reproject ( obj2 . srs ) union1 = ogr . Geometry ( ogr . wkbMultiPolygon ) for feat in obj1 . layer : union1 . AddGeometry ( feat . GetGeometryRef ( ) ) obj1 . layer . ResetReading ( ) union1 . Simplify ( 0 ) union2 = ogr . Geometry ( ogr . wkbMultiPolygon ) for feat in obj2 . layer : union2 . AddGeometry ( feat . GetGeometryRef ( ) ) obj2 . layer . ResetReading ( ) union2 . Simplify ( 0 ) intersect_base = union1 . Intersection ( union2 ) union1 = None union2 = None if intersect_base . GetArea ( ) > 0 : intersection = Vector ( driver = 'Memory' ) intersection . addlayer ( 'intersect' , obj1 . srs , ogr . wkbPolygon ) fieldmap = [ ] for index , fielddef in enumerate ( [ obj1 . fieldDefs , obj2 . fieldDefs ] ) : for field in fielddef : name = field . GetName ( ) i = 2 while name in intersection . fieldnames : name = '{}_{}' . format ( field . GetName ( ) , i ) i += 1 fieldmap . append ( ( index , field . GetName ( ) , name ) ) intersection . addfield ( name , type = field . GetType ( ) , width = field . GetWidth ( ) ) for feature1 in obj1 . layer : geom1 = feature1 . GetGeometryRef ( ) if geom1 . Intersects ( intersect_base ) : for feature2 in obj2 . layer : geom2 = feature2 . GetGeometryRef ( ) if geom2 . Intersects ( intersect_base ) : intersect = geom2 . Intersection ( geom1 ) fields = { } for item in fieldmap : if item [ 0 ] == 0 : fields [ item [ 2 ] ] = feature1 . GetField ( item [ 1 ] ) else : fields [ item [ 2 ] ] = feature2 . GetField ( item [ 1 ] ) intersection . addfeature ( intersect , fields ) intersect_base = None return intersection
intersect two Vector objects
46,296
def addfield ( self , name , type , width = 10 ) : fieldDefn = ogr . FieldDefn ( name , type ) if type == ogr . OFTString : fieldDefn . SetWidth ( width ) self . layer . CreateField ( fieldDefn )
add a field to the vector layer
46,297
def addlayer ( self , name , srs , geomType ) : self . vector . CreateLayer ( name , srs , geomType ) self . init_layer ( )
add a layer to the vector layer
46,298
def addvector ( self , vec ) : vec . layer . ResetReading ( ) for feature in vec . layer : self . layer . CreateFeature ( feature ) self . init_features ( ) vec . layer . ResetReading ( )
add a vector object to the layer of the current Vector object
46,299
def bbox ( self , outname = None , format = 'ESRI Shapefile' , overwrite = True ) : if outname is None : return bbox ( self . extent , self . srs ) else : bbox ( self . extent , self . srs , outname = outname , format = format , overwrite = overwrite )
create a bounding box from the extent of the Vector object