idx
int64
0
63k
question
stringlengths
53
5.28k
target
stringlengths
5
805
42,200
def plot_iso(axis, step, var):
    """Plot isocontours of a scalar field on the given axis."""
    xmesh, ymesh, fld = get_meshes_fld(step, var)
    # optionally roll the field along the first (periodic) axis
    if conf.field.shift:
        fld = np.roll(fld, conf.field.shift, axis=0)
    axis.contour(xmesh, ymesh, fld, linewidths=1)
Plot isocontours of scalar field .
42,201
def plot_vec(axis, step, var):
    """Plot a vector field as arrows on the given axis."""
    xmesh, ymesh, vec1, vec2 = get_meshes_vec(step, var)
    # arrow decimation along the vertical direction
    dipz = step.geom.nztot // 10
    if conf.field.shift:
        vec1 = np.roll(vec1, conf.field.shift, axis=0)
        vec2 = np.roll(vec2, conf.field.shift, axis=0)
    if step.geom.spherical or conf.plot.ratio is None:
        dipx = dipz
    else:
        dipx = step.geom.nytot if step.geom.twod_yz else step.geom.nxtot
        dipx = int(dipx // 10 * conf.plot.ratio) + 1
    sparse = np.s_[::dipx, ::dipz]
    axis.quiver(xmesh[sparse], ymesh[sparse],
                vec1[sparse], vec2[sparse], linewidths=1)
Plot vector field .
42,202
def cmd():
    """Implementation of the field subcommand."""
    sdat = StagyyData(conf.core.path)
    sovs = set_of_vars(conf.field.plot)
    minmax = {}
    if conf.plot.cminmax:
        # compute a global min/max per variable across all snapshots
        conf.plot.vmin = None
        conf.plot.vmax = None
        for step in sdat.walk.filter(snap=True):
            for var, _ in sovs:
                if var not in step.fields:
                    continue
                if var in phyvars.FIELD:
                    dim = phyvars.FIELD[var].dim
                else:
                    dim = phyvars.FIELD_EXTRA[var].dim
                field, _ = sdat.scale(step.fields[var], dim)
                vmin, vmax = np.nanmin(field), np.nanmax(field)
                if var in minmax:
                    minmax[var] = (min(minmax[var][0], vmin),
                                   max(minmax[var][1], vmax))
                else:
                    minmax[var] = vmin, vmax
    for step in sdat.walk.filter(snap=True):
        for var in sovs:
            if var[0] not in step.fields:
                print("'{}' field on snap {} not found".format(var[0],
                                                               step.isnap))
                continue
            opts = {}
            if var[0] in minmax:
                opts = dict(vmin=minmax[var[0]][0], vmax=minmax[var[0]][1])
            fig, axis, _, _ = plot_scalar(step, var[0], **opts)
            if valid_field_var(var[1]):
                # second entry is a scalar field: overlay isocontours
                plot_iso(axis, step, var[1])
            elif var[1]:
                plot_vec(axis, step, var[1])
            oname = '{}_{}'.format(*var) if var[1] else var[0]
            misc.saveplot(fig, oname, step.isnap)
Implementation of field subcommand .
42,203
def _sub(cmd, *sections):
    """Build a Subcmd instance from a command function or module."""
    func = cmd if isfunction(cmd) else cmd.cmd
    return Subcmd(baredoc(cmd), *sections, func=func)
Build Subcmd instance .
42,204
def _steps_to_slices():
    """Parse timesteps/snapshots config strings into slice objects."""
    if not (conf.core.timesteps or conf.core.snapshots):
        # nothing requested: default to the last snapshot
        conf.core.timesteps = None
        conf.core.snapshots = slice(-1, None, None)
        return
    if conf.core.snapshots:
        # snapshots take precedence over timesteps
        conf.core.timesteps = None
        bounds = conf.core.snapshots
    else:
        conf.core.snapshots = None
        bounds = conf.core.timesteps
    bounds = bounds.split(':')
    bounds[0] = int(bounds[0]) if bounds[0] else None
    if len(bounds) == 1:
        # a single index n means the n:n+1 slice; for n == -1 the
        # computed end is 0, which the falsy test below turns into None
        bounds.append(bounds[0] + 1)
    bounds[1] = int(bounds[1]) if bounds[1] else None
    if len(bounds) != 3:
        bounds = bounds[0:2] + [1]
    bounds[2] = int(bounds[2]) if bounds[2] else None
    requested = slice(*bounds)
    if conf.core.snapshots is not None:
        conf.core.snapshots = requested
    else:
        conf.core.timesteps = requested
Parse timesteps and snapshots arguments and store the resulting slices.
42,205
def parse_args(arglist=None):
    """Parse command line arguments and return the subcommand function."""
    climan = CLIManager(conf, **SUB_CMDS)
    create_complete_files(climan, CONFIG_DIR, 'stagpy', 'stagpy-git',
                          zsh_sourceable=True)
    cmd_args, all_subs = climan.parse_args(arglist)
    sub_cmd = cmd_args.loam_sub_name
    if sub_cmd is None:
        return cmd_args.func
    if sub_cmd != 'config':
        commands.report_parsing_problems(PARSING_OUT)
    if conf.common.set:
        set_conf_str(conf, conf.common.set)
    if conf.common.config:
        commands.config_pp(all_subs)
    load_mplstyle()
    try:
        _steps_to_slices()
    except AttributeError:
        # the subcommand does not define timesteps/snapshots options
        pass
    return cmd_args.func
Parse cmd line arguments .
42,206
def plot_plate_limits(axis, ridges, trenches, ymin, ymax):
    """Plot dashed vertical lines marking ridges (green) and trenches (red)."""
    for position, color in [(t, 'red') for t in trenches] + \
                           [(r, 'green') for r in ridges]:
        axis.axvline(x=position, ymin=ymin, ymax=ymax,
                     color=color, ls='dashed', alpha=0.4)
    axis.set_xlim(0, 2 * np.pi)
    axis.set_ylim(ymin, ymax)
Plot lines designating ridges and trenches.
42,207
def plot_plate_limits_field(axis, rcmb, ridges, trenches):
    """Plot arrows marking ridges and trenches on 2D field plots."""
    # trenches in red first, then ridges in green (same order as before)
    for positions, color in ((trenches, 'red'), (ridges, 'green')):
        for phi in positions:
            head = ((rcmb + 1.02) * np.cos(phi), (rcmb + 1.02) * np.sin(phi))
            tail = ((rcmb + 1.35) * np.cos(phi), (rcmb + 1.35) * np.sin(phi))
            axis.annotate('', xy=head, xytext=tail,
                          arrowprops=dict(facecolor=color, shrink=0.05))
plot arrows designating ridges and trenches in 2D field plots
42,208
def io_surface(timestep, time, fid, fld):
    """Write one line of surface data to an open file object."""
    fid.write("{} {}".format(timestep, time))
    fid.writelines(["%10.2e" % value for value in fld[:]])
    fid.writelines(["\n"])
Output for surface files
42,209
def set_of_vars(arg_plot):
    """Build the set of requested variables known to the plates module."""
    return {var for var in arg_plot.split(',') if var in phyvars.PLATES}
Build set of needed variables .
42,210
def _check_config():
    """Create or refresh config files as necessary."""
    config.CONFIG_DIR.mkdir(parents=True, exist_ok=True)
    verfile = config.CONFIG_DIR / '.version'
    # the installed files are up to date when the recorded version matches
    uptodate = verfile.is_file() and verfile.read_text() == __version__
    if not uptodate:
        verfile.write_text(__version__)
    if not (uptodate and config.CONFIG_FILE.is_file()):
        conf.create_config_(update=True)
    for stfile in ('stagpy-paper.mplstyle', 'stagpy-slides.mplstyle'):
        stfile_conf = config.CONFIG_DIR / stfile
        if not (uptodate and stfile_conf.is_file()):
            stfile_local = pathlib.Path(__file__).parent / stfile
            shutil.copy(str(stfile_local), str(stfile_conf))
Create config files as necessary .
42,211
def load_mplstyle():
    """Try to load the matplotlib style(s) named in conf.plot.mplstyle."""
    plt = importlib.import_module('matplotlib.pyplot')
    if conf.plot.mplstyle:
        for style in conf.plot.mplstyle.split():
            # a style file in the config dir takes precedence
            stfile = config.CONFIG_DIR / (style + '.mplstyle')
            if stfile.is_file():
                style = str(stfile)
            try:
                plt.style.use(style)
            except OSError:
                print('Cannot import style {}.'.format(style),
                      file=sys.stderr)
                conf.plot.mplstyle = ''
    if conf.plot.xkcd:
        plt.xkcd()
Try to load conf . plot . mplstyle matplotlib style .
42,212
def dtime(sdat, tstart=None, tend=None):
    """Return time increments dt and the matching times."""
    time = sdat.tseries_between(tstart, tend)['t'].values
    return time[1:] - time[:-1], time[:-1]
Time increment dt .
42,213
def dt_dt(sdat, tstart=None, tend=None):
    """Finite-difference time derivative of the mean temperature."""
    tseries = sdat.tseries_between(tstart, tend)
    time = tseries['t'].values
    temp = tseries['Tmean'].values
    dtdt = (temp[1:] - temp[:-1]) / (time[1:] - time[:-1])
    return dtdt, time[:-1]
Derivative of temperature .
42,214
def ebalance(sdat, tstart=None, tend=None):
    """Energy balance residual as a function of time."""
    tseries = sdat.tseries_between(tstart, tend)
    rbot, rtop = misc.get_rbounds(sdat.steps.last)
    if rbot != 0:
        # spherical shell: surface ratio and volume factors
        coefsurf = (rtop / rbot)**2
        volume = rbot * ((rtop / rbot)**3 - 1) / 3
    else:
        coefsurf = 1.
        volume = 1.
    dtdt, time = dt_dt(sdat, tstart, tend)
    ftop = tseries['ftop'].values * coefsurf
    fbot = tseries['fbot'].values
    radio = tseries['H_int'].values
    ebal = ftop[1:] - fbot[1:] + volume * (dtdt - radio[1:])
    return ebal, time
Energy balance .
42,215
def mobility(sdat, tstart=None, tend=None):
    """Plates mobility: surface vrms over global vrms."""
    tseries = sdat.tseries_between(tstart, tend)
    steps = sdat.steps[tseries.index[0]:tseries.index[-1]]
    time = []
    mob = []
    for step in steps.filter(rprof=True):
        time.append(step.timeinfo['t'])
        # ratio of surface rms velocity to global rms velocity
        mob.append(step.rprof.iloc[-1].loc['vrms'] / step.timeinfo['vrms'])
    return np.array(mob), np.array(time)
Plates mobility .
42,216
def r_edges(step):
    """Cell border radii."""
    rbot, rtop = misc.get_rbounds(step)
    centers = step.rprof.loc[:, 'r'].values + rbot
    # midpoints between cell centers, extended to the domain bounds
    edges = (centers[:-1] + centers[1:]) / 2
    edges = np.insert(edges, 0, rbot)
    edges = np.append(edges, rtop)
    # returned twice to match the (data, radius) profile interface
    return edges, edges
Cell border .
42,217
def _scale_prof(step, rprof, rad=None):
    """Scale a profile to take sphericity into account."""
    rbot, rtop = misc.get_rbounds(step)
    if rbot == 0:
        # cartesian geometry: no scaling needed
        return rprof
    if rad is None:
        rad = step.rprof['r'].values + rbot
    return rprof * (2 * rad / (rtop + rbot))**2
Scale a profile to take sphericity into account.
42,218
def diffs_prof(step):
    """Diffusion profile scaled for sphericity."""
    diff, rad = diff_prof(step)
    return _scale_prof(step, diff, rad), rad
Scaled diffusion .
42,219
def energy_prof(step):
    """Total energy flux profile (diffusion plus advection)."""
    diff, rad = diffs_prof(step)
    adv, _ = advts_prof(step)
    # advection profile is one point shorter: pad with a trailing zero
    return diff + np.append(adv, 0), rad
Energy flux .
42,220
def advth(step):
    """Theoretical advection profile."""
    rbot, rtop = misc.get_rbounds(step)
    rmean = 0.5 * (rbot + rtop)
    rad = step.rprof['r'].values + rbot
    radio = step.timeinfo['H_int']
    if rbot != 0:
        # spherical shell
        th_adv = -(rtop**3 - rad**3) / rmean**2 / 3
    else:
        th_adv = rad - rtop
    th_adv = th_adv * radio + step.timeinfo['Nutop']
    return th_adv, None
Theoretical advection .
42,221
def init_c_overturn(step):
    """Initial concentration profile for the overturn setup."""
    rbot, rtop = misc.get_rbounds(step)
    tracers_par = step.sdat.par['tracersin']
    xieut = tracers_par['fe_eut']
    k_fe = tracers_par['k_fe']
    xi0l = tracers_par['fe_cont']
    xi0s = k_fe * xi0l
    xired = xi0l / xieut
    # radius above which the eutectic concentration is reached
    rsup = (rtop**3 -
            xired**(1 / (1 - k_fe)) * (rtop**3 - rbot**3))**(1 / 3)

    def initprof(rpos):
        # concentration at a single radius
        if rpos < rsup:
            return xi0s * ((rtop**3 - rbot**3) /
                           (rtop**3 - rpos**3))**(1 - k_fe)
        return xieut

    rad = np.linspace(rbot, rtop, 500)
    return np.vectorize(initprof)(rad), rad
Initial concentration .
42,222
def c_overturned(step):
    """Theoretical concentration profile after overturn."""
    rbot, rtop = misc.get_rbounds(step)
    cinit, rad = init_c_overturn(step)
    # map each radius to its post-overturn position
    radf = (rtop**3 + rbot**3 - rad**3)**(1 / 3)
    return cinit, radf
Theoretical overturned concentration .
42,223
def last(self):
    """Last time step available."""
    if self._last is UNDETERMINED:
        # index of the final entry of the time series
        self._last = self.sdat.tseries.index[-1]
    return self[self._last]
Last time step available .
42,224
def filter(self, **filters):
    """Update the current filters with the provided arguments."""
    for name, current in self._flt.items():
        self._flt[name] = filters.pop(name, current)
    if filters:
        # anything left over is not a known filter name
        raise error.UnknownFiltersError(filters.keys())
    return self
Update filters with provided arguments .
42,225
def hdf5(self):
    """Path of the output hdf5 folder if relevant, None otherwise."""
    if self._rundir['hdf5'] is UNDETERMINED:
        h5_folder = self.path / self.par['ioin']['hdf5_output_folder']
        # presence of Data.xmf signals hdf5 output
        if (h5_folder / 'Data.xmf').is_file():
            self._rundir['hdf5'] = h5_folder
        else:
            self._rundir['hdf5'] = None
    return self._rundir['hdf5']
Path of the output hdf5 folder if relevant, None otherwise.
42,226
def tseries(self):
    """Time series data, read lazily from hdf5 or legacy output."""
    if self._stagdat['tseries'] is UNDETERMINED:
        colnames = list(phyvars.TIME.keys())
        timefile = self.filename('TimeSeries.h5')
        self._stagdat['tseries'] = stagyyparsers.time_series_h5(
            timefile, colnames)
        if self._stagdat['tseries'] is not None:
            return self._stagdat['tseries']
        # no hdf5 time series: fall back on the legacy time.dat file
        timefile = self.filename('time.dat')
        if self.hdf5 and not timefile.is_file():
            timefile = self.filename('time.dat', force_legacy=True)
        self._stagdat['tseries'] = stagyyparsers.time_series(
            timefile, colnames)
    return self._stagdat['tseries']
Time series data .
42,227
def files(self):
    """Set of binary files output by StagYY in the run directory."""
    if self._rundir['ls'] is UNDETERMINED:
        out_stem = pathlib.Path(self.par['ioin']['output_file_stem'] + '_')
        out_dir = self.path / out_stem.parent
        if out_dir.is_dir():
            self._rundir['ls'] = set(out_dir.iterdir())
        else:
            self._rundir['ls'] = set()
    return self._rundir['ls']
Set of found binary files output by StagYY .
42,228
def walk(self):
    """Return a view on the user-configured steps slice."""
    if conf.core.snapshots is not None:
        return self.snaps[conf.core.snapshots]
    if conf.core.timesteps is not None:
        return self.steps[conf.core.timesteps]
    # nothing requested: last snapshot only
    return self.snaps[-1:]
Return view on configured steps slice .
42,229
def scale(self, data, unit):
    """Scale a quantity to obtain its dimensional counterpart."""
    if (self.par['switches']['dimensional_units']
            or not conf.scaling.dimensional or unit == '1'):
        # already dimensional, or scaling disabled
        return data, ''
    scaling = phyvars.SCALES[unit](self.scales)
    factor = conf.scaling.factors.get(unit, ' ')
    if conf.scaling.time_in_y and unit == 's':
        scaling /= conf.scaling.yearins
        unit = 'yr'
    elif conf.scaling.vel_in_cmpy and unit == 'm/s':
        scaling *= 100 * conf.scaling.yearins
        unit = 'cm/y'
    if factor in phyvars.PREFIXES:
        scaling *= 10**(-3 * (phyvars.PREFIXES.index(factor) + 1))
        unit = factor + unit
    return data * scaling, unit
Scales quantity to obtain dimensionful quantity .
42,230
def tseries_between(self, tstart=None, tend=None):
    """Return time series data between the requested times."""
    if self.tseries is None:
        return None
    ndat = self.tseries.shape[0]
    if tstart is None:
        istart = 0
    else:
        # bisection towards the first row with t >= tstart
        # (assumes tstart lies within the series range)
        igm, igp = 0, ndat - 1
        while igp - igm > 1:
            istart = igm + (igp - igm) // 2
            if self.tseries.iloc[istart]['t'] >= tstart:
                igp = istart
            else:
                igm = istart
        istart = igp
    if tend is None:
        iend = None
    else:
        # bisection towards the last row with t <= tend (exclusive bound)
        igm, igp = 0, ndat - 1
        while igp - igm > 1:
            iend = igm + (igp - igm) // 2
            if self.tseries.iloc[iend]['t'] > tend:
                igp = iend
            else:
                igm = iend
        iend = igm + 1
    return self.tseries.iloc[istart:iend]
Return time series data between requested times .
42,231
def filename(self, fname, timestep=None, suffix='', force_legacy=False):
    """Return the path of a StagYY output file."""
    if timestep is not None:
        fname += '{:05d}'.format(timestep)
    fname += suffix
    if not force_legacy and self.hdf5:
        return self.hdf5 / fname
    # legacy layout: files share a common stem in the run directory
    stem = self.par['ioin']['output_file_stem'] + '_'
    return self.path / (stem + fname)
Return name of StagYY output file .
42,232
def binfiles_set(self, isnap):
    """Set of existing binary files at a given snapshot index."""
    candidates = {self.filename(fstem, isnap, force_legacy=True)
                  for fstem in phyvars.FIELD_FILES}
    return candidates & self.files
Set of existing binary files at a given snap .
42,233
def _tidy_names ( names , nnames , extra_names = None ) : if len ( names ) < nnames and extra_names is not None : names . extend ( extra_names ) names . extend ( range ( nnames - len ( names ) ) ) del names [ nnames : ]
Truncate or extend names so that its len is nnames .
42,234
def time_series(timefile, colnames):
    """Read a temporal series text file into a DataFrame."""
    if not timefile.is_file():
        return None
    # NOTE(review): error_bad_lines/convert kwargs are tied to an older
    # pandas API -- confirm the pinned pandas version before modernizing
    data = pd.read_csv(timefile, delim_whitespace=True, dtype=str,
                       header=None, skiprows=1, index_col=0, engine='c',
                       memory_map=True, error_bad_lines=False,
                       warn_bad_lines=False)
    data = data.apply(pd.to_numeric, raw=True, errors='coerce')
    # drop rows whose index is not strictly increasing: leftovers from
    # a restarted run that were overwritten later in the file
    stale_rows = []
    irow = len(data) - 1
    while irow > 0:
        iprev = irow - 1
        while iprev >= 0 and data.index[irow] <= data.index[iprev]:
            stale_rows.append(iprev)
            iprev -= 1
        irow = iprev
    if stale_rows:
        keep = set(range(len(data))) - set(stale_rows)
        data = data.take(list(keep), convert=False)
    _tidy_names(colnames, data.shape[1])
    data.columns = colnames
    return data
Read temporal series text file .
42,235
def time_series_h5(timefile, colnames):
    """Read a temporal series HDF5 file into a DataFrame."""
    if not timefile.is_file():
        return None
    with h5py.File(timefile, 'r') as h5f:
        dset = h5f['tseries']
        _, ncols = dset.shape
        ncols -= 1  # first column holds the time step index
        h5names = map(bytes.decode, h5f['names'][len(colnames) + 1:])
        _tidy_names(colnames, ncols, h5names)
        data = dset[()]
    return pd.DataFrame(data[:, 1:], index=np.int_(data[:, 0]),
                        columns=colnames)
Read temporal series HDF5 file .
42,236
def _extract_rsnap_isteps ( rproffile ) : step_regex = re . compile ( r'^\*+step:\s*(\d+) ; time =\s*(\S+)' ) isteps = [ ] rows_to_del = set ( ) line = ' ' with rproffile . open ( ) as stream : while line [ 0 ] != '*' : line = stream . readline ( ) match = step_regex . match ( line ) istep = int ( match . group ( 1 ) ) time = float ( match . group ( 2 ) ) nlines = 0 iline = 0 for line in stream : if line [ 0 ] == '*' : isteps . append ( ( istep , time , nlines ) ) match = step_regex . match ( line ) istep = int ( match . group ( 1 ) ) time = float ( match . group ( 2 ) ) nlines = 0 nrows_to_del = 0 while isteps and istep <= isteps [ - 1 ] [ 0 ] : nrows_to_del += isteps . pop ( ) [ - 1 ] rows_to_del = rows_to_del . union ( range ( iline - nrows_to_del , iline ) ) else : nlines += 1 iline += 1 isteps . append ( ( istep , time , nlines ) ) return isteps , rows_to_del
Extract istep values and compute the list of rows to delete.
42,237
def _readbin ( fid , fmt = 'i' , nwords = 1 , file64 = False , unpack = True ) : if fmt in 'if' : fmt += '8' if file64 else '4' elts = np . fromfile ( fid , fmt , nwords ) if unpack and len ( elts ) == 1 : elts = elts [ 0 ] return elts
Read n words of 4 or 8 bytes with fmt format .
42,238
def tracers(tracersfile):
    """Extract tracers data from a legacy binary file."""
    if not tracersfile.is_file():
        return None
    tra = {}
    with tracersfile.open('rb') as fid:
        readbin = partial(_readbin, fid)
        magic = readbin()
        if magic > 8000:
            # a magic above 8000 flags a 64-bit file
            magic -= 8000
            readbin()
            readbin = partial(readbin, file64=True)
        if magic < 100:
            raise ParsingError(tracersfile,
                               'magic > 100 expected to get tracervar info')
        nblk = magic % 100
        # skip header entries that are not needed here
        readbin('f', 2)
        readbin()
        readbin('f')
        ninfo = readbin()
        ntra = readbin(nwords=nblk, unpack=False)
        readbin('f')
        curv = readbin()
        if curv:
            readbin('f')
        infos = []
        for _ in range(ninfo):
            # each info name is stored as 16 raw bytes
            infos.append(b''.join(readbin('b', 16)).strip().decode())
            tra[infos[-1]] = []
        if magic > 200:
            ntrace_elt = readbin()
            if ntrace_elt > 0:
                readbin('f', ntrace_elt)
        for ntrab in ntra:
            # tracer data is interleaved per info within each block
            data = readbin('f', ntrab * ninfo)
            for idx, info in enumerate(infos):
                tra[info].append(data[idx::ninfo])
    return tra
Extract tracers data .
42,239
def _read_group_h5(filename, groupname):
    """Return the content of a group in an HDF5 file."""
    with h5py.File(filename, 'r') as h5f:
        content = h5f[groupname][()]
    return content
Return group content .
42,240
def _make_3d ( field , twod ) : shp = list ( field . shape ) if twod and 'X' in twod : shp . insert ( 1 , 1 ) elif twod : shp . insert ( 0 , 1 ) return field . reshape ( shp )
Add a dimension to field if necessary .
42,241
def _ncores ( meshes , twod ) : nnpb = len ( meshes ) nns = [ 1 , 1 , 1 ] if twod is None or 'X' in twod : while ( nnpb > 1 and meshes [ nns [ 0 ] ] [ 'X' ] [ 0 , 0 , 0 ] == meshes [ nns [ 0 ] - 1 ] [ 'X' ] [ - 1 , 0 , 0 ] ) : nns [ 0 ] += 1 nnpb -= 1 cpu = lambda icy : icy * nns [ 0 ] if twod is None or 'Y' in twod : while ( nnpb > 1 and meshes [ cpu ( nns [ 1 ] ) ] [ 'Y' ] [ 0 , 0 , 0 ] == meshes [ cpu ( nns [ 1 ] - 1 ) ] [ 'Y' ] [ 0 , - 1 , 0 ] ) : nns [ 1 ] += 1 nnpb -= nns [ 0 ] cpu = lambda icz : icz * nns [ 0 ] * nns [ 1 ] while ( nnpb > 1 and meshes [ cpu ( nns [ 2 ] ) ] [ 'Z' ] [ 0 , 0 , 0 ] == meshes [ cpu ( nns [ 2 ] - 1 ) ] [ 'Z' ] [ 0 , 0 , - 1 ] ) : nns [ 2 ] += 1 nnpb -= nns [ 0 ] * nns [ 1 ] return np . array ( nns )
Compute number of nodes in each direction .
42,242
def _conglomerate_meshes ( meshin , header ) : meshout = { } npc = header [ 'nts' ] // header [ 'ncs' ] shp = [ val + 1 if val != 1 else 1 for val in header [ 'nts' ] ] x_p = int ( shp [ 0 ] != 1 ) y_p = int ( shp [ 1 ] != 1 ) for coord in meshin [ 0 ] : meshout [ coord ] = np . zeros ( shp ) for icore in range ( np . prod ( header [ 'ncs' ] ) ) : ifs = [ icore // np . prod ( header [ 'ncs' ] [ : i ] ) % header [ 'ncs' ] [ i ] * npc [ i ] for i in range ( 3 ) ] for coord , mesh in meshin [ icore ] . items ( ) : meshout [ coord ] [ ifs [ 0 ] : ifs [ 0 ] + npc [ 0 ] + x_p , ifs [ 1 ] : ifs [ 1 ] + npc [ 1 ] + y_p , ifs [ 2 ] : ifs [ 2 ] + npc [ 2 ] + 1 ] = mesh return meshout
Conglomerate meshes from several cores into one .
42,243
def _get_field(xdmf_file, data_item):
    """Extract the core number and field array from an XDMF data item."""
    shp = _get_dim(data_item)
    h5file, group = data_item.text.strip().split(':/', 1)
    # the core number is encoded (1-based) in the group name
    icore = int(group.split('_')[-2]) - 1
    fld = _read_group_h5(xdmf_file.parent / h5file, group).reshape(shp)
    return icore, fld
Extract field from data item .
42,244
def _maybe_get ( elt , item , info , conversion = None ) : maybe_item = elt . find ( item ) if maybe_item is not None : maybe_item = maybe_item . get ( info ) if conversion is not None : maybe_item = conversion ( maybe_item ) return maybe_item
Extract and convert info if item is present .
42,245
def read_geom_h5(xdmf_file, snapshot):
    """Extract geometry information from hdf5 files via the XDMF index."""
    header = {}
    xdmf_root = xmlET.parse(str(xdmf_file)).getroot()
    if snapshot is None:
        return None, xdmf_root
    elt_snap = xdmf_root[0][0][snapshot]
    header['ti_ad'] = float(elt_snap.find('Time').get('Value'))
    header['mo_lambda'] = _maybe_get(elt_snap, 'mo_lambda', 'Value', float)
    header['mo_thick_sol'] = _maybe_get(elt_snap, 'mo_thick_sol',
                                        'Value', float)
    header['ntb'] = 1
    coord_h5 = []
    coord_shape = []
    twod = None
    for elt_subdomain in elt_snap.findall('Grid'):
        if elt_subdomain.get('Name').startswith('meshYang'):
            # Yin-Yang grid: two blocks, coordinates handled elsewhere
            header['ntb'] = 2
            break
        elt_geom = elt_subdomain.find('Geometry')
        if elt_geom.get('Type') == 'X_Y' and twod is None:
            # 2D geometry: record which two coordinates are present
            twod = ''
            for data_item in elt_geom.findall('DataItem'):
                coord = data_item.text.strip()[-1]
                if coord in 'XYZ':
                    twod += coord
        data_item = elt_geom.find('DataItem')
        coord_shape.append(_get_dim(data_item))
        coord_h5.append(xdmf_file.parent /
                        data_item.text.strip().split(':/', 1)[0])
    _read_coord_h5(coord_h5, coord_shape, header, twod)
    return header, xdmf_root
Extract geometry information from hdf5 files .
42,246
def _to_spherical ( flds , header ) : cth = np . cos ( header [ 't_mesh' ] [ : , : , : - 1 ] ) sth = np . sin ( header [ 't_mesh' ] [ : , : , : - 1 ] ) cph = np . cos ( header [ 'p_mesh' ] [ : , : , : - 1 ] ) sph = np . sin ( header [ 'p_mesh' ] [ : , : , : - 1 ] ) fout = np . copy ( flds ) fout [ 0 ] = cth * cph * flds [ 0 ] + cth * sph * flds [ 1 ] - sth * flds [ 2 ] fout [ 1 ] = sph * flds [ 0 ] - cph * flds [ 1 ] fout [ 2 ] = sth * cph * flds [ 0 ] + sth * sph * flds [ 1 ] + cth * flds [ 2 ] return fout
Convert vector field to spherical .
42,247
def _flds_shape ( fieldname , header ) : shp = list ( header [ 'nts' ] ) shp . append ( header [ 'ntb' ] ) if fieldname == 'Velocity' : shp . insert ( 0 , 3 ) header [ 'xp' ] = int ( header [ 'nts' ] [ 0 ] != 1 ) shp [ 1 ] += header [ 'xp' ] header [ 'yp' ] = int ( header [ 'nts' ] [ 1 ] != 1 ) shp [ 2 ] += header [ 'yp' ] header [ 'zp' ] = 1 header [ 'xyp' ] = 1 else : shp . insert ( 0 , 1 ) header [ 'xp' ] = 0 header [ 'yp' ] = 0 header [ 'zp' ] = 0 header [ 'xyp' ] = 0 return shp
Compute shape of flds variable .
42,248
def _post_read_flds(flds, header):
    """Process flds to handle sphericity of vector fields."""
    if flds.shape[0] >= 3 and header['rcmb'] > 0:
        # spherical geometry: rotate each block to spherical components
        header['p_mesh'] = np.roll(
            np.arctan2(header['y_mesh'], header['x_mesh']), -1, 1)
        for ibk in range(header['ntb']):
            flds[..., ibk] = _to_spherical(flds[..., ibk], header)
        # final longitude mesh shifted into the [0, 2*pi) convention
        header['p_mesh'] = np.roll(
            np.arctan2(header['y_mesh'], -header['x_mesh']) + np.pi, -1, 1)
    return flds
Process flds to handle sphericity .
42,249
def read_field_h5(xdmf_file, fieldname, snapshot, header=None):
    """Extract field data from hdf5 files via the XDMF index."""
    if header is None:
        header, xdmf_root = read_geom_h5(xdmf_file, snapshot)
    else:
        xdmf_root = xmlET.parse(str(xdmf_file)).getroot()
    npc = header['nts'] // header['ncs']  # nodes per core
    flds = np.zeros(_flds_shape(fieldname, header))
    data_found = False
    for elt_subdomain in xdmf_root[0][0][snapshot].findall('Grid'):
        ibk = int(elt_subdomain.get('Name').startswith('meshYang'))
        for data_attr in elt_subdomain.findall('Attribute'):
            if data_attr.get('Name') != fieldname:
                continue
            icore, fld = _get_field(xdmf_file, data_attr.find('DataItem'))
            fld = fld.T
            shp = fld.shape
            # restore the singleton dimension dropped in the file;
            # reorder vector components when rcmb flags it
            if shp[-1] == 1 and header['nts'][0] == 1:
                fld = fld.reshape((shp[0], 1, shp[1], shp[2]))
                if header['rcmb'] < 0:
                    fld = fld[(2, 0, 1), ...]
            elif shp[-1] == 1:
                fld = fld.reshape((shp[0], shp[1], 1, shp[2]))
                if header['rcmb'] < 0:
                    fld = fld[(0, 2, 1), ...]
            elif header['nts'][1] == 1:
                fld = fld.reshape((1, shp[0], 1, shp[1]))
            # offset of this core's chunk in the global array
            ifs = [icore // np.prod(header['ncs'][:i]) % header['ncs'][i]
                   * npc[i] for i in range(3)]
            if header['zp']:
                fld = fld[:, :, :, :-1]
            flds[:, ifs[0]:ifs[0] + npc[0] + header['xp'],
                 ifs[1]:ifs[1] + npc[1] + header['yp'],
                 ifs[2]:ifs[2] + npc[2], ibk] = fld
            data_found = True
    flds = _post_read_flds(flds, header)
    return (header, flds) if data_found else None
Extract field data from hdf5 files .
42,250
def read_tracers_h5(xdmf_file, infoname, snapshot, position):
    """Extract tracers data from hdf5 files via the XDMF index."""
    xdmf_root = xmlET.parse(str(xdmf_file)).getroot()
    # one dict per Yin/Yang block, keyed by core number
    tra = {infoname: [{}, {}]}
    if position:
        for axis in 'xyz':
            tra[axis] = [{}, {}]
    for elt_subdomain in xdmf_root[0][0][snapshot].findall('Grid'):
        ibk = int(elt_subdomain.get('Name').startswith('meshYang'))
        if position:
            for data_attr in elt_subdomain.findall('Geometry'):
                for data_item, axis in zip(data_attr.findall('DataItem'),
                                           'xyz'):
                    icore, data = _get_field(xdmf_file, data_item)
                    tra[axis][ibk][icore] = data
        for data_attr in elt_subdomain.findall('Attribute'):
            if data_attr.get('Name') != infoname:
                continue
            icore, data = _get_field(xdmf_file,
                                     data_attr.find('DataItem'))
            tra[infoname][ibk][icore] = data
    for info in tra:
        # drop empty blocks and concatenate per-core data in core order
        tra[info] = [trab for trab in tra[info] if trab]
        for iblk, trab in enumerate(tra[info]):
            tra[info][iblk] = np.concatenate(
                [trab[icore] for icore in range(len(trab))])
    return tra
Extract tracers data from hdf5 files .
42,251
def _init_shape ( self ) : shape = self . _par [ 'geometry' ] [ 'shape' ] . lower ( ) aspect = self . _header [ 'aspect' ] if self . rcmb is not None and self . rcmb >= 0 : self . _shape [ 'cyl' ] = self . twod_xz and ( shape == 'cylindrical' or aspect [ 0 ] >= np . pi ) self . _shape [ 'sph' ] = not self . _shape [ 'cyl' ] elif self . rcmb is None : self . _header [ 'rcmb' ] = self . _par [ 'geometry' ] [ 'r_cmb' ] if self . rcmb >= 0 : if self . twod_xz and shape == 'cylindrical' : self . _shape [ 'cyl' ] = True elif shape == 'spherical' : self . _shape [ 'sph' ] = True self . _shape [ 'axi' ] = self . cartesian and self . twod_xz and shape == 'axisymmetric'
Determine shape of geometry
42,252
def _get_raw_data(self, name):
    """Find the file holding the requested data and return its content."""
    filestem = ''
    for filestem, list_fvar in self._files.items():
        if name in list_fvar:
            break
    fieldfile = self.step.sdat.filename(filestem, self.step.isnap,
                                        force_legacy=True)
    if not fieldfile.is_file():
        fieldfile = self.step.sdat.filename(filestem, self.step.isnap)
    parsed_data = None
    if fieldfile.is_file():
        parsed_data = stagyyparsers.fields(fieldfile)
    elif self.step.sdat.hdf5 and self._filesh5:
        # no binary file: fall back on hdf5 output
        for filestem, list_fvar in self._filesh5.items():
            if name in list_fvar:
                break
        parsed_data = stagyyparsers.read_field_h5(
            self.step.sdat.hdf5 / 'Data.xmf', filestem, self.step.isnap)
    return list_fvar, parsed_data
Find file holding data and return its content .
42,253
def geom(self):
    """Geometry information of the time step, None if unavailable."""
    if self._header is UNDETERMINED:
        binfiles = self.step.sdat.binfiles_set(self.step.isnap)
        if binfiles:
            self._header = stagyyparsers.fields(binfiles.pop(),
                                                only_header=True)
        elif self.step.sdat.hdf5:
            xmf = self.step.sdat.hdf5 / 'Data.xmf'
            self._header, _ = stagyyparsers.read_geom_h5(xmf,
                                                         self.step.isnap)
        else:
            self._header = None
    if self._geom is UNDETERMINED:
        if self._header is None:
            self._geom = None
        else:
            self._geom = _Geometry(self._header, self.step.sdat.par)
    return self._geom
Geometry information .
42,254
def timeinfo(self):
    """Time series data of this time step, None if unavailable."""
    if self.istep not in self.sdat.tseries.index:
        return None
    return self.sdat.tseries.loc[self.istep]
Time series data of the time step .
42,255
def rprof(self):
    """Radial profiles data of this time step, None if unavailable."""
    if self.istep not in self.sdat.rprof.index.levels[0]:
        return None
    return self.sdat.rprof.loc[self.istep]
Radial profiles data of the time step .
42,256
def isnap(self):
    """Snapshot index corresponding to this time step, None if none."""
    if self._isnap is UNDETERMINED:
        istep = None
        isnap = -1
        # walk snapshots upwards until reaching or passing self.istep,
        # binding isnap/istep pairs along the way
        while (istep is None or istep < self.istep) and isnap < 99999:
            isnap += 1
            istep = self.sdat.snaps[isnap].istep
            # NOTE(review): bind presumably records the pair on both
            # step and snap objects, setting _isnap on a match -- confirm
            self.sdat.snaps.bind(isnap, istep)
        if istep != self.istep:
            self._isnap = None
    return self._isnap
Snapshot index corresponding to time step .
42,257
def from_json_dict(dct, validate=True):
    """Create a Schema from a v1 or v2 schema dict."""
    if validate:
        validate_schema_dict(dct)
    version = dct['version']
    if version == 1:
        dct = convert_v1_to_v2(dct)
        if validate:
            # re-validate after conversion to v2
            validate_schema_dict(dct)
    elif version != 2:
        msg = ('Schema version {} is not supported. '
               'Consider updating clkhash.').format(version)
        raise SchemaError(msg)
    clk_config = dct['clkConfig']
    length = clk_config['l']
    xor_folds = clk_config.get('xor_folds', 0)
    kdf = clk_config['kdf']

    def decoded(key):
        # base64-decode an optional kdf entry
        value = kdf.get(key)
        return base64.b64decode(value) if value is not None else None

    fields = [spec_from_json_dict(feature) for feature in dct['features']]
    return Schema(fields, length, xor_folds, kdf['type'],
                  kdf.get('hash', 'SHA256'), decoded('info'),
                  decoded('salt'), kdf.get('keySize', DEFAULT_KDF_KEY_SIZE))
Create a Schema for v1 or v2 according to dct
42,258
def _get_master_schema(version):
    """Load the master schema of the given version as bytes."""
    try:
        file_name = MASTER_SCHEMA_FILE_NAMES[version]
    except (TypeError, KeyError) as e:
        msg = ('Schema version {} is not supported. '
               'Consider updating clkhash.').format(version)
        raise_from(SchemaError(msg), e)
    try:
        schema_bytes = pkgutil.get_data('clkhash',
                                        'schemas/{}'.format(file_name))
    except IOError as e:
        msg = ('The master schema could not be found. The schema cannot be '
               'validated. Please file a bug report.')
        raise_from(MasterSchemaError(msg), e)
    if schema_bytes is None:
        # get_data returns None when the package loader cannot read it
        msg = ('The master schema could not be loaded. The schema cannot be '
               'validated. Please file a bug report.')
        raise MasterSchemaError(msg)
    return schema_bytes
Loads the master schema of given version as bytes .
42,259
def validate_schema_dict(schema):
    """Validate *schema* against the master schema for its declared version.

    Raises SchemaError when the schema is invalid and MasterSchemaError
    when the bundled master schema itself is unusable.
    """
    if not isinstance(schema, dict):
        raise SchemaError('The top level of the schema file is a {}, whereas a dict is '
                          'expected.'.format(type(schema).__name__))
    if 'version' not in schema:
        raise SchemaError('A format version is expected in the schema.')
    version = schema['version']
    master_schema_bytes = _get_master_schema(version)
    try:
        master_schema = json.loads(master_schema_bytes.decode('utf-8'))
    except ValueError as e:
        raise_from(
            MasterSchemaError('The master schema is not a valid JSON file. The schema cannot '
                              'be validated. Please file a bug report.'),
            e)
    try:
        jsonschema.validate(schema, master_schema)
    except jsonschema.exceptions.ValidationError as e:
        raise_from(SchemaError('The schema is not valid.'), e)
    except jsonschema.exceptions.SchemaError as e:
        raise_from(
            MasterSchemaError('The master schema is not valid. The schema cannot be '
                              'validated. Please file a bug report.'),
            e)
Validate the schema .
42,260
def compute_hash_speed(num, quiet=False):
    """Benchmark CLK generation: hash *num* random records, return hashes/second.

    Fixes: the OS-level fd returned by mkstemp was held open for the
    whole run while the file was reopened by name (it is now closed
    immediately), and the temp file leaked if hashing raised (cleanup
    now happens in a finally block).
    """
    namelist = NameList(num)
    os_fd, tmpfile_name = tempfile.mkstemp(text=True)
    # we only need the path; close the raw fd before reopening by name
    os.close(os_fd)
    schema = NameList.SCHEMA
    header_row = ','.join(f.identifier for f in schema.fields)
    try:
        with open(tmpfile_name, 'wt') as f:
            print(header_row, file=f)
            for person in namelist.names:
                print(','.join(str(field) for field in person), file=f)
        with open(tmpfile_name, 'rt') as f:
            start = timer()
            generate_clk_from_csv(f, ('key1', 'key2'), schema,
                                  progress_bar=not quiet)
            end = timer()
    finally:
        # remove the temp file even if hashing fails
        os.remove(tmpfile_name)
    elapsed_time = end - start
    if not quiet:
        print("{:6d} hashes in {:.6f} seconds. {:.2f} KH/s".format(
            num, elapsed_time, num / (1000 * elapsed_time)))
    return num / elapsed_time
Hash time .
42,261
def hash(pii_csv, keys, schema, clk_json, quiet, no_header, check_header, validate):
    """Hash a CSV of PII into CLKs and write them to clk_json as JSON."""
    schema_object = clkhash.schema.from_json_file(schema_file=schema)
    # header handling: False = no header row, 'ignore' = present but unvalidated
    if no_header:
        header = False
    elif not check_header:
        header = 'ignore'
    else:
        header = True
    try:
        clk_data = clk.generate_clk_from_csv(
            pii_csv, keys, schema_object,
            validate=validate, header=header, progress_bar=not quiet)
    except (validate_data.EntryError, validate_data.FormatError) as e:
        msg, = e.args
        log(msg)
        log('Hashing failed.')
    else:
        json.dump({'clks': clk_data}, clk_json)
        if hasattr(clk_json, 'name'):
            log("CLK data written to {}".format(clk_json.name))
Process data to create CLKs
42,262
def status(server, output, verbose):
    """Query an entity matching server's status and print it as JSON."""
    if verbose:
        log("Connecting to Entity Matching Server: {}".format(server))
    info = server_get_status(server)
    if verbose:
        log("Status: {}".format(info['status']))
    print(json.dumps(info), file=output)
Connect to an entity matching server and check the service status .
42,263
def create_project(type, schema, server, name, output, verbose):
    """Create a new project on an entity matching server."""
    if verbose:
        log("Entity Matching Server: {}".format(server))
    if schema is None:
        raise ValueError("Schema must be provided when creating new linkage project")
    schema_json = json.load(schema)
    # client-side validation before sending anything to the server
    clkhash.schema.validate_schema_dict(schema_json)
    project_name = name if name is not None else ''
    try:
        reply = project_create(server, schema_json, type, project_name)
    except ServiceError as e:
        log("Unexpected response - {}".format(e.status_code))
        log(e.text)
        raise SystemExit
    else:
        log("Project created")
        json.dump(reply, output)
Create a new project on an entity matching server .
42,264
def create(server, name, project, apikey, output, threshold, verbose):
    """Create a new run on an entity matching server."""
    if verbose:
        log("Entity Matching Server: {}".format(server))
    if threshold is None:
        raise ValueError("Please provide a threshold")
    try:
        reply = run_create(server, project, apikey, threshold, name)
    except ServiceError as e:
        log("Unexpected response with status {}".format(e.status_code))
        log(e.text)
    else:
        json.dump(reply, output)
Create a new run on an entity matching server .
42,265
def upload(clk_json, project, apikey, server, output, verbose):
    """Upload CLK data to an entity matching server and dump the reply."""
    if verbose:
        for line in ("Uploading CLK data from {}".format(clk_json.name),
                     "To Entity Matching Server: {}".format(server),
                     "Project ID: {}".format(project),
                     "Uploading CLK data to the server"):
            log(line)
    response = project_upload_clks(server, project, apikey, clk_json)
    if verbose:
        log(response)
    json.dump(response, output)
Upload CLK data to entity matching server .
42,266
def results(project, apikey, run, watch, server, output):
    """Check whether results are available for a run; download them if so."""
    status = run_get_status(server, project, run, apikey)
    log(format_run_status(status))
    if watch:
        # poll for up to 24 hours, logging each status update
        for status in watch_run_status(server, project, run, apikey, 24 * 60 * 60):
            log(format_run_status(status))
    state = status['state']
    if state == 'completed':
        log("Downloading result")
        response = run_get_result_text(server, project, run, apikey)
        log("Received result")
        print(response, file=output)
    elif state == 'error':
        log("There was an error")
        print(run_get_result_text(server, project, run, apikey), file=output)
    else:
        log("No result yet")
Check to see if results are available for a particular mapping and if so download .
42,267
def generate(size, output, schema):
    """Generate fake PII data for testing and write it as CSV."""
    pii_data = randomnames.NameList(size)
    if schema is not None:
        # custom schemas for fake data are not supported yet
        raise NotImplementedError
    randomnames.save_csv(
        pii_data.names,
        [f.identifier for f in pii_data.SCHEMA.fields],
        output)
Generate fake PII data for testing
42,268
def generate_default_schema(output):
    """Copy the bundled default fake-PII schema to the *output* path."""
    bundled = os.path.join(os.path.dirname(__file__),
                           'data', 'randomnames-schema.json')
    shutil.copyfile(bundled, output)
Get default schema for fake PII
42,269
def docx_extraction(docx, cloud=None, batch=False, api_key=None, version=None, **kwargs):
    """Extract raw text from a .docx (filepath or base64 data) via the API."""
    payload = docx_preprocess(docx, batch=batch)
    return api_handler(
        payload, cloud=cloud, api="docxextraction",
        url_params={"batch": batch, "api_key": api_key, "version": version},
        **kwargs)
Given a . docx file returns the raw text associated with the given . docx file . The . docx file may be provided as base64 encoded data or as a filepath .
42,270
def docx_preprocess(docx, batch=False):
    """Base64-encode a local .docx file; pass through already-encoded data.

    Accepts a filepath (read and b64-encoded) or pre-encoded data
    (returned unchanged).  With batch=True, *docx* is a sequence and
    each element is processed individually.

    Fix: the file handle from open(...).read() was never closed; it is
    now managed with a context manager.
    """
    if batch:
        return [docx_preprocess(doc, batch=False) for doc in docx]
    if os.path.isfile(docx):
        with open(docx, 'rb') as f:
            return b64encode(f.read())
    return docx
Load docx files from local filepath if not already b64 encoded
42,271
def LAMBDA(self, node):
    """Visit a lambda node with handleNode/deferHandleNode swapped.

    NOTE(review): this relies on pyflakes internals and is likely
    brittle; it is known to work for pyflakes 1.3.0.

    Fix: the swap-back now runs in a finally block, so an exception in
    the superclass visitor no longer leaves the two handlers
    permanently swapped on this checker instance.
    """
    self.handleNode, self.deferHandleNode = self.deferHandleNode, self.handleNode
    try:
        super().LAMBDA(node)
    finally:
        self.handleNode, self.deferHandleNode = self.deferHandleNode, self.handleNode
This is likely very brittle; it currently works for pyflakes 1.3.0.
42,272
def double_hash_encode_ngrams(ngrams, keys, ks, l, encoding):
    """Compute the double-hash Bloom encoding of the n-grams.

    Each n-gram sets its k bits at positions (sha1 + i * md5) mod l,
    using HMACs keyed with the two provided keys.
    """
    key_sha1, key_md5 = keys
    bf = bitarray(l)
    bf.setall(False)
    for ngram, k in zip(ngrams, ks):
        encoded = ngram.encode(encoding=encoding)
        h1 = int(hmac.new(key_sha1, encoded, sha1).hexdigest(), 16) % l
        h2 = int(hmac.new(key_md5, encoded, md5).hexdigest(), 16) % l
        for i in range(k):
            bf[(h1 + i * h2) % l] = 1
    return bf
Computes the double hash encoding of the ngrams with the given keys .
42,273
def double_hash_encode_ngrams_non_singular(ngrams, keys, ks, l, encoding):
    """Double-hash encoding avoiding the singular case (md5 step == 0).

    If the md5-derived step is 0 mod l (all k indices would coincide),
    the n-gram is re-hashed with an incrementing counter suffix until a
    non-zero step is obtained.
    """
    key_sha1, key_md5 = keys
    bf = bitarray(l)
    bf.setall(False)
    for ngram, k in zip(ngrams, ks):
        m_bytes = ngram.encode(encoding=encoding)
        h1 = int_from_bytes(hmac.new(key_sha1, m_bytes, sha1).digest(), 'big') % l
        h2 = int_from_bytes(hmac.new(key_md5, m_bytes, md5).digest(), 'big') % l
        suffix = 0
        while h2 == 0:
            salted = m_bytes + chr(suffix).encode()
            h2 = int_from_bytes(hmac.new(key_md5, salted, md5).digest(), 'big') % l
            suffix += 1
        for i in range(k):
            bf[(h1 + i * h2) % l] = True
    return bf
computes the double hash encoding of the n - grams with the given keys .
42,274
def blake_encode_ngrams(ngrams, keys, ks, l, encoding):
    """Encode the n-grams into a Bloom filter using keyed BLAKE2b.

    l must be a power of two.  Each BLAKE2b digest provides 32 16-bit
    values; when k > 32, further digests are produced by salting with
    the digest index.
    """
    key, = keys
    log_l = int(math.log(l, 2))
    if 2 ** log_l != l:
        raise ValueError(
            'parameter "l" has to be a power of two for the BLAKE2 encoding, '
            'but was: {}'.format(l))
    bf = bitarray(l)
    bf.setall(False)
    for ngram, k in zip(ngrams, ks):
        shorts = []
        for digest_idx in range((k + 31) // 32):
            digest = blake2b(ngram.encode(encoding=encoding),
                             key=key, salt=str(digest_idx).encode()).digest()
            shorts.extend(struct.unpack('32H', digest))
        for value in shorts[:k]:
            bf[value % l] = 1
    return bf
Computes the encoding of the ngrams using the BLAKE2 hash function .
42,275
def fold_xor(bloomfilter, folds):
    """XOR-fold *bloomfilter* in half, *folds* times.

    Raises ValueError when the filter length is not divisible by
    2 ** folds.
    """
    if len(bloomfilter) % 2 ** folds != 0:
        raise ValueError(
            'The length of the bloom filter is {length}. It is not '
            'divisible by 2 ** {folds}, so it cannot be folded {folds} '
            'times.'.format(length=len(bloomfilter), folds=folds))
    for _ in range(folds):
        half = len(bloomfilter) // 2
        bloomfilter = bloomfilter[:half] ^ bloomfilter[half:]
    return bloomfilter
Performs XOR folding on a Bloom filter .
42,276
def crypto_bloom_filter(record, tokenizers, schema, keys):
    """Compute the composite Bloom-filter encoding of one record.

    Each field with hashing properties is tokenized, hashed with its
    key, and OR-ed into a filter of length l * 2**xor_folds, which is
    then XOR-folded down to length l.

    Returns (bloomfilter, record[0], popcount).

    Fix: removed the dead stores c1/c2 — bloomfilter.count() was
    computed three times with two results discarded.
    """
    hash_l = schema.l * 2 ** schema.xor_folds
    bloomfilter = bitarray(hash_l)
    bloomfilter.setall(False)
    for entry, tokenize, field, key in zip(record, tokenizers, schema.fields, keys):
        fhp = field.hashing_properties
        if fhp:
            ngrams = list(tokenize(field.format_value(entry)))
            hash_function = hashing_function_from_properties(fhp)
            bloomfilter |= hash_function(
                ngrams, key, fhp.ks(len(ngrams)), hash_l, fhp.encoding)
    bloomfilter = fold_xor(bloomfilter, schema.xor_folds)
    return bloomfilter, record[0], bloomfilter.count()
Computes the composite Bloom filter encoding of a record .
42,277
def re_compile_full(pattern, flags=0):
    """Compile *pattern* so that it only matches the entire string.

    Calling re.match on the result is equivalent to calling
    re.fullmatch with the original pattern.
    """
    assert type(pattern) is str or type(pattern) is unicode
    anchored = r'(?:{})\Z'.format(pattern)
    return re.compile(anchored, flags=flags)
Create compiled regular expression such that it matches the entire string . Calling re . match on the output of this function is equivalent to calling re . fullmatch on its input .
42,278
def _p2_unicode_reader(unicode_csv_data, dialect=csv.excel, **kwargs):
    """Python 2 helper: parse unicode data as CSV via a UTF-8 round trip."""
    rows = csv.reader(_utf_8_encoder(unicode_csv_data), dialect=dialect, **kwargs)
    # decode every cell back to unicode after the byte-level parse
    return ([unicode(cell, 'utf-8') for cell in row] for row in rows)
Encode Unicode as UTF - 8 and parse as CSV .
42,279
def get_list_dimensions(_list):
    """Return the length of each dimension of a nested list/tuple.

    e.g. [[1, 2], [3, 4]] -> [2, 2]; a scalar yields [].

    Fix: an empty list previously raised IndexError (it indexed
    _list[0] unconditionally); it now yields [0], since inner
    dimensions of an empty list are unknowable.
    """
    if isinstance(_list, (list, tuple)):
        if not _list:
            return [0]
        return [len(_list)] + get_list_dimensions(_list[0])
    return []
Takes a nested list and returns the size of each dimension. (The type of the innermost elements is obtained separately via get_element_type.)
42,280
def get_element_type(_list, dimens):
    """Return the type of the innermost elements of a nested list.

    Drills down one level (index 0) per entry in *dimens*.
    """
    inner = _list
    for _ in dimens:
        inner = inner[0]
    return type(inner)
Given the dimensions of a nested list and the list returns the type of the elements in the inner list .
42,281
def image_recognition(image, cloud=None, batch=False, api_key=None, version=None, **kwargs):
    """Classify an image; returns label-to-score mappings from the API."""
    payload = data_preprocess(image, batch=batch, size=144, min_axis=True)
    return api_handler(
        payload, cloud=cloud, api="imagerecognition",
        url_params={"batch": batch, "api_key": api_key, "version": version},
        **kwargs)
Given an input image returns a dictionary of image classifications with associated scores
42,282
def spec_from_json_dict(json_dict):
    """Instantiate the appropriate FieldSpec subclass for a feature dict."""
    if 'ignored' in json_dict:
        return Ignore(json_dict['identifier'])
    spec_type = cast(FieldSpec, FIELD_TYPE_MAP[json_dict['format']['type']])
    return spec_type.from_json_dict(json_dict)
Turns a dictionary into the appropriate object .
42,283
def replace_missing_value(self, str_in):
    """Return *str_in*, unless it equals the configured missing-value
    sentinel, in which case return the replacement value instead."""
    mv = self.missing_value
    if mv is not None and mv.sentinel == str_in:
        return mv.replace_with
    return str_in
returns str_in if it is not equals to the sentinel as defined in the missingValue section of the schema . Else it will return the replaceWith value .
42,284
def from_json_dict(cls, json_dict):
    """Build a StringSpec from a dict of its properties.

    A 'pattern' entry switches the spec into regex mode; otherwise
    case/minLength/maxLength constraints are read.  Raises
    InvalidSchemaError for an invalid regular expression.
    """
    result = cast(StringSpec, super().from_json_dict(json_dict))
    format_ = json_dict['format']
    if 'encoding' in format_ and result.hashing_properties:
        result.hashing_properties.encoding = format_['encoding']
    if 'pattern' in format_:
        pattern = format_['pattern']
        try:
            result.regex = re_compile_full(pattern)
        except (SyntaxError, re.error) as e:
            raise_from(
                InvalidSchemaError("Invalid regular expression '{}.'".format(pattern)),
                e)
        result.regex_based = True
    else:
        result.case = format_.get('case', StringSpec._DEFAULT_CASE)
        result.min_length = format_.get('minLength')
        result.max_length = format_.get('maxLength')
        result.regex_based = False
    return result
Make a StringSpec object from a dictionary containing its properties .
42,285
def from_json_dict(cls, json_dict):
    """Build an IntegerSpec from a dict of its properties."""
    result = cast(IntegerSpec, super().from_json_dict(json_dict))
    bounds = json_dict['format']
    result.minimum = bounds.get('minimum')
    result.maximum = bounds.get('maximum')
    return result
Make a IntegerSpec object from a dictionary containing its properties .
42,286
def from_json_dict(cls, json_dict):
    """Build a DateSpec from a dict of its properties."""
    result = cast(DateSpec, super().from_json_dict(json_dict))
    # the date format string is mandatory for date fields
    result.format = json_dict['format']['format']
    return result
Make a DateSpec object from a dictionary containing its properties .
42,287
def from_json_dict(cls, json_dict):
    """Build an EnumSpec from a dict of its properties."""
    result = cast(EnumSpec, super().from_json_dict(json_dict))
    # membership tests are O(1) on the set of permitted values
    result.values = set(json_dict['format']['values'])
    return result
Make a EnumSpec object from a dictionary containing its properties .
42,288
def standardize_input_data(data):
    """Ensure str (not bytes) values are passed to the indico API.

    Decodes a bytes payload, and decodes any bytes elements of a list
    payload, as UTF-8.

    Fix: the list comprehension previously tested type(data) == bytes
    instead of the element's type — data is a list at that point, so
    bytes elements were never actually decoded.
    """
    if isinstance(data, bytes):
        data = data.decode('utf-8')
    if isinstance(data, list):
        data = [el.decode('utf-8') if isinstance(el, bytes) else el
                for el in data]
    return data
Ensure utf - 8 encoded strings are passed to the indico API
42,289
def api_handler(input_data, cloud, api, url_params=None, batch_size=None, **kwargs):
    """Send request data to the ML server and return its response.

    When batch_size is given, the input is split into smaller requests
    and the results aggregated (see collect_api_results).
    """
    url_params = url_params or {}
    cleaned = standardize_input_data(input_data)
    cloud = cloud or config.cloud
    host = "%s.indico.domains" % cloud if cloud else config.host
    # hosts outside the indico domains are private clouds served over http
    if host.endswith('indico.domains') or host.endswith('indico.io'):
        protocol = config.url_protocol
    else:
        protocol = "http"
    headers = dict(JSON_HEADERS)
    headers["X-ApiKey"] = url_params.get("api_key") or config.api_key
    url = create_url(protocol, host, api, dict(kwargs, **url_params))
    return collect_api_results(cleaned, url, headers, api, batch_size, kwargs)
Sends finalized request data to ML server and receives response . If a batch_size is specified breaks down a request into smaller component requests and aggregates the results .
42,290
def collect_api_results(input_data, url, headers, api, batch_size, kwargs):
    """Split a request into batches (when batch_size is set) and aggregate.

    If a batch fails mid-way, the partial results are dumped to a
    timestamped JSON file and a BatchProcessingError naming that file
    is raised.

    Fixes: the error message contained the literal text '(unknown)'
    while the filename= format argument went unused — the saved path is
    now interpolated; the dump file handle is also closed via a context
    manager instead of being leaked.
    """
    if not batch_size:
        return send_request(input_data, api, url, headers, kwargs)
    results = []
    for batch in batched(input_data, size=batch_size):
        try:
            result = send_request(batch, api, url, headers, kwargs)
            if isinstance(result, list):
                results.extend(result)
            else:
                results.append(result)
        except IndicoError as e:
            timestamp = datetime.datetime.now().strftime('%Y-%m-%d-%H:%M:%S')
            filename = "indico-{api}-{timestamp}.json".format(
                api=api, timestamp=timestamp)
            # py2's open() has no encoding parameter
            if sys.version_info > (3, 0):
                dump_file = open(filename, mode='w', encoding='utf-8')
            else:
                dump_file = open(filename, mode='w')
            with dump_file:
                json.dump(results, dump_file, cls=NumpyEncoder)
            raise BatchProcessingError(
                "The following error occurred while processing your data: `{err}` "
                "Partial results have been saved to {filename}".format(
                    err=e, filename=os.path.abspath(filename)))
    return results
Optionally split up a single request into a series of requests to ensure timely HTTP responses .
42,291
def send_request(input_data, api, url, headers, kwargs):
    """POST the request payload to the indico servers and parse the reply.

    Raises APIDoesNotExist when a private cloud lacks the api, and the
    server-reported error (converted to a Python exception) when the
    response carries no results.

    Fixes: `input_data != None` replaced with the identity test
    `is not None`; the bare `except:` fallback (which also swallowed
    KeyboardInterrupt/SystemExit) narrowed to `except Exception:`.
    """
    data = {}
    if input_data is not None:
        data['data'] = input_data
    serializer = kwargs.pop("serializer", config.serializer)
    data['serializer'] = serializer
    data.update(**kwargs)
    response = requests.post(url, data=json.dumps(data), headers=headers)
    warning = response.headers.get('x-warning')
    if warning:
        warnings.warn(warning)
    cloud = urlparse(url).hostname
    if response.status_code == 503 and not cloud.endswith('.indico.io'):
        raise APIDoesNotExist(
            "Private cloud '%s' does not include api '%s'" % (cloud, api))
    try:
        if serializer == 'msgpack':
            json_results = msgpack.unpackb(response.content)
        else:
            json_results = response.json()
    except (msgpack.exceptions.UnpackException, msgpack.exceptions.ExtraData):
        # msgpack decoding failed; fall back to JSON, then raw text
        try:
            json_results = response.json()
        except Exception:
            json_results = {"error": response.text}
    if config.PY3:
        json_results = convert(json_results)
    results = json_results.get('results', False)
    if results is False:
        raise convert_to_py_error(json_results.get('error'))
    return results
Use the requests library to send of an HTTP call to the indico servers
42,292
def create_url(url_protocol, host, api, url_params):
    """Build the request URL for the given api and options.

    Consumes (pops) the 'batch', 'apis', 'version'/'v' and 'method'
    entries of url_params; 'apis' and 'version' become query-string
    parameters, 'batch' and 'method' extend the path.
    """
    is_batch = url_params.pop("batch", None)
    apis = url_params.pop("apis", None)
    version = url_params.pop("version", None) or url_params.pop("v", None)
    method = url_params.pop('method', None)
    segments = ["{0}://{1}/{2}".format(url_protocol, host, api)]
    if is_batch:
        segments.append("/batch")
    if method:
        segments.append("/{0}".format(method))
    url = "".join(segments)
    query = {}
    if apis:
        query["apis"] = ",".join(apis)
    if version:
        query["version"] = version
    if query:
        url = "{0}?{1}".format(url, urlencode(query))
    return url
Generate the proper url for sending off data for analysis
42,293
def keywords(text, cloud=None, batch=False, api_key=None, version=2, batch_size=None, **kwargs):
    """Extract keywords and associated scores from text via the API.

    Non-English languages are only supported by version 1 of the api.
    """
    if kwargs.get("language", "english") != "english":
        version = 1
    return api_handler(
        text, cloud=cloud, api="keywords",
        url_params={"batch": batch, "api_key": api_key, "version": version},
        batch_size=batch_size, **kwargs)
Given input text returns series of keywords and associated scores
42,294
def personas(text, cloud=None, batch=False, api_key=None, version=None, **kwargs):
    """Return the author's likelihood of 16 personality types as a dict."""
    kwargs['persona'] = True
    return api_handler(
        text, cloud=cloud, api="personality",
        url_params={"batch": batch, "api_key": api_key, "version": version},
        **kwargs)
Given input text returns the authors likelihood of being 16 different personality types in a dict .
42,295
def pdf_extraction(pdf, cloud=None, batch=False, api_key=None, version=None, **kwargs):
    """Extract text and metadata (optionally images/tables) from a PDF.

    PDFs may be filepaths or base64-encoded data; pass images=True or
    tables=True to receive those payloads as well.
    """
    payload = pdf_preprocess(pdf, batch=batch)
    url_params = {"batch": batch, "api_key": api_key, "version": version}
    results = api_handler(payload, cloud=cloud, api="pdfextraction",
                          url_params=url_params, **kwargs)
    # normalise embedded image payloads in every result
    if batch:
        for result in results:
            result["images"] = postprocess_images(result.get("images", []))
    else:
        results['images'] = postprocess_images(results.get("images", []))
    return results
Given a pdf returns the text and metadata associated with the given pdf . PDFs may be provided as base64 encoded data or as a filepath . Base64 image data and formatted table is optionally returned by setting images = True or tables = True .
42,296
def get_tokenizer(fhp):
    """Build an n-gram tokenizer from the field hashing properties.

    With falsy *fhp*, returns a tokenizer that yields nothing.  For
    n > 1 the word is padded with one space on each side; with
    positional hashing each token is prefixed by its 1-based position.
    """
    if not fhp:
        def empty_tokenizer(word, ignore=None):
            return iter(())
        return empty_tokenizer
    n = fhp.ngram
    if n < 0:
        raise ValueError('`n` in `n`-gram must be non-negative.')
    positional = fhp.positional

    def tokenizer(word, ignore=None):
        if ignore is not None:
            word = word.replace(ignore, '')
        if n > 1:
            word = ' {} '.format(word)
        grams = (word[i:i + n] for i in range(len(word) - n + 1))
        if positional:
            return ('{} {}'.format(i + 1, g) for i, g in enumerate(grams))
        return grams

    return tokenizer
Get tokeniser function from the hash settings .
42,297
def pdf_preprocess(pdf, batch=False):
    """Base64-encode a local PDF file; pass through already-encoded data.

    Accepts a filepath (read and b64-encoded) or pre-encoded data
    (returned unchanged).  With batch=True, *pdf* is a sequence and
    each element is processed individually.

    Fix: the file handle from open(...).read() was never closed; it is
    now managed with a context manager.
    """
    if batch:
        return [pdf_preprocess(doc, batch=False) for doc in pdf]
    if os.path.isfile(pdf):
        with open(pdf, 'rb') as f:
            return b64encode(f.read())
    return pdf
Load pdfs from local filepath if not already b64 encoded
42,298
def update(self, x):
    """Merge the list of numbers *x* into the running statistics.

    Maintains count (n), sum (t) and sum of squared deviations (S)
    using a pairwise/parallel update so variance can be combined across
    batches in one pass.

    Raises ValueError if x contains NaN or infinities.

    Fix: an empty batch previously raised ZeroDivisionError when
    computing the batch mean; it is now a no-op.
    """
    if len(x) == 0:
        return  # nothing to merge; avoids division by zero below
    if any(math.isnan(float(v)) or math.isinf(float(v)) for v in x):
        raise ValueError('input contains non-finite numbers like "nan" or "+/- inf"')
    t = sum(x)
    m = float(len(x))
    batch_mean = t / m
    batch_s = sum((v - batch_mean) ** 2 for v in x)
    if self.n == 0:
        self.S = self.S + batch_s
    else:
        # cross term combining the existing and new partitions
        self.S = self.S + batch_s + self.n / (m * (m + self.n)) * (m / self.n * self.t - t) ** 2
    self.t = self.t + t
    self.n = self.n + len(x)
updates the statistics with the given list of numbers
42,299
def generate_clk_from_csv(input_f, keys, schema, validate=True, header=True, progress_bar=True):
    """Read PII rows from a CSV file and hash them into serialised CLKs.

    *header* may be True (validate the header against the schema),
    False (no header row) or 'ignore' (skip the header unvalidated).
    With progress_bar=True, a tqdm bar tracks progress and running
    popcount mean/std.
    """
    if header not in {False, True, 'ignore'}:
        raise ValueError("header must be False, True or 'ignore' but is {}.".format(header))
    log.info("Hashing data")
    reader = unicode_reader(input_f)
    if header:
        column_names = next(reader)
        if header != 'ignore':
            validate_header(schema.fields, column_names)
    start_time = time.time()
    pii_data = [tuple(cell.strip() for cell in row) for row in reader]
    validate_row_lengths(schema.fields, pii_data)
    if progress_bar:
        stats = OnlineMeanVariance()
        with tqdm(desc="generating CLKs", total=len(pii_data), unit='clk',
                  unit_scale=True,
                  postfix={'mean': stats.mean(), 'std': stats.std()}) as pbar:
            def callback(tics, clk_stats):
                # fold popcount stats into the bar while advancing it
                stats.update(clk_stats)
                pbar.set_postfix(mean=stats.mean(), std=stats.std(), refresh=False)
                pbar.update(tics)
            results = generate_clks(pii_data, schema, keys,
                                    validate=validate, callback=callback)
    else:
        results = generate_clks(pii_data, schema, keys, validate=validate)
    log.info("Hashing took {:.2f} seconds".format(time.time() - start_time))
    return results
Generate Bloom filters from CSV file then serialise them .