idx
int64
0
251k
question
stringlengths
53
3.53k
target
stringlengths
5
1.23k
len_question
int64
20
893
len_target
int64
3
238
24,500
def get_last_post_for_model(cr, uid, ids, model_pool):
    """Return ``{record_id: latest message date or False}`` for the ids.

    Given a set of ids and a model pool, return a dict of each object id
    with its latest message date as value. To be called in post-migration
    scripts.

    :param cr: database cursor
    :param uid: user id
    :param ids: a single id or a list of ids
    :param model_pool: model pool whose records are browsed
    """
    # Accept a single id as well as a list of ids.
    if not isinstance(ids, list):
        ids = [ids]
    res = {}
    for obj in model_pool.browse(cr, uid, ids):
        message_ids = obj.message_ids
        if message_ids:
            # max() is O(n); no need to sort all messages for the latest.
            res[obj.id] = max(message_ids, key=lambda m: m.date).date
        else:
            res[obj.id] = False
    return res
Given a set of ids and a model pool return a dict of each object ids with their latest message date as a value . To be called in post - migration scripts
120
35
24,501
def set_message_last_post(cr, uid, pool, models):
    """Estimate and store ``message_last_post`` for the given models.

    Given a list of models, set their message_last_post fields to an
    estimated last post datetime. To be called in post-migration scripts.
    """
    # A single model name is also accepted.
    if type(models) is not list:
        models = [models]
    query = (
        "UPDATE {table} "
        "SET message_last_post=(SELECT max(mm.date) "
        "FROM mail_message mm "
        "WHERE mm.model=%s "
        "AND mm.date IS NOT NULL "
        "AND mm.res_id={table}.id)"
    )
    for model_name in models:
        table = pool[model_name]._table
        cr.execute(query.format(table=table), (model_name,))
Given a list of models set their message_last_post fields to an estimated last post datetime . To be called in post - migration scripts
128
29
24,502
def column_exists(cr, table, column):
    """Check whether a certain column exists on ``table``."""
    query = (
        'SELECT count(attname) FROM pg_attribute '
        'WHERE attrelid = '
        '( SELECT oid FROM pg_class WHERE relname = %s ) '
        'AND attname = %s'
    )
    cr.execute(query, (table, column))
    return cr.fetchone()[0] == 1
Check whether a certain column exists
79
6
24,503
def start_logging(out=_stdout, level='info'):
    """Begin logging.

    :param out: file-like object log output is written to (default stdout).
    :param level: one of the names in ``log_levels``; anything else raises
        RuntimeError.

    Idempotent: once logging has started, further calls are no-ops.
    """
    global _log_level, _loggers, _started_logging
    if level not in log_levels:
        raise RuntimeError(
            "Invalid log level '{0}'; valid are: {1}".format(
                level, ', '.join(log_levels)))
    if _started_logging:
        return
    _started_logging = True
    _log_level = level
    handler = _TxaioFileHandler(out)
    logging.getLogger().addHandler(handler)
    # note: Don't need to call basicConfig() or similar, because we've
    # now added at least one handler to the root logger
    logging.raiseExceptions = True  # FIXME
    # Map our level names to stdlib levels; 'trace' has no stdlib
    # equivalent and degrades to DEBUG.
    level_to_stdlib = {
        'critical': logging.CRITICAL,
        'error': logging.ERROR,
        'warn': logging.WARNING,
        'info': logging.INFO,
        'debug': logging.DEBUG,
        'trace': logging.DEBUG,
    }
    logging.getLogger().setLevel(level_to_stdlib[level])
    # make sure any loggers we created before now have their log-level
    # set (any created after now will get it from _log_level
    for logger in _loggers:
        logger._set_log_level(level)
Begin logging .
288
3
24,504
def create_failure(self, exception=None):
    """Return an object implementing IFailedFuture.

    If ``exception`` is given it is wrapped directly; otherwise the
    currently-handled exception (``sys.exc_info()``) is captured, so in
    that case this must be called from within an ``except`` block.
    """
    if not exception:
        # Snapshot the exception currently being handled.
        return FailedFuture(*sys.exc_info())
    return FailedFuture(type(exception), exception, None)
This returns an object implementing IFailedFuture .
42
9
24,505
def gather(self, futures, consume_exceptions=True):
    """Return a Future that waits for all the Futures in ``futures``.

    from the asyncio docs: "If return_exceptions is True, exceptions
    in the tasks are treated the same as successful results, and
    gathered in the result list; otherwise, the first raised
    exception will be immediately propagated to the returned
    future."
    """
    return asyncio.gather(*futures, return_exceptions=consume_exceptions)
This returns a Future that waits for all the Futures in the list futures
89
15
24,506
def _use_framework(module):
    """Internal helper: point this module's public helper-methods at the
    given framework-specific implementation module."""
    import txaio
    for name in __all__:
        # The use_* selectors themselves must keep pointing here.
        if name in ('use_twisted', 'use_asyncio'):
            continue
        setattr(txaio, name, getattr(module, name))
Internal helper to set this modules methods to a specified framework helper - methods .
66
15
24,507
def start_logging(out=_stdout, level='info'):
    """Start logging to the file-like object in ``out``. By default this
    is stdout.

    :param out: file-like object to write logs to; may be falsy when only
        the global level of new-style Twisted loggers should be set.
    :param level: one of ``log_levels``; anything else raises RuntimeError.

    Idempotent: once logging has started, further calls are no-ops.
    """
    global _loggers, _observer, _log_level, _started_logging
    if level not in log_levels:
        raise RuntimeError(
            "Invalid log level '{0}'; valid are: {1}".format(
                level, ', '.join(log_levels)))
    if _started_logging:
        return
    _started_logging = True
    _log_level = level
    set_global_log_level(_log_level)
    if out:
        _observer = _LogObserver(out)
    if _NEW_LOGGER:
        # Twisted >= 15.2: register our observer (if any) with the
        # new-style logging framework.
        _observers = []
        if _observer:
            _observers.append(_observer)
        globalLogBeginner.beginLoggingTo(_observers)
    else:
        # Legacy Twisted logging requires an output object.
        assert out, "out needs to be given a value if using Twisteds before 15.2"
        from twisted.python import log
        log.startLogging(out)
Start logging to the file - like object in out . By default this is stdout .
210
18
24,508
def set_log_level(self, level, keep=True):
    """Set the log level.

    If ``keep`` is True then it will not change along with global log
    changes.
    """
    self._set_log_level(level)
    # Remember whether this level was pinned explicitly.
    self._log_level_set_explicitly = keep
Set the log level . If keep is True then it will not change along with global log changes .
41
20
24,509
def sleep(self, delay):
    """Inline sleep for use in co-routines.

    Returns a Deferred that fires (with None) after ``delay`` seconds.
    """
    deferred = Deferred()
    self._get_loop().callLater(delay, deferred.callback, None)
    return deferred
Inline sleep for use in co - routines .
36
10
24,510
def _notify_bucket(self, real_time):
    """Internal helper. This does the callbacks in a particular bucket.

    Pops the bucket scheduled for ``real_time`` and runs its calls in
    chunks of ``self._chunk_size``, re-scheduling itself between chunks so
    the whole bucket is spread over ``self._bucket_milliseconds``.
    Exceptions of individual calls are collected and re-raised as a single
    RuntimeError after the last chunk has run.
    """
    (delayed_call, calls) = self._buckets[real_time]
    # Remove the bucket first, so call_later()s issued from within the
    # callbacks open a fresh bucket instead of mutating this one.
    del self._buckets[real_time]
    errors = []

    def notify_one_chunk(calls, chunk_size, chunk_delay_ms):
        # Run one chunk now; defer the remainder via a delayed call.
        for call in calls[:chunk_size]:
            try:
                call()
            except Exception as e:
                errors.append(e)
        calls = calls[chunk_size:]
        if calls:
            self._create_delayed_call(
                chunk_delay_ms / 1000.0,
                notify_one_chunk,
                calls, chunk_size, chunk_delay_ms,
            )
        else:
            # done all calls; make sure there were no errors
            if len(errors):
                msg = u"Error(s) processing call_later bucket:\n"
                for e in errors:
                    msg += u"{}\n".format(e)
                raise RuntimeError(msg)

    # ceil()ing because we want the number of chunks, and a
    # partial chunk is still a chunk
    delay_ms = self._bucket_milliseconds / \
        math.ceil(float(len(calls)) / self._chunk_size)
    # I can't imagine any scenario in which chunk_delay_ms is
    # actually less than zero, but just being safe here
    notify_one_chunk(calls, self._chunk_size, max(0.0, delay_ms))
Internal helper . This does the callbacks in a particular bucket .
318
13
24,511
def check_ab(ab, verb):
    r"""Check source-receiver configuration.

    Returns ``(ab_calc, msrc, mrec)``: the configuration that actually has
    to be computed (after applying reciprocity) plus flags telling whether
    source and receiver are magnetic.
    """
    # <ab> must be castable to an integer.
    try:
        ab = int(ab)
    except VariableCatch:
        print('* ERROR :: <ab> must be an integer')
        raise

    # All possible source-receiver orientation codes.
    pab = [11, 12, 13, 14, 15, 16, 21, 22, 23, 24, 25, 26, 31, 32, 33,
           34, 35, 36, 41, 42, 43, 44, 45, 46, 51, 52, 53, 54, 55, 56,
           61, 62, 63, 64, 65, 66]
    if ab not in pab:
        print('* ERROR :: <ab> must be one of: ' + str(pab) + ';' +
              ' <ab> provided: ' + str(ab))
        raise ValueError('ab')

    # Print input <ab>.
    if verb > 2:
        print(" Input ab : ", ab)

    # Digits 4-6 denote magnetic, 1-3 electric orientations.
    msrc = ab % 10 > 3   # If True: magnetic src
    mrec = ab // 10 > 3  # If True: magnetic rec

    # If rec is magnetic, switch <ab> using reciprocity.
    if mrec:
        if msrc:
            # G^mm_ab(s, r, e, z) = -G^ee_ab(s, r, -z, -e)
            ab_calc = ab - 33  # -30 : mrec->erec; -3: msrc->esrc
        else:
            # G^me_ab(s, r, e, z) = -G^em_ba(r, s, e, z)
            ab_calc = ab % 10 * 10 + ab // 10  # Swap alpha/beta
    else:
        ab_calc = ab

    # Print actual calculated <ab>.
    if verb > 2:
        if ab in [36, 63]:
            print("\n> <ab> IS " + str(ab) + " WHICH IS ZERO; returning")
        else:
            print(" Calculated ab : ", ab_calc)

    return ab_calc, msrc, mrec
r Check source - receiver configuration .
478
7
24,512
def check_dipole(inp, name, verb):
    r"""Check dipole parameters.

    ``inp`` holds [x, y, z]; x & y must have the same length, z is a
    single float. Returns the checked input and the number of dipoles.
    """
    # Check inp for x, y, and z; x & y must have same length, z a float.
    _check_shape(np.squeeze(inp), name, (3,))
    inp[0] = _check_var(inp[0], float, 1, name + '-x')
    inp[1] = _check_var(inp[1], float, 1, name + '-y', inp[0].shape)
    inp[2] = _check_var(inp[2], float, 1, name + '-z', (1,))

    # Print spatial parameters.
    if verb > 2:
        # Pole-type: src or rec.
        if name == 'src':
            longname = ' Source(s) : '
        else:
            longname = ' Receiver(s) : '
        print(longname, str(inp[0].size), 'dipole(s)')
        tname = ['x ', 'y ', 'z ']
        for i in range(3):
            _prnt_min_max_val(inp[i], " > " + tname[i] + " [m] : ", verb)

    return inp, inp[0].size
r Check dipole parameters .
299
6
24,513
def check_frequency(freq, res, aniso, epermH, epermV, mpermH, mpermV, verb):
    r"""Calculate frequency-dependent parameters.

    Returns ``(freq, etaH, etaV, zetaH, zetaV)``.
    """
    global _min_freq

    # A dict-res means the user provided etaH/etaV/zetaH/zetaV directly.
    if isinstance(res, dict):
        res = res['res']

    # Check frequency.
    freq = _check_var(freq, float, 1, 'freq')

    # Minimum frequency to avoid division by zero at freq = 0 Hz.
    # => min_freq can be set with utils.set_min
    freq = _check_min(freq, _min_freq, 'Frequencies', 'Hz', verb)
    if verb > 2:
        _prnt_min_max_val(freq, " frequency [Hz] : ", verb)

    # Physical constants.
    c = 299792458              # Speed of light m/s
    mu_0 = 4e-7*np.pi          # Magn. permeability of free space [H/m]
    epsilon_0 = 1./(mu_0*c*c)  # Elec. permittivity of free space [F/m]

    # Calculate eta and zeta (horizontal and vertical).
    iwf = 2j*np.pi*freq  # i*omega, computed once
    etaH = 1/res + np.outer(iwf, epermH*epsilon_0)
    etaV = 1/(res*aniso*aniso) + np.outer(iwf, epermV*epsilon_0)
    zetaH = np.outer(iwf, mpermH*mu_0)
    zetaV = np.outer(iwf, mpermV*mu_0)

    return freq, etaH, etaV, zetaH, zetaV
r Calculate frequency - dependent parameters .
417
8
24,514
def check_opt(opt, loop, ht, htarg, verb):
    r"""Check optimization parameters.

    Returns ``(use_ne_eval, loop_freq, loop_off)``.
    """
    # Kernel optimization: use numexpr if requested and available.
    use_ne_eval = False
    if opt == 'parallel':
        if numexpr:
            use_ne_eval = numexpr.evaluate
        elif verb > 0:
            print(numexpr_msg)

    # Lagged/splined FHT forces looping over frequencies.
    lagged_splined_fht = ht == 'fht' and htarg[1] != 0

    if ht in ['hqwe', 'hquad'] or lagged_splined_fht:
        loop_freq = True
        loop_off = False
    else:
        loop_off = loop == 'off'
        loop_freq = loop == 'freq'

    # If verbose, print optimization information.
    if verb > 2:
        if use_ne_eval:
            print(" Kernel Opt. : Use parallel")
        else:
            print(" Kernel Opt. : None")
        if loop_off:
            print(" Loop over : Offsets")
        elif loop_freq:
            print(" Loop over : Frequencies")
        else:
            print(" Loop over : None (all vectorized)")

    return use_ne_eval, loop_freq, loop_off
r Check optimization parameters .
279
5
24,515
def check_time_only(time, signal, verb):
    r"""Check time and signal parameters.

    Returns the checked time array.
    """
    global _min_time

    # Check input signal.
    # NOTE(review): int(None) raises TypeError before the friendly error
    # below is printed — confirm callers never pass signal=None here.
    if int(signal) not in [-1, 0, 1]:
        print("* ERROR :: <signal> must be one of: [None, -1, 0, 1]; " +
              "<signal> provided: " + str(signal))
        raise ValueError('signal')

    # Check time; enforce minimum time to avoid division by zero at
    # time = 0 s.  => min_time can be set with utils.set_min
    time = _check_var(time, float, 1, 'time')
    time = _check_min(time, _min_time, 'Times', 's', verb)
    if verb > 2:
        _prnt_min_max_val(time, " time [s] : ", verb)

    return time
r Check time and signal parameters .
190
7
24,516
def check_solution(solution, signal, ab, msrc, mrec):
    r"""Check required solution with parameters.

    Raises ValueError for an unknown solution, for diffusive solutions
    with magnetic src/rec, and for the full fullspace solution in the
    time domain.
    """
    # Ensure valid solution.
    valid = ['fs', 'dfs', 'dhs', 'dsplit', 'dtetm']
    if solution not in valid:
        print("* ERROR :: Solution must be one of ['fs', 'dfs', 'dhs', " +
              "'dsplit', 'dtetm']; <solution> provided: " + solution)
        raise ValueError('solution')

    # Diffusive solutions only exist for electric src AND electric rec.
    if solution.startswith('d') and (msrc or mrec):
        print('* ERROR :: Diffusive solution is only implemented for ' +
              'electric sources and electric receivers, <ab> provided: ' +
              str(ab))
        raise ValueError('ab')

    # The full fullspace solution exists only in the frequency domain.
    if solution == 'fs' and signal is not None:
        print('* ERROR :: Full fullspace solution is only implemented for ' +
              'the frequency domain, <signal> provided: ' + str(signal))
        raise ValueError('signal')
r Check required solution with parameters .
250
7
24,517
def get_abs(msrc, mrec, srcazm, srcdip, recazm, recdip, verb):
    r"""Get required ab's for given angles.

    Builds the 3x3 grid of source-receiver orientation codes (9 at most),
    applies reciprocity for magnetic receivers, and removes components
    that are identically zero for axis-aligned sources/receivers.
    """

    def _aligned(angles):
        """0 if all angles are multiples of pi, 1 if multiples of pi/2
        only, None when not axis-aligned (keep everything)."""
        first = np.atleast_1d(angles)[0]
        if np.allclose(angles % (np.pi/2), 0):
            if np.isclose(first // (np.pi/2) % 2, 0):
                return 0  # Multiples of pi (180)
            return 1      # Multiples of pi/2 (90)
        return None

    # Get required ab's (9 at most).
    ab_calc = np.array([[11, 12, 13], [21, 22, 23], [31, 32, 33]])
    if msrc:
        ab_calc += 3
    if mrec:
        ab_calc += 30

        # Switch <ab> using reciprocity.
        if msrc:
            # G^mm_ab(s, r, e, z) = -G^ee_ab(s, r, -z, -e)
            ab_calc -= 33  # -30 : mrec->erec; -3: msrc->esrc
        else:
            # G^me_ab(s, r, e, z) = -G^em_ba(r, s, e, z)
            ab_calc = ab_calc % 10 * 10 + ab_calc // 10  # Swap alpha/beta

    # Remove unnecessary ab's.
    bab = np.ones(ab_calc.shape, dtype=bool)

    case = _aligned(srcazm)  # Source azimuth
    if case == 0:
        bab[:, 1] = False   # x-directed source, remove y
    elif case == 1:
        bab[:, 0] = False   # y-directed source, remove x

    case = _aligned(srcdip)  # Source dip
    if case == 0:
        bab[:, 2] = False   # Horizontal, remove z
    elif case == 1:
        bab[:, :2] = False  # Vertical, remove x/y

    case = _aligned(recazm)  # Receiver azimuth
    if case == 0:
        bab[1, :] = False   # x-directed receiver, remove y
    elif case == 1:
        bab[0, :] = False   # y-directed receiver, remove x

    case = _aligned(recdip)  # Receiver dip
    if case == 0:
        bab[2, :] = False   # Horizontal, remove z
    elif case == 1:
        bab[:2, :] = False  # Vertical, remove x/y

    # Reduce to the required configurations.
    ab_calc = ab_calc[bab].ravel()

    # Print actual calculated <ab>.
    if verb > 2:
        print(" Required ab's : ", _strvar(ab_calc))

    return ab_calc
r Get required ab s for given angles .
826
9
24,518
def get_geo_fact(ab, srcazm, srcdip, recazm, recdip, msrc, mrec):
    r"""Get required geometrical scaling factor for given angles."""
    global _min_angle

    # Current direction digit (1-6) for source and receiver.
    fis = ab % 10
    fir = ab // 10

    # If rec is magnetic and src not, swap directions (reciprocity).
    # (They have been swapped in get_abs, but the original scaling applies.)
    if mrec and not msrc:
        fis, fir = fir, fis

    def gfact(bp, azm, dip):
        r"""Geometrical factor of source or receiver."""
        if bp in [1, 4]:    # x-directed
            return np.cos(azm)*np.cos(dip)
        elif bp in [2, 5]:  # y-directed
            return np.sin(azm)*np.cos(dip)
        else:               # z-directed
            return np.sin(dip)

    # Combine source and receiver factors.
    fact = np.outer(gfact(fis, srcazm, srcdip),
                    gfact(fir, recazm, recdip)).ravel()

    # Set very small angles to proper zero (because e.g. sin(pi/2) != exact 0)
    # => min_angle can be set with utils.set_min
    fact[np.abs(fact) < _min_angle] = 0

    return fact
r Get required geometrical scaling factor for given angles .
320
12
24,519
def get_layer_nr(inp, depth):
    r"""Get number of layer in which ``inp`` resides.

    :param inp: (x, y, z) input; only the depth ``inp[2]`` is used.
    :param depth: layer interfaces, [-infty : last interface].
    :returns: (layer index/indices, z-coordinate(s) of ``inp``).
    """
    zinp = inp[2]

    # depth = [-infty : last interface]; create additional depth-array
    # pdepth = [first interface : +infty]
    # (np.inf, not np.infty: that alias was removed in NumPy 2.0.)
    pdepth = np.concatenate((depth[1:], np.array([np.inf])))

    # Broadcast arrays: one row per input depth.
    b_zinp = np.atleast_1d(zinp)[:, None]

    # A point is in layer i when depth[i] < z <= pdepth[i].
    linp = np.where((depth[None, :] < b_zinp)*(pdepth[None, :] >= b_zinp))[1]

    # Return; squeeze in case of only one inp-depth.
    return np.squeeze(linp), zinp
r Get number of layer in which inp resides .
172
11
24,520
def get_off_ang(src, rec, nsrc, nrec, verb):
    r"""Get offsets and angles, hence spatial input parameters."""
    global _min_off

    # Pre-allocate off and angle.
    off = np.empty((nrec*nsrc,))
    angle = np.empty((nrec*nsrc,))

    # Coordinates: loop over sources, append them one after another.
    for i in range(nsrc):
        dx = rec[0] - src[0][i]  # X-coordinates [m]
        dy = rec[1] - src[1][i]  # Y-coordinates [m]
        sl = slice(i*nrec, (i + 1)*nrec)
        off[sl] = np.sqrt(dx*dx + dy*dy)  # Offset [m]
        angle[sl] = np.arctan2(dy, dx)    # Angle [rad]

    # Note: One could achieve a potential speed-up using np.unique to sort
    # out src-rec configurations that have the same offset and angle. Very
    # unlikely for real data.

    # Minimum offset to avoid singularities at off = 0 m.
    # => min_off can be set with utils.set_min
    angle[off < _min_off] = np.nan
    off = _check_min(off, _min_off, 'Offsets', 'm', verb)

    return off, angle
r Get depths offsets angles hence spatial input parameters .
320
10
24,521
def printstartfinish(verb, inp=None, kcount=None):
    r"""Print start and finish with time measure and kernel count.

    Call once without ``inp`` to obtain a start time; call again passing
    that time as ``inp`` (and optionally ``kcount``) to print the runtime.
    """
    if inp:
        # Finish: report elapsed runtime and kernel calls, if verbose.
        if verb > 1:
            ttxt = str(timedelta(seconds=default_timer() - inp))
            ktxt = ' '
            if kcount:
                ktxt += str(kcount) + ' kernel call(s)'
            print('\n:: empymod END; runtime = ' + ttxt + ' ::' + ktxt + '\n')
    else:
        # Start: remember the current timer value.
        t0 = default_timer()
        if verb > 2:
            print("\n:: empymod START ::\n")
        return t0
r Print start and finish with time measure and kernel count .
134
12
24,522
def set_minimum(min_freq=None, min_time=None, min_off=None, min_res=None,
                min_angle=None):
    r"""Set minimum values of parameters.

    Only parameters that are not None are updated.
    """
    updates = {'_min_freq': min_freq, '_min_time': min_time,
               '_min_off': min_off, '_min_res': min_res,
               '_min_angle': min_angle}
    # Update the module-level minima for every explicitly given value.
    for gname, value in updates.items():
        if value is not None:
            globals()[gname] = value
r Set minimum values of parameters .
146
7
24,523
def get_minimum():
    r"""Return the current minimum values as a dict."""
    return {
        'min_freq': _min_freq,
        'min_time': _min_time,
        'min_off': _min_off,
        'min_res': _min_res,
        'min_angle': _min_angle,
    }
r Return the current minimum values .
60
7
24,524
def _check_var ( var , dtype , ndmin , name , shape = None , shape2 = None ) : if var is None : raise ValueError var = np . array ( var , dtype = dtype , copy = True , ndmin = ndmin ) if shape : _check_shape ( var , name , shape , shape2 ) return var
r Return variable as array of dtype ndmin ; shape - checked .
80
16
24,525
def _strvar ( a , prec = '{:G}' ) : return ' ' . join ( [ prec . format ( i ) for i in np . atleast_1d ( a ) ] )
r Return variable as a string to print with given precision .
46
12
24,526
def _check_min ( par , minval , name , unit , verb ) : scalar = False if par . shape == ( ) : scalar = True par = np . atleast_1d ( par ) if minval is not None : ipar = np . where ( par < minval ) par [ ipar ] = minval if verb > 0 and np . size ( ipar ) != 0 : print ( '* WARNING :: ' + name + ' < ' + str ( minval ) + ' ' + unit + ' are set to ' + str ( minval ) + ' ' + unit + '!' ) if scalar : return np . squeeze ( par ) else : return par
r Check minimum value of parameter .
149
7
24,527
def spline_backwards_hankel(ht, htarg, opt):
    r"""Check opt if deprecated spline is used.

    Returns possibly-updated ``(htarg, opt)``: a deprecated
    ``opt='spline'`` is translated into the corresponding ``htarg``
    settings.
    """
    # Ensure ht is all lowercase.
    ht = ht.lower()

    # Only relevant for 'fht' and 'hqwe', not for 'quad'.
    if ht in ['fht', 'qwe', 'hqwe']:

        # Get corresponding htarg.
        if ht == 'fht':
            htarg = _check_targ(htarg, ['fhtfilt', 'pts_per_dec'])
        else:  # 'qwe'/'hqwe'
            htarg = _check_targ(htarg, ['rtol', 'atol', 'nquad', 'maxint',
                                        'pts_per_dec', 'diff_quad', 'a',
                                        'b', 'limit'])

        # If spline (qwe, fht) or lagged (fht).
        if opt == 'spline':
            # Issue deprecation warning.
            mesg = ("\n The use of `opt='spline'` is deprecated and will " +
                    "be removed\n in v2.0.0; use the corresponding " +
                    "setting in `htarg`.")
            warnings.warn(mesg, DeprecationWarning)

            # Reset opt.
            opt = None

            # Check pts_per_dec; set to old default values if not given.
            if 'pts_per_dec' not in htarg:
                if ht == 'fht':
                    htarg['pts_per_dec'] = -1  # Lagged Convolution DLF
                else:  # 'qwe'/'hqwe'
                    htarg['pts_per_dec'] = 80  # Splined QWE; old default value

    return htarg, opt
r Check opt if deprecated spline is used .
413
10
24,528
def gpr(src, rec, depth, res, freqtime, cf, gain=None, ab=11, aniso=None,
        epermH=None, epermV=None, mpermH=None, mpermV=None, xdirect=False,
        ht='quad', htarg=None, ft='fft', ftarg=None, opt=None, loop=None,
        verb=2):
    r"""Return the Ground-Penetrating Radar signal.

    EXPERIMENTAL: computes the frequency-domain dipole response, shapes it
    with a Ricker wavelet of centre frequency ``cf``, transforms to the
    time domain, and applies a power ``gain`` over time.

    NOTE(review): the default ``gain=None`` will fail at the gain step
    (``(time*10**9)**None``) — confirm callers always pass a gain.
    """
    if verb > 2:
        print(" GPR : EXPERIMENTAL, USE WITH CAUTION")
        print(" > centre freq : " + str(cf))
        print(" > gain : " + str(gain))

    # === 1. CHECK TIME ============
    # Check times and Fourier Transform arguments, get required frequencies
    time, freq, ft, ftarg = check_time(freqtime, 0, ft, ftarg, verb)

    # === 2. CALL DIPOLE ============
    EM = dipole(src, rec, depth, res, freq, None, ab, aniso, epermH, epermV,
                mpermH, mpermV, xdirect, ht, htarg, ft, ftarg, opt, loop,
                verb)

    # === 3. GPR STUFF
    # Get required parameters
    src, nsrc = check_dipole(src, 'src', 0)
    rec, nrec = check_dipole(rec, 'rec', 0)
    off, _ = get_off_ang(src, rec, nsrc, nrec, 0)

    # Reshape output from dipole
    EM = EM.reshape((-1, nrec*nsrc), order='F')

    # Multiply with ricker wavelet
    cfc = -(np.r_[0, freq[:-1]]/cf)**2
    fwave = cfc*np.exp(cfc)
    EM *= fwave[:, None]

    # Do f->t transform
    EM, conv = tem(EM, off, freq, time, 0, ft, ftarg)

    # In case of QWE/QUAD, print Warning if not converged
    conv_warning(conv, ftarg, 'Fourier', verb)

    # Apply gain; make pure real
    EM *= (1 + np.abs((time*10**9)**gain))[:, None]
    EM = EM.real

    # Reshape for number of sources
    EM = np.squeeze(EM.reshape((-1, nrec, nsrc), order='F'))

    return EM
r Return the Ground - Penetrating Radar signal .
569
10
24,529
def wavenumber(src, rec, depth, res, freq, wavenumber, ab=11, aniso=None,
               epermH=None, epermV=None, mpermH=None, mpermV=None, verb=2):
    r"""Deprecated. Use ``dipole_k`` instead.

    Thin wrapper that warns and forwards all arguments to ``dipole_k``.
    """
    # Issue warning
    mesg = ("\n The use of `model.wavenumber` is deprecated and will " +
            "be removed;\n use `model.dipole_k` instead.")
    warnings.warn(mesg, DeprecationWarning)

    return dipole_k(src, rec, depth, res, freq, wavenumber, ab, aniso,
                    epermH, epermV, mpermH, mpermV, verb)
r Deprecated . Use dipole_k instead .
158
12
24,530
def tem(fEM, off, freq, time, signal, ft, ftarg, conv=True):
    r"""Return the time-domain response of the frequency-domain response fEM.

    Returns ``(tEM, conv)`` where ``conv`` accumulates the convergence
    flags of the individual transforms.
    """
    # 1. Scale frequencies if switch-on/off response.
    # Step function for causal times is like a unit fct, therefore an
    # impulse in frequency domain.
    if signal in [-1, 1]:
        # Divide by signal/(2j*pi*f) to obtain step response.
        fact = signal/(2j*np.pi*freq)
    else:
        fact = 1

    # 2. f->t transform, one offset at a time.
    ftransform = getattr(transform, ft)
    tEM = np.zeros((time.size, off.size))
    for i in range(off.size):
        out = ftransform(fEM[:, i]*fact, time, freq, ftarg)
        tEM[:, i] += out[0]
        conv *= out[1]  # Track whether every transform converged.

    return tEM*2/np.pi, conv  # Scaling from Fourier transform
r Return the time - domain response of the frequency - domain response fEM .
196
16
24,531
def save_filter(name, filt, full=None, path='filters'):
    r"""Save DLF-filter and inversion output to plain text files.

    :param name: filter name; a two-part name (``x.gz``) makes the full
        inversion output gzipped.
    :param filt: filter instance; saved via its own ``tofile`` routine.
    :param full: optional full inversion output from ``fdesign.design``.
    :param path: target directory (default ``./filters``).
    """
    # First we'll save the filter using its internal routine.
    # This will create the directory ./filters if it doesn't exist already.
    filt.tofile(path)

    # If full, we store the inversion output
    if full:

        # Get file name
        path = os.path.abspath(path)
        if len(name.split('.')) == 2:
            suffix = '.gz'
        else:
            suffix = ''
        fullfile = os.path.join(path, name.split('.')[0] + '_full.txt' + suffix)

        # Get number of spacing and shift values
        nspace, nshift = full[3].shape

        # Create header
        header = 'Full inversion output from empymod.fdesign.design\n'
        header += 'Line 11: Nr of spacing values\n'
        header += 'Line 12: Nr of shift values\n'
        header += 'Line 13: Best spacing value\n'
        header += 'Line 14: Best shift value\n'
        header += 'Line 15: Min amplitude or max offset\n'
        header += 'Lines 16-{}: Spacing matrix '.format(nspace + 15)
        header += '({} x {})\n'.format(nspace, nshift)
        header += 'Lines {}-{}: Spacing matrix '.format(nspace + 16,
                                                        2*nspace + 15)
        header += '({} x {})\n'.format(nspace, nshift)
        header += 'Lines {}-{}: Spacing '.format(2*nspace + 16, 3*nspace + 15)
        header += 'matrix ({} x {})\n'.format(nspace, nshift)
        header += 'Line {}: Integer: 0: min amp, 1: max r'.format(3*nspace + 16)

        # Create arrays; put single values in arrays of nshift values
        nr_spacing = np.r_[nspace, np.zeros(nshift - 1)]
        nr_shift = np.r_[nshift, np.zeros(nshift - 1)]
        best_spacing = np.r_[full[0][0], np.zeros(nshift - 1)]
        best_shift = np.r_[full[0][1], np.zeros(nshift - 1)]
        min_value = np.r_[np.atleast_1d(full[1]), np.zeros(nshift - 1)]
        min_max = np.r_[full[4], np.zeros(nshift - 1)]

        # Collect all in one array
        fullsave = np.vstack((nr_spacing, nr_shift, best_spacing,
                              best_shift, min_value, full[2][0],
                              full[2][1], full[3], min_max))

        # Save array
        np.savetxt(fullfile, fullsave, header=header)
r Save DLF - filter and inversion output to plain text files .
676
15
24,532
def load_filter(name, full=False, path='filters'):
    r"""Load saved DLF-filter and inversion output from text files.

    Returns the filter; if ``full`` is set and the inversion-output file
    exists, returns ``(filt, out)`` with the reconstructed inversion
    tuple instead.
    """
    # First we'll get the filter using its internal routine.
    filt = DigitalFilter(name.split('.')[0])
    filt.fromfile(path)

    # If full, we get the inversion output
    if full:

        # Try to get the inversion result. If files are not found, most
        # likely because they were not stored, we only return the filter
        try:
            # Get file name
            path = os.path.abspath(path)
            if len(name.split('.')) == 2:
                suffix = '.gz'
            else:
                suffix = ''
            fullfile = os.path.join(path,
                                    name.split('.')[0] + '_full.txt' + suffix)

            # Read data
            out = np.loadtxt(fullfile)

        except IOError:
            return filt

        # Collect inversion-result tuple
        nspace = int(out[0][0])
        nshift = int(out[1][0])
        space_shift_matrix = np.zeros((2, nspace, nshift))
        space_shift_matrix[0, :, :] = out[5:nspace + 5, :]
        space_shift_matrix[1, :, :] = out[nspace + 5:2*nspace + 5, :]
        out = (np.array([out[2][0], out[3][0]]), out[4][0],
               space_shift_matrix, out[2*nspace + 5:3*nspace + 5, :],
               int(out[3*nspace + 5, 0]))

        return filt, out
    else:
        return filt
r Load saved DLF - filter and inversion output from text files .
385
15
24,533
def plot_result(filt, full, prntres=True):
    r"""QC the inversion result.

    Plots the recovered-field landscape over the spacing/shift search
    grid and the values of the best filter; optionally prints the result
    first. No-op when matplotlib is unavailable.
    """
    # Check matplotlib (soft dependency)
    if not plt:
        print(plt_msg)
        return

    if prntres:
        print_result(filt, full)

    # Get spacing and shift values from full output of brute
    spacing = full[2][0, :, 0]
    shift = full[2][1, 0, :]

    # Get minimum field values from full output of brute
    minfield = np.squeeze(full[3])

    plt.figure("Brute force result", figsize=(9.5, 4.5))
    plt.subplots_adjust(wspace=.4, bottom=0.2)

    # Figure 1: Only if more than 1 spacing or more than 1 shift
    # Figure of minfield, depending if spacing/shift are vectors or floats
    if spacing.size > 1 or shift.size > 1:
        plt.subplot(121)
        if full[4] == 0:  # Min amp
            plt.title("Minimal recovered fields")
            ylabel = 'Minimal recovered amplitude (log10)'
            field = np.log10(minfield)
            cmap = plt.cm.viridis
        else:  # Max r
            plt.title("Maximum recovered r")
            ylabel = 'Maximum recovered r'
            field = 1/minfield
            cmap = plt.cm.viridis_r
        if shift.size == 1:  # (a) if only one shift value,
            plt.plot(spacing, field)
            plt.xlabel('Spacing')
            plt.ylabel(ylabel)
        elif spacing.size == 1:  # (b) if only one spacing value
            plt.plot(shift, field)
            plt.xlabel('Shift')
            plt.ylabel(ylabel)
        else:  # (c) if several spacing and several shift values
            # Mask infinite values so pcolormesh renders the rest.
            field = np.ma.masked_where(np.isinf(minfield), field)
            plt.pcolormesh(shift, spacing, field, cmap=cmap)
            plt.ylabel('Spacing')
            plt.xlabel('Shift')
            plt.colorbar()

    # Figure 2: Filter values
    if spacing.size > 1 or shift.size > 1:
        plt.subplot(122)
    plt.title('Filter values of best filter')
    for attr in ['j0', 'j1', 'sin', 'cos']:
        if hasattr(filt, attr):
            plt.plot(np.log10(filt.base),
                     np.log10(np.abs(getattr(filt, attr))), '.-', lw=.5,
                     label='abs(' + attr + ')')
            # Mark negative filter values in black.
            plt.plot(np.log10(filt.base), np.log10(-getattr(filt, attr)),
                     '.', color='k', ms=4)
    plt.plot(np.inf, 0, '.', color='k', ms=4, label='Neg. values')
    plt.xlabel('Base (log10)')
    plt.ylabel('Abs(Amplitude) (log10)')
    plt.legend(loc='best')
    plt.gcf().canvas.draw()  # To force draw in notebook while running
    plt.show()
r QC the inversion result .
760
7
24,534
def print_result(filt, full=None):
    r"""Print best filter information.

    With ``full`` the stored inversion result is reported; without it,
    spacing/shift are reconstructed from the filter base.
    """
    base = filt.base
    print(' Filter length : %d' % base.size)
    print(' Best filter')

    if full:  # If full provided, we have more information.
        if full[4] == 0:  # Min amp
            print(' > Min field : %g' % full[1])
        else:             # Max r
            print(' > Max r : %g' % (1/full[1]))
        spacing, shift = full[0][0], full[0][1]
    else:  # Print what we can without full.
        spacing = np.log(base[-1]) - np.log(base[-2])
        shift = np.log(base[-1]) - spacing*(base.size//2)

    print(' > Spacing : %1.10g' % spacing)
    print(' > Shift : %1.10g' % shift)
    print(' > Base min/max : %e / %e' % (base.min(), base.max()))
r Print best filter information .
255
6
24,535
def _call_qc_transform_pairs(n, ispacing, ishift, fI, fC, r, r_def, reim):
    r"""QC the input transform pairs.

    Prints how the x-/b-ranges are defined and plots fC and fI over the
    k-ranges implied by the spacing/shift search grid.
    """
    print('* QC: Input transform-pairs:')
    print(' fC: x-range defined through ``n``, ``spacing``, ``shift``, and ' +
          '``r``-parameters; b-range defined through ``r``-parameter.')
    print(' fI: x- and b-range defined through ``n``, ``spacing``' +
          ', ``shift``, and ``r_def``-parameters.')

    # Calculate min/max k, from minimum and maximum spacing/shift
    minspace = np.arange(*ispacing).min()
    maxspace = np.arange(*ispacing).max()
    minshift = np.arange(*ishift).min()
    maxshift = np.arange(*ishift).max()

    maxbase = np.exp(maxspace*(n//2) + maxshift)
    minbase = np.exp(maxspace*(-n//2 + 1) + minshift)

    # For fC-r (k defined with same amount of points as r)
    kmax = maxbase/r.min()
    kmin = minbase/r.max()
    k = np.logspace(np.log10(kmin), np.log10(kmax) + minspace, r.size)

    # For fI-r
    rI = np.logspace(np.log10(1/maxbase) - r_def[0],
                     np.log10(1/minbase) + r_def[1], r_def[2]*n)
    kmaxI = maxbase/rI.min()
    kminI = minbase/rI.max()
    kI = np.logspace(np.log10(kminI), np.log10(kmaxI) + minspace, r_def[2]*n)

    # Plot QC
    fig, axs = plt.subplots(figsize=(9.5, 6), nrows=2, ncols=2,
                            num="Transform pairs")
    axs = axs.ravel()
    plt.subplots_adjust(wspace=.3, hspace=.4)
    _plot_transform_pairs(fC, r, k, axs[:2], 'fC')
    if reim == np.real:
        tit = 'RE(fI)'
    else:
        tit = 'IM(fI)'
    _plot_transform_pairs(fI, rI, kI, axs[2:], tit)
    fig.canvas.draw()  # To force draw in notebook while running
    plt.show()
r QC the input transform pairs .
629
7
24,536
def _plot_transform_pairs(fCI, r, k, axes, tit):
    """Plot the given transform pairs (lhs and rhs) on the two ``axes``."""
    # Plot lhs
    plt.sca(axes[0])
    plt.title('|' + tit + ' lhs|')
    for f in fCI:
        if f.name == 'j2':
            # j2 returns a (j0, j1) tuple of kernels; plot both.
            lhs = f.lhs(k)
            plt.loglog(k, np.abs(lhs[0]), lw=2, label='j0')
            plt.loglog(k, np.abs(lhs[1]), lw=2, label='j1')
        else:
            plt.loglog(k, np.abs(f.lhs(k)), lw=2, label=f.name)

    if tit != 'fC':
        plt.xlabel('l')
    plt.legend(loc='best')

    # Plot rhs
    plt.sca(axes[1])
    plt.title('|' + tit + ' rhs|')

    # Transform pair rhs
    # For 'fC' the rhs are pre-computed values; for fI it is a callable.
    for f in fCI:
        if tit == 'fC':
            plt.loglog(r, np.abs(f.rhs), lw=2, label=f.name)
        else:
            plt.loglog(r, np.abs(f.rhs(r)), lw=2, label=f.name)

    # Transform with Key: reference DLF using the Key filters.
    for f in fCI:
        # Bessel-type transforms use the j0/j1 filter, else sin/cos.
        if f.name[1] in ['0', '1', '2']:
            filt = j0j1filt()
        else:
            filt = sincosfilt()
        kk = filt.base/r[:, None]
        if f.name == 'j2':
            lhs = f.lhs(kk)
            kr0 = np.dot(lhs[0], getattr(filt, 'j0'))/r
            kr1 = np.dot(lhs[1], getattr(filt, 'j1'))/r**2
            kr = kr0 + kr1
        else:
            kr = np.dot(f.lhs(kk), getattr(filt, f.name))/r

        plt.loglog(r, np.abs(kr), '-.', lw=2, label=filt.name)

    if tit != 'fC':
        plt.xlabel('r')
    plt.legend(loc='best')
r Plot the input transform pairs .
568
7
24,537
def _plot_inversion(f, rhs, r, k, imin, spacing, shift, cvar):
    """QC plot of the filter-inversion result for transform ``f``."""
    # Check matplotlib (soft dependency)
    if not plt:
        print(plt_msg)
        return

    plt.figure("Inversion result " + f.name, figsize=(9.5, 4))
    plt.subplots_adjust(wspace=.3, bottom=0.2)
    plt.clf()

    # Theoretical lhs is evaluated on a k-grid with as many points as r.
    tk = np.logspace(np.log10(k.min()), np.log10(k.max()), r.size)

    plt.suptitle(f.name + '; Spacing ::' + str(spacing) + '; Shift ::' +
                 str(shift))

    # Plot lhs
    plt.subplot(121)
    plt.title('|lhs|')
    if f.name == 'j2':
        lhs = f.lhs(tk)
        plt.loglog(tk, np.abs(lhs[0]), lw=2, label='Theoretical J0')
        plt.loglog(tk, np.abs(lhs[1]), lw=2, label='Theoretical J1')
    else:
        plt.loglog(tk, np.abs(f.lhs(tk)), lw=2, label='Theoretical')
    plt.xlabel('l')
    plt.legend(loc='best')

    # Plot rhs
    plt.subplot(122)
    plt.title('|rhs|')

    # Transform pair rhs
    plt.loglog(r, np.abs(f.rhs), lw=2, label='Theoretical')

    # Transform with filter
    plt.loglog(r, np.abs(rhs), '-.', lw=2, label='This filter')

    # Plot minimum amplitude or max r, respectively
    if cvar == 'amp':
        label = 'Min. Amp'
    else:
        label = 'Max. r'
    plt.loglog(r[imin], np.abs(rhs[imin]), 'go', label=label)

    plt.xlabel('r')
    plt.legend(loc='best')

    plt.gcf().canvas.draw()  # To force draw in notebook while running
    plt.show()
r QC the resulting filter .
556
6
24,538
def empy_hankel(ftype, zsrc, zrec, res, freqtime, depth=None, aniso=None,
                epermH=None, epermV=None, mpermH=None, mpermV=None,
                htarg=None, verblhs=0, verbrhs=0):
    """Return a ``Ghosh`` transform pair (lhs, rhs) computed with empymod.

    ``ftype`` is one of 'j0', 'j1', 'j2' (a list yields a list of pairs);
    the remaining parameters define the empymod model. ``verblhs`` and
    ``verbrhs`` control the verbosity of the wavenumber- and space-domain
    calculations, respectively.
    """
    # Loop over ftypes, if there are several
    if isinstance(ftype, list):
        out = []
        for f in ftype:
            out.append(empy_hankel(f, zsrc, zrec, res, freqtime, depth,
                                   aniso, epermH, epermV, mpermH, mpermV,
                                   htarg, verblhs, verbrhs))
        return out

    # Collect model
    model = {'src': [0, 0, zsrc],
             'depth': depth,
             'res': res,
             'aniso': aniso,
             'epermH': epermH,
             'epermV': epermV,
             'mpermH': mpermH,
             'mpermV': mpermV}

    # Finalize model depending on ftype: the source-receiver configuration
    # (ab) and the azimuth select the corresponding Bessel order.
    if ftype == 'j0':    # J0: 11, 45°
        model['ab'] = 11
        x = 1/np.sqrt(2)
        y = 1/np.sqrt(2)

    elif ftype == 'j1':  # J1: 31, 0°
        model['ab'] = 31
        x = 1
        y = 0

    elif ftype == 'j2':  # J2: 12, 45°
        model['ab'] = 12
        x = 1/np.sqrt(2)
        y = 1/np.sqrt(2)

    # rhs: empymod.model.dipole
    # If depth=[], the analytical full-space solution will be used internally
    def rhs(r):
        out = dipole(rec=[r*x, r*y, zrec], ht='qwe', xdirect=True,
                     verb=verbrhs, htarg=htarg, freqtime=freqtime, **model)
        return out

    # lhs: empymod.model.dipole_k
    def lhs(k):
        lhs0, lhs1 = dipole_k(rec=[x, y, zrec], wavenumber=k, verb=verblhs,
                              freq=freqtime, **model)
        if ftype == 'j0':
            return lhs0
        elif ftype == 'j1':
            return lhs1
        elif ftype == 'j2':
            return (lhs0, lhs1)

    return Ghosh(ftype, lhs, rhs)
r Numerical transform pair with empymod .
603
11
24,539
def _get_min_val(spaceshift, *params):
    """Return minimum resolved amplitude or 1/(max r) for this filter.

    Objective function for the brute-force/fmin search: a filter is
    calculated for (spacing, shift), applied to the check-transforms
    ``fC``, and the worst-case quality measure is returned (np.inf if no
    point stays below the error level).
    """
    # Get parameters from tuples
    spacing, shift = spaceshift
    n, fI, fC, r, r_def, error, reim, cvar, verb, plot, log = params

    # Get filter for these parameters
    dlf = _calculate_filter(n, spacing, shift, fI, r_def, reim, 'filt')

    # Calculate rhs-response with this filter
    k = dlf.base/r[:, None]

    # Loop over transforms
    for i, f in enumerate(fC):
        # Calculate lhs and rhs; rhs depends on ftype
        lhs = f.lhs(k)
        if f.name == 'j2':
            rhs0 = np.dot(lhs[0], getattr(dlf, 'j0'))/r
            rhs1 = np.dot(lhs[1], getattr(dlf, 'j1'))/r**2
            rhs = rhs0 + rhs1
        else:
            rhs = np.dot(lhs, getattr(dlf, f.name))/r

        # Get relative error
        rel_error = np.abs((rhs - f.rhs)/f.rhs)

        # Get indices where relative error is bigger than error
        imin0 = np.where(rel_error > error)[0]

        # Find first occurrence of failure
        if np.all(rhs == 0) or np.all(np.isnan(rhs)):
            # if all rhs are zeros or nans, the filter is useless
            imin0 = 0
        elif imin0.size == 0:
            # empty array, all rel_error < error.
            imin0 = rhs.size - 1  # set to last r
            if verb > 0 and log['warn-r'] == 0:
                print('* WARNING :: all data have error < ' + str(error) +
                      '; choose larger r or set error-level higher.')
                log['warn-r'] = 1  # Only do this once
        else:
            # Kind of a dirty hack: Permit to jump up to four bad values,
            # resulting for instance from high rel_error from zero crossings
            # of the transform pair. Should be made an input argument or
            # generally improved.
            if imin0.size > 4:
                imin0 = np.max([0, imin0[4] - 5])
            else:
                # just take the first one (no jumping allowed; normal case)
                imin0 = np.max([0, imin0[0] - 1])
            # Note that both version yield the same result if the failure is
            # consistent.

        # Depending on cvar, store minimum amplitude or 1/maxr
        if cvar == 'amp':
            min_val0 = np.abs(rhs[imin0])
        else:
            min_val0 = 1/r[imin0]

        # Check if this inversion is better than previous ones
        if i == 0:
            # First run, store these values
            imin = dc(imin0)
            min_val = dc(min_val0)
        else:
            # Replace imin, min_val if this one is better
            if min_val0 > min_val:
                min_val = dc(min_val0)
                imin = dc(imin0)

    # QC plot (uses the values of the last transform in the loop)
    if plot > 2:
        _plot_inversion(f, rhs, r, k, imin0, spacing, shift, cvar)

    # If verbose, print progress
    if verb > 1:
        log = _print_count(log)

    # If there is no point with rel_error < error (imin=0) it returns np.inf.
    return np.where(imin == 0, np.inf, min_val)
r Calculate minimum resolved amplitude or maximum r .
842
10
24,540
def _calculate_filter(n, spacing, shift, fI, r_def, reim, name):
    """Compute a DLF for the given n/spacing/shift via a QR least-squares fit.

    Returns a ``DigitalFilter`` instance with ``base``, ``factor``, and one
    coefficient array per transform in ``fI``.
    """
    # Filter base for this n/spacing/shift.
    base = np.exp(spacing*(np.arange(n) - n//2) + shift)

    # Evaluation points r: start/end defined by base AND r_def[0]/r_def[1];
    # the system is overdetermined if r_def[2] > 1.
    r = np.logspace(np.log10(1/np.max(base)) - r_def[0],
                    np.log10(1/np.min(base)) + r_def[1], r_def[2]*n)

    # Required k-values (matrix of shape (r.size, base.size)).
    k = base/r[:, None]

    # Fresh filter instance.
    dlf = DigitalFilter(name.split('.')[0])
    dlf.base = base
    dlf.factor = np.around(np.average(base[1:]/base[:-1]), 15)

    # Fit the coefficients for each transform pair.
    for transform in fI:
        lhs = reim(transform.lhs(k))
        rhs = reim(transform.rhs(r)*r)

        # Solve lhs*J = rhs through a QR decomposition. If factoring fails
        # (qr) or the matrix is singular or square (solve), a LinAlgError
        # is raised; it is ignored and zeros are returned instead.
        try:
            qq, rr = np.linalg.qr(lhs)
            J = np.linalg.solve(rr, rhs.dot(qq))
        except np.linalg.LinAlgError:
            J = np.zeros((base.size, ))

        setattr(dlf, transform.name, J)

    return dlf
r Calculate filter for this spacing shift n .
443
10
24,541
def _print_count ( log ) : log [ 'cnt2' ] += 1 # Current number cp = log [ 'cnt2' ] / log [ 'totnr' ] * 100 # Percentage if log [ 'cnt2' ] == 0 : # Not sure about this; brute seems to call the pass # function with the first arguments twice... elif log [ 'cnt2' ] > log [ 'totnr' ] : # fmin-status print ( " fmin fct calls : %d" % ( log [ 'cnt2' ] - log [ 'totnr' ] ) , end = '\r' ) elif int ( cp ) > log [ 'cnt1' ] or cp < 1 or log [ 'cnt2' ] == log [ 'totnr' ] : # Get seconds since start sec = int ( default_timer ( ) - log [ 'time' ] ) # Get estimate of remaining time, as string tleft = str ( timedelta ( seconds = int ( 100 * sec / cp - sec ) ) ) # Print progress pstr = ( " brute fct calls : %d/%d" % ( log [ 'cnt2' ] , log [ 'totnr' ] ) ) if log [ 'totnr' ] > 100 : pstr += ( " (%d %%); est: %s " % ( cp , tleft ) ) print ( pstr , end = '\r' ) if log [ 'cnt2' ] == log [ 'totnr' ] : # Empty previous line print ( " " * len ( pstr ) , end = '\r' ) # Print final brute-message print ( " brute fct calls : %d" % log [ 'totnr' ] ) # Update percentage cnt1 log [ 'cnt1' ] = cp return log
r Print run - count information .
405
7
24,542
def wavenumber(zsrc, zrec, lsrc, lrec, depth, etaH, etaV, zetaH, zetaV,
               lambd, ab, xdirect, msrc, mrec, use_ne_eval):
    """Calculate the wavenumber-domain solution.

    Computes the TM/TE Green's functions and collects them into the
    kernels PJ0, PJ1, and PJ0b required by the J0/J1 Hankel transforms;
    kernels that are zero for this ``ab`` are returned as None.
    """

    # ** CALCULATE GREEN'S FUNCTIONS
    # Shape of PTM, PTE: (nfreq, noffs, nfilt)
    PTM, PTE = greenfct(zsrc, zrec, lsrc, lrec, depth, etaH, etaV, zetaH,
                        zetaV, lambd, ab, xdirect, msrc, mrec, use_ne_eval)

    # ** AB-SPECIFIC COLLECTION OF PJ0, PJ1, AND PJ0b

    # Pre-allocate output
    PJ0 = None
    PJ1 = None
    PJ0b = None

    # Calculate Ptot which is used in all cases
    Ptot = (PTM + PTE)/(4*np.pi)

    # If rec is magnetic switch sign (reciprocity MM/ME => EE/EM).
    if mrec:
        sign = -1
    else:
        sign = 1

    # Group into PJ0 and PJ1 for J0/J1 Hankel Transform
    if ab in [11, 12, 21, 22, 14, 24, 15, 25]:  # Eqs 105, 106, 111, 112,
        # J2(kr) = 2/(kr)*J1(kr) - J0(kr)       # 119, 120, 123, 124
        if ab in [14, 22]:
            sign *= -1

        PJ0b = sign/2*Ptot*lambd
        PJ1 = -sign*Ptot

        if ab in [11, 22, 24, 15]:
            if ab in [22, 24]:
                sign *= -1

            PJ0 = sign*(PTM - PTE)/(8*np.pi)*lambd

    elif ab in [13, 23, 31, 32, 34, 35, 16, 26]:  # Eqs 107, 113, 114, 115,
        PJ1 = sign*Ptot*lambd*lambd               # .   121, 125, 126, 127
        if ab in [34, 26]:
            PJ1 *= -1

    elif ab in [33, ]:  # Eq 116
        PJ0 = sign*Ptot*lambd*lambd*lambd

    # Return PJ0, PJ1, PJ0b
    return PJ0, PJ1, PJ0b
r Calculate wavenumber domain solution .
535
9
24,543
def reflections(depth, e_zH, Gam, lrec, lsrc, use_ne_eval):
    """Calculate the global reflection coefficients Rp and Rm.

    Walks the layer stack from the outermost layers towards the src/rec
    layers, accumulating local reflection coefficients (Eqs 65/A-12) into
    the global ones (Eqs 64/A-11).
    """

    # Loop over Rp, Rm
    for plus in [True, False]:

        # Switches depending if plus or minus
        if plus:
            pm = 1
            layer_count = np.arange(depth.size-2, min(lrec, lsrc)-1, -1)
            izout = abs(lsrc-lrec)
            minmax = max(lrec, lsrc)
        else:
            pm = -1
            layer_count = np.arange(1, max(lrec, lsrc)+1, 1)
            izout = 0
            minmax = -min(lrec, lsrc)

        # If rec in last and rec below src (plus) or
        # if rec in first and rec above src (minus), shift izout
        shiftplus = lrec < lsrc and lrec == 0 and not plus
        shiftminus = lrec > lsrc and lrec == depth.size-1 and plus
        if shiftplus or shiftminus:
            izout -= pm

        # Pre-allocate Ref
        Ref = np.zeros((Gam.shape[0], Gam.shape[1], abs(lsrc-lrec)+1,
                        Gam.shape[3]), dtype=complex)

        # Calculate the reflection
        for iz in layer_count:

            # Eqs 65, A-12
            e_zHa = e_zH[:, None, iz+pm, None]
            Gama = Gam[:, :, iz, :]
            e_zHb = e_zH[:, None, iz, None]
            Gamb = Gam[:, :, iz+pm, :]
            if use_ne_eval:
                # numexpr-evaluated variant of the same expression.
                rlocstr = "(e_zHa*Gama - e_zHb*Gamb)/(e_zHa*Gama + e_zHb*Gamb)"
                rloc = use_ne_eval(rlocstr)
            else:
                rloca = e_zHa*Gama
                rlocb = e_zHb*Gamb
                rloc = (rloca - rlocb)/(rloca + rlocb)

            # In first layer tRef = rloc
            if iz == layer_count[0]:
                tRef = rloc.copy()
            else:
                ddepth = depth[iz+1+pm] - depth[iz+pm]

                # Eqs 64, A-11
                if use_ne_eval:
                    term = use_ne_eval("tRef*exp(-2*Gamb*ddepth)")
                    tRef = use_ne_eval("(rloc + term)/(1 + rloc*term)")
                else:
                    term = tRef*np.exp(-2*Gamb*ddepth)  # NOQA
                    tRef = (rloc + term)/(1 + rloc*term)

            # The global reflection coefficient is given back for all layers
            # between and including src- and rec-layer
            if lrec != lsrc and pm*iz <= minmax:
                Ref[:, :, izout, :] = tRef[:]
                izout -= pm

        # If lsrc = lrec, we just store the last values
        if lsrc == lrec and layer_count.size > 0:
            Ref = tRef

        # Store Ref in Rm/Rp
        if plus:
            Rm = Ref
        else:
            Rp = Ref

    # Return reflections (minus and plus)
    return Rm, Rp
r Calculate Rp Rm .
763
8
24,544
def angle_factor(angle, ab, msrc, mrec):
    """Return the angle-dependent factor for source-receiver config ``ab``.

    ab 33/66 are completely symmetric and therefore angle-independent.
    """
    # 33/66 are completely symmetric and hence independent of angle.
    if ab == 33:
        return np.ones(angle.size)

    # Work on a copy of the angles.
    ang = angle.copy()

    # Add pi if receiver is magnetic (reciprocity), but not if source is
    # electric, because then source and receiver are swapped, ME => EM:
    # G^me_ab(s, r, e, z) = -G^em_ba(r, s, e, z).
    if mrec and not msrc:
        ang += np.pi

    # Choose cos/sin and the angles that must evaluate to exactly zero.
    if ab in (11, 22, 15, 24, 13, 31, 26, 35):
        trig, zero1, zero2 = np.cos, np.pi/2, 3*np.pi/2
    else:
        trig, zero1, zero2 = np.sin, np.pi, 2*np.pi

    # These configurations depend on the double angle.
    if ab in (11, 22, 15, 24, 12, 21, 14, 25):
        ang *= 2

    factors = trig(ang)

    # Ensure cos([pi/2, 3pi/2]) and sin([pi, 2pi]) are exactly zero
    # (floating point issue).
    factors[np.isclose(np.abs(ang), zero1, 1e-10, 1e-14)] = 0
    factors[np.isclose(np.abs(ang), zero2, 1e-10, 1e-14)] = 0

    return factors
r Return the angle - dependent factor .
375
8
24,545
def versions(mode=None, add_pckg=None, ncol=4):
    """Deprecated function-style access to the ``Versions`` class.

    Kept for backwards compatibility; emits a DeprecationWarning and
    returns a ``Versions`` instance (``mode`` is ignored).
    """
    warnings.warn("\n Func `versions` is deprecated and will "
                  "be removed; use Class `Versions` instead.",
                  DeprecationWarning)
    return Versions(add_pckg, ncol)
r Old func - way of class Versions here for backwards compatibility .
78
14
24,546
def _repr_html_(self):
    """Return version information as an HTML table (Jupyter rich display)."""
    # Check ncol
    ncol = int(self.ncol)

    # Define html-styles
    border = "border: 2px solid #fff;'"

    def colspan(html, txt, ncol, nrow):
        r"""Print txt in a row spanning whole table."""
        html += " <tr>\n"
        html += " <td style='text-align: center; "
        if nrow == 0:
            # Title row: bold and bigger.
            html += "font-weight: bold; font-size: 1.2em; "
        elif nrow % 2 == 0:
            html += "background-color: #ddd;"
        html += border + " colspan='"
        html += str(2*ncol) + "'>%s</td>\n" % txt
        html += " </tr>\n"
        return html

    def cols(html, version, name, ncol, i):
        r"""Print package information in two cells."""

        # Check if we have to start a new row
        if i > 0 and i % ncol == 0:
            html += " </tr>\n"
            html += " <tr>\n"

        html += " <td style='text-align: right; background-color: "
        html += "#ccc; " + border + ">%s</td>\n" % version

        html += " <td style='text-align: left; "
        html += border + ">%s</td>\n" % name

        return html, i+1

    # Start html-table
    html = "<table style='border: 3px solid #ddd;'>\n"

    # Date and time info as title
    html = colspan(html, time.strftime('%a %b %d %H:%M:%S %Y %Z'), ncol, 0)

    # OS and CPUs
    html += " <tr>\n"
    html, i = cols(html, platform.system(), 'OS', ncol, 0)
    html, i = cols(html, multiprocessing.cpu_count(), 'CPU(s)', ncol, i)

    # Loop over packages
    for pckg in self._get_packages(self.add_pckg):
        html, i = cols(html, pckg.__version__, pckg.__name__, ncol, i)
    # Fill up the row with empty cells
    while i % ncol != 0:
        html += " <td style= " + border + "></td>\n"
        html += " <td style= " + border + "></td>\n"
        i += 1
    # Finish row
    html += " </tr>\n"

    # sys.version
    html = colspan(html, sys.version, ncol, 1)

    # mkl version (only if available)
    if mklinfo:
        html = colspan(html, mklinfo, ncol, 2)

    # Finish table
    html += "</table>"

    return html
HTML - rendered versions information .
638
6
24,547
def _get_packages(add_pckg):
    """Assemble the list of package modules to report.

    Always includes numpy, scipy and empymod; the optional modules
    (IPython, numexpr, matplotlib) are included when their module-level
    names are truthy; ``add_pckg`` (module, list/tuple thereof, or None)
    is appended at the end.
    """
    # Mandatory packages.
    pckgs = [numpy, scipy, empymod]

    # Optional packages: the module-level names are falsy if the import
    # failed at module load time.
    pckgs += [mod for mod in [IPython, numexpr, matplotlib] if mod]

    # Normalise add_pckg to a list and append it.
    if add_pckg is not None:
        if isinstance(add_pckg, tuple):
            add_pckg = list(add_pckg)
        if not isinstance(add_pckg, list):
            add_pckg = [add_pckg, ]
        pckgs += add_pckg

    return pckgs
r Create list of packages .
161
6
24,548
def tofile(self, path='filters'):
    """Save filter base and coefficients as ascii-files.

    Writes ``<savename>_base.txt`` plus one ``<savename>_<coeff>.txt``
    per coefficient attribute (j0/j1/sin/cos) this filter actually has.
    """
    name = self.savename

    # Make sure the target directory exists.
    path = os.path.abspath(path)
    os.makedirs(path, exist_ok=True)

    # Filter base.
    basefile = os.path.join(path, name + '_base.txt')
    with open(basefile, 'w') as fh:
        self.base.tofile(fh, sep="\n")

    # Filter coefficients.
    for coeff in ['j0', 'j1', 'sin', 'cos']:
        if hasattr(self, coeff):
            coefffile = os.path.join(path, name + '_' + coeff + '.txt')
            with open(coefffile, 'w') as fh:
                getattr(self, coeff).tofile(fh, sep="\n")
r Save filter values to ascii - files .
207
11
24,549
def fromfile(self, path='filters'):
    """Load filter base and coefficients from ascii-files (see ``tofile``)."""
    name = self.savename
    path = os.path.abspath(path)

    # Filter base.
    basefile = os.path.join(path, name + '_base.txt')
    with open(basefile, 'r') as fh:
        self.base = np.fromfile(fh, sep="\n")

    # Filter coefficients: load whichever files exist.
    for coeff in ['j0', 'j1', 'sin', 'cos']:
        coefffile = os.path.join(path, name + '_' + coeff + '.txt')
        if os.path.isfile(coefffile):
            with open(coefffile, 'r') as fh:
                setattr(self, coeff, np.fromfile(fh, sep="\n"))

    # Spacing factor of the (log-spaced) base.
    self.factor = np.around(np.average(self.base[1:]/self.base[:-1]), 15)
r Load filter values from ascii - files .
229
11
24,550
def fht(zsrc, zrec, lsrc, lrec, off, factAng, depth, ab, etaH, etaV, zetaH,
        zetaV, xdirect, fhtarg, use_ne_eval, msrc, mrec):
    """Hankel transform using the Digital Linear Filter (DLF) method.

    Returns ``(fEM, 1, True)``: the field, the kernel count, and the
    convergence flag (DLF always "converges", hence True).
    """
    # 1. Unpack the Hankel-DLF arguments.
    fhtfilt, pts_per_dec, lambd, int_pts = fhtarg

    # 2. Wavenumber-domain kernels.
    PJ = kernel.wavenumber(zsrc, zrec, lsrc, lrec, depth, etaH, etaV, zetaH,
                           zetaV, lambd, ab, xdirect, msrc, mrec, use_ne_eval)

    # 3. Digital linear filter.
    fEM = dlf(PJ, lambd, off, fhtfilt, pts_per_dec, factAng=factAng, ab=ab,
              int_pts=int_pts)

    return fEM, 1, True
r Hankel Transform using the Digital Linear Filter method .
234
11
24,551
def hquad(zsrc, zrec, lsrc, lrec, off, factAng, depth, ab, etaH, etaV,
          zetaH, zetaV, xdirect, quadargs, use_ne_eval, msrc, mrec):
    """Hankel transform using adaptive quadrature (QUADPACK).

    The wavenumber-domain kernels are computed once on a log-spaced
    lambda-grid, interpolated with splines, and integrated per offset
    with ``quad``. Returns ``(fEM, 1, converged)``.
    """
    # Get quadargs
    rtol, atol, limit, a, b, pts_per_dec = quadargs

    # Get required lambdas
    la = np.log10(a)
    lb = np.log10(b)
    ilambd = np.logspace(la, lb, (lb-la)*pts_per_dec + 1)

    # Call the kernel
    PJ0, PJ1, PJ0b = kernel.wavenumber(zsrc, zrec, lsrc, lrec, depth, etaH,
                                       etaV, zetaH, zetaV,
                                       np.atleast_2d(ilambd), ab, xdirect,
                                       msrc, mrec, use_ne_eval)

    # Interpolation in wavenumber domain: Has to be done separately on each
    # PJ, in order to work with multiple offsets which have different angles.
    # We check if the kernels are zero, to avoid unnecessary calculations.
    if PJ0 is not None:
        sPJ0r = iuSpline(np.log(ilambd), PJ0.real)
        sPJ0i = iuSpline(np.log(ilambd), PJ0.imag)
    else:
        sPJ0r = None
        sPJ0i = None

    if PJ1 is not None:
        sPJ1r = iuSpline(np.log(ilambd), PJ1.real)
        sPJ1i = iuSpline(np.log(ilambd), PJ1.imag)
    else:
        sPJ1r = None
        sPJ1i = None

    if PJ0b is not None:
        sPJ0br = iuSpline(np.log(ilambd), PJ0b.real)
        sPJ0bi = iuSpline(np.log(ilambd), PJ0b.imag)
    else:
        sPJ0br = None
        sPJ0bi = None

    # Pre-allocate output array
    fEM = np.zeros(off.size, dtype=complex)
    conv = True

    # Input-dictionary for quad
    iinp = {'a': a, 'b': b, 'epsabs': atol, 'epsrel': rtol, 'limit': limit}

    # Loop over offsets
    for i in range(off.size):
        fEM[i], tc = quad(sPJ0r, sPJ0i, sPJ1r, sPJ1i, sPJ0br, sPJ0bi, ab,
                          off[i], factAng[i], iinp)
        # A single non-converged offset marks the whole result.
        conv *= tc

    # Return the electromagnetic field
    # Second argument (1) is the kernel count, last argument is only for QWE.
    return fEM, 1, conv
r Hankel Transform using the QUADPACK library .
671
12
24,552
def ffht(fEM, time, freq, ftarg):
    """Fourier transform using the Digital Linear Filter method.

    Returns ``(tEM, True)``; the second value only exists for consistency
    with QWE.
    """
    # Unpack the Fourier-DLF arguments: filter, pts_per_dec, and kind
    # (sine `sin` or cosine `cos`).
    ffhtfilt, pts_per_dec, kind = ftarg[0], ftarg[1], ftarg[2]

    # Standard DLF requires shape (time.size, -1).
    if pts_per_dec == 0:
        fEM = fEM.reshape(time.size, -1)

    # Digital linear filter over angular frequencies.
    tEM = dlf(fEM, 2*np.pi*freq, time, ffhtfilt, pts_per_dec, kind=kind)

    return tEM, True
r Fourier Transform using the Digital Linear Filter method .
159
11
24,553
def fft(fEM, time, freq, ftarg):
    """Fourier transform using the Fast Fourier Transform.

    Returns ``(tEM, True)``; the second value only exists for consistency
    with QWE.
    """
    # Unpack FFT arguments.
    dfreq, nfreq, ntot, pts_per_dec = ftarg

    # With pts_per_dec the responses first have to be interpolated onto
    # the regular frequencies required by the FFT.
    if pts_per_dec:
        spline_re = iuSpline(np.log(freq), fEM.real)
        spline_im = iuSpline(np.log(freq), fEM.imag)
        freq = np.arange(1, nfreq+1)*dfreq
        fEM = spline_re(np.log(freq)) + 1j*spline_im(np.log(freq))

    # Pad the frequency result up to ntot values.
    fEM = np.pad(fEM, (0, ntot-nfreq), 'linear_ramp')

    # Inverse FFT of the Hermitian-extended spectrum (real result).
    spec = np.r_[fEM[1:], 0, fEM[::-1].conj()]
    stEM = 2*ntot*fftpack.fftshift(fftpack.ifft(spec).real*dfreq, 0)

    # Interpolate onto the requested times.
    dt = 1/(2*ntot*dfreq)
    interp = iuSpline(np.linspace(-ntot, ntot-1, 2*ntot)*dt, stEM)
    tEM = interp(time)/2*np.pi  # (Multiplication of 2/pi in model.tem)

    return tEM, True
r Fourier Transform using the Fast Fourier Transform .
385
11
24,554
def quad(sPJ0r, sPJ0i, sPJ1r, sPJ1i, sPJ0br, sPJ0bi, ab, off, factAng, iinp):
    """Quadrature for the Hankel transform (``scipy.integrate.quad``).

    Parameters
    ----------
    sPJ0r, sPJ0i, sPJ1r, sPJ1i, sPJ0br, sPJ0bi : callable or None
        Real/imaginary interpolators of the wavenumber-domain kernels PJ0,
        PJ1, and PJ0b, evaluated at log-wavenumbers; None if the
        corresponding kernel is zero for this ``ab``.
    ab : int
        Source-receiver configuration.
    off, factAng : float
        Offset and angle-factor of this receiver.
    iinp : dict
        Keyword-arguments for ``scipy.integrate.quad``
        (a, b, epsabs, epsrel, limit); ``full_output`` is set here.

    Returns
    -------
    out : complex
        Integrated EM response.
    conv : bool
        False if any of the quadratures did not converge.
    """

    # Define the quadrature kernels.
    def quad_PJ0(klambd, sPJ0, koff):
        r"""Quadrature for PJ0."""
        return sPJ0(np.log(klambd))*special.j0(koff*klambd)

    def quad_PJ1(klambd, sPJ1, ab, koff, kang):
        r"""Quadrature for PJ1."""
        tP1 = kang*sPJ1(np.log(klambd))
        if ab in [11, 12, 21, 22, 14, 24, 15, 25]:  # Because of J2
            # J2(kr) = 2/(kr)*J1(kr) - J0(kr)
            tP1 /= koff
        return tP1*special.j1(koff*klambd)

    def quad_PJ0b(klambd, sPJ0b, koff, kang):
        r"""Quadrature for PJ0b."""
        return kang*sPJ0b(np.log(klambd))*special.j0(koff*klambd)

    # Pre-allocate output.
    conv = True
    out = np.array(0.0+0.0j)

    # With full_output=1, integrate.quad returns a 4th element (an
    # explanation message) if and only if it did not converge.
    iinp['full_output'] = 1

    # Carry out quadrature for required kernels.
    if sPJ0r is not None:
        re = integrate.quad(quad_PJ0, args=(sPJ0r, off), **iinp)
        im = integrate.quad(quad_PJ0, args=(sPJ0i, off), **iinp)
        out += re[0] + 1j*im[0]
        # BUGFIX: was `(len(re) or len(im)) > 3`, which evaluates as
        # `len(re) > 3` and hence never checked the imaginary quadrature.
        if len(re) > 3 or len(im) > 3:
            conv = False

    if sPJ1r is not None:
        re = integrate.quad(quad_PJ1, args=(sPJ1r, ab, off, factAng), **iinp)
        im = integrate.quad(quad_PJ1, args=(sPJ1i, ab, off, factAng), **iinp)
        out += re[0] + 1j*im[0]
        if len(re) > 3 or len(im) > 3:
            conv = False

    if sPJ0br is not None:
        re = integrate.quad(quad_PJ0b, args=(sPJ0br, off, factAng), **iinp)
        im = integrate.quad(quad_PJ0b, args=(sPJ0bi, off, factAng), **iinp)
        out += re[0] + 1j*im[0]
        if len(re) > 3 or len(im) > 3:
            conv = False

    # Collect the results.
    return out, conv
r Quadrature for Hankel transform .
741
9
24,555
def get_spline_values(filt, inp, nr_per_dec=None):
    """Return the calculation points for the (lagged/splined) DLF.

    nr_per_dec == 0: standard DLF; nr_per_dec < 0: lagged convolution DLF;
    nr_per_dec > 0: splined DLF with that many points per decade.
    Returns ``(out, new_inp)``: the (2D) output values and the input values
    at which the DLF result has to be evaluated/interpolated.

    # NOTE(review): the default ``None`` would fail the ``< 0`` comparison
    # on Python 3 — callers appear to always pass an int; confirm.
    """
    # Standard DLF
    if nr_per_dec == 0:
        return filt.base/inp[:, None], inp

    # Get min and max required out-values (depends on filter and inp-value)
    outmax = filt.base[-1]/inp.min()
    outmin = filt.base[0]/inp.max()

    # Get pts_per_dec and define number of out-values, depending on
    # pts_per_dec.
    if nr_per_dec < 0:  # Lagged Convolution DLF
        # Spacing fixed by the filter-base factor (natural-log decades).
        pts_per_dec = 1/np.log(filt.factor)
        # Calculate number of output values
        nout = int(np.ceil(np.log(outmax/outmin)*pts_per_dec) + 1)
    else:               # Splined DLF
        pts_per_dec = nr_per_dec
        # Calculate number of output values
        nout = int(np.ceil(np.log10(outmax/outmin)*pts_per_dec) + 1)

    # Min-nout check, because the cubic InterpolatedUnivariateSpline needs
    # at least 4 points.
    if nr_per_dec < 0:  # Lagged Convolution DLF
        # Lagged Convolution DLF interpolates in output domain, so `new_inp`
        # needs to have at least 4 points.
        if nout-filt.base.size < 3:
            nout = filt.base.size + 3
    else:               # Splined DLF
        # Splined DLF interpolates in input domain, so `out` needs to have
        # at least 4 points. This should always be the case, we're just
        # overly cautious here.
        if nout < 4:
            nout = 4

    if nr_per_dec < 0:
        # Calculate output values
        out = np.exp(np.arange(np.log(outmin), np.log(outmin) +
                     nout/pts_per_dec, 1/pts_per_dec))

        # If lagged convolution is used, we calculate the new input values,
        # as spline is carried out in the input domain.
        new_inp = inp.max()*np.exp(-np.arange(nout - filt.base.size + 1) /
                                   pts_per_dec)
    else:
        # Calculate output values
        out = 10**np.arange(np.log10(outmin), np.log10(outmin) +
                            nout/pts_per_dec, 1/pts_per_dec)

        # If spline is used, interpolation is carried out in output domain
        # and we calculate the intermediate values.
        new_inp = filt.base/inp[:, None]

    # Return output values
    return np.atleast_2d(out), new_inp
r Return required calculation points .
651
6
24,556
def fhti(rmin, rmax, n, q, mu):
    """Return the parameters required for FFTLog.

    rmin/rmax are the log10-bounds of the periodic interval, n the number
    of points, q the bias, and mu the order of the transform.
    Returns ``(freq, tcalc, dlnr, kr, rk)``.
    """
    # Central point log10(r_c) of the periodic interval.
    logrc = (rmin + rmax)/2

    # Central index (1/2 integral if n is even).
    nc = (n + 1)/2.

    # Log-spacing of points.
    dlogr = (rmax - rmin)/n
    dlnr = dlogr*np.log(10.)

    # Low-ringing kr via the phase of the loggamma terms.
    y = np.pi*1j/(2.0*dlnr)
    zp = special.loggamma((mu + 1.0 + q)/2.0 + y)
    zm = special.loggamma((mu + 1.0 - q)/2.0 + y)
    arg = np.log(2.0)/dlnr + (zp.imag + zm.imag)/np.pi
    kr = np.exp((arg - np.round(arg))*dlnr)

    # Required input x-values; angular frequency -> frequency.
    grid = np.arange(1, n+1) - nc
    freq = 10**(logrc + grid*dlogr)/(2*np.pi)

    # Output values with the adjusted (low-ringing) kr.
    logkc = np.log10(kr) - logrc
    tcalc = 10**(logkc + grid*dlogr)

    # rk = r_c/k_r; adjusted for the Fourier-transform scaling.
    rk = 10**(logrc - logkc)*np.pi/2

    return freq, tcalc, dlnr, kr, rk
r Return parameters required for FFTLog .
395
9
24,557
def _actual_get_cpu_info_from_cpuid(queue):
    """Read the CPU info from the CPUID register and put it on ``queue``.

    Warning! This has the potential to crash the Python runtime; only
    call it through the safe wrapper that runs it in another process.
    The result is put on the queue base64-encoded; an empty dict is put
    for non-X86 architectures or when SELinux is enforcing.
    """
    # Pipe all output to nothing
    sys.stdout = open(os.devnull, 'w')
    sys.stderr = open(os.devnull, 'w')

    # Get the CPU arch and bits
    arch, bits = _parse_arch(DataSource.arch_string_raw)

    # Return none if this is not an X86 CPU
    if not arch in ['X86_32', 'X86_64']:
        queue.put(_obj_to_b64({}))
        return

    # Return none if SE Linux is in enforcing mode
    cpuid = CPUID()
    if cpuid.is_selinux_enforcing:
        queue.put(_obj_to_b64({}))
        return

    # Get the cpu info from the CPUID register
    max_extension_support = cpuid.get_max_extension_support()
    cache_info = cpuid.get_cache(max_extension_support)
    info = cpuid.get_info()

    processor_brand = cpuid.get_processor_brand(max_extension_support)

    # Get the Hz and scale (measured)
    hz_actual = cpuid.get_raw_hz()
    hz_actual = _to_decimal_string(hz_actual)

    # Get the Hz and scale (advertised in the brand string)
    hz_advertised, scale = _parse_cpu_brand_string(processor_brand)

    # Note: rebinds `info`; the raw CPUID dict is consumed above.
    info = {
        'vendor_id_raw': cpuid.get_vendor_id(),
        'hardware_raw': '',
        'brand_raw': processor_brand,

        'hz_advertised_friendly': _hz_short_to_friendly(hz_advertised, scale),
        'hz_actual_friendly': _hz_short_to_friendly(hz_actual, 0),
        'hz_advertised': _hz_short_to_full(hz_advertised, scale),
        'hz_actual': _hz_short_to_full(hz_actual, 0),

        'l2_cache_size': _to_friendly_bytes(cache_info['size_kb']),
        'l2_cache_line_size': cache_info['line_size_b'],
        'l2_cache_associativity': hex(cache_info['associativity']),

        'stepping': info['stepping'],
        'model': info['model'],
        'family': info['family'],
        'processor_type': info['processor_type'],
        'extended_model': info['extended_model'],
        'extended_family': info['extended_family'],
        'flags': cpuid.get_flags(max_extension_support)
    }

    # Drop empty/falsy fields before sending.
    info = {k: v for k, v in info.items() if v}
    queue.put(_obj_to_b64(info))
Warning! This function has the potential to crash the Python runtime . Do not call it directly . Use the _get_cpu_info_from_cpuid function instead . It will safely call this function in another process .
678
45
24,558
def get_cpu_info_json():
    """Return the CPU info for this OS as a JSON string.

    Runs the collection in a subprocess unless running frozen
    (pyinstaller), because multiprocessing has a design flaw that causes
    non-main programs to run multiple times on Windows.
    """
    import json

    # If running under pyinstaller, run normally.
    if getattr(sys, 'frozen', False):
        info = _get_cpu_info_internal()
        return "{0}".format(json.dumps(info))

    # If not running under pyinstaller, run in another process.
    from subprocess import Popen, PIPE

    command = [sys.executable, __file__, '--json']
    proc = Popen(command, stdout=PIPE, stderr=PIPE, stdin=PIPE)
    output = proc.communicate()[0]

    if proc.returncode != 0:
        return "{}"

    if not IS_PY2:
        output = output.decode(encoding='UTF-8')

    return output
Returns the CPU info by using the best sources of information for your OS . Returns the result in a json string
215
22
24,559
def get_cpu_info():
    """Return the CPU info for this OS as a dict.

    Delegates to ``get_cpu_info_json`` and converts the JSON back to
    Python objects with non-unicode strings.
    """
    import json

    return json.loads(get_cpu_info_json(), object_hook=_utf_to_str)
Returns the CPU info by using the best sources of information for your OS . Returns the result in a dict
53
21
24,560
def _verbs_with_subjects ( doc ) : # TODO: UNUSED verb_subj = [ ] for possible_subject in doc : if ( possible_subject . dep_ == 'nsubj' and possible_subject . head . pos_ == 'VERB' ) : verb_subj . append ( [ possible_subject . head , possible_subject ] ) return verb_subj
Given a spacy document return the verbs that have subjects
87
11
24,561
def mangle_agreement ( correct_sentence ) : # # Examples # # Back in the 1800s, people were much shorter and much stronger. # This sentence begins with the introductory phrase, 'back in the 1800s' # which means that it should have the past tense verb. Any other verb would # be incorrect. # # # Jack and jill went up the hill. # This sentence is different; 'go' would also be correct. If it began with # 'Yesterday', a single-word introductory phrase requiring no comma, only # 'went' would be acceptable. # # # The man in the checkered shirt danced his warrior dance to show that # he was the most dominant male in the room. # This sentence has multiple verbs. If the sentence ended at the word dance, # changing 'danced' to 'dances' would be acceptable, but since the sentence # continues we cannot make this change -- 'was' agrees with 'danced' but not # with 'dances'. This is a shifty tense error, a classic subject verb # agreement error. # # # Our Method # # Right now, we will assume that any change in verb form of a single verb in # a sentence is incorrect. As demonstrated above, this is not always true. # We hope that since any model created off of this data will use a # confidence interval to determine likelihood of a subject-verb agreement # error, that some number can be found for which the model excels. # # It would also be possible to use a rule based learner to evaluate single # verb sentences, and only evaluating more complex sentences with the # tensorflow model. bad_sents = [ ] doc = nlp ( correct_sentence ) verbs = [ ( i , v ) for ( i , v ) in enumerate ( doc ) if v . tag_ . startswith ( 'VB' ) ] for i , v in verbs : for alt_verb in lexeme ( doc [ i ] . text ) : if alt_verb == doc [ i ] . text : continue # Same as the original, skip it if ( tenses ( alt_verb ) == tenses ( v . text ) or ( alt_verb . startswith ( v . text ) and alt_verb . endswith ( "n't" ) ) ) : continue # Negated version of the original, skip it new_sent = str ( doc [ : i ] ) + " {} " . 
format ( alt_verb ) + str ( doc [ i + 1 : ] ) new_sent = new_sent . replace ( ' ,' , ',' ) # fix space before comma bad_sents . append ( new_sent ) return bad_sents
Given a correct sentence return a sentence or sentences with a subject verb agreement error
571
15
24,562
def _build_trigram_indices ( trigram_index ) : result = { } trigram_count = 0 for key , val in csv . reader ( open ( trigram_index ) ) : result [ key ] = int ( val ) trigram_count += 1 return result , trigram_count
Build a dictionary of trigrams and their indices from a csv
68
13
24,563
def _begins_with_one_of ( sentence , parts_of_speech ) : doc = nlp ( sentence ) if doc [ 0 ] . tag_ in parts_of_speech : return True return False
Return True if the sentence or fragment begins with one of the parts of speech in the list else False
46
20
24,564
def get_language_tool_feedback ( sentence ) : payload = { 'language' : 'en-US' , 'text' : sentence } try : r = requests . post ( LT_SERVER , data = payload ) except requests . exceptions . ConnectionError as e : raise requests . exceptions . ConnectionError ( '''The languagetool server is not running. Try starting it with "ltserver" ''' ) if r . status_code >= 200 and r . status_code < 300 : return r . json ( ) . get ( 'matches' , [ ] ) return [ ]
Get matches from languagetool
128
7
24,565
def is_participle_clause_fragment ( sentence ) : # short circuit if sentence or fragment doesn't start with a participle # past participles can sometimes look like adjectives -- ie, Tired if not _begins_with_one_of ( sentence , [ 'VBG' , 'VBN' , 'JJ' ] ) : return 0.0 if _begins_with_one_of ( sentence , [ 'JJ' ] ) : doc = nlp ( sentence ) fw = [ w for w in doc ] [ 0 ] # Beautiful toy birds if fw . dep_ == 'amod' : return 0.0 # short circuit if sentence starts with a gerund and the gerund is the # subject. if _begins_with_one_of ( sentence , [ 'VBG' ] ) : doc = nlp ( sentence ) fw = [ w for w in doc ] [ 0 ] # Running is fun if fw . dep_ . endswith ( 'subj' ) : return 0.0 fc = [ c for c in doc . noun_chunks ] # Dancing boys can never sing if str ( fw ) in str ( fc ) : return 0.0 positive_prob = models [ 'participle' ] . predict ( [ _text_to_vector ( sentence , trigram2idx [ 'participle' ] , trigram_count [ 'participle' ] ) ] ) [ 0 ] [ 1 ] return float ( positive_prob )
Supply a sentence or fragment and recieve a confidence interval
332
12
24,566
def check ( sentence ) : # How we decide what to put as the human readable feedback # # Our order of prefence is, # # 1. Spelling errors. # - A spelling error can change the sentence meaning # 2. Subject-verb agreement errors # 3. Subordinate conjunction starting a sentence # 4. Participle phrase fragment # 5. Other errors result = Feedback ( ) is_missing_verb = detect_missing_verb ( sentence ) is_infinitive = detect_infinitive_phrase ( sentence ) is_participle = is_participle_clause_fragment ( sentence ) lang_tool_feedback = get_language_tool_feedback ( sentence ) subject_and_verb_agree = get_subject_verb_agreement_feedback ( sentence ) #### if is_missing_verb : # Lowest priority result . matches [ 'missing_verb' ] = True result . human_readable = MISSING_VERB_ADVICE . replace ( '\n' , '' ) result . primary_error = 'MISSING_VERB_ERROR' result . specific_error = 'MISSING_VERB' if is_participle > .5 : result . matches [ 'participle_phrase' ] = is_participle result . human_readable = PARTICIPLE_FRAGMENT_ADVICE . replace ( '\n' , '' ) result . primary_error = 'FRAGMENT_ERROR' result . specific_error = 'PARTICIPLE_PHRASE' if lang_tool_feedback : result . matches [ 'lang_tool' ] = lang_tool_feedback for ltf in lang_tool_feedback : if ltf [ 'rule' ] [ 'id' ] == 'SENTENCE_FRAGMENT' : result . human_readable = lang_tool_feedback [ 0 ] [ 'message' ] result . primary_error = 'FRAGMENT_ERROR' result . specific_error = 'SUBORDINATE_CLAUSE' if is_infinitive : result . matches [ 'infinitive_phrase' ] = True result . human_readable = INFINITIVE_PHRASE_ADVICE . replace ( '\n' , '' ) result . primary_error = 'INFINITIVE_PHRASE_ERROR' result . specific_error = 'INFINITIVE_PHRASE' if not subject_and_verb_agree : result . matches [ 'subject_verb_agreement' ] = subject_and_verb_agree result . human_readable = SUBJECT_VERB_AGREEMENT_ADVICE . replace ( '\n' , '' ) result . primary_error = 'SUBJECT_VERB_AGREEMENT_ERROR' result . 
specific_error = 'SUBJECT_VERB_AGREEMENT' if lang_tool_feedback : # Highest priority (spelling, other lang tool errors) result . matches [ 'lang_tool' ] = lang_tool_feedback for ltf in lang_tool_feedback : if ltf [ 'rule' ] [ 'id' ] == 'MORFOLOGIK_RULE_EN_US' : result . human_readable = ltf [ 'message' ] result . primary_error = 'SPELLING_ERROR' result . specific_error = 'SPELLING_ERROR' if not result . primary_error : result . human_readable = ltf [ 'message' ] result . primary_error = 'OTHER_ERROR' result . specific_error = ltf [ 'rule' ] [ 'id' ] #### if not result . matches : result . human_readable = STRONG_SENTENCE_ADVICE . replace ( '\n' , '' ) return result
Supply a sentence or fragment and recieve feedback
823
10
24,567
def list_submissions ( ) : submissions = [ ] try : submissions = session . query ( Submission ) . all ( ) except SQLAlchemyError as e : session . rollback ( ) return render_template ( 'list_submissions.html' , submissions = submissions )
List the past submissions with information about them
58
8
24,568
def get_submissions ( ) : print ( request . args . to_dict ( ) ) print ( request . args . get ( 'search[value]' ) ) print ( request . args . get ( 'draw' , 1 ) ) # submissions = session.query(Submission).all() if request . args . get ( 'correct_filter' , 'all' ) == 'all' : correct_filter = [ True , False ] elif request . args [ 'correct_filter' ] == 'correct' : correct_filter = [ True ] else : correct_filter = [ False ] if request . args . get ( 'order[0][column]' , '0' ) == '0' : column = 'id' elif request . args [ 'order[0][column]' ] == '1' : column = 'text' else : column = 'primary_error' order_str = "{} {}" . format ( column , request . args . get ( 'order[0][dir]' , 'desc' ) ) search_val = request . args . get ( 'search[value]' ) draw = request . args . get ( 'draw' , 1 ) filtered_len = session . query ( Submission ) . filter ( Submission . text . startswith ( search_val ) ) . filter ( Submission . correct . in_ ( correct_filter ) ) . count ( ) subs = session . query ( Submission ) . filter ( Submission . text . startswith ( search_val ) ) . filter ( Submission . correct . in_ ( correct_filter ) ) . order_by ( order_str ) . offset ( request . args . get ( 'start' , 0 ) ) . limit ( request . args . get ( 'length' , 10 ) ) . all ( ) submissions = { 'draw' : draw , 'recordsTotal' : 0 , 'recordsFiltered' : 0 , 'data' : [ ] } i = 0 for i , submission in enumerate ( subs ) : submissions [ 'data' ] . append ( [ submission . id , submission . text , submission . primary_error , submission . correct ] ) submissions [ 'recordsTotal' ] = session . query ( Submission ) . count ( ) submissions [ 'recordsFiltered' ] = filtered_len return jsonify ( submissions )
API endpoint to get submissions in JSON format
493
8
24,569
def check_sentence ( ) : text = '' if request . method == 'POST' : text = request . form [ 'text' ] if not text : error = 'No input' flash_message = error else : fb = check ( request . form [ 'text' ] ) correct = False if request . form . get ( 'is_correct' ) and not fb . primary_error : correct = True elif not request . form . get ( 'is_correct' ) and fb . primary_error : correct = True sub = Submission ( text = text , correct = correct , primary_error = fb . primary_error , specific_error = fb . specific_error ) session . add ( sub ) session . commit ( ) # TODO: remove the hack below if not fb . primary_error : fb . human_readable = "No errors were found." flash_message = fb . human_readable flash ( flash_message ) return render_template ( 'check_sentence.html' , text = text )
Sole porcupine endpoint
224
6
24,570
def raise_double_modal_error ( verb_phrase_doc ) : prev_word = None for word in verb_phrase : if word . tag_ == 'MD' and prev_word . tag == 'MD' : raise ( 'DoubleModalError' ) prev_word = word
A modal auxilary verb should not follow another modal auxilary verb
63
17
24,571
def raise_modal_error ( verb_phrase_doc ) : verb_phrase = verb_phrase_doc . text . lower ( ) bad_strings = [ 'should had' , 'should has' , 'could had' , 'could has' , 'would ' 'had' , 'would has' ] [ "should" , "could" , "would" ] for bs in bad_strings : if bs in verb_phrase : raise ( 'ShouldCouldWouldError' )
Given a verb phrase raise an error if the modal auxilary has an issue with it
105
19
24,572
def split_infinitive_warning ( sentence_str ) : sent_doc = textacy . Doc ( sentence_str , lang = 'en_core_web_lg' ) inf_pattern = r'<PART><ADV><VERB>' # To aux/auxpass* csubj infinitives = textacy . extract . pos_regex_matches ( sent_doc , inf_pattern ) for inf in infinitives : if inf [ 0 ] . text . lower ( ) != 'to' : continue if inf [ - 1 ] . tag_ != 'VB' : continue return 'SplitInfinitiveWarning'
Return a warning for a split infinitive else None
138
11
24,573
def raise_infinitive_error ( sentence_str ) : sent_doc = textacy . Doc ( sentence_str , lang = 'en_core_web_lg' ) inf_pattern = r'<PART|ADP><VERB>' # To aux/auxpass* csubj infinitives = textacy . extract . pos_regex_matches ( sent_doc , inf_pattern ) for inf in infinitives : if inf [ 0 ] . text . lower ( ) != 'to' : continue if inf [ - 1 ] . tag_ != 'VB' : raise Exception ( 'InfinitivePhraseError' )
Given a string check that all infinitives are properly formatted
141
12
24,574
def drop_modifiers ( sentence_str ) : tdoc = textacy . Doc ( sentence_str , lang = 'en_core_web_lg' ) new_sent = tdoc . text unusual_char = '形' for tag in tdoc : if tag . dep_ . endswith ( 'mod' ) : # Replace the tag new_sent = new_sent [ : tag . idx ] + unusual_char * len ( tag . text ) + new_sent [ tag . idx + len ( tag . text ) : ] new_sent = new_sent . replace ( unusual_char , '' ) new_sent = textacy . preprocess . normalize_whitespace ( new_sent ) return new_sent
Given a string drop the modifiers and return a string without them
161
12
24,575
def cluster ( list_of_texts , num_clusters = 3 ) : pipeline = Pipeline ( [ ( "vect" , CountVectorizer ( ) ) , ( "tfidf" , TfidfTransformer ( ) ) , ( "clust" , KMeans ( n_clusters = num_clusters ) ) ] ) try : clusters = pipeline . fit_predict ( list_of_texts ) except ValueError : clusters = list ( range ( len ( list_of_texts ) ) ) return clusters
Cluster a list of texts into a predefined number of clusters .
117
14
24,576
def find_topics ( token_lists , num_topics = 10 ) : dictionary = Dictionary ( token_lists ) print ( 'Number of unique words in original documents:' , len ( dictionary ) ) dictionary . filter_extremes ( no_below = 2 , no_above = 0.7 ) print ( 'Number of unique words after removing rare and common words:' , len ( dictionary ) ) corpus = [ dictionary . doc2bow ( tokens ) for tokens in token_lists ] model = LdaModel ( corpus = corpus , id2word = dictionary , num_topics = num_topics , chunksize = 100 , passes = 5 , random_state = 1 ) print_topics ( model ) return model , dictionary
Find the topics in a list of texts with Latent Dirichlet Allocation .
155
17
24,577
def fetch_bookshelf ( start_url , output_dir ) : # make output directory try : os . mkdir ( OUTPUT_DIR + output_dir ) except OSError as e : raise ( e ) # fetch page r = requests . get ( start_url ) # extract links soup = bs ( r . text , 'html.parser' ) book_links = soup . find_all ( class_ = re . compile ( "extiw" ) ) new_links = [ ] for el in book_links : link = el [ 'href' ] title = el . text bookid = link . split ( '/' ) [ - 1 ] if bookid . isdigit ( ) : new_link = NEW_LINK_BASE . format ( bookid , bookid ) new_links . append ( [ title , new_link ] ) # save links as books for link_tup in new_links : time . sleep ( .10 ) # be nice to project gutenberg r1 = requests . get ( link_tup [ 1 ] ) new_filename = link_tup [ 0 ] . lower ( ) . replace ( ' ' , '-' ) . replace ( '\n' , '-' ) new_new_filename = '' for char in new_filename : if char in 'abcdefghijklmnopqrstuvwxyz-' : new_new_filename += char new_filename = new_new_filename [ : MAX_FILENAME_LEN ] + '.txt' with open ( OUTPUT_DIR + output_dir + '/' + new_filename , 'w+' ) as output_file : output_file . write ( r1 . text ) return None
Fetch all the books off of a gutenberg project bookshelf page
371
15
24,578
def lemmatize ( text , lowercase = True , remove_stopwords = True ) : doc = nlp ( text ) if lowercase and remove_stopwords : lemmas = [ t . lemma_ . lower ( ) for t in doc if not ( t . is_stop or t . orth_ . lower ( ) in STOPWORDS ) ] elif lowercase : lemmas = [ t . lemma_ . lower ( ) for t in doc ] elif remove_stopwords : lemmas = [ t . lemma_ for t in doc if not ( t . is_stop or t . orth_ . lower ( ) in STOPWORDS ) ] else : lemmas = [ t . lemma_ for t in doc ] return lemmas
Return the lemmas of the tokens in a text .
167
12
24,579
def inflate ( deflated_vector ) : dv = json . loads ( deflated_vector ) #result = np.zeros(dv['reductions']) # some claim vector length 5555, others #5530. this could have occurred doing remote computations? or something. # anyhow, we will use 5555. Let's just hard code it. Gosh darnit. result = np . zeros ( 5555 ) # some claim vector length 5555, others for n in dv [ 'indices' ] : result [ int ( n ) ] = dv [ 'indices' ] [ n ] #print("Inflated vector. Length", len(result)) return result
Given a defalated vector inflate it into a np array and return it
150
16
24,580
def text_to_vector ( sent_str ) : r = requests . get ( "{}/sva/vector" . format ( VECTORIZE_API ) , params = { 's' : sent_str } ) return inflate ( r . text )
Given a string get it s defalted vector inflate it then return the inflated vector
57
17
24,581
def detect_missing_verb ( sentence ) : # TODO: should this be relocated? doc = nlp ( sentence ) for w in doc : if w . tag_ . startswith ( 'VB' ) and w . dep_ == 'ROOT' : return False # looks like there is at least 1 main verb return True
Return True if the sentence appears to be missing a main verb
70
12
24,582
def detect_infinitive_phrase ( sentence ) : # eliminate sentences without to if not 'to' in sentence . lower ( ) : return False doc = nlp ( sentence ) prev_word = None for w in doc : # if statement will execute exactly once if prev_word == 'to' : if w . dep_ == 'ROOT' and w . tag_ . startswith ( 'VB' ) : return True # this is quite likely to be an infinitive phrase else : return False prev_word = w . text . lower ( )
Given a string return true if it is an infinitive phrase fragment
118
14
24,583
def perform_srl ( responses , prompt ) : predictor = Predictor . from_path ( "https://s3-us-west-2.amazonaws.com/allennlp/models/srl-model-2018.05.25.tar.gz" ) sentences = [ { "sentence" : prompt + " " + response } for response in responses ] output = predictor . predict_batch_json ( sentences ) full_output = [ { "sentence" : prompt + response , "response" : response , "srl" : srl } for ( response , srl ) in zip ( responses , output ) ] return full_output
Perform semantic role labeling on a list of responses given a prompt .
140
14
24,584
def detokenize ( s ) : print ( s ) s = re . sub ( "\s+([;:,\.\?!])" , "\\1" , s ) s = re . sub ( "\s+(n't)" , "\\1" , s ) return s
Detokenize a string by removing spaces before punctuation .
59
12
24,585
def start ( self ) : if self . threadPool : self . threadPool . addTask ( self ) # Lets other threads have a chance to run time . sleep ( 0 ) else : raise TaskError ( "start(): nothing to start for task %s" % self )
This method starts a task executing and returns immediately . Subclass should override this method if it has an asynchronous way to start the task and return immediately .
57
30
24,586
def init_and_start ( self , taskParent , override = { } ) : tag = self . initialize ( taskParent , override = override ) self . start ( ) return tag
Convenience method to initialize and start a task .
38
11
24,587
def wait ( self , timeout = None ) : self . ev_done . wait ( timeout = timeout ) if not self . ev_done . is_set ( ) : raise TaskTimeout ( "Task %s timed out." % self ) # --> self.result is set # If it is an exception, then raise it in this waiter if isinstance ( self . result , Exception ) : raise self . result # Release waiters and perform callbacks # done() has already been called, because of self.ev_done check # "asynchronous" tasks should could call done() here #self.done(self.result) return self . result
This method waits for an executing task to finish . Subclass can override this method if necessary .
134
19
24,588
def done ( self , result , noraise = False ) : # [??] Should this be in a critical section? # Has done() already been called on this task? if self . ev_done . is_set ( ) : # ?? if isinstance ( self . result , Exception ) and ( not noraise ) : raise self . result return self . result # calculate running time and other finalization self . endtime = time . time ( ) try : self . totaltime = self . endtime - self . starttime except AttributeError : # task was not initialized properly self . totaltime = 0.0 self . result = result # Release thread waiters self . ev_done . set ( ) # Perform callbacks for event-style waiters self . make_callback ( 'resolved' , self . result ) # If the result is an exception, then our final act is to raise # it in the caller, unless the caller explicitly supressed that if isinstance ( result , Exception ) and ( not noraise ) : raise result return result
This method is called when a task has finished executing . Subclass can override this method if desired but should call superclass method at the end .
223
29
24,589
def runTask ( self , task , timeout = None ) : # Initialize the task. task . initialize ( self ) # Start the task. task . start ( ) # Lets other threads run time . sleep ( 0 ) # Wait for it to finish. res = task . wait ( timeout = timeout ) # Now we're done return res
Run a child task to completion . Returns the result of the child task .
70
15
24,590
def execute ( self ) : while self . index < len ( self . tasklist ) : res = self . step ( ) self . logger . debug ( 'SeqSet task %i has completed with result %s' % ( self . index , res ) ) # Returns result of last task to quit return res
Run all child tasks in order waiting for completion of each . Return the result of the final child task s execution .
65
23
24,591
def execute ( self ) : self . count = 0 self . taskset = [ ] self . results = { } self . totaltime = time . time ( ) # Register termination callbacks for all my child tasks. for task in list ( self . taskseq ) : self . taskset . append ( task ) task . add_callback ( 'resolved' , self . child_done , self . count ) self . count += 1 self . numtasks = self . count # Now start each child task. with self . regcond : for task in list ( self . taskset ) : task . initialize ( self ) task . start ( ) # Account for time needed to start subtasks self . totaltime = time . time ( ) - self . totaltime # Now give up the critical section and wait for last child # task to terminate. while self . count > 0 : self . regcond . wait ( ) # Scan results for errors (exceptions) and raise the first one we find for key in self . results . keys ( ) : value = self . results [ key ] if isinstance ( value , Exception ) : ( count , task ) = key self . logger . error ( "Child task %s terminated with exception: %s" % ( task . tag , str ( value ) ) ) raise value return 0
Run all child tasks concurrently in separate threads . Return 0 after all child tasks have completed execution .
276
19
24,592
def execute ( self ) : with self . _lock_c : self . count = 0 self . numtasks = 0 self . taskset = [ ] self . results = { } self . totaltime = time . time ( ) # Start all tasks for task in self . taskseq : self . taskset . append ( task ) self . numtasks += 1 task . init_and_start ( self ) num_tasks = self . getNumTasks ( ) # Wait on each task to clean up results while num_tasks > 0 : self . check_state ( ) for i in range ( num_tasks ) : try : try : task = self . getTask ( i ) except IndexError : # A task got deleted from the set. Jump back out # to outer loop and repoll the number of tasks break #self.logger.debug("waiting on %s" % task) res = task . wait ( timeout = self . idletime ) #self.logger.debug("finished: %s" % task) self . child_done ( res , task ) except TaskTimeout : continue except Exception as e : #self.logger.warning("Subtask propagated exception: %s" % str(e)) self . child_done ( e , task ) continue # wait a bit and try again #self.ev_quit.wait(self.idletime) # re-get number of tasks, in case some were added or deleted num_tasks = self . getNumTasks ( ) # Scan results for errors (exceptions) and raise the first one we find for key in self . results . keys ( ) : value = self . results [ key ] if isinstance ( value , Exception ) : ( count , task ) = key self . logger . error ( "Child task %s terminated with exception: %s" % ( task . tag , str ( value ) ) ) raise value # Return value of last child to complete return value
Run all child tasks concurrently in separate threads . Return last result after all child tasks have completed execution .
419
20
24,593
def execute ( self , task ) : taskid = str ( task ) res = None try : # Try to run the task. If we catch an exception, then # it becomes the result. self . time_start = time . time ( ) self . setstatus ( 'executing %s' % taskid ) self . logger . debug ( "now executing task '%s'" % taskid ) try : res = task . execute ( ) except UserTaskException as e : res = e except Exception as e : self . logger . error ( "Task '%s' raised exception: %s" % ( str ( task ) , str ( e ) ) ) res = e try : ( type , value , tb ) = sys . exc_info ( ) self . logger . debug ( "Traceback:\n%s" % "" . join ( traceback . format_tb ( tb ) ) ) # NOTE: to avoid creating a cycle that might cause # problems for GC--see Python library doc for sys # module tb = None except Exception as e : self . logger . debug ( "Traceback information unavailable." ) finally : self . logger . debug ( "done executing task '%s'" % str ( task ) ) self . setstatus ( 'cleaning %s' % taskid ) # Wake up waiters on other threads task . done ( res , noraise = True ) self . time_start = 0.0 self . setstatus ( 'idle' )
Execute a task .
316
5
24,594
def startall ( self , wait = False , * * kwdargs ) : self . logger . debug ( "startall called" ) with self . regcond : while self . status != 'down' : if self . status in ( 'start' , 'up' ) or self . ev_quit . is_set ( ) : # For now, abandon additional request to start self . logger . error ( "ignoring duplicate request to start thread pool" ) return self . logger . debug ( "waiting for threads: count=%d" % self . runningcount ) self . regcond . wait ( ) #assert(self.status == 'down') if self . ev_quit . is_set ( ) : return self . runningcount = 0 self . status = 'start' self . workers = [ ] if wait : tpool = self else : tpool = None # Start all worker threads self . logger . debug ( "starting threads in thread pool" ) for i in range ( self . numthreads ) : t = self . workerClass ( self . queue , logger = self . logger , ev_quit = self . ev_quit , tpool = tpool , * * kwdargs ) self . workers . append ( t ) t . start ( ) # if started with wait=True, then expect that threads will register # themselves and last one up will set status to "up" if wait : # Threads are on the way up. Wait until last one starts. while self . status != 'up' and not self . ev_quit . is_set ( ) : self . logger . debug ( "waiting for threads: count=%d" % self . runningcount ) self . regcond . wait ( ) else : # otherwise, we just assume the pool is up self . status = 'up' self . logger . debug ( "startall done" )
Start all of the threads in the thread pool . If _wait_ is True then don t return until all threads are up and running . Any extra keyword arguments are passed to the worker thread constructor .
397
40
24,595
def stopall ( self , wait = False ) : self . logger . debug ( "stopall called" ) with self . regcond : while self . status != 'up' : if self . status in ( 'stop' , 'down' ) or self . ev_quit . is_set ( ) : # For now, silently abandon additional request to stop self . logger . warning ( "ignoring duplicate request to stop thread pool." ) return self . logger . debug ( "waiting for threads: count=%d" % self . runningcount ) self . regcond . wait ( ) #assert(self.status == 'up') self . logger . debug ( "stopping threads in thread pool" ) self . status = 'stop' # Signal to all threads to terminate. self . ev_quit . set ( ) if wait : # Threads are on the way down. Wait until last one quits. while self . status != 'down' : self . logger . debug ( "waiting for threads: count=%d" % self . runningcount ) self . regcond . wait ( ) self . logger . debug ( "stopall done" )
Stop all threads in the worker pool . If _wait_ is True then don t return until all threads are down .
244
24
24,596
def wcs_pix_transform ( ct , i , format = 0 ) : z1 = float ( ct . z1 ) z2 = float ( ct . z2 ) i = float ( i ) yscale = 128.0 / ( z2 - z1 ) if ( format == 'T' or format == 't' ) : format = 1 if ( i == 0 ) : t = 0. else : if ( ct . zt == W_LINEAR ) : t = ( ( i - 1 ) * ( z2 - z1 ) / 199.0 ) + z1 t = max ( z1 , min ( z2 , t ) ) else : t = float ( i ) if ( format > 1 ) : t = ( z2 - t ) * yscale return ( t )
Computes the WCS corrected pixel value given a coordinate transformation and the raw pixel value .
175
17
24,597
def handle_request ( self ) : try : ( request , client_address ) = self . get_request ( ) except socket . error as e : # Error handling goes here. self . logger . error ( "error opening the connection: %s" % ( str ( e ) ) ) for exctn in sys . exc_info ( ) : print ( exctn ) return try : self . RequestHandlerClass ( request , client_address , self ) except Exception as e : # Error handling goes here. self . logger . error ( 'error handling the request: %s' % ( str ( e ) ) ) for exctn in sys . exc_info ( ) : print ( exctn ) return
Handles incoming connections one at the time .
151
9
24,598
def mainloop ( self ) : try : while ( not self . ev_quit . is_set ( ) ) : try : self . handle_request ( ) except socketTimeout : continue finally : self . socket . close ( )
main control loop .
48
4
24,599
def handle_feedback ( self , pkt ) : self . logger . debug ( "handle feedback" ) self . frame = self . decode_frameno ( pkt . z & 0o7777 ) - 1 # erase the frame buffer self . server . controller . init_frame ( self . frame ) self . server . controller . set_frame ( self . frame )
This part of the protocol is used by IRAF to erase a frame in the framebuffers .
79
20