idx
int64
0
63k
question
stringlengths
53
5.28k
target
stringlengths
5
805
24,600
def update_workflow_workitems(cr, pool, ref_spec_actions):
    """Find all the workflow items in the target state and move them to the
    wanted (fallback) state.

    :param cr: database cursor
    :param pool: model registry
    :param ref_spec_actions: list of tuples (target_external_id,
        fallback_external_id) holding XML-IDs of workflow activities.
    """
    workflow_workitems = pool['workflow.workitem']
    ir_model_data_model = pool['ir.model.data']
    for (target_external_id, fallback_external_id) in ref_spec_actions:
        # Resolve both XML-IDs ("module.name") to their records.
        target_activity = ir_model_data_model.get_object(
            cr, SUPERUSER_ID,
            target_external_id.split(".")[0],
            target_external_id.split(".")[1],
        )
        fallback_activity = ir_model_data_model.get_object(
            cr, SUPERUSER_ID,
            fallback_external_id.split(".")[0],
            fallback_external_id.split(".")[1],
        )
        ids = workflow_workitems.search(
            cr, SUPERUSER_ID, [('act_id', '=', target_activity.id)])
        if ids:
            logger.info(
                "Moving %d items in the removed workflow action (%s) to a "
                "fallback action (%s): %s",
                len(ids), target_activity.name, fallback_activity.name, ids)
            workflow_workitems.write(
                cr, SUPERUSER_ID, ids, {'act_id': fallback_activity.id})
Find all the workflow items from the target state to set them to the wanted state .
24,601
def logged_query(cr, query, args=None, skip_no_result=False):
    """Execute a query, logging it and the affected rows at level DEBUG.

    :param cr: database cursor
    :param query: SQL statement
    :param args: statement parameters (a list is converted to a tuple)
    :param skip_no_result: when True, suppress logging if no row matched
    :return: number of affected rows
    """
    if args is None:
        args = ()
    if type(args) == list:
        args = tuple(args)
    try:
        cr.execute(query, args)
    except (ProgrammingError, IntegrityError):
        logger.error('Error running %s' % cr.mogrify(query, args))
        raise
    if cr.rowcount or not skip_no_result:
        logger.debug('Running %s', query % args)
        logger.debug('%s rows affected', cr.rowcount)
    return cr.rowcount
Logs query and affected rows at level DEBUG .
24,602
def update_module_names(cr, namespec, merge_modules=False):
    """Deal with changed module names, making all the needed changes on the
    related tables, like XML-IDs, translations, and so on.

    :param cr: database cursor
    :param namespec: list of tuples (old_name, new_name)
    :param merge_modules: when True, merge the old module into an existing
        module called new_name instead of renaming it.
    """
    for (old_name, new_name) in namespec:
        if merge_modules:
            # Cleanup constraint/relation entries of the module that will
            # disappear after the merge.
            query = "SELECT id FROM ir_module_module WHERE name = %s"
            cr.execute(query, [old_name])
            row = cr.fetchone()
            if row:
                old_id = row[0]
                query = "DELETE FROM ir_model_constraint WHERE module = %s"
                logged_query(cr, query, [old_id])
                query = "DELETE FROM ir_model_relation WHERE module = %s"
                logged_query(cr, query, [old_id])
        else:
            # Plain rename of the module record and its own XML-ID.
            query = "UPDATE ir_module_module SET name = %s WHERE name = %s"
            logged_query(cr, query, (new_name, old_name))
            query = ("UPDATE ir_model_data SET name = %s "
                     "WHERE name = %s AND module = 'base' AND "
                     "model='ir.module.module' ")
            logged_query(cr, query,
                         ("module_%s" % new_name, "module_%s" % old_name))
        # Move XML-IDs to the new module; names that already exist there
        # get an '_openupgrade_<id>' suffix to avoid unique violations.
        query = ("UPDATE ir_model_data SET module = %s "
                 "WHERE module = %s AND name NOT IN "
                 "(SELECT name FROM ir_model_data WHERE module = %s)")
        logged_query(cr, query, (new_name, old_name, new_name))
        query = ("UPDATE ir_model_data "
                 "SET name = name || '_openupgrade_' || id, "
                 "module = %s "
                 "WHERE module = %s")
        logged_query(cr, query, (new_name, old_name))
        query = ("UPDATE ir_module_module_dependency SET name = %s "
                 "WHERE name = %s")
        logged_query(cr, query, (new_name, old_name))
        # ir_translation carries a module column only from version 8 on.
        if version_info[0] > 7:
            query = ("UPDATE ir_translation SET module = %s "
                     "WHERE module = %s")
            logged_query(cr, query, (new_name, old_name))
        if merge_modules:
            # Adopt the merged module's state if the target was not
            # installed, then drop the old module record and its XML-ID.
            logged_query(
                cr, "UPDATE ir_module_module m1 SET state=m2.state "
                    "FROM ir_module_module m2 WHERE m1.name=%s AND "
                    "m2.name=%s AND m1.state='uninstalled'",
                (new_name, old_name),
            )
            query = "DELETE FROM ir_module_module WHERE name = %s"
            logged_query(cr, query, [old_name])
            logged_query(
                cr, "DELETE FROM ir_model_data WHERE module = 'base' "
                    "AND model='ir.module.module' AND name = %s",
                ('module_%s' % old_name,),
            )
Deal with changed module names, making all the needed changes on the related tables, like XML-IDs, translations, and so on.
24,603
def add_ir_model_fields(cr, columnspec):
    """Add columns to ir_model_fields via raw SQL.

    Needed very early in the upgrade of the base module, before any model
    is initialized. Do not use for columns with extra SQL constraints.

    :param columnspec: iterable of (column_name, sql_type) tuples
    """
    for name_and_type in columnspec:
        statement = 'ALTER TABLE ir_model_fields ADD COLUMN %s %s' % (
            name_and_type)
        logged_query(cr, statement, [])
Typically new columns on ir_model_fields need to be added in a very early stage in the upgrade process of the base module in raw sql as they need to be in place before any model gets initialized . Do not use for fields with additional SQL constraints such as a reference to another table or the cascade constraint but craft your own statement taking them into account .
24,604
def m2o_to_m2m(cr, model, table, field, source_field):
    """Recreate relations in many2many fields that were formerly many2one
    fields.

    Use rename_columns in your pre-migrate script to retain the column's
    old value, then call m2o_to_m2m in your post-migrate script.

    Thin wrapper delegating to the generic x2m conversion.
    """
    return m2o_to_x2m(cr, model, table, field, source_field)
Recreate relations in many2many fields that were formerly many2one fields. Use rename_columns in your pre-migrate script to retain the column's old value, then call m2o_to_m2m in your post-migrate script.
24,605
def message(cr, module, table, column, message, *args, **kwargs):
    """Log handler for non-critical notifications about the upgrade.

    Builds a 'Module %s[, table %s[, column %s]]: <message>' prefix and
    logs at WARNING level. To be extended with logging to a table for
    reporting purposes.
    """
    argslist = list(args or [])
    prefix = ': '
    if column:
        argslist.insert(0, column)
        prefix = ', column %s' + prefix
    if table:
        argslist.insert(0, table)
        prefix = ', table %s' + prefix
    argslist.insert(0, module)
    prefix = 'Module %s' + prefix
    # logger.warn() is a deprecated alias; use warning() instead.
    logger.warning(prefix + message, *argslist, **kwargs)
Log handler for non - critical notifications about the upgrade . To be extended with logging to a table for reporting purposes .
24,606
def reactivate_workflow_transitions(cr, transition_conditions):
    """Reactivate workflow transitions previously deactivated by
    deactivate_workflow_transitions.

    :param cr: database cursor
    :param transition_conditions: mapping of transition id -> original
        condition to restore.
    """
    # dict.items() instead of the Python-2-only iteritems().
    for transition_id, condition in transition_conditions.items():
        cr.execute(
            'update wkf_transition set condition = %s where id = %s',
            (condition, transition_id))
Reactivate workflow transition previously deactivated by deactivate_workflow_transitions .
24,607
def convert_field_to_html(cr, table, field_name, html_field_name):
    """Convert a plain-text field value to its HTML equivalent.

    :param cr: database cursor
    :param table: table holding both columns
    :param field_name: source plain-text column
    :param html_field_name: destination HTML column
    """
    if version_info[0] < 7:
        logger.error("You cannot use this method in an OpenUpgrade version "
                     "prior to 7.0.")
        return
    # NOTE(review): table/column names are interpolated directly into the
    # SQL; callers must pass trusted identifiers only.
    cr.execute(
        "SELECT id, %(field)s FROM %(table)s WHERE %(field)s IS NOT NULL" % {
            'field': field_name,
            'table': table,
        })
    for row in cr.fetchall():
        logged_query(
            cr, "UPDATE %(table)s SET %(field)s = %%s WHERE id = %%s" % {
                'field': html_field_name,
                'table': table,
            }, (plaintext2html(row[1]), row[0]))
Convert field value to HTML value .
24,608
def lift_constraints(cr, table, column):
    """Lift all constraints on `column` in `table`.

    Typically used in a pre-migrate script where references for many2one
    fields with changed target objects are adapted. If everything went
    right the constraints will be recreated afterwards.

    Collects both incoming foreign keys (confrelid) and constraints
    defined on the table itself (conrelid), grouped per table, and drops
    them with one ALTER TABLE per table.
    """
    cr.execute(
        'select relname, array_agg(conname) from '
        '(select t1.relname, c.conname '
        'from pg_constraint c '
        'join pg_attribute a '
        'on c.confrelid=a.attrelid and a.attnum=any(c.conkey) '
        'join pg_class t on t.oid=a.attrelid '
        'join pg_class t1 on t1.oid=c.conrelid '
        'where t.relname=%(table)s and attname=%(column)s '
        'union select t.relname, c.conname '
        'from pg_constraint c '
        'join pg_attribute a '
        'on c.conrelid=a.attrelid and a.attnum=any(c.conkey) '
        'join pg_class t on t.oid=a.attrelid '
        'where relname=%(table)s and attname=%(column)s) in_out '
        'group by relname',
        {
            'table': table,
            'column': column,
        })
    for table, constraints in cr.fetchall():
        # AsIs injects the identifiers verbatim; they come from
        # pg_catalog, not from user input.
        cr.execute(
            'alter table %s drop constraint %s',
            (AsIs(table), AsIs(', drop constraint '.join(constraints))))
Lift all constraints on column in table . Typically you use this in a pre - migrate script where you adapt references for many2one fields with changed target objects . If everything went right the constraints will be recreated
24,609
def savepoint(cr):
    """Generator wrapping a PostgreSQL savepoint (context-manager body).

    Uses the cursor's native ``savepoint()`` helper when available;
    otherwise emits SAVEPOINT/RELEASE/ROLLBACK statements manually.

    :param cr: database cursor
    """
    if hasattr(cr, 'savepoint'):
        with cr.savepoint():
            yield
    else:
        name = uuid.uuid1().hex
        cr.execute('SAVEPOINT "%s"' % name)
        try:
            yield
            cr.execute('RELEASE SAVEPOINT "%s"' % name)
        except BaseException:
            cr.execute('ROLLBACK TO SAVEPOINT "%s"' % name)
            # Re-raise: swallowing the error here would hide failures
            # from the caller after the transaction state was reset.
            raise
return a context manager wrapping postgres savepoints
24,610
def rename_property(cr, model, old_name, new_name):
    """Rename property `old_name` owned by `model` to `new_name`.

    This should happen in a pre-migration script.

    Renames the ir.model.fields row, its ir.model.data entry and any
    ir.property values pointing to the field.
    """
    cr.execute(
        "update ir_model_fields f set name=%s "
        "from ir_model m "
        "where m.id=f.model_id and m.model=%s and f.name=%s "
        "returning f.id",
        (new_name, model, old_name))
    field_ids = tuple(i for i, in cr.fetchall())
    # NOTE(review): if no field matched, field_ids is empty and the
    # "in %s" clauses below would fail — presumably callers only invoke
    # this for existing properties; verify before reuse.
    cr.execute(
        "update ir_model_data set name=%s where model='ir.model.fields' and "
        "res_id in %s",
        ('%s,%s' % (model, new_name), field_ids))
    cr.execute(
        "update ir_property set name=%s where fields_id in %s",
        (new_name, field_ids))
Rename property old_name owned by model to new_name . This should happen in a pre - migration script .
24,611
def delete_records_safely_by_xml_id(env, xml_ids):
    """Remove, as safely as possible, the records whose XML-IDs are passed.

    Each deletion runs inside its own savepoint; failures are logged and
    skipped instead of aborting the whole operation.
    """
    for xml_id in xml_ids:
        logger.debug('Deleting record for XML-ID %s', xml_id)
        try:
            with env.cr.savepoint():
                record = env.ref(xml_id).exists()
                record.unlink()
        except Exception as e:
            logger.error('Error deleting XML-ID %s: %s', xml_id, repr(e))
This removes in the safest possible way the records whose XML - IDs are passed as argument .
24,612
def chunked(records, single=True):
    """Memory- and performance-friendly iteration over a potentially large
    recordset.

    Yields one record at a time when `single` is True, otherwise whole
    chunks of at most PREFETCH_MAX records. The cache is invalidated
    before each chunk to keep memory bounded. Don't nest calls to this
    method.
    """
    # The cache-invalidation API moved between Odoo versions.
    if version_info[0] > 10:
        invalidate = records.env.cache.invalidate
    elif version_info[0] > 7:
        invalidate = records.env.invalidate_all
    else:
        raise Exception('Not supported Odoo version for this method.')
    size = core.models.PREFETCH_MAX
    model = records._name
    # prefetch_fields=False avoids loading field values just to get ids.
    ids = records.with_context(prefetch_fields=False).ids
    for i in range(0, len(ids), size):
        invalidate()
        chunk = records.env[model].browse(ids[i:i + size])
        if single:
            for record in chunk:
                yield record
            continue
        yield chunk
Memory- and performance-friendly method to iterate over a potentially large number of records. Yields either a whole chunk or a single record at a time. Don't nest calls to this method.
24,613
def get_last_post_for_model(cr, uid, ids, model_pool):
    """Return {record id: date of its most recent message}.

    Records without messages map to False. To be called in
    post-migration scripts.
    """
    if type(ids) is not list:
        ids = [ids]
    res = {}
    for record in model_pool.browse(cr, uid, ids):
        messages = record.message_ids
        if messages:
            res[record.id] = max(messages, key=lambda m: m.date).date
        else:
            res[record.id] = False
    return res
Given a set of ids and a model pool return a dict of each object ids with their latest message date as a value . To be called in post - migration scripts
24,614
def set_message_last_post(cr, uid, pool, models):
    """Fill message_last_post with the latest mail_message date per record.

    :param models: a model name or a list of model names. To be called in
        post-migration scripts.
    """
    if type(models) is not list:
        models = [models]
    for model in models:
        table = pool[model]._table
        query = (
            "UPDATE {table} "
            "SET message_last_post=(SELECT max(mm.date) "
            "FROM mail_message mm "
            "WHERE mm.model=%s "
            "AND mm.date IS NOT NULL "
            "AND mm.res_id={table}.id)").format(table=table)
        cr.execute(query, (model,))
Given a list of models set their message_last_post fields to an estimated last post datetime . To be called in post - migration scripts
24,615
def column_exists(cr, table, column):
    """Check whether a certain column exists in the given table."""
    query = (
        'SELECT count(attname) FROM pg_attribute '
        'WHERE attrelid = '
        '( SELECT oid FROM pg_class WHERE relname = %s ) '
        'AND attname = %s')
    cr.execute(query, (table, column))
    return cr.fetchone()[0] == 1
Check whether a certain column exists
24,616
def start_logging(out=_stdout, level='info'):
    """Begin logging.

    :param out: file-like object for log output
    :param level: one of the levels in ``log_levels``

    Idempotent: calls after the first one are no-ops.
    """
    global _log_level, _loggers, _started_logging
    if level not in log_levels:
        raise RuntimeError(
            "Invalid log level '{0}'; valid are: {1}".format(
                level, ', '.join(log_levels)))
    if _started_logging:
        return
    _started_logging = True
    _log_level = level
    handler = _TxaioFileHandler(out)
    logging.getLogger().addHandler(handler)
    # Make errors raised while logging visible instead of swallowed.
    logging.raiseExceptions = True
    # Map our levels onto stdlib ones; 'trace' has no stdlib equivalent
    # and maps to DEBUG.
    level_to_stdlib = {
        'critical': logging.CRITICAL,
        'error': logging.ERROR,
        'warn': logging.WARNING,
        'info': logging.INFO,
        'debug': logging.DEBUG,
        'trace': logging.DEBUG,
    }
    logging.getLogger().setLevel(level_to_stdlib[level])
    # Propagate the level to loggers created before startup.
    for logger in _loggers:
        logger._set_log_level(level)
Begin logging .
24,617
def create_failure(self, exception=None):
    """Return an object implementing IFailedFuture.

    If *exception* is given it is wrapped directly; otherwise the
    exception currently being handled is captured via ``sys.exc_info()``.
    """
    if exception is None:
        return FailedFuture(*sys.exc_info())
    return FailedFuture(type(exception), exception, None)
This returns an object implementing IFailedFuture .
24,618
def gather(self, futures, consume_exceptions=True):
    """Return a Future that waits for all the Futures in *futures*.

    When *consume_exceptions* is true, exceptions are returned as
    results instead of being raised.
    """
    kwargs = {'return_exceptions': consume_exceptions}
    return asyncio.gather(*futures, **kwargs)
This returns a Future that waits for all the Futures in the list futures
24,619
def _use_framework(module):
    """Internal helper to point this module's public methods at a specific
    framework helper module.

    Copies every name in ``__all__`` (except the framework selectors
    themselves) from *module* onto the top-level txaio package.
    """
    import txaio
    for method_name in __all__:
        if method_name in ['use_twisted', 'use_asyncio']:
            continue
        setattr(txaio, method_name, getattr(module, method_name))
Internal helper to set this module's methods to a specified framework's helper methods.
24,620
def start_logging(out=_stdout, level='info'):
    """Start logging to the file-like object in *out* (stdout by default).

    :param level: one of the levels in ``log_levels``

    Idempotent after the first call. Uses Twisted's new logger
    (``globalLogBeginner``) when available, falling back to the legacy
    ``twisted.python.log`` otherwise.
    """
    global _loggers, _observer, _log_level, _started_logging
    if level not in log_levels:
        raise RuntimeError(
            "Invalid log level '{0}'; valid are: {1}".format(
                level, ', '.join(log_levels)))
    if _started_logging:
        return
    _started_logging = True
    _log_level = level
    set_global_log_level(_log_level)
    if out:
        _observer = _LogObserver(out)
    if _NEW_LOGGER:
        _observers = []
        if _observer:
            _observers.append(_observer)
        globalLogBeginner.beginLoggingTo(_observers)
    else:
        # Legacy path requires an output object.
        assert out, "out needs to be given a value if using Twisteds before 15.2"
        from twisted.python import log
        log.startLogging(out)
Start logging to the file - like object in out . By default this is stdout .
24,621
def set_log_level(self, level, keep=True):
    """Set the log level for this logger.

    :param level: the new log level
    :param keep: when True, this logger will not follow subsequent
        global log-level changes.
    """
    self._set_log_level(level)
    self._log_level_set_explicitly = keep
Set the log level . If keep is True then it will not change along with global log changes .
24,622
def sleep(self, delay):
    """Inline sleep for use in co-routines.

    :return: a Deferred that fires with None after *delay* seconds.
    """
    deferred = Deferred()
    self._get_loop().callLater(delay, deferred.callback, None)
    return deferred
Inline sleep for use in co - routines .
24,623
def _notify_bucket(self, real_time):
    """Internal helper: run the callbacks collected in one bucket.

    Calls are processed in chunks of ``self._chunk_size``; remaining
    chunks are re-scheduled via delayed calls so the bucket's work is
    spread over the bucket interval. Exceptions from individual
    callbacks are collected and raised together after the last chunk.
    """
    (delayed_call, calls) = self._buckets[real_time]
    del self._buckets[real_time]
    errors = []

    def notify_one_chunk(calls, chunk_size, chunk_delay_ms):
        # Run one chunk now, schedule the rest for later.
        for call in calls[:chunk_size]:
            try:
                call()
            except Exception as e:
                errors.append(e)
        calls = calls[chunk_size:]
        if calls:
            self._create_delayed_call(
                chunk_delay_ms / 1000.0,
                notify_one_chunk,
                calls,
                chunk_size,
                chunk_delay_ms,
            )
        else:
            # All chunks done; surface accumulated errors, if any.
            if len(errors):
                msg = u"Error(s) processing call_later bucket:\n"
                for e in errors:
                    msg += u"{}\n".format(e)
                raise RuntimeError(msg)
    # Spread the chunks evenly over the bucket interval.
    delay_ms = self._bucket_milliseconds / math.ceil(
        float(len(calls)) / self._chunk_size)
    notify_one_chunk(calls, self._chunk_size, max(0.0, delay_ms))
Internal helper . This does the callbacks in a particular bucket .
24,624
def check_ab(ab, verb):
    r"""Check the source-receiver configuration code <ab>.

    :return: (ab_calc, msrc, mrec) — the ab used for computation
        (mirrored when the receiver is magnetic) and flags for magnetic
        source/receiver.
    """
    # <ab> must be an integer.
    try:
        ab = int(ab)
    except VariableCatch:
        print('* ERROR :: <ab> must be an integer')
        raise
    # Valid codes have both digits in 1..6.
    pab = [10*i + j for i in range(1, 7) for j in range(1, 7)]
    if ab not in pab:
        print('* ERROR :: <ab> must be one of: ' + str(pab) + ';' +
              ' <ab> provided: ' + str(ab))
        raise ValueError('ab')
    if verb > 2:
        print(" Input ab : ", ab)
    # Second digit > 3: magnetic source; first digit > 3: magnetic rec.
    src_digit, rec_digit = ab % 10, ab // 10
    msrc = src_digit > 3
    mrec = rec_digit > 3
    if mrec and msrc:
        ab_calc = ab - 33
    elif mrec:
        # Swap digits (reciprocity) for magnetic rec, electric src.
        ab_calc = src_digit * 10 + rec_digit
    else:
        ab_calc = ab
    if verb > 2:
        if ab in [36, 63]:
            print("\n> <ab> IS " + str(ab) + " WHICH IS ZERO; returning")
        else:
            print(" Calculated ab : ", ab_calc)
    return ab_calc, msrc, mrec
r Check source - receiver configuration .
24,625
def check_dipole(inp, name, verb):
    r"""Check dipole parameters.

    :param inp: [x, y, z]; x and y must share a shape, z is one value
    :param name: 'src' or 'rec' (used in messages)
    :param verb: verbosity; > 2 prints a summary
    :return: (checked inp, number of dipoles)
    """
    # Check the overall [x, y, z] shape.
    _check_shape(np.squeeze(inp), name, (3,))
    inp[0] = _check_var(inp[0], float, 1, name + '-x')
    # y must match x's shape; z must be a single value.
    inp[1] = _check_var(inp[1], float, 1, name + '-y', inp[0].shape)
    inp[2] = _check_var(inp[2], float, 1, name + '-z', (1,))
    if verb > 2:
        if name == 'src':
            longname = ' Source(s) : '
        else:
            longname = ' Receiver(s) : '
        print(longname, str(inp[0].size), 'dipole(s)')
        tname = ['x ', 'y ', 'z ']
        for i in range(3):
            text = " > " + tname[i] + " [m] : "
            _prnt_min_max_val(inp[i], text, verb)
    return inp, inp[0].size
r Check dipole parameters .
24,626
def check_frequency(freq, res, aniso, epermH, epermV, mpermH, mpermV, verb):
    r"""Calculate frequency-dependent parameters.

    :return: (freq, etaH, etaV, zetaH, zetaV) per (frequency, layer)
    """
    global _min_freq
    # A dict-valued res carries the resistivity under the key 'res'.
    if isinstance(res, dict):
        res = res['res']
    # Check frequency and enforce the module-wide minimum.
    freq = _check_var(freq, float, 1, 'freq')
    freq = _check_min(freq, _min_freq, 'Frequencies', 'Hz', verb)
    if verb > 2:
        _prnt_min_max_val(freq, " frequency [Hz] : ", verb)
    # Physical constants.
    c = 299792458                     # speed of light [m/s]
    mu_0 = 4e-7 * np.pi               # permeability of free space [H/m]
    epsilon_0 = 1. / (mu_0 * c * c)   # permittivity of free space [F/m]
    # Horizontal/vertical eta (conduction + displacement) and zeta terms.
    etaH = 1 / res + np.outer(2j * np.pi * freq, epermH * epsilon_0)
    etaV = 1 / (res * aniso * aniso) + np.outer(
        2j * np.pi * freq, epermV * epsilon_0)
    zetaH = np.outer(2j * np.pi * freq, mpermH * mu_0)
    zetaV = np.outer(2j * np.pi * freq, mpermV * mu_0)
    return freq, etaH, etaV, zetaH, zetaV
r Calculate frequency - dependent parameters .
24,627
def check_opt(opt, loop, ht, htarg, verb):
    r"""Check optimization parameters.

    :return: (use_ne_eval, loop_freq, loop_off) — the numexpr evaluator
        (or False) and which dimension, if any, to loop over.
    """
    # 'parallel' uses numexpr when it is installed.
    use_ne_eval = False
    if opt == 'parallel':
        if numexpr:
            use_ne_eval = numexpr.evaluate
        elif verb > 0:
            print(numexpr_msg)
    # Lagged/splined FHT is flagged by a non-zero htarg[1] (pts_per_dec).
    lagged_splined_fht = False
    if ht == 'fht':
        if htarg[1] != 0:
            lagged_splined_fht = True
    # QWE/QUAD and lagged/splined FHT always loop over frequencies.
    if ht in ['hqwe', 'hquad'] or lagged_splined_fht:
        loop_freq = True
        loop_off = False
    else:
        loop_off = loop == 'off'
        loop_freq = loop == 'freq'
    if verb > 2:
        if use_ne_eval:
            print(" Kernel Opt. : Use parallel")
        else:
            print(" Kernel Opt. : None")
        if loop_off:
            print(" Loop over : Offsets")
        elif loop_freq:
            print(" Loop over : Frequencies")
        else:
            print(" Loop over : None (all vectorized)")
    return use_ne_eval, loop_freq, loop_off
r Check optimization parameters .
24,628
def check_time_only(time, signal, verb):
    r"""Check time and signal parameters.

    :param time: times t (s)
    :param signal: -1, 0, or 1
    :param verb: verbosity
    :return: checked time array
    :raises ValueError: if signal is not in [-1, 0, 1]
    """
    global _min_time
    # NOTE(review): the message lists None as valid, but int(None)
    # raises TypeError — presumably callers filter None out beforehand;
    # confirm before relying on it.
    if int(signal) not in [-1, 0, 1]:
        print("* ERROR :: <signal> must be one of: [None, -1, 0, 1]; " +
              "<signal> provided: " + str(signal))
        raise ValueError('signal')
    # Check time and enforce the module-wide minimum.
    time = _check_var(time, float, 1, 'time')
    time = _check_min(time, _min_time, 'Times', 's', verb)
    if verb > 2:
        _prnt_min_max_val(time, " time [s] : ", verb)
    return time
r Check time and signal parameters .
24,629
def check_solution(solution, signal, ab, msrc, mrec):
    r"""Check the required solution against the other parameters.

    :raises ValueError: for an unknown solution, a diffusive solution
        with magnetic source/receiver, or a fullspace solution in the
        time domain.
    """
    # Only the implemented solutions are accepted.
    if solution not in ['fs', 'dfs', 'dhs', 'dsplit', 'dtetm']:
        print("* ERROR :: Solution must be one of ['fs', 'dfs', 'dhs', " +
              "'dsplit', 'dtetm']; <solution> provided: " + solution)
        raise ValueError('solution')
    # Diffusive solutions ('d...') require electric src and rec.
    if solution.startswith('d') and (msrc or mrec):
        print('* ERROR :: Diffusive solution is only implemented for ' +
              'electric sources and electric receivers, <ab> provided: ' +
              str(ab))
        raise ValueError('ab')
    # Fullspace solution exists only in the frequency domain.
    if solution == 'fs' and signal is not None:
        print('* ERROR :: Full fullspace solution is only implemented for ' +
              'the frequency domain, <signal> provided: ' + str(signal))
        raise ValueError('signal')
r Check required solution with parameters .
24,630
def get_abs(msrc, mrec, srcazm, srcdip, recazm, recdip, verb):
    r"""Get the required ab's for the given angles.

    Entries of the 3x3 ab-grid whose geometric factor vanishes for
    angles that are multiples of 90 degrees are removed.
    """
    # Base electric-electric grid; shift for magnetic src/rec and apply
    # reciprocity when only the receiver is magnetic.
    ab_calc = np.array([[11, 12, 13], [21, 22, 23], [31, 32, 33]])
    if msrc:
        ab_calc += 3
    if mrec:
        ab_calc += 30
        if msrc:
            ab_calc -= 33
        else:
            ab_calc = ab_calc % 10 * 10 + ab_calc // 10
    # Start with every entry required; right-angle geometries clear
    # columns (source direction) or rows (receiver direction).
    required = np.ones(ab_calc.shape, dtype=bool)
    first_srcazm = np.atleast_1d(srcazm)[0]
    if np.allclose(srcazm % (np.pi / 2), 0):
        if np.isclose(first_srcazm // (np.pi / 2) % 2, 0):
            required[:, 1] = False
        else:
            required[:, 0] = False
    first_srcdip = np.atleast_1d(srcdip)[0]
    if np.allclose(srcdip % (np.pi / 2), 0):
        if np.isclose(first_srcdip // (np.pi / 2) % 2, 0):
            required[:, 2] = False
        else:
            required[:, :2] = False
    first_recazm = np.atleast_1d(recazm)[0]
    if np.allclose(recazm % (np.pi / 2), 0):
        if np.isclose(first_recazm // (np.pi / 2) % 2, 0):
            required[1, :] = False
        else:
            required[0, :] = False
    first_recdip = np.atleast_1d(recdip)[0]
    if np.allclose(recdip % (np.pi / 2), 0):
        if np.isclose(first_recdip // (np.pi / 2) % 2, 0):
            required[2, :] = False
        else:
            required[:2, :] = False
    ab_calc = ab_calc[required].ravel()
    if verb > 2:
        print(" Required ab's : ", _strvar(ab_calc))
    return ab_calc
r Get required ab s for given angles .
24,631
def get_geo_fact(ab, srcazm, srcdip, recazm, recdip, msrc, mrec):
    r"""Get the geometrical scaling factor for the given angles.

    :return: outer product of source and receiver factors, raveled;
        values below _min_angle are set to 0.
    """
    global _min_angle
    # Direction digits: source = last digit, receiver = first digit.
    fis = ab % 10
    fir = ab // 10
    # Swap directions when rec is magnetic and src is not (reciprocity).
    if mrec and not msrc:
        fis, fir = fir, fis

    def gfact(bp, azm, dip):
        r"""Factor of one dipole for its direction digit."""
        if bp in [1, 4]:    # x-directed
            return np.cos(azm) * np.cos(dip)
        elif bp in [2, 5]:  # y-directed
            return np.sin(azm) * np.cos(dip)
        else:               # z-directed
            return np.sin(dip)
    fsrc = gfact(fis, srcazm, srcdip)
    frec = gfact(fir, recazm, recdip)
    fact = np.outer(fsrc, frec).ravel()
    # Clip numerically tiny factors to exactly zero.
    fact[np.abs(fact) < _min_angle] = 0
    return fact
r Get required geometrical scaling factor for given angles .
24,632
def get_layer_nr(inp, depth):
    r"""Get the number of the layer in which `inp` resides.

    :param inp: [x, y, z]; only the depth inp[2] is used
    :param depth: interface depths, increasing (first usually -inf)
    :return: (layer number(s), z-value(s))
    """
    zinp = inp[2]
    # Upper bound of each layer is the next interface; the deepest layer
    # extends to +infinity.  np.inf: the np.infty alias was removed in
    # NumPy 2.0.
    pdepth = np.concatenate((depth[1:], np.array([np.inf])))
    b_zinp = np.atleast_1d(zinp)[:, None]
    # Point is in layer i when depth[i] < z <= depth[i+1].
    linp = np.where((depth[None, :] < b_zinp) *
                    (pdepth[None, :] >= b_zinp))[1]
    return np.squeeze(linp), zinp
r Get number of layer in which inp resides .
24,633
def get_off_ang(src, rec, nsrc, nrec, verb):
    r"""Get offsets and angles for all source/receiver pairs.

    :return: (off, angle), each of length nsrc*nrec; angle is NaN where
        the offset falls below _min_off.
    """
    global _min_off
    # Pre-allocate off and angle.
    off = np.empty((nrec * nsrc,))
    angle = np.empty((nrec * nsrc,))
    # Output is receiver-fastest: block i holds all receivers of src i.
    for i in range(nsrc):
        xco = rec[0] - src[0][i]
        yco = rec[1] - src[1][i]
        off[i * nrec:(i + 1) * nrec] = np.sqrt(xco * xco + yco * yco)
        angle[i * nrec:(i + 1) * nrec] = np.arctan2(yco, xco)
    # The angle is undefined below the minimum offset.
    angle[np.where(off < _min_off)] = np.nan
    off = _check_min(off, _min_off, 'Offsets', 'm', verb)
    return off, angle
r Get depths offsets angles hence spatial input parameters .
24,634
def printstartfinish(verb, inp=None, kcount=None):
    r"""Print start/finish messages with runtime and kernel count.

    Called without *inp*: returns the start time. Called with *inp*
    (a previously returned start time): prints the elapsed runtime.
    """
    if inp:
        if verb > 1:
            runtime = str(timedelta(seconds=default_timer() - inp))
            kernel_txt = ' '
            if kcount:
                kernel_txt += str(kcount) + ' kernel call(s)'
            print('\n:: empymod END; runtime = ' + runtime + ' ::' +
                  kernel_txt + '\n')
    else:
        start = default_timer()
        if verb > 2:
            print("\n:: empymod START ::\n")
        return start
r Print start and finish with time measure and kernel count .
24,635
def set_minimum(min_freq=None, min_time=None, min_off=None, min_res=None,
                min_angle=None):
    r"""Set the module-wide minimum values of parameters.

    Only arguments that are not None are updated.

    :param min_freq: minimum frequency
    :param min_time: minimum time
    :param min_off: minimum offset
    :param min_res: minimum resistivity
    :param min_angle: minimum angle factor
    """
    global _min_freq, _min_time, _min_off, _min_res, _min_angle
    if min_freq is not None:
        _min_freq = min_freq
    if min_time is not None:
        _min_time = min_time
    if min_off is not None:
        _min_off = min_off
    if min_res is not None:
        _min_res = min_res
    if min_angle is not None:
        _min_angle = min_angle
r Set minimum values of parameters .
24,636
def get_minimum():
    r"""Return the current module-wide minimum values as a dict."""
    return dict(min_freq=_min_freq, min_time=_min_time, min_off=_min_off,
                min_res=_min_res, min_angle=_min_angle)
r Return the current minimum values .
24,637
def _check_var ( var , dtype , ndmin , name , shape = None , shape2 = None ) : r if var is None : raise ValueError var = np . array ( var , dtype = dtype , copy = True , ndmin = ndmin ) if shape : _check_shape ( var , name , shape , shape2 ) return var
r Return variable as array of dtype ndmin ; shape - checked .
24,638
def _strvar ( a , prec = '{:G}' ) : r return ' ' . join ( [ prec . format ( i ) for i in np . atleast_1d ( a ) ] )
r Return variable as a string to print with given precision .
24,639
def _check_min ( par , minval , name , unit , verb ) : r scalar = False if par . shape == ( ) : scalar = True par = np . atleast_1d ( par ) if minval is not None : ipar = np . where ( par < minval ) par [ ipar ] = minval if verb > 0 and np . size ( ipar ) != 0 : print ( '* WARNING :: ' + name + ' < ' + str ( minval ) + ' ' + unit + ' are set to ' + str ( minval ) + ' ' + unit + '!' ) if scalar : return np . squeeze ( par ) else : return par
r Check minimum value of parameter .
24,640
def spline_backwards_hankel(ht, htarg, opt):
    r"""Check `opt` for the deprecated 'spline' value and translate it
    into the corresponding `htarg` setting.

    :return: (htarg, opt) with opt reset to None when 'spline' was given
    """
    ht = ht.lower()
    if ht in ['fht', 'qwe', 'hqwe']:
        # Normalize htarg to a dict with the transform's known keys.
        if ht == 'fht':
            htarg = _check_targ(htarg, ['fhtfilt', 'pts_per_dec'])
        elif ht in ['qwe', 'hqwe']:
            htarg = _check_targ(htarg, ['rtol', 'atol', 'nquad', 'maxint',
                                        'pts_per_dec', 'diff_quad', 'a',
                                        'b', 'limit'])
        if opt == 'spline':
            mesg = ("\n The use of `opt='spline'` is deprecated and will " +
                    "be removed\n in v2.0.0; use the corresponding " +
                    "setting in `htarg`.")
            warnings.warn(mesg, DeprecationWarning)
            opt = None
            # Fall back to the old default pts_per_dec values.
            if 'pts_per_dec' not in htarg:
                if ht == 'fht':
                    htarg['pts_per_dec'] = -1
                elif ht in ['qwe', 'hqwe']:
                    htarg['pts_per_dec'] = 80
    return htarg, opt
r Check opt if deprecated spline is used .
24,641
def gpr(src, rec, depth, res, freqtime, cf, gain=None, ab=11, aniso=None,
        epermH=None, epermV=None, mpermH=None, mpermV=None, xdirect=False,
        ht='quad', htarg=None, ft='fft', ftarg=None, opt=None, loop=None,
        verb=2):
    r"""Return the Ground-Penetrating Radar signal.

    Computes the frequency-domain dipole response, multiplies it with a
    source wavelet centred at *cf*, transforms it to the time domain and
    applies a time-power gain. EXPERIMENTAL — use with caution.
    """
    if verb > 2:
        print(" GPR : EXPERIMENTAL, USE WITH CAUTION")
        print(" > centre freq : " + str(cf))
        print(" > gain : " + str(gain))
    # Get required frequencies for the requested times.
    time, freq, ft, ftarg = check_time(freqtime, 0, ft, ftarg, verb)
    # Frequency-domain electromagnetic response.
    EM = dipole(src, rec, depth, res, freq, None, ab, aniso, epermH, epermV,
                mpermH, mpermV, xdirect, ht, htarg, ft, ftarg, opt, loop,
                verb)
    # Silently re-derive counts/offsets for reshaping (verb=0).
    src, nsrc = check_dipole(src, 'src', 0)
    rec, nrec = check_dipole(rec, 'rec', 0)
    off, _ = get_off_ang(src, rec, nsrc, nrec, 0)
    EM = EM.reshape((-1, nrec * nsrc), order='F')
    # Multiply with the frequency-domain source wavelet.
    cfc = -(np.r_[0, freq[:-1]] / cf) ** 2
    fwave = cfc * np.exp(cfc)
    EM *= fwave[:, None]
    # Transform to the time domain.
    EM, conv = tem(EM, off, freq, time, 0, ft, ftarg)
    conv_warning(conv, ftarg, 'Fourier', verb)
    # Apply gain (time converted to ns).
    # NOTE(review): with the default gain=None this raises a TypeError
    # (None used as an exponent) — confirm callers always pass a number.
    EM *= (1 + np.abs((time * 10 ** 9) ** gain))[:, None]
    EM = EM.real
    EM = np.squeeze(EM.reshape((-1, nrec, nsrc), order='F'))
    return EM
r Return the Ground - Penetrating Radar signal .
24,642
def dipole_k(src, rec, depth, res, freq, wavenumber, ab=11, aniso=None,
             epermH=None, epermV=None, mpermH=None, mpermV=None, verb=2):
    r"""Return the electromagnetic wavenumber-domain field.

    :return: (PJ0, PJ1) wavenumber-domain fields split into the parts
        belonging to the J0 and J1 integrals.
    """
    # Start the timer.
    t0 = printstartfinish(verb)
    # Check and prepare all inputs.
    modl = check_model(depth, res, aniso, epermH, epermV, mpermH, mpermV,
                       False, verb)
    depth, res, aniso, epermH, epermV, mpermH, mpermV, _ = modl
    f = check_frequency(freq, res, aniso, epermH, epermV, mpermH, mpermV,
                        verb)
    freq, etaH, etaV, zetaH, zetaV = f
    ab_calc, msrc, mrec = check_ab(ab, verb)
    src, nsrc = check_dipole(src, 'src', verb)
    rec, nrec = check_dipole(rec, 'rec', verb)
    off, angle = get_off_ang(src, rec, nsrc, nrec, verb)
    factAng = kernel.angle_factor(angle, ab, msrc, mrec)
    lsrc, zsrc = get_layer_nr(src, depth)
    lrec, zrec = get_layer_nr(rec, depth)
    # Pre-allocate output; shape depends on the wavenumber input.
    if off.size == 1 and np.ndim(wavenumber) == 2:
        PJ0 = np.zeros((freq.size, wavenumber.shape[0],
                        wavenumber.shape[1]), dtype=complex)
        PJ1 = np.zeros((freq.size, wavenumber.shape[0],
                        wavenumber.shape[1]), dtype=complex)
    else:
        PJ0 = np.zeros((freq.size, off.size, wavenumber.size),
                       dtype=complex)
        PJ1 = np.zeros((freq.size, off.size, wavenumber.size),
                       dtype=complex)
    # ab=36 yields a zero response (see check_ab); skip the kernel call.
    if ab_calc not in [36, ]:
        J0, J1, J0b = kernel.wavenumber(zsrc, zrec, lsrc, lrec, depth, etaH,
                                        etaV, zetaH, zetaV,
                                        np.atleast_2d(wavenumber), ab_calc,
                                        False, msrc, mrec, False)
        if J1 is not None:
            PJ1 += factAng[:, np.newaxis] * J1
            if ab in [11, 12, 21, 22, 14, 24, 15, 25]:
                PJ1 /= off[:, None]
        if J0 is not None:
            PJ0 += J0
        if J0b is not None:
            PJ0 += factAng[:, np.newaxis] * J0b
    # Report runtime and kernel count.
    printstartfinish(verb, t0, 1)
    return np.squeeze(PJ0), np.squeeze(PJ1)
r Return the electromagnetic wavenumber - domain field .
24,643
def wavenumber(src, rec, depth, res, freq, wavenumber, ab=11, aniso=None,
               epermH=None, epermV=None, mpermH=None, mpermV=None, verb=2):
    r"""Deprecated; use :func:`dipole_k` instead."""
    warnings.warn(
        "\n The use of `model.wavenumber` is deprecated and will " +
        "be removed;\n use `model.dipole_k` instead.",
        DeprecationWarning)
    return dipole_k(src, rec, depth, res, freq, wavenumber, ab, aniso,
                    epermH, epermV, mpermH, mpermV, verb)
r Deprecated. Use dipole_k instead.
24,644
def tem(fEM, off, freq, time, signal, ft, ftarg, conv=True):
    r"""Return the time-domain response of the frequency-domain response
    fEM.

    For signal=-1/1 the frequency-domain data is first divided by
    ±i*2*pi*f.
    """
    if signal in [-1, 1]:
        fact = signal / (2j * np.pi * freq)
    else:
        fact = 1
    tEM = np.zeros((time.size, off.size))
    # One Fourier transform per offset; track overall convergence.
    transform_fn = getattr(transform, ft)
    for i, _ in enumerate(off):
        tval, ok = transform_fn(fEM[:, i] * fact, time, freq, ftarg)
        tEM[:, i] += tval
        conv *= ok
    return tEM * 2 / np.pi, conv
r Return the time - domain response of the frequency - domain response fEM .
24,645
def save_filter(name, filt, full=None, path='filters'):
    r"""Save DLF-filter and inversion output to plain text files.

    :param name: file name; a second extension part triggers '.gz'
    :param filt: filter instance (written via its ``tofile`` method)
    :param full: optional inversion output from empymod.fdesign.design,
        saved to a companion '_full' file
    :param path: output directory
    """
    # The filter itself knows how to serialize itself.
    filt.tofile(path)
    if full:
        path = os.path.abspath(path)
        if len(name.split('.')) == 2:
            suffix = '.gz'
        else:
            suffix = ''
        fullfile = os.path.join(path, name.split('.')[0] + '_full.txt' +
                                suffix)
        nspace, nshift = full[3].shape
        # Header documenting the file layout line by line.
        header = 'Full inversion output from empymod.fdesign.design\n'
        header += 'Line 11: Nr of spacing values\n'
        header += 'Line 12: Nr of shift values\n'
        header += 'Line 13: Best spacing value\n'
        header += 'Line 14: Best shift value\n'
        header += 'Line 15: Min amplitude or max offset\n'
        header += 'Lines 16-{}: Spacing matrix '.format(nspace + 15)
        header += '({} x {})\n'.format(nspace, nshift)
        header += 'Lines {}-{}: Spacing matrix '.format(nspace + 16,
                                                        2 * nspace + 15)
        header += '({} x {})\n'.format(nspace, nshift)
        header += 'Lines {}-{}: Spacing '.format(2 * nspace + 16,
                                                 3 * nspace + 15)
        header += 'matrix ({} x {})\n'.format(nspace, nshift)
        header += 'Line {}: Integer: 0: min amp, 1: max r'.format(
            3 * nspace + 16)
        # Scalar rows are zero-padded so every row has nshift columns.
        nr_spacing = np.r_[nspace, np.zeros(nshift - 1)]
        nr_shift = np.r_[nshift, np.zeros(nshift - 1)]
        best_spacing = np.r_[full[0][0], np.zeros(nshift - 1)]
        best_shift = np.r_[full[0][1], np.zeros(nshift - 1)]
        min_value = np.r_[np.atleast_1d(full[1]), np.zeros(nshift - 1)]
        min_max = np.r_[full[4], np.zeros(nshift - 1)]
        fullsave = np.vstack((nr_spacing, nr_shift, best_spacing,
                              best_shift, min_value, full[2][0],
                              full[2][1], full[3], min_max))
        np.savetxt(fullfile, fullsave, header=header)
r Save DLF - filter and inversion output to plain text files .
24,646
def load_filter(name, full=False, path='filters'):
    r"""Load a saved DLF-filter and, optionally, its inversion output.

    :param name: filter name (as used for saving)
    :param full: when True, also load the '_full' inversion output
    :param path: directory holding the files
    :return: the filter, or (filter, inversion output) when *full* is
        True and the companion file exists
    """
    filt = DigitalFilter(name.split('.')[0])
    filt.fromfile(path)
    if full:
        # Best effort: return only the filter when the companion file
        # cannot be read.
        try:
            path = os.path.abspath(path)
            if len(name.split('.')) == 2:
                suffix = '.gz'
            else:
                suffix = ''
            fullfile = os.path.join(path, name.split('.')[0] + '_full.txt' +
                                    suffix)
            out = np.loadtxt(fullfile)
        except IOError:
            return filt
        # First rows store zero-padded scalars (see save_filter layout).
        nspace = int(out[0][0])
        nshift = int(out[1][0])
        space_shift_matrix = np.zeros((2, nspace, nshift))
        space_shift_matrix[0, :, :] = out[5:nspace + 5, :]
        space_shift_matrix[1, :, :] = out[nspace + 5:2 * nspace + 5, :]
        out = (np.array([out[2][0], out[3][0]]), out[4][0],
               space_shift_matrix, out[2 * nspace + 5:3 * nspace + 5, :],
               int(out[3 * nspace + 5, 0]))
        return filt, out
    else:
        return filt
r Load saved DLF - filter and inversion output from text files .
24,647
def plot_result(filt, full, prntres=True):
    r"""QC the inversion result.

    Left panel: recovered min-amplitude (or max-r) field over the
    spacing/shift grid; right panel: the best filter's values.
    """
    # matplotlib is an optional dependency.
    if not plt:
        print(plt_msg)
        return
    if prntres:
        print_result(filt, full)
    # Spacing and shift axes of the brute-force grid.
    spacing = full[2][0, :, 0]
    shift = full[2][1, 0, :]
    minfield = np.squeeze(full[3])
    plt.figure("Brute force result", figsize=(9.5, 4.5))
    plt.subplots_adjust(wspace=.4, bottom=0.2)
    if spacing.size > 1 or shift.size > 1:
        plt.subplot(121)
        # full[4] == 0: minimized amplitude; else maximized offset r.
        if full[4] == 0:
            plt.title("Minimal recovered fields")
            ylabel = 'Minimal recovered amplitude (log10)'
            field = np.log10(minfield)
            cmap = plt.cm.viridis
        else:
            plt.title("Maximum recovered r")
            ylabel = 'Maximum recovered r'
            field = 1/minfield
            cmap = plt.cm.viridis_r
        # 1D plot if only one of the two axes varies; else a 2D mesh.
        if shift.size == 1:
            plt.plot(spacing, field)
            plt.xlabel('Spacing')
            plt.ylabel(ylabel)
        elif spacing.size == 1:
            plt.plot(shift, field)
            plt.xlabel('Shift')
            plt.ylabel(ylabel)
        else:
            field = np.ma.masked_where(np.isinf(minfield), field)
            plt.pcolormesh(shift, spacing, field, cmap=cmap)
            plt.ylabel('Spacing')
            plt.xlabel('Shift')
            plt.colorbar()
    if spacing.size > 1 or shift.size > 1:
        plt.subplot(122)
    plt.title('Filter values of best filter')
    for attr in ['j0', 'j1', 'sin', 'cos']:
        if hasattr(filt, attr):
            # Plot |values|; mark negative values with black dots.
            plt.plot(np.log10(filt.base),
                     np.log10(np.abs(getattr(filt, attr))), '.-', lw=.5,
                     label='abs(' + attr + ')')
            plt.plot(np.log10(filt.base), np.log10(-getattr(filt, attr)),
                     '.', color='k', ms=4)
    # Dummy point (at inf) only to get the legend entry.
    plt.plot(np.inf, 0, '.', color='k', ms=4, label='Neg. values')
    plt.xlabel('Base (log10)')
    plt.ylabel('Abs(Amplitude) (log10)')
    plt.legend(loc='best')
    plt.gcf().canvas.draw()
    plt.show()
r QC the inversion result .
24,648
def print_result(filt, full=None):
    r"""Print best filter information.

    If ``full`` is given, spacing/shift come from the inversion output;
    otherwise they are reconstructed from the filter base itself.
    """
    print(' Filter length : %d' % filt.base.size)
    print(' Best filter')
    if full:
        # full[4] == 0: min-amplitude criterion; else max-offset criterion.
        if full[4] == 0:
            print(' > Min field : %g' % full[1])
        else:
            r = 1/full[1]
            print(' > Max r : %g' % r)
        spacing = full[0][0]
        shift = full[0][1]
    else:
        # Derive spacing/shift from the (log-equidistant) base.
        n = filt.base.size
        a = filt.base[-1]
        b = filt.base[-2]
        spacing = np.log(a) - np.log(b)
        shift = np.log(a) - spacing*(n//2)
    print(' > Spacing : %1.10g' % spacing)
    print(' > Shift : %1.10g' % shift)
    print(' > Base min/max : %e / %e' % (filt.base.min(), filt.base.max()))
r Print best filter information .
24,649
def _call_qc_transform_pairs(n, ispacing, ishift, fI, fC, r, r_def, reim):
    r"""QC the input transform pairs (plot fC and fI over their ranges)."""
    print('* QC: Input transform-pairs:')
    print(' fC: x-range defined through ``n``, ``spacing``, ``shift``, and ' +
          '``r``-parameters; b-range defined through ``r``-parameter.')
    print(' fI: x- and b-range defined through ``n``, ``spacing``' +
          ', ``shift``, and ``r_def``-parameters.')
    # Extremes of the searched spacing/shift ranges.
    minspace = np.arange(*ispacing).min()
    maxspace = np.arange(*ispacing).max()
    minshift = np.arange(*ishift).min()
    maxshift = np.arange(*ishift).max()
    # Largest/smallest possible filter-base values over the search space.
    maxbase = np.exp(maxspace*(n//2) + maxshift)
    minbase = np.exp(maxspace*(-n//2 + 1) + minshift)
    # Wavenumber range for fC, from base extremes and r.
    kmax = maxbase/r.min()
    kmin = minbase/r.max()
    k = np.logspace(np.log10(kmin), np.log10(kmax) + minspace, r.size)
    # r- and k-range for fI, extended by r_def.
    rI = np.logspace(np.log10(1/maxbase) - r_def[0],
                     np.log10(1/minbase) + r_def[1], r_def[2]*n)
    kmaxI = maxbase/rI.min()
    kminI = minbase/rI.max()
    kI = np.logspace(np.log10(kminI), np.log10(kmaxI) + minspace, r_def[2]*n)
    # Two rows of panels: fC on top, fI (real or imag part) below.
    fig, axs = plt.subplots(figsize=(9.5, 6), nrows=2, ncols=2,
                            num="Transform pairs")
    axs = axs.ravel()
    plt.subplots_adjust(wspace=.3, hspace=.4)
    _plot_transform_pairs(fC, r, k, axs[:2], 'fC')
    if reim == np.real:
        tit = 'RE(fI)'
    else:
        tit = 'IM(fI)'
    _plot_transform_pairs(fI, rI, kI, axs[2:], tit)
    fig.canvas.draw()
    plt.show()
r QC the input transform pairs .
24,650
def _plot_transform_pairs(fCI, r, k, axes, tit):
    r"""Plot the input transform pairs (lhs in k, rhs in r).

    Also overlays the rhs obtained with the known default filters as a
    reference.
    """
    # Left axis: |lhs| over wavenumber k.
    plt.sca(axes[0])
    plt.title('|' + tit + ' lhs|')
    for f in fCI:
        if f.name == 'j2':
            # 'j2' pairs return the (J0, J1) kernel parts separately.
            lhs = f.lhs(k)
            plt.loglog(k, np.abs(lhs[0]), lw=2, label='j0')
            plt.loglog(k, np.abs(lhs[1]), lw=2, label='j1')
        else:
            plt.loglog(k, np.abs(f.lhs(k)), lw=2, label=f.name)
    if tit != 'fC':
        plt.xlabel('l')
    plt.legend(loc='best')
    # Right axis: |rhs| over r.
    plt.sca(axes[1])
    plt.title('|' + tit + ' rhs|')
    for f in fCI:
        if tit == 'fC':
            # fC.rhs holds pre-evaluated data; fI.rhs is a callable.
            plt.loglog(r, np.abs(f.rhs), lw=2, label=f.name)
        else:
            plt.loglog(r, np.abs(f.rhs(r)), lw=2, label=f.name)
    # Overlay the DLF result with the appropriate known filter as QC.
    for f in fCI:
        if f.name[1] in ['0', '1', '2']:
            filt = j0j1filt()
        else:
            filt = sincosfilt()
        kk = filt.base/r[:, None]
        if f.name == 'j2':
            lhs = f.lhs(kk)
            kr0 = np.dot(lhs[0], getattr(filt, 'j0'))/r
            kr1 = np.dot(lhs[1], getattr(filt, 'j1'))/r**2
            kr = kr0 + kr1
        else:
            kr = np.dot(f.lhs(kk), getattr(filt, f.name))/r
        plt.loglog(r, np.abs(kr), '-.', lw=2, label=filt.name)
    if tit != 'fC':
        plt.xlabel('r')
    plt.legend(loc='best')
r Plot the input transform pairs .
24,651
def _plot_inversion(f, rhs, r, k, imin, spacing, shift, cvar):
    r"""QC one inversion step: theoretical lhs/rhs vs this filter's rhs."""
    if not plt:
        print(plt_msg)
        return
    plt.figure("Inversion result " + f.name, figsize=(9.5, 4))
    plt.subplots_adjust(wspace=.3, bottom=0.2)
    plt.clf()
    # k-values for the theoretical lhs curve.
    tk = np.logspace(np.log10(k.min()), np.log10(k.max()), r.size)
    plt.suptitle(f.name + '; Spacing ::' + str(spacing) + '; Shift ::' +
                 str(shift))
    plt.subplot(121)
    plt.title('|lhs|')
    if f.name == 'j2':
        lhs = f.lhs(tk)
        plt.loglog(tk, np.abs(lhs[0]), lw=2, label='Theoretical J0')
        plt.loglog(tk, np.abs(lhs[1]), lw=2, label='Theoretical J1')
    else:
        plt.loglog(tk, np.abs(f.lhs(tk)), lw=2, label='Theoretical')
    plt.xlabel('l')
    plt.legend(loc='best')
    plt.subplot(122)
    plt.title('|rhs|')
    plt.loglog(r, np.abs(f.rhs), lw=2, label='Theoretical')
    plt.loglog(r, np.abs(rhs), '-.', lw=2, label='This filter')
    # Mark the point at which the error criterion is reached.
    if cvar == 'amp':
        label = 'Min. Amp'
    else:
        label = 'Max. r'
    plt.loglog(r[imin], np.abs(rhs[imin]), 'go', label=label)
    plt.xlabel('r')
    plt.legend(loc='best')
    plt.gcf().canvas.draw()
    plt.show()
r QC the resulting filter .
24,652
def empy_hankel(ftype, zsrc, zrec, res, freqtime, depth=None, aniso=None,
                epermH=None, epermV=None, mpermH=None, mpermV=None,
                htarg=None, verblhs=0, verbrhs=0):
    r"""Numerical transform pair with empymod.

    Builds a Ghosh transform pair (``ftype`` in {'j0', 'j1', 'j2'}): the lhs
    is the wavenumber-domain kernel from ``dipole_k``, the rhs the
    space-domain response from ``dipole`` (QWE Hankel transform).
    """
    # Recurse if several ftypes are requested.
    if isinstance(ftype, list):
        out = []
        for f in ftype:
            out.append(empy_hankel(f, zsrc, zrec, res, freqtime, depth,
                                   aniso, epermH, epermV, mpermH, mpermV,
                                   htarg, verblhs, verbrhs))
        return out
    # Common model parameters.
    model = {'src': [0, 0, zsrc], 'depth': depth, 'res': res,
             'aniso': aniso, 'epermH': epermH, 'epermV': epermV,
             'mpermH': mpermH, 'mpermV': mpermV}
    # Source-receiver configuration (ab) and azimuth per Bessel order.
    if ftype == 'j0':
        model['ab'] = 11
        x = 1/np.sqrt(2)
        y = 1/np.sqrt(2)
    elif ftype == 'j1':
        model['ab'] = 31
        x = 1
        y = 0
    elif ftype == 'j2':
        model['ab'] = 12
        x = 1/np.sqrt(2)
        y = 1/np.sqrt(2)

    def rhs(r):
        r"""Space-domain reference response at offsets r."""
        out = dipole(rec=[r*x, r*y, zrec], ht='qwe', xdirect=True,
                     verb=verbrhs, htarg=htarg, freqtime=freqtime, **model)
        return out

    def lhs(k):
        r"""Wavenumber-domain kernel(s) at wavenumbers k."""
        lhs0, lhs1 = dipole_k(rec=[x, y, zrec], wavenumber=k, verb=verblhs,
                              freq=freqtime, **model)
        if ftype == 'j0':
            return lhs0
        elif ftype == 'j1':
            return lhs1
        elif ftype == 'j2':
            return (lhs0, lhs1)

    return Ghosh(ftype, lhs, rhs)
r Numerical transform pair with empymod .
24,653
def _get_min_val(spaceshift, *params):
    r"""Calculate minimum resolved amplitude or maximum r.

    Cost function for the (spacing, shift) optimization: designs a filter,
    applies it to each check pair fC, and returns the worst value (np.inf if
    nothing is resolved at all).
    """
    spacing, shift = spaceshift
    n, fI, fC, r, r_def, error, reim, cvar, verb, plot, log = params
    # Design a filter for this (spacing, shift).
    dlf = _calculate_filter(n, spacing, shift, fI, r_def, reim, 'filt')
    # Wavenumbers at which the check pairs are evaluated.
    k = dlf.base/r[:, None]
    for i, f in enumerate(fC):
        lhs = f.lhs(k)
        if f.name == 'j2':
            rhs0 = np.dot(lhs[0], getattr(dlf, 'j0'))/r
            rhs1 = np.dot(lhs[1], getattr(dlf, 'j1'))/r**2
            rhs = rhs0 + rhs1
        else:
            rhs = np.dot(lhs, getattr(dlf, f.name))/r
        # First index at which the relative error exceeds the threshold.
        rel_error = np.abs((rhs - f.rhs)/f.rhs)
        imin0 = np.where(rel_error > error)[0]
        if np.all(rhs == 0) or np.all(np.isnan(rhs)):
            # Complete failure of this filter.
            imin0 = 0
        elif imin0.size == 0:
            # All data within error: use the last point and warn once.
            imin0 = rhs.size - 1
            if verb > 0 and log['warn-r'] == 0:
                print('* WARNING :: all data have error < ' + str(error) +
                      '; choose larger r or set error-level higher.')
                log['warn-r'] = 1
        else:
            # Back off a few samples from the first failing index.
            if imin0.size > 4:
                imin0 = np.max([0, imin0[4] - 5])
            else:
                imin0 = np.max([0, imin0[0] - 1])
        # Value to optimize: amplitude at imin0, or inverse max offset.
        if cvar == 'amp':
            min_val0 = np.abs(rhs[imin0])
        else:
            min_val0 = 1/r[imin0]
        # Keep the worst value over all check pairs.
        if i == 0:
            imin = dc(imin0)
            min_val = dc(min_val0)
        else:
            if min_val0 > min_val:
                min_val = dc(min_val0)
                imin = dc(imin0)
        if plot > 2:
            _plot_inversion(f, rhs, r, k, imin0, spacing, shift, cvar)
    if verb > 1:
        log = _print_count(log)
    # imin == 0 means nothing resolved -> infinite cost.
    return np.where(imin == 0, np.inf, min_val)
r Calculate minimum resolved amplitude or maximum r .
24,654
def _calculate_filter(n, spacing, shift, fI, r_def, reim, name):
    r"""Calculate filter for this spacing, shift, n.

    Solves the over-determined DLF design system (one per fI pair) in a
    least-squares sense via QR decomposition.
    """
    # Log-equidistant base for this spacing/shift.
    base = np.exp(spacing*(np.arange(n) - n//2) + shift)
    # r-range determined by base and r_def padding.
    r = np.logspace(np.log10(1/np.max(base)) - r_def[0],
                    np.log10(1/np.min(base)) + r_def[1], r_def[2]*n)
    # Wavenumber matrix, shape (r.size, base.size).
    k = base/r[:, None]
    dlf = DigitalFilter(name.split('.')[0])
    dlf.base = base
    # Geometric spacing factor of the base (rounded for stability).
    dlf.factor = np.around(np.average(base[1:]/base[:-1]), 15)
    for f in fI:
        lhs = reim(f.lhs(k))
        rhs = reim(f.rhs(r)*r)
        try:
            # QR-based least-squares solve of lhs * J = rhs.
            qq, rr = np.linalg.qr(lhs)
            J = np.linalg.solve(rr, rhs.dot(qq))
        except np.linalg.LinAlgError:
            # Singular system: fall back to a zero filter.
            J = np.zeros((base.size, ))
        setattr(dlf, f.name, J)
    return dlf
r Calculate filter for this spacing shift n .
24,655
def _print_count(log):
    r"""Print run-count information.

    Progress reporting for the brute-force phase (percentage and time
    estimate) and, beyond ``totnr``, for subsequent fmin calls.
    """
    log['cnt2'] += 1                     # Current call number.
    cp = log['cnt2']/log['totnr']*100    # Percentage done.
    if log['cnt2'] == 0:
        pass
    elif log['cnt2'] > log['totnr']:
        # Past the brute-force phase: these are fmin calls.
        print(" fmin fct calls : %d" % (log['cnt2'] - log['totnr']),
              end='\r')
    elif int(cp) > log['cnt1'] or cp < 1 or log['cnt2'] == log['totnr']:
        # Only print when the integer percentage advanced (or early on).
        sec = int(default_timer() - log['time'])
        tleft = str(timedelta(seconds=int(100*sec/cp - sec)))
        pstr = (" brute fct calls : %d/%d" % (log['cnt2'], log['totnr']))
        if log['totnr'] > 100:
            pstr += (" (%d %%); est: %s " % (cp, tleft))
        print(pstr, end='\r')
        if log['cnt2'] == log['totnr']:
            # Clear the progress line and print the final count.
            print(" "*len(pstr), end='\r')
            print(" brute fct calls : %d" % log['totnr'])
        log['cnt1'] = cp
    return log
r Print run - count information .
24,656
def wavenumber(zsrc, zrec, lsrc, lrec, depth, etaH, etaV, zetaH, zetaV,
               lambd, ab, xdirect, msrc, mrec, use_ne_eval):
    r"""Calculate wavenumber-domain solution.

    Splits the TM/TE Green's functions into the kernels PJ0, PJ1, PJ0b
    according to which Bessel order each ab-configuration requires.  Sign
    conventions follow the ab/msrc/mrec configuration.
    """
    # TM- and TE-mode Green's functions.
    PTM, PTE = greenfct(zsrc, zrec, lsrc, lrec, depth, etaH, etaV, zetaH,
                        zetaV, lambd, ab, xdirect, msrc, mrec, use_ne_eval)
    PJ0 = None
    PJ1 = None
    PJ0b = None
    Ptot = (PTM + PTE)/(4*np.pi)
    # Magnetic receivers flip the overall sign.
    if mrec:
        sign = -1
    else:
        sign = 1
    if ab in [11, 12, 21, 22, 14, 24, 15, 25]:
        if ab in [14, 22]:
            sign *= -1
        PJ0b = sign/2*Ptot*lambd
        PJ1 = -sign*Ptot
        if ab in [11, 22, 24, 15]:
            if ab in [22, 24]:
                sign *= -1
            PJ0 = sign*(PTM - PTE)/(8*np.pi)*lambd
    elif ab in [13, 23, 31, 32, 34, 35, 16, 26]:
        PJ1 = sign*Ptot*lambd*lambd
        if ab in [34, 26]:
            PJ1 *= -1
    elif ab in [33, ]:
        PJ0 = sign*Ptot*lambd*lambd*lambd
    return PJ0, PJ1, PJ0b
r Calculate wavenumber domain solution .
24,657
def reflections(depth, e_zH, Gam, lrec, lsrc, use_ne_eval):
    r"""Calculate Rp, Rm.

    Recursive computation of the layered-medium reflection coefficients in
    both directions, optionally via numexpr (``use_ne_eval``).  The order of
    the recursion is significant; do not reorder.
    """
    # Two passes: plus (pm=+1) and minus (pm=-1) direction.
    for plus in [True, False]:
        if plus:
            pm = 1
            layer_count = np.arange(depth.size - 2, min(lrec, lsrc) - 1, -1)
            izout = abs(lsrc - lrec)
            minmax = max(lrec, lsrc)
        else:
            pm = -1
            layer_count = np.arange(1, max(lrec, lsrc) + 1, 1)
            izout = 0
            minmax = -min(lrec, lsrc)
        # Shift storage index if src/rec are in the outermost layers.
        shiftplus = lrec < lsrc and lrec == 0 and not plus
        shiftminus = lrec > lsrc and lrec == depth.size - 1 and plus
        if shiftplus or shiftminus:
            izout -= pm
        # Storage for the coefficients between the src and rec layers.
        Ref = np.zeros((Gam.shape[0], Gam.shape[1], abs(lsrc - lrec) + 1,
                        Gam.shape[3]), dtype=complex)
        for iz in layer_count:
            e_zHa = e_zH[:, None, iz + pm, None]
            Gama = Gam[:, :, iz, :]
            e_zHb = e_zH[:, None, iz, None]
            Gamb = Gam[:, :, iz + pm, :]
            # Local (interface) reflection coefficient.
            if use_ne_eval:
                rlocstr = "(e_zHa*Gama - e_zHb*Gamb)/(e_zHa*Gama + e_zHb*Gamb)"
                rloc = use_ne_eval(rlocstr)
            else:
                rloca = e_zHa*Gama
                rlocb = e_zHb*Gamb
                rloc = (rloca - rlocb)/(rloca + rlocb)
            if iz == layer_count[0]:
                # Start of the recursion.
                tRef = rloc.copy()
            else:
                # Recursive update with the layer's propagation term.
                ddepth = depth[iz + 1 + pm] - depth[iz + pm]
                if use_ne_eval:
                    term = use_ne_eval("tRef*exp(-2*Gamb*ddepth)")
                    tRef = use_ne_eval("(rloc + term)/(1 + rloc*term)")
                else:
                    term = tRef*np.exp(-2*Gamb*ddepth)
                    tRef = (rloc + term)/(1 + rloc*term)
            # Store coefficients between source and receiver layers.
            if lrec != lsrc and pm*iz <= minmax:
                Ref[:, :, izout, :] = tRef[:]
                izout -= pm
        if lsrc == lrec and layer_count.size > 0:
            Ref = tRef
        # NOTE: the 'plus' pass yields Rm and vice versa -- naming follows
        # the surrounding code's convention.
        if plus:
            Rm = Ref
        else:
            Rp = Ref
    return Rm, Rp
r Calculate Rp Rm .
24,658
def angle_factor(angle, ab, msrc, mrec):
    r"""Return the angle-dependent factor.

    For ``ab == 33`` the factor is one everywhere.  Otherwise a cosine or
    sine of the (possibly doubled, possibly pi-shifted) angle, where values
    at the exact zero-crossings are forced to 0 to suppress tiny numerical
    residuals.
    """
    # The vertical-vertical component has no azimuth dependence.
    if ab in [33, ]:
        return np.ones(angle.size)

    # Work on a copy; rotate by pi when only the receiver is magnetic.
    ang = angle.copy()
    if mrec and not msrc:
        ang += np.pi

    # These configurations use cosine; all remaining ones use sine.
    if ab in [11, 22, 15, 24, 13, 31, 26, 35]:
        trig = np.cos
        zero_a = np.pi/2
        zero_b = 3*np.pi/2
    else:
        trig = np.sin
        zero_a = np.pi
        zero_b = 2*np.pi

    # These configurations depend on the double angle.
    if ab in [11, 22, 15, 24, 12, 21, 14, 25]:
        ang *= 2

    fact = trig(ang)
    # Force exact zeros at the trig function's zero crossings.
    fact[np.isclose(np.abs(ang), zero_a, 1e-10, 1e-14)] = 0
    fact[np.isclose(np.abs(ang), zero_b, 1e-10, 1e-14)] = 0
    return fact
r Return the angle - dependent factor .
24,659
def versions(mode=None, add_pckg=None, ncol=4):
    r"""Deprecated functional wrapper; use the ``Versions`` class instead.

    Kept only for backwards compatibility; ``mode`` is ignored.
    """
    warnings.warn(
            "\n Func `versions` is deprecated and will "
            "be removed; use Class `Versions` instead.",
            DeprecationWarning)
    return Versions(add_pckg, ncol)
r Old func - way of class Versions here for backwards compatibility .
24,660
def _repr_html_(self):
    """HTML-rendered versions information (picked up by Jupyter)."""
    ncol = int(self.ncol)
    # Shared cell-style fragment (note: closes an opened style attribute).
    border = "border: 2px solid #fff;'"

    def colspan(html, txt, ncol, nrow):
        r"""Append a row with txt spanning the whole table width."""
        html += " <tr>\n"
        html += " <td style='text-align: center; "
        if nrow == 0:
            html += "font-weight: bold; font-size: 1.2em; "
        elif nrow % 2 == 0:
            html += "background-color: #ddd;"
        html += border + " colspan='"
        html += str(2*ncol) + "'>%s</td>\n" % txt
        html += " </tr>\n"
        return html

    def cols(html, version, name, ncol, i):
        r"""Append a version/name cell pair, wrapping rows every ncol."""
        if i > 0 and i % ncol == 0:
            html += " </tr>\n"
            html += " <tr>\n"
        html += " <td style='text-align: right; background-color: "
        html += "#ccc; " + border + ">%s</td>\n" % version
        html += " <td style='text-align: left; "
        html += border + ">%s</td>\n" % name
        return html, i + 1

    html = "<table style='border: 3px solid #ddd;'>\n"
    # Header row: current date/time.
    html = colspan(html, time.strftime('%a %b %d %H:%M:%S %Y %Z'), ncol, 0)
    html += " <tr>\n"
    # System info, then one cell pair per reported package.
    html, i = cols(html, platform.system(), 'OS', ncol, 0)
    html, i = cols(html, multiprocessing.cpu_count(), 'CPU(s)', ncol, i)
    for pckg in self._get_packages(self.add_pckg):
        html, i = cols(html, pckg.__version__, pckg.__name__, ncol, i)
    # Pad the last row with empty cell pairs so columns line up.
    while i % ncol != 0:
        html += " <td style= " + border + "></td>\n"
        html += " <td style= " + border + "></td>\n"
        i += 1
    html += " </tr>\n"
    # Footer rows: Python version and, if available, MKL info.
    html = colspan(html, sys.version, ncol, 1)
    if mklinfo:  # module-level global; presumably mkl version info -- confirm
        html = colspan(html, mklinfo, ncol, 2)
    html += "</table>"
    return html
HTML - rendered versions information .
24,661
def _get_packages(add_pckg):
    r"""Assemble the list of package modules to report versions for."""
    # Mandatory packages first.
    pckgs = [numpy, scipy, empymod]
    # Optional soft dependencies: only added when importable (non-None).
    pckgs += [mod for mod in [IPython, numexpr, matplotlib] if mod]
    # Normalize any user-supplied additions to a list and append them.
    if add_pckg is not None:
        if isinstance(add_pckg, tuple):
            add_pckg = list(add_pckg)
        if not isinstance(add_pckg, list):
            add_pckg = [add_pckg, ]
        pckgs += add_pckg
    return pckgs
r Create list of packages .
24,662
def tofile(self, path='filters'):
    r"""Save filter values to ascii files in ``path``.

    Writes ``<savename>_base.txt`` plus one file per available filter
    component (j0, j1, sin, cos).
    """
    storedir = os.path.abspath(path)
    os.makedirs(storedir, exist_ok=True)
    # Base frequencies.
    basefile = os.path.join(storedir, self.savename + '_base.txt')
    with open(basefile, 'w') as fh:
        self.base.tofile(fh, sep="\n")
    # Each filter component that this filter defines.
    for attr in ['j0', 'j1', 'sin', 'cos']:
        if hasattr(self, attr):
            fname = os.path.join(storedir,
                                 self.savename + '_' + attr + '.txt')
            with open(fname, 'w') as fh:
                getattr(self, attr).tofile(fh, sep="\n")
r Save filter values to ascii - files .
24,663
def fromfile(self, path='filters'):
    r"""Load filter values from ascii files in ``path``.

    Reads ``<savename>_base.txt`` (required) and whichever component files
    (j0, j1, sin, cos) exist, then re-derives the base's spacing factor.
    """
    loaddir = os.path.abspath(path)
    # Base frequencies (must exist).
    with open(os.path.join(loaddir, self.savename + '_base.txt'), 'r') as fh:
        self.base = np.fromfile(fh, sep="\n")
    # Optional components: load only the files that are present.
    for attr in ['j0', 'j1', 'sin', 'cos']:
        fname = os.path.join(loaddir, self.savename + '_' + attr + '.txt')
        if os.path.isfile(fname):
            with open(fname, 'r') as fh:
                setattr(self, attr, np.fromfile(fh, sep="\n"))
    # Re-derive the (rounded) geometric spacing factor of the base.
    self.factor = np.around(np.average(self.base[1:]/self.base[:-1]), 15)
r Load filter values from ascii - files .
24,664
def fht(zsrc, zrec, lsrc, lrec, off, factAng, depth, ab, etaH, etaV, zetaH,
        zetaV, xdirect, fhtarg, use_ne_eval, msrc, mrec):
    r"""Hankel Transform using the Digital Linear Filter method.

    Returns ``(fEM, kcount, conv)``; DLF always converges, hence (1, True).
    """
    # Unpack the DLF arguments.
    fhtfilt = fhtarg[0]
    pts_per_dec = fhtarg[1]
    lambd = fhtarg[2]
    int_pts = fhtarg[3]
    # Wavenumber-domain kernels.
    PJ = kernel.wavenumber(zsrc, zrec, lsrc, lrec, depth, etaH, etaV, zetaH,
                           zetaV, lambd, ab, xdirect, msrc, mrec, use_ne_eval)
    # Carry out the digital linear filter.
    fEM = dlf(PJ, lambd, off, fhtfilt, pts_per_dec, factAng=factAng, ab=ab,
              int_pts=int_pts)
    return fEM, 1, True
r Hankel Transform using the Digital Linear Filter method .
24,665
def hquad(zsrc, zrec, lsrc, lrec, off, factAng, depth, ab, etaH, etaV, zetaH,
          zetaV, xdirect, quadargs, use_ne_eval, msrc, mrec):
    r"""Hankel Transform using the QUADPACK library.

    Pre-computes the kernels on a log-spaced lambda grid, splines their
    real/imaginary parts, and integrates per offset with scipy's quad.
    """
    rtol, atol, limit, a, b, pts_per_dec = quadargs
    # Log-spaced lambdas on which to pre-compute the kernels.
    la = np.log10(a)
    lb = np.log10(b)
    ilambd = np.logspace(la, lb, (lb - la)*pts_per_dec + 1)
    PJ0, PJ1, PJ0b = kernel.wavenumber(zsrc, zrec, lsrc, lrec, depth, etaH,
                                       etaV, zetaH, zetaV,
                                       np.atleast_2d(ilambd), ab, xdirect,
                                       msrc, mrec, use_ne_eval)
    # Spline real/imag parts of each kernel; None if the kernel is absent.
    if PJ0 is not None:
        sPJ0r = iuSpline(np.log(ilambd), PJ0.real)
        sPJ0i = iuSpline(np.log(ilambd), PJ0.imag)
    else:
        sPJ0r = None
        sPJ0i = None
    if PJ1 is not None:
        sPJ1r = iuSpline(np.log(ilambd), PJ1.real)
        sPJ1i = iuSpline(np.log(ilambd), PJ1.imag)
    else:
        sPJ1r = None
        sPJ1i = None
    if PJ0b is not None:
        sPJ0br = iuSpline(np.log(ilambd), PJ0b.real)
        sPJ0bi = iuSpline(np.log(ilambd), PJ0b.imag)
    else:
        sPJ0br = None
        sPJ0bi = None
    # Quadrature per offset; conv accumulates overall convergence.
    fEM = np.zeros(off.size, dtype=complex)
    conv = True
    iinp = {'a': a, 'b': b, 'epsabs': atol, 'epsrel': rtol, 'limit': limit}
    for i in range(off.size):
        fEM[i], tc = quad(sPJ0r, sPJ0i, sPJ1r, sPJ1i, sPJ0br, sPJ0bi, ab,
                          off[i], factAng[i], iinp)
        conv *= tc
    return fEM, 1, conv
r Hankel Transform using the QUADPACK library .
24,666
def ffht(fEM, time, freq, ftarg):
    r"""Fourier Transform using the Digital Linear Filter method."""
    # Unpack the DLF arguments.
    ffhtfilt = ftarg[0]
    pts_per_dec = ftarg[1]
    kind = ftarg[2]
    # pts_per_dec == 0: responses come flattened; reshape per output time.
    if pts_per_dec == 0:
        fEM = fEM.reshape(time.size, -1)
    tEM = dlf(fEM, 2*np.pi*freq, time, ffhtfilt, pts_per_dec, kind=kind)
    return tEM, True
r Fourier Transform using the Digital Linear Filter method .
24,667
def fft(fEM, time, freq, ftarg):
    r"""Fourier Transform using the Fast Fourier Transform."""
    dfreq, nfreq, ntot, pts_per_dec = ftarg
    # If pts_per_dec given: interpolate onto the required regular grid.
    if pts_per_dec:
        sfEMr = iuSpline(np.log(freq), fEM.real)
        sfEMi = iuSpline(np.log(freq), fEM.imag)
        freq = np.arange(1, nfreq + 1)*dfreq
        fEM = sfEMr(np.log(freq)) + 1j*sfEMi(np.log(freq))
    # Pad to ntot samples for the FFT.
    fEM = np.pad(fEM, (0, ntot - nfreq), 'linear_ramp')
    # Hermitian mirror then inverse FFT -> real-valued time signal.
    ifftEM = fftpack.ifft(np.r_[fEM[1:], 0, fEM[::-1].conj()]).real
    stEM = 2*ntot*fftpack.fftshift(ifftEM*dfreq, 0)
    dt = 1/(2*ntot*dfreq)
    # Interpolate the regular-dt result onto the requested times.
    ifEM = iuSpline(np.linspace(-ntot, ntot - 1, 2*ntot)*dt, stEM)
    # NOTE(review): `/2*np.pi` multiplies by pi/2 due to operator
    # precedence; confirm against upstream whether that is intended.
    tEM = ifEM(time)/2*np.pi
    return tEM, True
r Fourier Transform using the Fast Fourier Transform .
24,668
def quad(sPJ0r, sPJ0i, sPJ1r, sPJ1i, sPJ0br, sPJ0bi, ab, off, factAng, iinp):
    r"""Quadrature for Hankel transform (one offset).

    Integrates the splined kernel parts against the relevant Bessel
    functions; real and imaginary parts are integrated separately.
    """

    def quad_PJ0(klambd, sPJ0, koff):
        r"""Integrand for the PJ0 kernel (Bessel J0)."""
        return sPJ0(np.log(klambd))*special.j0(koff*klambd)

    def quad_PJ1(klambd, sPJ1, ab, koff, kang):
        r"""Integrand for the PJ1 kernel (Bessel J1)."""
        tP1 = kang*sPJ1(np.log(klambd))
        # These ab-configurations carry an extra 1/offset factor.
        if ab in [11, 12, 21, 22, 14, 24, 15, 25]:
            tP1 /= koff
        return tP1*special.j1(koff*klambd)

    def quad_PJ0b(klambd, sPJ0b, koff, kang):
        r"""Integrand for the angle-dependent PJ0b kernel (Bessel J0)."""
        return kang*sPJ0b(np.log(klambd))*special.j0(koff*klambd)

    conv = True
    out = np.array(0.0 + 0.0j)
    # full_output=1 so a 4th tuple element signals non-convergence.
    iinp['full_output'] = 1
    if sPJ0r is not None:
        re = integrate.quad(quad_PJ0, args=(sPJ0r, off), **iinp)
        im = integrate.quad(quad_PJ0, args=(sPJ0i, off), **iinp)
        out += re[0] + 1j*im[0]
        # NOTE(review): `(len(re) or len(im)) > 3` evaluates as
        # `len(re) > 3` unless `re` is empty; presumably
        # `len(re) > 3 or len(im) > 3` was intended -- confirm upstream.
        if (len(re) or len(im)) > 3:
            conv = False
    if sPJ1r is not None:
        re = integrate.quad(quad_PJ1, args=(sPJ1r, ab, off, factAng), **iinp)
        im = integrate.quad(quad_PJ1, args=(sPJ1i, ab, off, factAng), **iinp)
        out += re[0] + 1j*im[0]
        if (len(re) or len(im)) > 3:
            conv = False
    if sPJ0br is not None:
        re = integrate.quad(quad_PJ0b, args=(sPJ0br, off, factAng), **iinp)
        im = integrate.quad(quad_PJ0b, args=(sPJ0bi, off, factAng), **iinp)
        out += re[0] + 1j*im[0]
        if (len(re) or len(im)) > 3:
            conv = False
    return out, conv
r Quadrature for Hankel transform .
24,669
def get_spline_values(filt, inp, nr_per_dec=None):
    r"""Return required calculation points.

    ``nr_per_dec == 0``: standard DLF (no interpolation);
    ``nr_per_dec < 0``: lagged convolution (spacing fixed by the filter);
    ``nr_per_dec > 0``: splined DLF with that many points per decade.

    NOTE(review): the default ``nr_per_dec=None`` would raise on the
    ``nr_per_dec < 0`` comparison under Python 3; callers apparently always
    pass an int -- confirm.
    """
    # Standard DLF.
    if nr_per_dec == 0:
        return filt.base/inp[:, None], inp
    # Min and max required output values (depend on filter base and input).
    outmax = filt.base[-1]/inp.min()
    outmin = filt.base[0]/inp.max()
    # Number of output points and spacing.
    if nr_per_dec < 0:
        # Lagged: spacing dictated by the filter's own factor.
        pts_per_dec = 1/np.log(filt.factor)
        nout = int(np.ceil(np.log(outmax/outmin)*pts_per_dec) + 1)
    else:
        pts_per_dec = nr_per_dec
        nout = int(np.ceil(np.log10(outmax/outmin)*pts_per_dec) + 1)
    # Enforce minimum number of points.
    if nr_per_dec < 0:
        if nout - filt.base.size < 3:
            nout = filt.base.size + 3
    else:
        if nout < 4:
            nout = 4
    # Calculate output and (new) input values.
    if nr_per_dec < 0:
        out = np.exp(np.arange(np.log(outmin),
                               np.log(outmin) + nout/pts_per_dec,
                               1/pts_per_dec))
        new_inp = inp.max()*np.exp(-np.arange(nout - filt.base.size + 1) /
                                   pts_per_dec)
    else:
        out = 10**np.arange(np.log10(outmin),
                            np.log10(outmin) + nout/pts_per_dec,
                            1/pts_per_dec)
        new_inp = filt.base/inp[:, None]
    return np.atleast_2d(out), new_inp
r Return required calculation points .
24,670
def fhti(rmin, rmax, n, q, mu):
    r"""Return parameters required for FFTLog.

    Computes the required frequencies, output times, log-sample spacing,
    and the low-ringing ``kr`` for the FFTLog algorithm with bias ``q`` and
    order ``mu``.
    """
    # Central point (log10 of r) and sample spacing.
    logrc = (rmin + rmax)/2
    nc = (n + 1)/2.
    dlogr = (rmax - rmin)/n
    dlnr = dlogr*np.log(10.)

    # Low-ringing condition: adjust kr so the phase is near a multiple of
    # pi at the Nyquist frequency (uses the log-gamma of the kernel).
    y = 1j*np.pi/(2.0*dlnr)
    zp = special.loggamma((mu + 1.0 + q)/2.0 + y)
    zm = special.loggamma((mu + 1.0 - q)/2.0 + y)
    arg = np.log(2.0)/dlnr + (zp.imag + zm.imag)/np.pi
    kr = np.exp((arg - np.round(arg))*dlnr)

    # Sample offsets from the central point.
    offsets = np.arange(1, n + 1) - nc

    # Required frequencies and corresponding output times.
    freq = 10**(logrc + offsets*dlogr)/(2*np.pi)
    logkc = np.log10(kr) - logrc
    tcalc = 10**(logkc + offsets*dlogr)

    # Scaling factor between r- and k-domain samples.
    rk = 10**(logrc - logkc)*np.pi/2

    return freq, tcalc, dlnr, kr, rk
r Return parameters required for FFTLog .
24,671
def _actual_get_cpu_info_from_cpuid(queue):
    '''
    Warning! This function has the potential to crash the Python runtime.
    Do not call it directly. Use the _get_cpu_info_from_cpuid function
    instead. It will safely call this function in another process.
    '''
    # Silence all output: this runs in a sacrificial subprocess.
    sys.stdout = open(os.devnull, 'w')
    sys.stderr = open(os.devnull, 'w')
    # Only X86 CPUs support the CPUID instruction.
    arch, bits = _parse_arch(DataSource.arch_string_raw)
    if not arch in ['X86_32', 'X86_64']:
        queue.put(_obj_to_b64({}))
        return
    cpuid = CPUID()
    # SELinux enforcing can block the executable-memory trick CPUID needs.
    if cpuid.is_selinux_enforcing:
        queue.put(_obj_to_b64({}))
        return
    # Gather the raw fields.
    max_extension_support = cpuid.get_max_extension_support()
    cache_info = cpuid.get_cache(max_extension_support)
    info = cpuid.get_info()
    processor_brand = cpuid.get_processor_brand(max_extension_support)
    # Measured Hz, then the advertised Hz parsed from the brand string.
    hz_actual = cpuid.get_raw_hz()
    hz_actual = _to_decimal_string(hz_actual)
    hz_advertised, scale = _parse_cpu_brand_string(processor_brand)
    info = {
        'vendor_id_raw': cpuid.get_vendor_id(),
        'hardware_raw': '',
        'brand_raw': processor_brand,
        'hz_advertised_friendly': _hz_short_to_friendly(hz_advertised, scale),
        'hz_actual_friendly': _hz_short_to_friendly(hz_actual, 0),
        'hz_advertised': _hz_short_to_full(hz_advertised, scale),
        'hz_actual': _hz_short_to_full(hz_actual, 0),
        'l2_cache_size': _to_friendly_bytes(cache_info['size_kb']),
        'l2_cache_line_size': cache_info['line_size_b'],
        'l2_cache_associativity': hex(cache_info['associativity']),
        'stepping': info['stepping'],
        'model': info['model'],
        'family': info['family'],
        'processor_type': info['processor_type'],
        'extended_model': info['extended_model'],
        'extended_family': info['extended_family'],
        'flags': cpuid.get_flags(max_extension_support)
    }
    # Drop empty fields; base64-serialize so the result survives the queue.
    info = {k: v for k, v in info.items() if v}
    queue.put(_obj_to_b64(info))
Warning! This function has the potential to crash the Python runtime . Do not call it directly . Use the _get_cpu_info_from_cpuid function instead . It will safely call this function in another process .
24,672
def get_cpu_info_json():
    '''
    Returns the CPU info by using the best sources of information for
    your OS. Returns the result in a json string.
    '''
    import json
    output = None
    # Frozen apps (e.g. PyInstaller) cannot re-exec this file as a script,
    # so query in-process instead.
    if getattr(sys, 'frozen', False):
        info = _get_cpu_info_internal()
        output = json.dumps(info)
        output = "{0}".format(output)
    else:
        # Otherwise run this module in a subprocess: some probing methods
        # (raw CPUID) can crash the interpreter.
        from subprocess import Popen, PIPE
        command = [sys.executable, __file__, '--json']
        p1 = Popen(command, stdout=PIPE, stderr=PIPE, stdin=PIPE)
        output = p1.communicate()[0]
        if p1.returncode != 0:
            # Probe failed: return an empty JSON object.
            return "{}"
        if not IS_PY2:
            output = output.decode(encoding='UTF-8')
    return output
Returns the CPU info by using the best sources of information for your OS . Returns the result in a json string
24,673
def get_cpu_info():
    '''
    Returns the CPU info by using the best sources of information for
    your OS. Returns the result in a dict.
    '''
    import json
    # Decode the JSON representation; _utf_to_str normalizes string types.
    return json.loads(get_cpu_info_json(), object_hook=_utf_to_str)
Returns the CPU info by using the best sources of information for your OS . Returns the result in a dict
24,674
def _verbs_with_subjects(doc):
    """Given a spacy document, return [verb, subject] pairs.

    A pair is collected for each token whose dependency is 'nsubj' and
    whose head is tagged as a verb.
    """
    return [[tok.head, tok]
            for tok in doc
            if tok.dep_ == 'nsubj' and tok.head.pos_ == 'VERB']
Given a spacy document return the verbs that have subjects
24,675
def mangle_agreement(correct_sentence):
    """Given a correct sentence, return sentence(s) with a subject-verb
    agreement error.

    Each verb form alternative (from pattern's lexeme) that differs in
    tense produces one mangled sentence.
    """
    bad_sents = []
    doc = nlp(correct_sentence)
    # All tokens tagged as a verb form (VB, VBD, VBG, ...).
    verbs = [(i, v) for (i, v) in enumerate(doc) if v.tag_.startswith('VB')]
    for i, v in verbs:
        for alt_verb in lexeme(doc[i].text):
            # Skip the original form itself.
            if alt_verb == doc[i].text:
                continue
            # Skip same-tense alternatives and negated contractions, which
            # would not introduce an agreement error.
            if (tenses(alt_verb) == tenses(v.text) or
                    (alt_verb.startswith(v.text) and
                     alt_verb.endswith("n't"))):
                continue
            # Splice the alternative verb into the sentence text.
            new_sent = str(doc[:i]) + " {} ".format(alt_verb) + str(doc[i+1:])
            new_sent = new_sent.replace(' ,', ',')
            bad_sents.append(new_sent)
    return bad_sents
Given a correct sentence return a sentence or sentences with a subject verb agreement error
24,676
def _build_trigram_indices ( trigram_index ) : result = { } trigram_count = 0 for key , val in csv . reader ( open ( trigram_index ) ) : result [ key ] = int ( val ) trigram_count += 1 return result , trigram_count
Build a dictionary of trigrams and their indices from a csv
24,677
def _begins_with_one_of(sentence, parts_of_speech):
    """Return True if the sentence or fragment begins with one of the
    given part-of-speech tags, else False."""
    first_token = nlp(sentence)[0]
    return first_token.tag_ in parts_of_speech
Return True if the sentence or fragment begins with one of the parts of speech in the list else False
24,678
def get_language_tool_feedback(sentence):
    """Get grammar matches for ``sentence`` from a LanguageTool server.

    Returns the server's ``matches`` list, or an empty list if the server
    responds with a non-2xx status.

    Raises
    ------
    requests.exceptions.ConnectionError
        If the LanguageTool server at ``LT_SERVER`` cannot be reached.
    """
    payload = {'language': 'en-US', 'text': sentence}
    try:
        r = requests.post(LT_SERVER, data=payload)
    except requests.exceptions.ConnectionError:
        # Re-raise the original exception instead of replacing it with an
        # empty new one, so the connection details are preserved.
        raise
    if 200 <= r.status_code < 300:
        return r.json().get('matches', [])
    return []
Get matches from languagetool
24,679
def is_participle_clause_fragment(sentence):
    """Supply a sentence or fragment and receive a confidence (0.0-1.0)
    that it is a participle-clause fragment."""
    # Participle clauses start with VBG/VBN, or JJ when the participle is
    # tagged as an adjective.
    if not _begins_with_one_of(sentence, ['VBG', 'VBN', 'JJ']):
        return 0.0
    if _begins_with_one_of(sentence, ['JJ']):
        doc = nlp(sentence)
        fw = [w for w in doc][0]
        # A plain adjectival modifier is not a participle clause.
        if fw.dep_ == 'amod':
            return 0.0
    if _begins_with_one_of(sentence, ['VBG']):
        doc = nlp(sentence)
        fw = [w for w in doc][0]
        # A gerund acting as the subject is a complete clause, not a
        # fragment.
        if fw.dep_.endswith('subj'):
            return 0.0
        # Same if the gerund belongs to a noun chunk.
        fc = [c for c in doc.noun_chunks]
        if str(fw) in str(fc):
            return 0.0
    # Fall back to the trained classifier's positive-class probability.
    positive_prob = models['participle'].predict([_text_to_vector(
            sentence, trigram2idx['participle'],
            trigram_count['participle'])])[0][1]
    return float(positive_prob)
Supply a sentence or fragment and receive a confidence interval .
24,680
def check(sentence):
    """Supply a sentence or fragment and receive feedback.

    Runs all detectors and fills a Feedback object; later detectors
    overwrite the human-readable message/primary error of earlier ones.
    """
    result = Feedback()
    # Run every detector up-front.
    is_missing_verb = detect_missing_verb(sentence)
    is_infinitive = detect_infinitive_phrase(sentence)
    is_participle = is_participle_clause_fragment(sentence)
    lang_tool_feedback = get_language_tool_feedback(sentence)
    subject_and_verb_agree = get_subject_verb_agreement_feedback(sentence)
    if is_missing_verb:
        result.matches['missing_verb'] = True
        result.human_readable = MISSING_VERB_ADVICE.replace('\n', '')
        result.primary_error = 'MISSING_VERB_ERROR'
        result.specific_error = 'MISSING_VERB'
    # Classifier confidence above 0.5 counts as a participle fragment.
    if is_participle > .5:
        result.matches['participle_phrase'] = is_participle
        result.human_readable = PARTICIPLE_FRAGMENT_ADVICE.replace('\n', '')
        result.primary_error = 'FRAGMENT_ERROR'
        result.specific_error = 'PARTICIPLE_PHRASE'
    if lang_tool_feedback:
        result.matches['lang_tool'] = lang_tool_feedback
        for ltf in lang_tool_feedback:
            if ltf['rule']['id'] == 'SENTENCE_FRAGMENT':
                result.human_readable = lang_tool_feedback[0]['message']
                result.primary_error = 'FRAGMENT_ERROR'
                result.specific_error = 'SUBORDINATE_CLAUSE'
    if is_infinitive:
        result.matches['infinitive_phrase'] = True
        result.human_readable = INFINITIVE_PHRASE_ADVICE.replace('\n', '')
        result.primary_error = 'INFINITIVE_PHRASE_ERROR'
        result.specific_error = 'INFINITIVE_PHRASE'
    if not subject_and_verb_agree:
        result.matches['subject_verb_agreement'] = subject_and_verb_agree
        result.human_readable = SUBJECT_VERB_AGREEMENT_ADVICE.replace('\n', '')
        result.primary_error = 'SUBJECT_VERB_AGREEMENT_ERROR'
        result.specific_error = 'SUBJECT_VERB_AGREEMENT'
    # Second LanguageTool pass: spelling, then any remaining rule.
    if lang_tool_feedback:
        result.matches['lang_tool'] = lang_tool_feedback
        for ltf in lang_tool_feedback:
            if ltf['rule']['id'] == 'MORFOLOGIK_RULE_EN_US':
                result.human_readable = ltf['message']
                result.primary_error = 'SPELLING_ERROR'
                result.specific_error = 'SPELLING_ERROR'
            if not result.primary_error:
                result.human_readable = ltf['message']
                result.primary_error = 'OTHER_ERROR'
                result.specific_error = ltf['rule']['id']
    # Nothing matched at all: the sentence is fine.
    if not result.matches:
        result.human_readable = STRONG_SENTENCE_ADVICE.replace('\n', '')
    return result
Supply a sentence or fragment and receive feedback .
24,681
def list_submissions():
    """Render the page listing past submissions with information about them."""
    subs = []
    try:
        subs = session.query(Submission).all()
    except SQLAlchemyError:
        # A failed query poisons the session; roll back and fall through to
        # rendering an empty listing rather than erroring out the page.
        session.rollback()
    return render_template('list_submissions.html', submissions=subs)
List the past submissions with information about them
24,682
def get_submissions():
    """API endpoint feeding the submissions DataTable as JSON.

    Honors the DataTables request parameters: ``search[value]`` (prefix match
    on the submission text), ``correct_filter`` (all/correct/other),
    ``order[0][column]``/``order[0][dir]`` for sorting, ``start``/``length``
    for paging, and echoes ``draw`` back to the client.
    """
    # Map the tri-state filter onto the boolean ``correct`` column.
    if request.args.get('correct_filter', 'all') == 'all':
        correct_filter = [True, False]
    elif request.args['correct_filter'] == 'correct':
        correct_filter = [True]
    else:
        correct_filter = [False]
    # DataTables identifies the ordering column by index.
    if request.args.get('order[0][column]', '0') == '0':
        column = 'id'
    elif request.args['order[0][column]'] == '1':
        column = 'text'
    else:
        column = 'primary_error'
    order_str = "{} {}".format(column, request.args.get('order[0][dir]', 'desc'))
    search_val = request.args.get('search[value]')
    draw = request.args.get('draw', 1)
    # Build the filtered query once and reuse it for both the count and the
    # page fetch (the original duplicated the filter chain; debug prints
    # removed).
    filtered = (session.query(Submission)
                .filter(Submission.text.startswith(search_val))
                .filter(Submission.correct.in_(correct_filter)))
    filtered_len = filtered.count()
    subs = (filtered.order_by(order_str)
            .offset(request.args.get('start', 0))
            .limit(request.args.get('length', 10))
            .all())
    submissions = {'draw': draw, 'recordsTotal': 0, 'recordsFiltered': 0,
                   'data': []}
    for submission in subs:
        submissions['data'].append([submission.id, submission.text,
                                    submission.primary_error,
                                    submission.correct])
    submissions['recordsTotal'] = session.query(Submission).count()
    submissions['recordsFiltered'] = filtered_len
    return jsonify(submissions)
API endpoint to get submissions in JSON format
24,683
def check_sentence():
    """Sole porcupine endpoint: grade a submitted sentence and flash feedback."""
    text = ''
    if request.method == 'POST':
        text = request.form['text']
        if not text:
            flash_message = 'No input'
        else:
            feedback = check(request.form['text'])
            # The user self-reports correctness; the submission is "correct"
            # when that self-report agrees with the checker's verdict.
            marked_correct = bool(request.form.get('is_correct'))
            has_error = bool(feedback.primary_error)
            correct = marked_correct != has_error
            sub = Submission(text=text,
                             correct=correct,
                             primary_error=feedback.primary_error,
                             specific_error=feedback.specific_error)
            session.add(sub)
            session.commit()
            if not feedback.primary_error:
                feedback.human_readable = "No errors were found."
            flash_message = feedback.human_readable
        flash(flash_message)
    return render_template('check_sentence.html', text=text)
Sole porcupine endpoint
24,684
def raise_double_modal_error(verb_phrase_doc):
    """Raise if a modal auxiliary verb directly follows another modal.

    :param verb_phrase_doc: iterable of spaCy-like tokens exposing ``tag_``.
    :raises Exception: ``DoubleModalError`` when two adjacent tokens are both
        tagged ``MD`` (e.g. "should could").
    """
    # BUGFIX: the original iterated an undefined name ``verb_phrase``, read
    # ``prev_word.tag`` instead of ``tag_``, crashed on the first token
    # (prev_word is None), and raised a bare string (a TypeError in Python 3).
    prev_word = None
    for word in verb_phrase_doc:
        if prev_word is not None and word.tag_ == 'MD' and prev_word.tag_ == 'MD':
            raise Exception('DoubleModalError')
        prev_word = word
A modal auxiliary verb should not follow another modal auxiliary verb .
24,685
def raise_modal_error(verb_phrase_doc):
    """Raise when a modal auxiliary pairs with the wrong form of have.

    :param verb_phrase_doc: spaCy-like object whose ``.text`` is the phrase.
    :raises Exception: ``ShouldCouldWouldError`` for phrases such as
        "should had" or "could has" — modals take "have", never "had"/"has".
    """
    verb_phrase = verb_phrase_doc.text.lower()
    bad_strings = ['should had', 'should has', 'could had', 'could has',
                   'would had', 'would has']
    # BUGFIX: removed a dangling, dead list literal and replaced the bare
    # string raise (a TypeError in Python 3) with a real exception.
    for bs in bad_strings:
        if bs in verb_phrase:
            raise Exception('ShouldCouldWouldError')
Given a verb phrase , raise an error if the modal auxiliary has an issue with it
24,686
def split_infinitive_warning(sentence_str):
    """Return 'SplitInfinitiveWarning' for a split infinitive, else None."""
    sent_doc = textacy.Doc(sentence_str, lang='en_core_web_lg')
    # Candidate spans: particle/adverb/verb sequences ("to boldly go").
    candidates = textacy.extract.pos_regex_matches(sent_doc,
                                                   r'<PART><ADV><VERB>')
    for span in candidates:
        if span[0].text.lower() == 'to' and span[-1].tag_ == 'VB':
            return 'SplitInfinitiveWarning'
Return a warning for a split infinitive else None
24,687
def raise_infinitive_error(sentence_str):
    """Given a string, check that all infinitives are properly formatted.

    :raises Exception: ``InfinitivePhraseError`` when "to" is followed by a
        verb that is not in its base (VB) form.
    """
    sent_doc = textacy.Doc(sentence_str, lang='en_core_web_lg')
    candidates = textacy.extract.pos_regex_matches(sent_doc,
                                                   r'<PART|ADP><VERB>')
    for span in candidates:
        if span[0].text.lower() == 'to' and span[-1].tag_ != 'VB':
            raise Exception('InfinitivePhraseError')
Given a string check that all infinitives are properly formatted
24,688
def drop_modifiers(sentence_str):
    """Return *sentence_str* with all modifier-dependency tokens removed."""
    tdoc = textacy.Doc(sentence_str, lang='en_core_web_lg')
    # Blank out each modifier with a placeholder char of equal length so the
    # character offsets of later tokens stay valid, then strip it all at once.
    placeholder = '形'
    text = tdoc.text
    for token in tdoc:
        if token.dep_.endswith('mod'):
            start = token.idx
            end = start + len(token.text)
            text = text[:start] + placeholder * len(token.text) + text[end:]
    text = text.replace(placeholder, '')
    return textacy.preprocess.normalize_whitespace(text)
Given a string drop the modifiers and return a string without them
24,689
def cluster(list_of_texts, num_clusters=3):
    """Cluster a list of texts into a predefined number of clusters.

    Uses a bag-of-words -> tf-idf -> KMeans pipeline. When fitting fails
    (e.g. too few documents), each text gets its own singleton cluster.
    """
    pipeline = Pipeline([
        ("vect", CountVectorizer()),
        ("tfidf", TfidfTransformer()),
        ("clust", KMeans(n_clusters=num_clusters)),
    ])
    try:
        return pipeline.fit_predict(list_of_texts)
    except ValueError:
        return list(range(len(list_of_texts)))
Cluster a list of texts into a predefined number of clusters .
24,690
def find_topics(token_lists, num_topics=10):
    """Find the topics in a list of texts with Latent Dirichlet Allocation.

    Words in fewer than 2 documents or in more than 70% of them are dropped
    before building the bag-of-words corpus. Returns the fitted model and
    the dictionary used to build the corpus.
    """
    vocab = Dictionary(token_lists)
    print('Number of unique words in original documents:', len(vocab))
    vocab.filter_extremes(no_below=2, no_above=0.7)
    print('Number of unique words after removing rare and common words:',
          len(vocab))
    bow_corpus = [vocab.doc2bow(tokens) for tokens in token_lists]
    lda = LdaModel(corpus=bow_corpus, id2word=vocab, num_topics=num_topics,
                   chunksize=100, passes=5, random_state=1)
    print_topics(lda)
    return lda, vocab
Find the topics in a list of texts with Latent Dirichlet Allocation .
24,691
def fetch_bookshelf(start_url, output_dir):
    """Fetch all the books off of a Gutenberg project bookshelf page.

    Each book linked from *start_url* is downloaded and saved as plain text
    under ``OUTPUT_DIR + output_dir`` with a slugified filename.

    :raises OSError: if the target directory already exists.
    """
    # BUGFIX: the original wrapped os.mkdir in ``try/except OSError as e:
    # raise(e)`` — a no-op handler; just let the error propagate.
    os.mkdir(OUTPUT_DIR + output_dir)
    r = requests.get(start_url)
    soup = bs(r.text, 'html.parser')
    book_links = soup.find_all(class_=re.compile("extiw"))
    new_links = []
    for el in book_links:
        # Interwiki links end in the numeric Gutenberg book id.
        bookid = el['href'].split('/')[-1]
        if bookid.isdigit():
            new_links.append([el.text, NEW_LINK_BASE.format(bookid, bookid)])
    allowed_chars = set('abcdefghijklmnopqrstuvwxyz-')
    for title, link in new_links:
        time.sleep(.10)  # be polite to the server between downloads
        r1 = requests.get(link)
        # Slugify the title: lowercase, dashes for whitespace, ascii letters
        # only (single O(n) pass instead of quadratic string appends).
        slug = title.lower().replace(' ', '-').replace('\n', '-')
        slug = ''.join(c for c in slug if c in allowed_chars)
        new_filename = slug[:MAX_FILENAME_LEN] + '.txt'
        with open(OUTPUT_DIR + output_dir + '/' + new_filename, 'w+') as output_file:
            output_file.write(r1.text)
    return None
Fetch all the books off of a gutenberg project bookshelf page
24,692
def lemmatize(text, lowercase=True, remove_stopwords=True):
    """Return the lemmas of the tokens in a text.

    :param lowercase: lowercase each lemma.
    :param remove_stopwords: drop spaCy stop words and project STOPWORDS.
    """
    doc = nlp(text)

    def _keep(token):
        # When stopword removal is off every token is kept; otherwise drop
        # spaCy stop words and anything in the project stop list.
        if not remove_stopwords:
            return True
        return not (token.is_stop or token.orth_.lower() in STOPWORDS)

    if lowercase:
        return [t.lemma_.lower() for t in doc if _keep(t)]
    return [t.lemma_ for t in doc if _keep(t)]
Return the lemmas of the tokens in a text .
24,693
def inflate(deflated_vector, size=5555):
    """Inflate a JSON-encoded sparse vector into a dense numpy array.

    :param deflated_vector: JSON string of the form
        ``{"indices": {"<index>": <value>, ...}}``.
    :param size: length of the dense vector; defaults to 5555, the size the
        original hard-coded (now generalized into a parameter).
    :returns: ``np.ndarray`` of shape ``(size,)``, zero everywhere except at
        the listed indices.
    """
    sparse = json.loads(deflated_vector)['indices']
    result = np.zeros(size)
    # Keys arrive as strings because JSON object keys are always strings.
    for index, value in sparse.items():
        result[int(index)] = value
    return result
Given a deflated vector , inflate it into a np array and return it
24,694
def text_to_vector(sent_str):
    """Fetch the deflated vector for *sent_str* from the vectorizer API,
    inflate it, and return the dense vector."""
    url = "{}/sva/vector".format(VECTORIZE_API)
    response = requests.get(url, params={'s': sent_str})
    return inflate(response.text)
Given a string , get its deflated vector , inflate it , then return the inflated vector
24,695
def detect_missing_verb(sentence):
    """Return True if the sentence appears to be missing a main verb.

    A well-formed sentence parses with a VB*-tagged token as its dependency
    ROOT; absence of such a token suggests a fragment.
    """
    return not any(tok.tag_.startswith('VB') and tok.dep_ == 'ROOT'
                   for tok in nlp(sentence))
Return True if the sentence appears to be missing a main verb
24,696
def detect_infinitive_phrase(sentence):
    """Given a string, return True if it is an infinitive-phrase fragment.

    Decides on the first token following "to": True when that token is a
    base-form verb acting as the ROOT, False otherwise. Falls through
    (returning None) when no token follows a "to".
    """
    if 'to' not in sentence.lower():
        return False
    previous = None
    for token in nlp(sentence):
        if previous == 'to':
            return token.dep_ == 'ROOT' and token.tag_.startswith('VB')
        previous = token.text.lower()
Given a string return true if it is an infinitive phrase fragment
24,697
def perform_srl(responses, prompt):
    """Perform semantic role labeling on a list of responses given a prompt.

    Each response is prefixed with the prompt (space-separated) before being
    sent to the AllenNLP SRL predictor in one batch.

    :returns: list of dicts with the full ``sentence``, the bare
        ``response``, and the predictor's ``srl`` output for it.
    """
    predictor = Predictor.from_path(
        "https://s3-us-west-2.amazonaws.com/allennlp/models/srl-model-2018.05.25.tar.gz")
    sentences = [{"sentence": prompt + " " + response} for response in responses]
    output = predictor.predict_batch_json(sentences)
    # BUGFIX: the reported sentence previously omitted the space separator
    # (``prompt + response``) and so did not match the text actually sent to
    # the predictor above.
    full_output = [
        {"sentence": prompt + " " + response, "response": response, "srl": srl}
        for (response, srl) in zip(responses, output)
    ]
    return full_output
Perform semantic role labeling on a list of responses given a prompt .
24,698
def detokenize(s):
    """Detokenize a string by removing spaces before punctuation.

    Reattaches ``; : , . ? !`` and the contraction suffix ``n't`` to the
    preceding word, e.g. ``"I do n't know ."`` -> ``"I don't know."``.
    """
    # BUGFIX: removed a leftover debug print; regexes are now raw strings so
    # the escapes are not subject to invalid-escape-sequence warnings.
    s = re.sub(r"\s+([;:,\.\?!])", r"\1", s)
    s = re.sub(r"\s+(n't)", r"\1", s)
    return s
Detokenize a string by removing spaces before punctuation .
24,699
def start(self):
    """Start this task executing and return immediately.

    Hands the task to its thread pool; subclasses with their own
    asynchronous launch mechanism should override this method.

    :raises TaskError: when no thread pool is attached to the task.
    """
    if not self.threadPool:
        raise TaskError("start(): nothing to start for task %s" % self)
    self.threadPool.addTask(self)
    # Yield the CPU so a pool worker gets a chance to pick the task up.
    time.sleep(0)
This method starts a task executing and returns immediately . Subclass should override this method if it has an asynchronous way to start the task and return immediately .