idx int64 0 63k | question stringlengths 61 4.03k | target stringlengths 6 1.23k |
|---|---|---|
43,900 | def newton ( f , x , verbose = False , tol = 1e-6 , maxit = 5 , jactype = 'serial' ) : if verbose : print = lambda txt : old_print ( txt ) else : print = lambda txt : None it = 0 error = 10 converged = False maxbacksteps = 30 x0 = x if jactype == 'sparse' : from scipy . sparse . linalg import spsolve as solve elif jactype == 'full' : from numpy . linalg import solve else : solve = serial_solve while it < maxit and not converged : [ v , dv ] = f ( x ) error_0 = abs ( v ) . max ( ) if error_0 < tol : if verbose : print ( "> System was solved after iteration {}. Residual={}" . format ( it , error_0 ) ) converged = True else : it += 1 dx = solve ( dv , v ) for bck in range ( maxbacksteps ) : xx = x - dx * ( 2 ** ( - bck ) ) vm = f ( xx ) [ 0 ] err = abs ( vm ) . max ( ) if err < error_0 : break x = xx if verbose : print ( "\t> {} | {} | {}" . format ( it , err , bck ) ) if not converged : import warnings warnings . warn ( "Did not converge" ) return [ x , it ] | Solve nonlinear system using safeguarded Newton iterations |
43,901 | def qzordered ( A , B , crit = 1.0 ) : "Eigenvalues bigger than crit are sorted in the top-left." TOL = 1e-10 def select ( alpha , beta ) : return alpha ** 2 > crit * beta ** 2 [ S , T , alpha , beta , U , V ] = ordqz ( A , B , output = 'real' , sort = select ) eigval = abs ( numpy . diag ( S ) / numpy . diag ( T ) ) return [ S , T , U , V , eigval ] | Eigenvalues bigger than crit are sorted in the top - left . |
43,902 | def ordqz ( A , B , sort = 'lhp' , output = 'real' , overwrite_a = False , overwrite_b = False , check_finite = True ) : import warnings import numpy as np from numpy import asarray_chkfinite from scipy . linalg . misc import LinAlgError , _datacopied from scipy . linalg . lapack import get_lapack_funcs from scipy . _lib . six import callable from scipy . linalg . _decomp_qz import _qz , _select_function lwork = None result , typ = _qz ( A , B , output = output , lwork = lwork , sort = None , overwrite_a = overwrite_a , overwrite_b = overwrite_b , check_finite = check_finite ) AA , BB , Q , Z = result [ 0 ] , result [ 1 ] , result [ - 4 ] , result [ - 3 ] if typ not in 'cz' : alpha , beta = result [ 3 ] + result [ 4 ] * 1.j , result [ 5 ] else : alpha , beta = result [ 3 ] , result [ 4 ] sfunction = _select_function ( sort ) select = sfunction ( alpha , beta ) tgsen , = get_lapack_funcs ( ( 'tgsen' , ) , ( AA , BB ) ) if lwork is None or lwork == - 1 : result = tgsen ( select , AA , BB , Q , Z , lwork = - 1 ) lwork = result [ - 3 ] [ 0 ] . real . astype ( np . int ) lwork += 1 liwork = None if liwork is None or liwork == - 1 : result = tgsen ( select , AA , BB , Q , Z , liwork = - 1 ) liwork = result [ - 2 ] [ 0 ] result = tgsen ( select , AA , BB , Q , Z , lwork = lwork , liwork = liwork ) info = result [ - 1 ] if info < 0 : raise ValueError ( "Illegal value in argument %d of tgsen" % - info ) elif info == 1 : raise ValueError ( "Reordering of (A, B) failed because the transformed" " matrix pair (A, B) would be too far from " "generalized Schur form; the problem is very " "ill-conditioned. (A, B) may have been partially " "reorded. If requested, 0 is returned in DIF(*), " "PL, and PR." 
) if typ in [ 'f' , 'd' ] : alpha = result [ 2 ] + result [ 3 ] * 1.j return ( result [ 0 ] , result [ 1 ] , alpha , result [ 4 ] , result [ 5 ] , result [ 6 ] ) else : return result [ 0 ] , result [ 1 ] , result [ 2 ] , result [ 3 ] , result [ 4 ] , result [ 5 ] | QZ decomposition for a pair of matrices with reordering . |
43,903 | def parameterized_expectations_direct ( model , verbose = False , initial_dr = None , pert_order = 1 , grid = { } , distribution = { } , maxit = 100 , tol = 1e-8 ) : t1 = time . time ( ) g = model . functions [ 'transition' ] d = model . functions [ 'direct_response' ] h = model . functions [ 'expectation' ] parms = model . calibration [ 'parameters' ] if initial_dr is None : if pert_order == 1 : initial_dr = approximate_controls ( model ) if pert_order > 1 : raise Exception ( "Perturbation order > 1 not supported (yet)." ) approx = model . get_grid ( ** grid ) grid = approx . grid interp_type = approx . interpolation dr = create_interpolator ( approx , interp_type ) expect = create_interpolator ( approx , interp_type ) distrib = model . get_distribution ( ** distribution ) nodes , weights = distrib . discretize ( ) N = grid . shape [ 0 ] z = np . zeros ( ( N , len ( model . symbols [ 'expectations' ] ) ) ) x_0 = initial_dr ( grid ) x_0 = x_0 . real h_0 = h ( grid , x_0 , parms ) it = 0 err = 10 err_0 = 10 if verbose : headline = '|{0:^4} | {1:10} | {2:8} | {3:8} |' headline = headline . format ( 'N' , ' Error' , 'Gain' , 'Time' ) stars = '-' * len ( headline ) print ( stars ) print ( headline ) print ( stars ) fmt_str = '|{0:4} | {1:10.3e} | {2:8.3f} | {3:8.3f} |' while err > tol and it <= maxit : it += 1 t_start = time . time ( ) expect . set_values ( h_0 ) z [ ... ] = 0 for i in range ( weights . shape [ 0 ] ) : e = nodes [ i , : ] S = g ( grid , x_0 , e , parms ) z += weights [ i ] * expect ( S ) new_x = d ( grid , z , parms ) new_h = h ( grid , new_x , parms ) err = ( abs ( new_h - h_0 ) . max ( ) ) x_0 = new_x h_0 = new_h err_SA = err / err_0 err_0 = err t_finish = time . time ( ) elapsed = t_finish - t_start if verbose : print ( fmt_str . format ( it , err , err_SA , elapsed ) ) if it == maxit : import warnings warnings . warn ( UserWarning ( "Maximum number of iterations reached" ) ) t2 = time . 
time ( ) if verbose : print ( stars ) print ( 'Elapsed: {} seconds.' . format ( t2 - t1 ) ) print ( stars ) dr . set_values ( x_0 ) return dr | Finds a global solution for model using parameterized expectations function . Requires the model to be written with controls as a direct function of the model objects . |
43,904 | def numdiff ( fun , args ) : epsilon = 1e-8 args = list ( args ) v0 = fun ( * args ) N = v0 . shape [ 0 ] l_v = len ( v0 ) dvs = [ ] for i , a in enumerate ( args ) : l_a = ( a ) . shape [ 1 ] dv = numpy . zeros ( ( N , l_v , l_a ) ) nargs = list ( args ) for j in range ( l_a ) : xx = args [ i ] . copy ( ) xx [ : , j ] += epsilon nargs [ i ] = xx dv [ : , : , j ] = ( fun ( * nargs ) - v0 ) / epsilon dvs . append ( dv ) return [ v0 ] + dvs | Vectorized numerical differentiation |
43,905 | def bandpass_filter ( data , k , w1 , w2 ) : data = np . asarray ( data ) low_w = np . pi * 2 / w2 high_w = np . pi * 2 / w1 bweights = np . zeros ( 2 * k + 1 ) bweights [ k ] = ( high_w - low_w ) / np . pi j = np . arange ( 1 , int ( k ) + 1 ) weights = 1 / ( np . pi * j ) * ( sin ( high_w * j ) - sin ( low_w * j ) ) bweights [ k + j ] = weights bweights [ : k ] = weights [ : : - 1 ] bweights -= bweights . mean ( ) return fftconvolve ( bweights , data , mode = 'valid' ) | This function will apply a bandpass filter to data . It will be kth order and will select the band between w1 and w2 . |
43,906 | def dprint ( s ) : import inspect frameinfo = inspect . stack ( ) [ 1 ] callerframe = frameinfo . frame d = callerframe . f_locals if ( isinstance ( s , str ) ) : val = eval ( s , d ) else : val = s cc = frameinfo . code_context [ 0 ] import re regex = re . compile ( "dprint\((.*)\)" ) res = regex . search ( cc ) s = res . group ( 1 ) text = '' text += bcolors . OKBLUE + "At <{}>\n" . format ( str ( frameinfo ) ) + bcolors . ENDC text += bcolors . WARNING + "{}: " . format ( s ) + bcolors . ENDC text += str ( val ) text += str ( ) print ( text ) | Prints s with additional debugging informations |
43,907 | def non_decreasing_series ( n , size ) : if size == 1 : return [ [ a ] for a in range ( n ) ] else : lc = non_decreasing_series ( n , size - 1 ) ll = [ ] for l in lc : last = l [ - 1 ] for i in range ( last , n ) : e = l + [ i ] ll . append ( e ) return ll | Lists all combinations of 0 ... n - 1 in increasing order |
43,908 | def higher_order_diff ( eqs , syms , order = 2 ) : import numpy eqs = list ( [ sympy . sympify ( eq ) for eq in eqs ] ) syms = list ( [ sympy . sympify ( s ) for s in syms ] ) neq = len ( eqs ) p = len ( syms ) D = [ numpy . array ( eqs ) ] orders = [ ] for i in range ( 1 , order + 1 ) : par = D [ i - 1 ] mat = numpy . empty ( [ neq ] + [ p ] * i , dtype = object ) for ind in non_decreasing_series ( p , i ) : ind_parent = ind [ : - 1 ] k = ind [ - 1 ] for line in range ( neq ) : ii = [ line ] + ind iid = [ line ] + ind_parent eeq = par [ tuple ( iid ) ] mat [ tuple ( ii ) ] = eeq . diff ( syms [ k ] ) D . append ( mat ) return D | Takes higher order derivatives of a list of equations w . r . t a list of paramters |
43,909 | def get_ranked_players ( ) : rankings_page = requests . get ( RANKINGS_URL ) root = etree . HTML ( rankings_page . text ) player_rows = root . xpath ( '//div[@id="ranked"]//tr' ) for row in player_rows [ 1 : ] : player_row = row . xpath ( 'td[@class!="country"]//text()' ) yield _Player ( name = player_row [ 1 ] , country = row [ 1 ] [ 0 ] . get ( 'title' ) , triple_crowns = player_row [ 3 ] , monthly_win = player_row [ 4 ] , biggest_cash = player_row [ 5 ] , plb_score = player_row [ 6 ] , biggest_score = player_row [ 7 ] , average_score = player_row [ 8 ] , previous_rank = player_row [ 9 ] , ) | Get the list of the first 100 ranked players . |
43,910 | def difference ( cls , first , second ) : first , second = cls ( first ) , cls ( second ) rank_list = list ( cls ) return abs ( rank_list . index ( first ) - rank_list . index ( second ) ) | Tells the numerical difference between two ranks . |
43,911 | def make_random ( cls ) : self = object . __new__ ( cls ) self . rank = Rank . make_random ( ) self . suit = Suit . make_random ( ) return self | Returns a random Card instance . |
43,912 | def twoplustwo_player ( username ) : from . website . twoplustwo import ForumMember , AmbiguousUserNameError , UserNotFoundError try : member = ForumMember ( username ) except UserNotFoundError : raise click . ClickException ( 'User "%s" not found!' % username ) except AmbiguousUserNameError as e : click . echo ( 'Got multiple users with similar names!' , err = True ) for ind , user in enumerate ( e . users ) : click . echo ( '{}. {}' . format ( ind + 1 , user . name ) , err = True ) number = click . prompt ( 'Which would you like to see [{}-{}]' . format ( 1 , len ( e . users ) ) , prompt_suffix = '? ' , type = click . IntRange ( 1 , len ( e . users ) ) , err = True ) userid = e . users [ int ( number ) - 1 ] . id member = ForumMember . from_userid ( userid ) click . echo ( err = True ) _print_header ( 'Two plus two forum member' ) _print_values ( ( 'Username' , member . username ) , ( 'Forum id' , member . id ) , ( 'Location' , member . location ) , ( 'Total posts' , member . total_posts ) , ( 'Posts per day' , member . posts_per_day ) , ( 'Rank' , member . rank ) , ( 'Last activity' , member . last_activity ) , ( 'Join date' , member . join_date ) , ( 'Usergroups' , member . public_usergroups ) , ( 'Profile picture' , member . profile_picture ) , ( 'Avatar' , member . avatar ) , ) | Get profile information about a Two plus Two Forum member given the username . |
43,913 | def p5list ( num ) : from . website . pocketfives import get_ranked_players format_str = '{:>4.4} {!s:<15.13}{!s:<18.15}{!s:<9.6}{!s:<10.7}' '{!s:<14.11}{!s:<12.9}{!s:<12.9}{!s:<12.9}{!s:<4.4}' click . echo ( format_str . format ( 'Rank' , 'Player name' , 'Country' , 'Triple' , 'Monthly' , 'Biggest cash' , 'PLB score' , 'Biggest s' , 'Average s' , 'Prev' ) ) underlines = [ '-' * 20 ] * 10 click . echo ( format_str . format ( * underlines ) ) for ind , player in enumerate ( get_ranked_players ( ) ) : click . echo ( format_str . format ( str ( ind + 1 ) + '.' , * player ) ) if ind == num - 1 : break | List pocketfives ranked players max 100 if no NUM or NUM if specified . |
43,914 | def psstatus ( ) : from . website . pokerstars import get_status _print_header ( 'PokerStars status' ) status = get_status ( ) _print_values ( ( 'Info updated' , status . updated ) , ( 'Tables' , status . tables ) , ( 'Players' , status . players ) , ( 'Active tournaments' , status . active_tournaments ) , ( 'Total tournaments' , status . total_tournaments ) , ( 'Clubs' , status . clubs ) , ( 'Club members' , status . club_members ) , ) site_format_str = '{0.id:<12} {0.tables:<7,} {0.players:<8,} {0.active_tournaments:,}' click . echo ( '\nSite Tables Players Tournaments' ) click . echo ( '----------- ------ ------- -----------' ) for site in status . sites : click . echo ( site_format_str . format ( site ) ) | Shows PokerStars status such as number of players tournaments . |
43,915 | def notes ( self ) : return tuple ( self . _get_note_data ( note ) for note in self . root . iter ( 'note' ) ) | Tuple of notes .. |
43,916 | def labels ( self ) : return tuple ( _Label ( label . get ( 'id' ) , label . get ( 'color' ) , label . text ) for label in self . root . iter ( 'label' ) ) | Tuple of labels . |
43,917 | def add_note ( self , player , text , label = None , update = None ) : if label is not None and ( label not in self . label_names ) : raise LabelNotFoundError ( 'Invalid label: {}' . format ( label ) ) if update is None : update = datetime . utcnow ( ) update = update . strftime ( '%s' ) label_id = self . _get_label_id ( label ) new_note = etree . Element ( 'note' , player = player , label = label_id , update = update ) new_note . text = text self . root . append ( new_note ) | Add a note to the xml . If update param is None it will be the current time . |
43,918 | def append_note ( self , player , text ) : note = self . _find_note ( player ) note . text += text | Append text to an already existing note . |
43,919 | def prepend_note ( self , player , text ) : note = self . _find_note ( player ) note . text = text + note . text | Prepend text to an already existing note . |
43,920 | def get_label ( self , name ) : label_tag = self . _find_label ( name ) return _Label ( label_tag . get ( 'id' ) , label_tag . get ( 'color' ) , label_tag . text ) | Find the label by name . |
43,921 | def add_label ( self , name , color ) : color_upper = color . upper ( ) if not self . _color_re . match ( color_upper ) : raise ValueError ( 'Invalid color: {}' . format ( color ) ) labels_tag = self . root [ 0 ] last_id = int ( labels_tag [ - 1 ] . get ( 'id' ) ) new_id = str ( last_id + 1 ) new_label = etree . Element ( 'label' , id = new_id , color = color_upper ) new_label . text = name labels_tag . append ( new_label ) | Add a new label . It s id will automatically be calculated . |
43,922 | def del_label ( self , name ) : labels_tag = self . root [ 0 ] labels_tag . remove ( self . _find_label ( name ) ) | Delete a label by name . |
43,923 | def save ( self , filename ) : with open ( filename , 'w' ) as fp : fp . write ( str ( self ) ) | Save the note XML to a file . |
43,924 | def board ( self ) : board = [ ] if self . flop : board . extend ( self . flop . cards ) if self . turn : board . append ( self . turn ) if self . river : board . append ( self . river ) return tuple ( board ) if board else None | Calculates board from flop turn and river . |
43,925 | def _parse_date ( self , date_string ) : date = datetime . strptime ( date_string , self . _DATE_FORMAT ) self . date = self . _TZ . localize ( date ) . astimezone ( pytz . UTC ) | Parse the date_string and return a datetime object as UTC . |
43,926 | def _split_raw ( self ) : self . _splitted = self . _split_re . split ( self . raw ) self . _sections = [ ind for ind , elem in enumerate ( self . _splitted ) if not elem ] | Split hand history by sections . |
43,927 | def _get_timezone ( self , root ) : tz_str = root . xpath ( '//div[@class="smallfont" and @align="center"]' ) [ 0 ] . text hours = int ( self . _tz_re . search ( tz_str ) . group ( 1 ) ) return tzoffset ( tz_str , hours * 60 ) | Find timezone informatation on bottom of the page . |
43,928 | def get_current_tournaments ( ) : schedule_page = requests . get ( TOURNAMENTS_XML_URL ) root = etree . XML ( schedule_page . content ) for tour in root . iter ( '{*}tournament' ) : yield _Tournament ( start_date = tour . findtext ( '{*}start_date' ) , name = tour . findtext ( '{*}name' ) , game = tour . findtext ( '{*}game' ) , buyin = tour . findtext ( '{*}buy_in_fee' ) , players = tour . get ( 'players' ) ) | Get the next 200 tournaments from pokerstars . |
43,929 | def _filter_file ( src , dest , subst ) : substre = re . compile ( r'\$(%s)' % '|' . join ( subst . keys ( ) ) ) def repl ( m ) : return subst [ m . group ( 1 ) ] with open ( src , "rt" ) as sf , open ( dest , "wt" ) as df : while True : l = sf . readline ( ) if not l : break df . write ( re . sub ( substre , repl , l ) ) | Copy src to dest doing substitutions on the fly . |
43,930 | def _fixup_graphql_error ( self , data ) : original_data = data errors = data . get ( 'errors' ) original_errors = errors if not isinstance ( errors , list ) : self . logger . warning ( 'data["errors"] is not a list! Fix up data=%r' , data ) data = data . copy ( ) data [ 'errors' ] = [ { 'message' : str ( errors ) } ] return data for i , error in enumerate ( errors ) : if not isinstance ( error , dict ) : self . logger . warning ( 'Error #%d: is not a dict: %r. Fix up!' , i , error ) if data is original_data : data = data . copy ( ) if errors is original_errors : errors = errors . copy ( ) data [ 'errors' ] = errors errors [ i ] = { 'message' : str ( error ) } continue message = error . get ( 'message' ) if not isinstance ( message , str ) : if data is original_data : data = data . copy ( ) if errors is original_errors : errors = errors . copy ( ) data [ 'errors' ] = errors message = str ( error ) if message is None else str ( message ) error = error . copy ( ) error [ 'message' ] = message errors [ i ] = error return data | Given a possible GraphQL error payload make sure it s in shape . |
43,931 | def snippet ( code , locations , sep = ' | ' , colmark = ( '-' , '^' ) , context = 5 ) : if not locations : return [ ] lines = code . split ( '\n' ) offset = int ( len ( lines ) / 10 ) + 1 linenofmt = '%{}d' . format ( offset ) s = [ ] for loc in locations : line = max ( 0 , loc . get ( 'line' , 1 ) - 1 ) column = max ( 0 , loc . get ( 'column' , 1 ) - 1 ) start_line = max ( 0 , line - context ) for i , ln in enumerate ( lines [ start_line : line + 1 ] , start_line ) : s . append ( '{}{}{}' . format ( linenofmt % i , sep , ln ) ) s . append ( '{}{}{}' . format ( ' ' * ( offset + len ( sep ) ) , colmark [ 0 ] * column , colmark [ 1 ] ) ) return s | Given a code and list of locations convert to snippet lines . |
43,932 | def _create_non_null_wrapper ( name , t ) : 'creates type wrapper for non-null of given type' def __new__ ( cls , json_data , selection_list = None ) : if json_data is None : raise ValueError ( name + ' received null value' ) return t ( json_data , selection_list ) def __to_graphql_input__ ( value , indent = 0 , indent_string = ' ' ) : return t . __to_graphql_input__ ( value , indent , indent_string ) return type ( name , ( t , ) , { '__new__' : __new__ , '_%s__auto_register' % name : False , '__to_graphql_input__' : __to_graphql_input__ , } ) | creates type wrapper for non - null of given type |
43,933 | def _create_list_of_wrapper ( name , t ) : 'creates type wrapper for list of given type' def __new__ ( cls , json_data , selection_list = None ) : if json_data is None : return None return [ t ( v , selection_list ) for v in json_data ] def __to_graphql_input__ ( value , indent = 0 , indent_string = ' ' ) : r = [ ] for v in value : r . append ( t . __to_graphql_input__ ( v , indent , indent_string ) ) return '[' + ', ' . join ( r ) + ']' def __to_json_value__ ( value ) : if value is None : return None return [ t . __to_json_value__ ( v ) for v in value ] return type ( name , ( t , ) , { '__new__' : __new__ , '_%s__auto_register' % name : False , '__to_graphql_input__' : __to_graphql_input__ , '__to_json_value__' : __to_json_value__ , } ) | creates type wrapper for list of given type |
43,934 | def add_query_to_url ( url , extra_query ) : split = urllib . parse . urlsplit ( url ) merged_query = urllib . parse . parse_qsl ( split . query ) if isinstance ( extra_query , dict ) : for k , v in extra_query . items ( ) : if not isinstance ( v , ( tuple , list ) ) : merged_query . append ( ( k , v ) ) else : for cv in v : merged_query . append ( ( k , cv ) ) else : merged_query . extend ( extra_query ) merged_split = urllib . parse . SplitResult ( split . scheme , split . netloc , split . path , urllib . parse . urlencode ( merged_query ) , split . fragment , ) return merged_split . geturl ( ) | Adds an extra query to URL returning the new URL . |
43,935 | def connection_args ( * lst , ** mapping ) : pd = ArgDict ( * lst , ** mapping ) pd . setdefault ( 'after' , String ) pd . setdefault ( 'before' , String ) pd . setdefault ( 'first' , Int ) pd . setdefault ( 'last' , Int ) return pd | Returns the default parameters for connection . |
43,936 | def msjd ( theta ) : s = 0. for p in theta . dtype . names : s += np . sum ( np . diff ( theta [ p ] , axis = 0 ) ** 2 ) return s | Mean squared jumping distance . |
43,937 | def loglik ( self , theta , t = None ) : if t is None : t = self . T - 1 l = np . zeros ( shape = theta . shape [ 0 ] ) for s in range ( t + 1 ) : l += self . logpyt ( theta , s ) return l | log - likelihood at given parameter values . |
43,938 | def logpost ( self , theta , t = None ) : return self . prior . logpdf ( theta ) + self . loglik ( theta , t ) | Posterior log - density at given parameter values . |
43,939 | def copyto ( self , src , where = None ) : for n , _ in enumerate ( self . l ) : if where [ n ] : self . l [ n ] = src . l [ n ] | Same syntax and functionality as numpy . copyto |
43,940 | def copy ( self ) : attrs = { k : self . __dict__ [ k ] . copy ( ) for k in self . containers } attrs . update ( { k : cp . deepcopy ( self . __dict__ [ k ] ) for k in self . shared } ) return self . __class__ ( ** attrs ) | Returns a copy of the object . |
43,941 | def copyto ( self , src , where = None ) : for k in self . containers : v = self . __dict__ [ k ] if isinstance ( v , np . ndarray ) : np . copyto ( v , src . __dict__ [ k ] , where = where ) else : v . copyto ( src . __dict__ [ k ] , where = where ) | Emulates function copyto in NumPy . |
43,942 | def copyto_at ( self , n , src , m ) : for k in self . containers : self . __dict__ [ k ] [ n ] = src . __dict__ [ k ] [ m ] | Copy to at a given location . |
43,943 | def Metropolis ( self , compute_target , mh_options ) : opts = mh_options . copy ( ) nsteps = opts . pop ( 'nsteps' , 0 ) delta_dist = opts . pop ( 'delta_dist' , 0.1 ) proposal = self . choose_proposal ( ** opts ) xout = self . copy ( ) xp = self . __class__ ( theta = np . empty_like ( self . theta ) ) step_ars = [ ] for _ in self . mcmc_iterate ( nsteps , self . arr , xout . arr , delta_dist ) : xp . arr [ : , : ] , delta_lp = proposal . step ( xout . arr ) compute_target ( xp ) lp_acc = xp . lpost - xout . lpost + delta_lp accept = ( np . log ( stats . uniform . rvs ( size = self . N ) ) < lp_acc ) xout . copyto ( xp , where = accept ) step_ars . append ( np . mean ( accept ) ) xout . acc_rates = self . acc_rates + [ step_ars ] return xout | Performs a certain number of Metropolis steps . |
43,944 | def backward ( self ) : if not self . filt : self . forward ( ) self . smth = [ self . filt [ - 1 ] ] log_trans = np . log ( self . hmm . trans_mat ) ctg = np . zeros ( self . hmm . dim ) for filt , next_ft in reversed ( list ( zip ( self . filt [ : - 1 ] , self . logft [ 1 : ] ) ) ) : new_ctg = np . empty ( self . hmm . dim ) for k in range ( self . hmm . dim ) : new_ctg [ k ] = rs . log_sum_exp ( log_trans [ k , : ] + next_ft + ctg ) ctg = new_ctg smth = rs . exp_and_normalise ( np . log ( filt ) + ctg ) self . smth . append ( smth ) self . smth . reverse ( ) | Backward recursion . |
43,945 | def predict_step ( F , covX , filt ) : pred_mean = np . matmul ( filt . mean , F . T ) pred_cov = dotdot ( F , filt . cov , F . T ) + covX return MeanAndCov ( mean = pred_mean , cov = pred_cov ) | Predictive step of Kalman filter . |
43,946 | def filter_step ( G , covY , pred , yt ) : data_pred_mean = np . matmul ( pred . mean , G . T ) data_pred_cov = dotdot ( G , pred . cov , G . T ) + covY if covY . shape [ 0 ] == 1 : logpyt = dists . Normal ( loc = data_pred_mean , scale = np . sqrt ( data_pred_cov ) ) . logpdf ( yt ) else : logpyt = dists . MvNormal ( loc = data_pred_mean , cov = data_pred_cov ) . logpdf ( yt ) residual = yt - data_pred_mean gain = dotdot ( pred . cov , G . T , inv ( data_pred_cov ) ) filt_mean = pred . mean + np . matmul ( residual , gain . T ) filt_cov = pred . cov - dotdot ( gain , G , pred . cov ) return MeanAndCov ( mean = filt_mean , cov = filt_cov ) , logpyt | Filtering step of Kalman filter . |
43,947 | def check_shapes ( self ) : assert self . covX . shape == ( self . dx , self . dx ) , error_msg assert self . covY . shape == ( self . dy , self . dy ) , error_msg assert self . F . shape == ( self . dx , self . dx ) , error_msg assert self . G . shape == ( self . dy , self . dx ) , error_msg assert self . mu0 . shape == ( self . dx , ) , error_msg assert self . cov0 . shape == ( self . dx , self . dx ) , error_msg | Check all dimensions are correct . |
43,948 | def sobol ( N , dim , scrambled = 1 ) : while ( True ) : seed = np . random . randint ( 2 ** 32 ) out = lowdiscrepancy . sobol ( N , dim , scrambled , seed , 1 , 0 ) if ( scrambled == 0 ) or ( ( out < 1. ) . all ( ) and ( out > 0. ) . all ( ) ) : return out | Sobol sequence . |
43,949 | def smoothing_worker ( method = None , N = 100 , seed = None , fk = None , fk_info = None , add_func = None , log_gamma = None ) : T = fk . T if fk_info is None : fk_info = fk . __class__ ( ssm = fk . ssm , data = fk . data [ : : - 1 ] ) if seed : random . seed ( seed ) est = np . zeros ( T - 1 ) if method == 'FFBS_QMC' : pf = particles . SQMC ( fk = fk , N = N , store_history = True ) else : pf = particles . SMC ( fk = fk , N = N , store_history = True ) tic = time . clock ( ) pf . run ( ) if method in [ 'FFBS_ON' , 'FFBS_ON2' , 'FFBS_QMC' ] : if method . startswith ( 'FFBS_ON' ) : z = pf . hist . backward_sampling ( N , linear_cost = ( method == 'FFBS_ON' ) ) else : z = pf . hist . backward_sampling_qmc ( N ) for t in range ( T - 1 ) : est [ t ] = np . mean ( add_func ( t , z [ t ] , z [ t + 1 ] ) ) elif method in [ 'two-filter_ON2' , 'two-filter_ON' , 'two-filter_ON_prop' ] : infopf = particles . SMC ( fk = fk_info , N = N , store_history = True ) infopf . run ( ) for t in range ( T - 1 ) : psi = lambda x , xf : add_func ( t , x , xf ) if method == 'two-filter_ON2' : est [ t ] = pf . hist . twofilter_smoothing ( t , infopf , psi , log_gamma ) else : ti = T - 2 - t if method == 'two-filter_ON_prop' : modif_fwd = stats . norm . logpdf ( pf . hist . X [ t ] , loc = np . mean ( infopf . hist . X [ ti + 1 ] ) , scale = np . std ( infopf . hist . X [ ti + 1 ] ) ) modif_info = stats . norm . logpdf ( infopf . hist . X [ ti ] , loc = np . mean ( pf . hist . X [ t + 1 ] ) , scale = np . std ( pf . hist . X [ t + 1 ] ) ) else : modif_fwd , modif_info = None , None est [ t ] = pf . hist . twofilter_smoothing ( t , infopf , psi , log_gamma , linear_cost = True , modif_forward = modif_fwd , modif_info = modif_info ) else : print ( 'no such method?' ) cpu_time = time . clock ( ) - tic print ( method + ' took %.2f s for N=%i' % ( cpu_time , N ) ) return { 'est' : est , 'cpu' : cpu_time } | Generic worker for off - line smoothing algorithms . |
43,950 | def save ( self , X = None , w = None , A = None ) : self . X . append ( X ) self . wgt . append ( w ) self . A . append ( A ) | Save one page of history at a given time . |
43,951 | def extract_one_trajectory ( self ) : traj = [ ] for t in reversed ( range ( self . T ) ) : if t == self . T - 1 : n = rs . multinomial_once ( self . wgt [ - 1 ] . W ) else : n = self . A [ t + 1 ] [ n ] traj . append ( self . X [ t ] [ n ] ) return traj [ : : - 1 ] | Extract a single trajectory from the particle history . |
43,952 | def compute_trajectories ( self ) : self . B = np . empty ( ( self . T , self . N ) , 'int' ) self . B [ - 1 , : ] = self . A [ - 1 ] for t in reversed ( range ( self . T - 1 ) ) : self . B [ t , : ] = self . A [ t + 1 ] [ self . B [ t + 1 ] ] | Compute the N trajectories that constitute the current genealogy . |
43,953 | def twofilter_smoothing ( self , t , info , phi , loggamma , linear_cost = False , return_ess = False , modif_forward = None , modif_info = None ) : ti = self . T - 2 - t if t < 0 or t >= self . T - 1 : raise ValueError ( 'two-filter smoothing: t must be in range 0,...,T-2' ) lwinfo = info . hist . wgt [ ti ] . lw - loggamma ( info . hist . X [ ti ] ) if linear_cost : return self . _twofilter_smoothing_ON ( t , ti , info , phi , lwinfo , return_ess , modif_forward , modif_info ) else : return self . _twofilter_smoothing_ON2 ( t , ti , info , phi , lwinfo ) | Two - filter smoothing . |
43,954 | def multiSMC ( nruns = 10 , nprocs = 0 , out_func = None , ** args ) : def f ( ** args ) : pf = SMC ( ** args ) pf . run ( ) return out_func ( pf ) if out_func is None : out_func = lambda x : x return utils . multiplexer ( f = f , nruns = nruns , nprocs = nprocs , seeding = True , ** args ) | Run SMC algorithms in parallel for different combinations of parameters . |
43,955 | def reset_weights ( self ) : if self . fk . isAPF : lw = ( rs . log_mean_exp ( self . logetat , W = self . W ) - self . logetat [ self . A ] ) self . wgts = rs . Weights ( lw = lw ) else : self . wgts = rs . Weights ( ) | Reset weights after a resampling step . |
43,956 | def log_sum_exp ( v ) : m = v . max ( ) return m + np . log ( np . sum ( np . exp ( v - m ) ) ) | Log of the sum of the exp of the arguments . |
43,957 | def log_sum_exp_ab ( a , b ) : if a > b : return a + np . log ( 1. + np . exp ( b - a ) ) else : return b + np . log ( 1. + np . exp ( a - b ) ) | log_sum_exp for two scalars . |
43,958 | def wmean_and_var ( W , x ) : m = np . average ( x , weights = W , axis = 0 ) m2 = np . average ( x ** 2 , weights = W , axis = 0 ) v = m2 - m ** 2 return { 'mean' : m , 'var' : v } | Component - wise weighted mean and variance . |
43,959 | def wmean_and_var_str_array ( W , x ) : m = np . empty ( shape = x . shape [ 1 : ] , dtype = x . dtype ) v = np . empty_like ( m ) for p in x . dtype . names : m [ p ] , v [ p ] = wmean_and_var ( W , x [ p ] ) . values ( ) return { 'mean' : m , 'var' : v } | Weighted mean and variance of each component of a structured array . |
43,960 | def wquantiles ( W , x , alphas = ( 0.25 , 0.50 , 0.75 ) ) : if len ( x . shape ) == 1 : return _wquantiles ( W , x , alphas = alphas ) elif len ( x . shape ) == 2 : return np . array ( [ _wquantiles ( W , x [ : , i ] , alphas = alphas ) for i in range ( x . shape [ 1 ] ) ] ) | Quantiles for weighted data . |
43,961 | def wquantiles_str_array ( W , x , alphas = ( 0.25 , 0.50 , 0 , 75 ) ) : return { p : wquantiles ( W , x [ p ] , alphas ) for p in x . dtype . names } | quantiles for weighted data stored in a structured array . |
43,962 | def resampling_scheme ( func ) : @ functools . wraps ( func ) def modif_func ( W , M = None ) : M = W . shape [ 0 ] if M is None else M return func ( W , M ) rs_funcs [ func . __name__ ] = modif_func modif_func . __doc__ = rs_doc % func . __name__ . capitalize ( ) return modif_func | Decorator for resampling schemes . |
43,963 | def inverse_cdf ( su , W ) : j = 0 s = W [ 0 ] M = su . shape [ 0 ] A = np . empty ( M , 'int' ) for n in range ( M ) : while su [ n ] > s : j += 1 s += W [ j ] A [ n ] = j return A | Inverse CDF algorithm for a finite distribution . |
43,964 | def hilbert_array ( xint ) : N , d = xint . shape h = np . zeros ( N , int64 ) for n in range ( N ) : h [ n ] = Hilbert_to_int ( xint [ n , : ] ) return h | Compute Hilbert indices . |
43,965 | def mean_sq_jump_dist ( self , discard_frac = 0.1 ) : discard = int ( self . niter * discard_frac ) return msjd ( self . chain . theta [ discard : ] ) | Mean squared jumping distance estimated from chain . |
43,966 | def update ( self , v ) : self . t += 1 g = self . gamma ( ) self . mu = ( 1. - g ) * self . mu + g * v mv = v - self . mu self . Sigma = ( ( 1. - g ) * self . Sigma + g * np . dot ( mv [ : , np . newaxis ] , mv [ np . newaxis , : ] ) ) try : self . L = cholesky ( self . Sigma , lower = True ) except LinAlgError : self . L = self . L0 | Adds point v |
43,967 | def cartesian_lists ( d ) : return [ { k : v for k , v in zip ( d . keys ( ) , args ) } for args in itertools . product ( * d . values ( ) ) ] | turns a dict of lists into a list of dicts that represents the cartesian product of the initial lists |
43,968 | def cartesian_args ( args , listargs , dictargs ) : ils = { k : [ v , ] for k , v in args . items ( ) } ils . update ( listargs ) ils . update ( { k : v . values ( ) for k , v in dictargs . items ( ) } ) ols = listargs . copy ( ) ols . update ( { k : v . keys ( ) for k , v in dictargs . items ( ) } ) return cartesian_lists ( ils ) , cartesian_lists ( ols ) | Compute a list of inputs and outputs for a function with kw arguments . |
43,969 | def worker ( qin , qout , f ) : while not qin . empty ( ) : i , args = qin . get ( ) qout . put ( ( i , f ( ** args ) ) ) | Worker for multiprocessing . A worker repeatedly picks a dict of arguments in the queue and computes f for this set of arguments until the input queue is empty .
43,970 | def distinct_seeds ( k ) : seeds = [ ] for _ in range ( k ) : while True : s = random . randint ( 2 ** 32 - 1 ) if s not in seeds : break seeds . append ( s ) return seeds | returns k distinct seeds for random number generation |
43,971 | def multiplexer ( f = None , nruns = 1 , nprocs = 1 , seeding = None , ** args ) : if not callable ( f ) : raise ValueError ( 'multiplexer: function f missing, or not callable' ) if seeding is None : seeding = ( nruns > 1 ) fixedargs , listargs , dictargs = { } , { } , { } listargs [ 'run' ] = list ( range ( nruns ) ) for k , v in args . items ( ) : if isinstance ( v , list ) : listargs [ k ] = v elif isinstance ( v , dict ) : dictargs [ k ] = v else : fixedargs [ k ] = v inputs , outputs = cartesian_args ( fixedargs , listargs , dictargs ) for ip in inputs : ip . pop ( 'run' ) if seeding : seeds = distinct_seeds ( len ( inputs ) ) for ip , op , s in zip ( inputs , outputs , seeds ) : ip [ 'seed' ] = s op [ 'seed' ] = s return distribute_work ( f , inputs , outputs , nprocs = nprocs ) | Evaluate a function for different parameters optionally in parallel . |
43,972 | def simulate ( self , T ) : x = [ ] for t in range ( T ) : law_x = self . PX0 ( ) if t == 0 else self . PX ( t , x [ - 1 ] ) x . append ( law_x . rvs ( size = 1 ) ) y = self . simulate_given_x ( x ) return x , y | Simulate state and observation processes . |
43,973 | def interpoled_resampling ( W , x ) : N = W . shape [ 0 ] idx = np . argsort ( x ) xs = x [ idx ] ws = W [ idx ] cs = np . cumsum ( avg_n_nplusone ( ws ) ) u = random . rand ( N ) xrs = np . empty ( N ) where = np . searchsorted ( cs , u ) for n in range ( N ) : m = where [ n ] if m == 0 : xrs [ n ] = xs [ 0 ] elif m == N : xrs [ n ] = xs [ - 1 ] else : xrs [ n ] = interpol ( cs [ m - 1 ] , cs [ m ] , xs [ m - 1 ] , xs [ m ] , u [ n ] ) return xrs | Resampling based on an interpolated CDF as described in Malik and Pitt . |
43,974 | def sort_items ( self , items , args = False ) : if self . settings [ 'sort' ] . lower ( ) == 'src' : return def alpha ( i ) : return i . name def permission ( i ) : if args : if i . intent == 'in' : return 'b' if i . intent == 'inout' : return 'c' if i . intent == 'out' : return 'd' if i . intent == '' : return 'e' perm = getattr ( i , 'permission' , '' ) if perm == 'public' : return 'b' if perm == 'protected' : return 'c' if perm == 'private' : return 'd' return 'a' def permission_alpha ( i ) : return permission ( i ) + '-' + i . name def itype ( i ) : if i . obj == 'variable' : retstr = i . vartype if retstr == 'class' : retstr = 'type' if i . kind : retstr = retstr + '-' + str ( i . kind ) if i . strlen : retstr = retstr + '-' + str ( i . strlen ) if i . proto : retstr = retstr + '-' + i . proto [ 0 ] return retstr elif i . obj == 'proc' : if i . proctype != 'Function' : return i . proctype . lower ( ) else : return i . proctype . lower ( ) + '-' + itype ( i . retvar ) else : return i . obj def itype_alpha ( i ) : return itype ( i ) + '-' + i . name if self . settings [ 'sort' ] . lower ( ) == 'alpha' : items . sort ( key = alpha ) elif self . settings [ 'sort' ] . lower ( ) == 'permission' : items . sort ( key = permission ) elif self . settings [ 'sort' ] . lower ( ) == 'permission-alpha' : items . sort ( key = permission_alpha ) elif self . settings [ 'sort' ] . lower ( ) == 'type' : items . sort ( key = itype ) elif self . settings [ 'sort' ] . lower ( ) == 'type-alpha' : items . sort ( key = itype_alpha ) | Sort the self s contents as contained in the list items as specified in self s meta - data . |
43,975 | def contents_size ( self ) : count = 0 if hasattr ( self , 'variables' ) : count += 1 if hasattr ( self , 'types' ) : count += 1 if hasattr ( self , 'modules' ) : count += 1 if hasattr ( self , 'submodules' ) : count += 1 if hasattr ( self , 'subroutines' ) : count += 1 if hasattr ( self , 'modprocedures' ) : count += 1 if hasattr ( self , 'functions' ) : count += 1 if hasattr ( self , 'interfaces' ) : count += 1 if hasattr ( self , 'absinterfaces' ) : count += 1 if hasattr ( self , 'programs' ) : count += 1 if hasattr ( self , 'boundprocs' ) : count += 1 if hasattr ( self , 'finalprocs' ) : count += 1 if hasattr ( self , 'enums' ) : count += 1 if hasattr ( self , 'procedure' ) : count += 1 if hasattr ( self , 'constructor' ) : count += 1 if hasattr ( self , 'modfunctions' ) : count += 1 if hasattr ( self , 'modsubroutines' ) : count += 1 if hasattr ( self , 'modprocs' ) : count += 1 if getattr ( self , 'src' , None ) : count += 1 return count | Returns the number of different categories to be shown in the contents side - bar in the HTML documentation . |
43,976 | def sort ( self ) : if hasattr ( self , 'variables' ) : sort_items ( self , self . variables ) if hasattr ( self , 'modules' ) : sort_items ( self , self . modules ) if hasattr ( self , 'submodules' ) : sort_items ( self , self . submodules ) if hasattr ( self , 'common' ) : sort_items ( self , self . common ) if hasattr ( self , 'subroutines' ) : sort_items ( self , self . subroutines ) if hasattr ( self , 'modprocedures' ) : sort_items ( self , self . modprocedures ) if hasattr ( self , 'functions' ) : sort_items ( self , self . functions ) if hasattr ( self , 'interfaces' ) : sort_items ( self , self . interfaces ) if hasattr ( self , 'absinterfaces' ) : sort_items ( self , self . absinterfaces ) if hasattr ( self , 'types' ) : sort_items ( self , self . types ) if hasattr ( self , 'programs' ) : sort_items ( self , self . programs ) if hasattr ( self , 'blockdata' ) : sort_items ( self , self . blockdata ) if hasattr ( self , 'boundprocs' ) : sort_items ( self , self . boundprocs ) if hasattr ( self , 'finalprocs' ) : sort_items ( self , self . finalprocs ) if hasattr ( self , 'args' ) : pass | Sorts components of the object . |
43,977 | def make_links ( self , project ) : self . doc = ford . utils . sub_links ( self . doc , project ) if 'summary' in self . meta : self . meta [ 'summary' ] = ford . utils . sub_links ( self . meta [ 'summary' ] , project ) for item in self . iterator ( 'variables' , 'types' , 'enums' , 'modules' , 'submodules' , 'subroutines' , 'functions' , 'interfaces' , 'absinterfaces' , 'programs' , 'boundprocs' , 'args' , 'bindings' ) : if isinstance ( item , FortranBase ) : item . make_links ( project ) if hasattr ( self , 'retvar' ) : if self . retvar : if isinstance ( self . retvar , FortranBase ) : self . retvar . make_links ( project ) if hasattr ( self , 'procedure' ) : if isinstance ( self . procedure , FortranBase ) : self . procedure . make_links ( project ) | Process intra - site links to documentation of other parts of the program . |
43,978 | def iterator ( self , * argv ) : for arg in argv : if hasattr ( self , arg ) : for item in getattr ( self , arg ) : yield item | Iterator returning any list of elements via attribute lookup in self |
43,979 | def get_used_entities ( self , use_specs ) : if len ( use_specs . strip ( ) ) == 0 : return ( self . pub_procs , self . pub_absints , self . pub_types , self . pub_vars ) only = bool ( self . ONLY_RE . match ( use_specs ) ) use_specs = self . ONLY_RE . sub ( '' , use_specs ) ulist = self . SPLIT_RE . split ( use_specs ) ulist [ - 1 ] = ulist [ - 1 ] . strip ( ) uspecs = { } for item in ulist : match = self . RENAME_RE . search ( item ) if match : uspecs [ match . group ( 1 ) . lower ( ) ] = match . group ( 2 ) else : uspecs [ item . lower ( ) ] = item ret_procs = { } ret_absints = { } ret_types = { } ret_vars = { } for name , obj in self . pub_procs . items ( ) : name = name . lower ( ) if only : if name in uspecs : ret_procs [ name ] = obj else : ret_procs [ name ] = obj for name , obj in self . pub_absints . items ( ) : name = name . lower ( ) if only : if name in uspecs : ret_absints [ name ] = obj else : ret_absints [ name ] = obj for name , obj in self . pub_types . items ( ) : name = name . lower ( ) if only : if name in uspecs : ret_types [ name ] = obj else : ret_types [ name ] = obj for name , obj in self . pub_vars . items ( ) : name = name . lower ( ) if only : if name in uspecs : ret_vars [ name ] = obj else : ret_vars [ name ] = obj return ( ret_procs , ret_absints , ret_types , ret_vars ) | Returns the entities which are imported by a use statement . These are contained in dicts . |
43,980 | def get_name ( self , item ) : if not isinstance ( item , ford . sourceform . FortranBase ) : raise Exception ( '{} is not of a type derived from FortranBase' . format ( str ( item ) ) ) if item in self . _items : return self . _items [ item ] else : if item . get_dir ( ) not in self . _counts : self . _counts [ item . get_dir ( ) ] = { } if item . name in self . _counts [ item . get_dir ( ) ] : num = self . _counts [ item . get_dir ( ) ] [ item . name ] + 1 else : num = 1 self . _counts [ item . get_dir ( ) ] [ item . name ] = num name = item . name . lower ( ) . replace ( '<' , 'lt' ) name = name . replace ( '>' , 'gt' ) name = name . replace ( '/' , 'SLASH' ) if name == '' : name = '__unnamed__' if num > 1 : name = name + '~' + str ( num ) self . _items [ item ] = name return name | Return the name for this item registered with this NameSelector . If no name has previously been registered then generate a new one . |
43,981 | def main ( proj_data , proj_docs , md ) : if proj_data [ 'relative' ] : proj_data [ 'project_url' ] = '.' project = ford . fortran_project . Project ( proj_data ) if len ( project . files ) < 1 : print ( "Error: No source files with appropriate extension found in specified directory." ) sys . exit ( 1 ) if proj_data [ 'relative' ] : project . markdown ( md , '..' ) else : project . markdown ( md , proj_data [ 'project_url' ] ) project . correlate ( ) if proj_data [ 'relative' ] : project . make_links ( '..' ) else : project . make_links ( proj_data [ 'project_url' ] ) if proj_data [ 'relative' ] : ford . sourceform . set_base_url ( '.' ) if 'summary' in proj_data : proj_data [ 'summary' ] = md . convert ( proj_data [ 'summary' ] ) proj_data [ 'summary' ] = ford . utils . sub_links ( ford . utils . sub_macros ( ford . utils . sub_notes ( proj_data [ 'summary' ] ) , proj_data [ 'project_url' ] ) , project ) if 'author_description' in proj_data : proj_data [ 'author_description' ] = md . convert ( proj_data [ 'author_description' ] ) proj_data [ 'author_description' ] = ford . utils . sub_links ( ford . utils . sub_macros ( ford . utils . sub_notes ( proj_data [ 'author_description' ] ) , proj_data [ 'project_url' ] ) , project ) proj_docs_ = ford . utils . sub_links ( ford . utils . sub_macros ( ford . utils . sub_notes ( proj_docs ) , proj_data [ 'project_url' ] ) , project ) if 'page_dir' in proj_data : page_tree = ford . pagetree . get_page_tree ( os . path . normpath ( proj_data [ 'page_dir' ] ) , md ) print ( ) else : page_tree = None proj_data [ 'pages' ] = page_tree docs = ford . output . Documentation ( proj_data , proj_docs_ , project , page_tree ) docs . writeout ( ) print ( '' ) return 0 | Main driver of FORD . |
43,982 | def convertToFree ( stream , length_limit = True ) : linestack = [ ] for line in stream : convline = FortranLine ( line , length_limit ) if convline . is_regular : if convline . isContinuation and linestack : linestack [ 0 ] . continueLine ( ) for l in linestack : yield str ( l ) linestack = [ ] linestack . append ( convline ) for l in linestack : yield str ( l ) | Convert stream from fixed source form to free source form . |
43,983 | def continueLine ( self ) : if not ( self . isLong and self . is_regular ) : self . line_conv = self . line_conv . rstrip ( ) + " &\n" else : temp = self . line_conv [ : 72 ] . rstrip ( ) + " &" self . line_conv = temp . ljust ( 72 ) + self . excess_line | Insert line continuation symbol at end of line . |
43,984 | def id_mods ( obj , modlist , intrinsic_mods = { } , submodlist = [ ] ) : for i in range ( len ( obj . uses ) ) : for candidate in modlist : if obj . uses [ i ] [ 0 ] . lower ( ) == candidate . name . lower ( ) : obj . uses [ i ] = [ candidate , obj . uses [ i ] [ 1 ] ] break else : if obj . uses [ i ] [ 0 ] . lower ( ) in intrinsic_mods : obj . uses [ i ] = [ intrinsic_mods [ obj . uses [ i ] [ 0 ] . lower ( ) ] , obj . uses [ i ] [ 1 ] ] continue if getattr ( obj , 'ancestor' , None ) : for submod in submodlist : if obj . ancestor . lower ( ) == submod . name . lower ( ) : obj . ancestor = submod break if hasattr ( obj , 'ancestor_mod' ) : for mod in modlist : if obj . ancestor_mod . lower ( ) == mod . name . lower ( ) : obj . ancestor_mod = mod break for modproc in getattr ( obj , 'modprocedures' , [ ] ) : id_mods ( modproc , modlist , intrinsic_mods ) for func in getattr ( obj , 'functions' , [ ] ) : id_mods ( func , modlist , intrinsic_mods ) for subroutine in getattr ( obj , 'subroutines' , [ ] ) : id_mods ( subroutine , modlist , intrinsic_mods ) | Match USE statements up with the right modules |
43,985 | def allfiles ( self ) : for f in self . files : yield f for f in self . extra_files : yield f | Instead of duplicating files it is much more efficient to create the iterator on the fly
43,986 | def make_links ( self , base_url = '..' ) : ford . sourceform . set_base_url ( base_url ) for src in self . allfiles : src . make_links ( self ) | Substitute intrasite links to documentation for other parts of the program . |
43,987 | def sub_notes ( docs ) : def substitute ( match ) : ret = "</p><div class=\"alert alert-{}\" role=\"alert\"><h4>{}</h4>" "<p>{}</p></div>" . format ( NOTE_TYPE [ match . group ( 1 ) . lower ( ) ] , match . group ( 1 ) . capitalize ( ) , match . group ( 2 ) ) if len ( match . groups ( ) ) >= 4 and not match . group ( 4 ) : ret += '\n<p>' return ret for regex in NOTE_RE : docs = regex . sub ( substitute , docs ) return docs | Substitutes the special controls for notes warnings todos and bugs with the corresponding div . |
43,988 | def paren_split ( sep , string ) : if len ( sep ) != 1 : raise Exception ( "Separation string must be one character long" ) retlist = [ ] level = 0 blevel = 0 left = 0 for i in range ( len ( string ) ) : if string [ i ] == "(" : level += 1 elif string [ i ] == ")" : level -= 1 elif string [ i ] == "[" : blevel += 1 elif string [ i ] == "]" : blevel -= 1 elif string [ i ] == sep and level == 0 and blevel == 0 : retlist . append ( string [ left : i ] ) left = i + 1 retlist . append ( string [ left : ] ) return retlist | Splits the string into pieces divided by sep when sep is outside of parentheses . |
43,989 | def quote_split ( sep , string ) : if len ( sep ) != 1 : raise Exception ( "Separation string must be one character long" ) retlist = [ ] squote = False dquote = False left = 0 i = 0 while i < len ( string ) : if string [ i ] == '"' and not dquote : if not squote : squote = True elif ( i + 1 ) < len ( string ) and string [ i + 1 ] == '"' : i += 1 else : squote = False elif string [ i ] == "'" and not squote : if not dquote : dquote = True elif ( i + 1 ) < len ( string ) and string [ i + 1 ] == "'" : i += 1 else : dquote = False elif string [ i ] == sep and not dquote and not squote : retlist . append ( string [ left : i ] ) left = i + 1 i += 1 retlist . append ( string [ left : ] ) return retlist | Splits the strings into pieces divided by sep when sep in not inside quotes . |
43,990 | def split_path ( path ) : def recurse_path ( path , retlist ) : if len ( retlist ) > 100 : fullpath = os . path . join ( * ( [ path , ] + retlist ) ) print ( "Directory '{}' contains too many levels" . format ( fullpath ) ) exit ( 1 ) head , tail = os . path . split ( path ) if len ( tail ) > 0 : retlist . insert ( 0 , tail ) recurse_path ( head , retlist ) elif len ( head ) > 1 : recurse_path ( head , retlist ) else : return retlist = [ ] path = os . path . realpath ( os . path . normpath ( path ) ) drive , path = os . path . splitdrive ( path ) if len ( drive ) > 0 : retlist . append ( drive ) recurse_path ( path , retlist ) return retlist | Splits the argument into its constituent directories and returns them as a list . |
43,991 | def sub_macros ( string , base_url ) : macros = { '|url|' : base_url , '|media|' : os . path . join ( base_url , 'media' ) , '|page|' : os . path . join ( base_url , 'page' ) } for key , val in macros . items ( ) : string = string . replace ( key , val ) return string | Replaces macros in documentation with their appropriate values . These macros are used for things like providing URLs . |
43,992 | def copytree ( src , dst ) : def touch ( path ) : now = time . time ( ) try : os . utime ( path , ( now , now ) ) except os . error : os . makedirs ( os . path . dirname ( path ) ) open ( path , "w" ) . close ( ) os . utime ( path , ( now , now ) ) for root , dirs , files in os . walk ( src ) : relsrcdir = os . path . relpath ( root , src ) dstdir = os . path . join ( dst , relsrcdir ) if not os . path . exists ( dstdir ) : try : os . makedirs ( dstdir ) except OSError as ex : if ex . errno != errno . EEXIST : raise for ff in files : shutil . copy ( os . path . join ( root , ff ) , os . path . join ( dstdir , ff ) ) touch ( os . path . join ( dstdir , ff ) ) | Replaces shutil . copytree to avoid problems on certain file systems . |
43,993 | def export_hmaps_csv ( key , dest , sitemesh , array , comment ) : curves = util . compose_arrays ( sitemesh , array ) writers . write_csv ( dest , curves , comment = comment ) return [ dest ] | Export the hazard maps of the given realization into CSV . |
43,994 | def export_hcurves_by_imt_csv ( key , kind , rlzs_assoc , fname , sitecol , array , oq , checksum ) : nsites = len ( sitecol ) fnames = [ ] for imt , imls in oq . imtls . items ( ) : slc = oq . imtls ( imt ) dest = add_imt ( fname , imt ) lst = [ ( 'lon' , F32 ) , ( 'lat' , F32 ) , ( 'depth' , F32 ) ] for iml in imls : lst . append ( ( 'poe-%s' % iml , F32 ) ) hcurves = numpy . zeros ( nsites , lst ) for sid , lon , lat , dep in zip ( range ( nsites ) , sitecol . lons , sitecol . lats , sitecol . depths ) : hcurves [ sid ] = ( lon , lat , dep ) + tuple ( array [ sid , slc ] ) fnames . append ( writers . write_csv ( dest , hcurves , comment = _comment ( rlzs_assoc , kind , oq . investigation_time ) + ( ', imt="%s", checksum=%d' % ( imt , checksum ) ) , header = [ name for ( name , dt ) in lst ] ) ) return fnames | Export the curves of the given realization into CSV . |
43,995 | def export_hcurves_csv ( ekey , dstore ) : oq = dstore [ 'oqparam' ] info = get_info ( dstore ) rlzs_assoc = dstore [ 'csm_info' ] . get_rlzs_assoc ( ) R = len ( rlzs_assoc . realizations ) sitecol = dstore [ 'sitecol' ] sitemesh = get_mesh ( sitecol ) key , kind , fmt = get_kkf ( ekey ) fnames = [ ] checksum = dstore . get_attr ( '/' , 'checksum32' ) hmap_dt = oq . hmap_dt ( ) for kind in oq . get_kinds ( kind , R ) : fname = hazard_curve_name ( dstore , ( key , fmt ) , kind , rlzs_assoc ) comment = _comment ( rlzs_assoc , kind , oq . investigation_time ) if ( key in ( 'hmaps' , 'uhs' ) and oq . uniform_hazard_spectra or oq . hazard_maps ) : hmap = extract ( dstore , 'hmaps?kind=' + kind ) [ kind ] if key == 'uhs' and oq . poes and oq . uniform_hazard_spectra : uhs_curves = calc . make_uhs ( hmap , info ) writers . write_csv ( fname , util . compose_arrays ( sitemesh , uhs_curves ) , comment = comment + ', checksum=%d' % checksum ) fnames . append ( fname ) elif key == 'hmaps' and oq . poes and oq . hazard_maps : fnames . extend ( export_hmaps_csv ( ekey , fname , sitemesh , hmap . flatten ( ) . view ( hmap_dt ) , comment + ', checksum=%d' % checksum ) ) elif key == 'hcurves' : hcurves = extract ( dstore , 'hcurves?kind=' + kind ) [ kind ] fnames . extend ( export_hcurves_by_imt_csv ( ekey , kind , rlzs_assoc , fname , sitecol , hcurves , oq , checksum ) ) return sorted ( fnames ) | Exports the hazard curves into several . csv files |
43,996 | def save_disagg_to_csv ( metadata , matrices ) : skip_keys = ( 'Mag' , 'Dist' , 'Lon' , 'Lat' , 'Eps' , 'TRT' ) base_header = ',' . join ( '%s=%s' % ( key , value ) for key , value in metadata . items ( ) if value is not None and key not in skip_keys ) for disag_tup , ( poe , iml , matrix , fname ) in matrices . items ( ) : header = '%s,poe=%.7f,iml=%.7e\n' % ( base_header , poe , iml ) if disag_tup == ( 'Mag' , 'Lon' , 'Lat' ) : matrix = numpy . swapaxes ( matrix , 0 , 1 ) matrix = numpy . swapaxes ( matrix , 1 , 2 ) disag_tup = ( 'Lon' , 'Lat' , 'Mag' ) axis = [ metadata [ v ] for v in disag_tup ] header += ',' . join ( v for v in disag_tup ) header += ',poe' axis = [ ( ax [ : - 1 ] + ax [ 1 : ] ) / 2. if ax . dtype == float else ax for ax in axis ] values = None if len ( axis ) == 1 : values = numpy . array ( [ axis [ 0 ] , matrix . flatten ( ) ] ) . T else : grids = numpy . meshgrid ( * axis , indexing = 'ij' ) values = [ g . flatten ( ) for g in grids ] values . append ( matrix . flatten ( ) ) values = numpy . array ( values ) . T writers . write_csv ( fname , values , comment = header , fmt = '%.5E' ) | Save disaggregation matrices to multiple . csv files . |
43,997 | def _interp_function ( self , y_ip1 , y_i , t_ip1 , t_i , imt_per ) : return y_i + ( y_ip1 - y_i ) / ( t_ip1 - t_i ) * ( imt_per - t_i ) | Generic interpolation function used in equation 19 of 2013 report . |
43,998 | def _get_SRF_tau ( self , imt_per ) : if imt_per < 1 : srf = 0.87 elif 1 <= imt_per < 5 : srf = self . _interp_function ( 0.58 , 0.87 , 5 , 1 , imt_per ) elif 5 <= imt_per <= 10 : srf = 0.58 else : srf = 1 return srf | Table 6 and equation 19 of 2013 report . |
43,999 | def _get_SRF_phi ( self , imt_per ) : if imt_per < 0.6 : srf = 0.8 elif 0.6 <= imt_per < 1 : srf = self . _interp_function ( 0.7 , 0.8 , 1 , 0.6 , imt_per ) elif 1 <= imt_per <= 10 : srf = self . _interp_function ( 0.6 , 0.7 , 10 , 1 , imt_per ) else : srf = 1 return srf | Table 7 and equation 19 of 2013 report . NB change in notation 2013 report calls this term sigma but it is referred to here as phi . |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.