idx
int64
0
251k
question
stringlengths
53
3.53k
target
stringlengths
5
1.23k
len_question
int64
20
893
len_target
int64
3
238
231,800
def parse(self):
    """Convert this line into a shape object by running every parse stage."""
    log.debug(self)
    self.parse_composite()
    self.split_line()
    self.convert_coordinates()
    self.convert_meta()
    self.make_shape()
    # log again so the before/after state of the parse is visible
    log.debug(self)
Convert line to shape object
56
6
231,801
def split_line(self):
    """Split the line into a coordinate string and a meta string."""
    # index of the '#' symbol, or -1 (meaning "end of line") when absent
    hash_or_end = self.line.find("#")
    coords_part = self.line[self.region_end:hash_or_end].strip(" |")
    self.coord_str = regex_paren.sub("", coords_part)
    # don't keep any meta_str when there is no metadata marker at all
    self.meta_str = self.line[hash_or_end:] if hash_or_end >= 0 else ""
Split line into coordinates and meta string
130
7
231,802
def convert_coordinates(self):
    """Convert the coordinate string into a list of coordinate objects."""
    coord_list = []
    # Strip out "null" elements, i.e. ''.  It might be possible to eliminate
    # these some other way, i.e. with regex directly, but I don't know how.
    # We need to copy in order not to burn up the iterators.
    elements = [x for x in regex_splitter.split(self.coord_str) if x]
    element_parsers = self.language_spec[self.region_type]
    for ii, (element, element_parser) in enumerate(zip(elements,
                                                       element_parsers)):
        if element_parser is coordinate:
            unit = self.coordinate_units[self.coordsys][ii % 2]
            coord_list.append(element_parser(element, unit))
        elif self.coordinate_units[self.coordsys][0] is u.dimensionless_unscaled:
            coord_list.append(
                element_parser(element, unit=u.dimensionless_unscaled))
        else:
            coord_list.append(element_parser(element))

    # An odd final element of an ellipse/box is an angular length.
    if self.region_type in ['ellipse', 'box'] and len(coord_list) % 2 == 1:
        coord_list[-1] = CoordinateParser.parse_angular_length_quantity(
            elements[len(coord_list) - 1])

    # Reset the (consumed) iterator for ellipse and annulus.
    # Note that this cannot be done with copy.deepcopy on python2.
    if self.region_type in ['ellipse', 'annulus']:
        self.language_spec[self.region_type] = itertools.chain(
            (coordinate, coordinate), itertools.cycle((radius,)))

    self.coord = coord_list
Convert coordinate string to objects
396
6
231,803
def convert_meta(self):
    """Convert the meta string to a dict, layered over the global metadata."""
    parsed = DS9Parser.parse_meta(self.meta_str)
    self.meta = copy.deepcopy(self.global_meta)
    self.meta.update(parsed)
    # the 'include' is not part of the metadata string;
    # it is pre-parsed as part of the shape type and should always
    # override the global one
    if self.include == '':
        self.include = self.meta.get('include', True)
    else:
        self.include = self.include != '-'
    self.meta['include'] = self.include
Convert meta string to dict
125
6
231,804
def _validate(val, name, expected='any'):
    """Validate that ``val`` is an appropriate `PixCoord` and return it.

    Parameters
    ----------
    val : object
        Object to validate.
    name : str
        Name used in error messages.
    expected : {'any', 'scalar', 'not scalar'}
        Required scalarness of the coordinate.
    """
    if not isinstance(val, PixCoord):
        raise TypeError('{} must be a PixCoord'.format(name))
    if expected == 'any':
        pass
    elif expected == 'scalar':
        if not val.isscalar:
            raise ValueError('{} must be a scalar PixCoord'.format(name))
    elif expected == 'not scalar':
        if val.isscalar:
            raise ValueError('{} must be a non-scalar PixCoord'.format(name))
    else:
        raise ValueError('Invalid argument for `expected`: {}'.format(expected))
    return val
Validate that a given object is an appropriate PixCoord .
162
13
231,805
def to_sky(self, wcs, origin=_DEFAULT_WCS_ORIGIN, mode=_DEFAULT_WCS_MODE):
    """Convert this PixCoord to an `~astropy.coordinates.SkyCoord` via `wcs`."""
    return SkyCoord.from_pixel(xp=self.x, yp=self.y, wcs=wcs,
                               origin=origin, mode=mode)
Convert this PixCoord to ~astropy . coordinates . SkyCoord .
72
17
231,806
def from_sky(cls, skycoord, wcs, origin=_DEFAULT_WCS_ORIGIN, mode=_DEFAULT_WCS_MODE):
    """Create a PixCoord from an `~astropy.coordinates.SkyCoord` via `wcs`."""
    x, y = skycoord.to_pixel(wcs=wcs, origin=origin, mode=mode)
    return cls(x=x, y=y)
Create PixCoord from ~astropy . coordinates . SkyCoord .
76
15
231,807
def separation(self, other):
    """Euclidean pixel-space separation to another pixel coordinate."""
    return np.hypot(other.x - self.x, other.y - self.y)
Separation to another pixel coordinate.
35
8
231,808
def skycoord_to_pixel_scale_angle(skycoord, wcs, small_offset=1 * u.arcsec):
    """Convert SkyCoord positions to pixel coordinates, pixel scales and position angles."""
    # Convert to pixel coordinates
    x, y = skycoord_to_pixel(skycoord, wcs, mode=skycoord_to_pixel_mode)
    pixcoord = PixCoord(x=x, y=y)

    # We take a point directly 'above' (in latitude) the position requested
    # and convert it to pixel coordinates, then we use that to figure out the
    # scale and position angle of the coordinate system at the location of
    # the points.

    # Find the coordinates as a representation object
    r_old = skycoord.represent_as('unitspherical')
    # Add a small perturbation in the latitude direction (since longitude
    # is more difficult because it is not directly an angle).
    dlat = small_offset
    r_new = UnitSphericalRepresentation(r_old.lon, r_old.lat + dlat)
    coords_offset = skycoord.realize_frame(r_new)

    # Find pixel coordinates of offset coordinates
    x_offset, y_offset = skycoord_to_pixel(coords_offset, wcs,
                                           mode=skycoord_to_pixel_mode)

    # Offset vector in pixel space
    dx = x_offset - x
    dy = y_offset - y

    # Length of the vector gives the pixel scale (pixels per degree)
    scale = np.hypot(dx, dy) / dlat.to('degree').value
    # Direction of the vector gives the position angle
    angle = np.arctan2(dy, dx) * u.radian

    return pixcoord, scale, angle
Convert a set of SkyCoord coordinates into pixel coordinates pixel scales and position angles .
347
18
231,809
def assert_angle(name, q):
    """Check that `q` is an angular `~astropy.units.Quantity`; raise otherwise."""
    # guard clauses: wrong type first, then wrong physical type
    if not isinstance(q, u.Quantity):
        raise TypeError("{0} should be a Quantity instance".format(name))
    if q.unit.physical_type != 'angle':
        raise ValueError("{0} should have angular units".format(name))
Check that q is an angular ~astropy . units . Quantity .
76
14
231,810
def _silence():
    """Context manager body that silences sys.stdout and sys.stderr.

    The original version used a bare ``except:`` and duplicated the
    stream-restoration code on both the exception and success paths; a single
    ``try/finally`` restores the streams in every case (including
    KeyboardInterrupt/SystemExit, which the flag-based version also handled,
    but more verbosely).
    """
    old_stdout = sys.stdout
    old_stderr = sys.stderr
    sys.stdout = _DummyFile()
    sys.stderr = _DummyFile()
    try:
        yield
    finally:
        # Go ahead and clean up so that exception handling can work normally
        sys.stdout = old_stdout
        sys.stderr = old_stderr
A context manager that silences sys . stdout and sys . stderr .
128
17
231,811
def use_astropy_helpers(**kwargs):
    """Ensure the astropy_helpers module is available and importable.

    Supports automatic submodule initialization when astropy_helpers is a git
    submodule of the project, and downloads it from PyPI if necessary.
    """
    global BOOTSTRAPPER

    config = BOOTSTRAPPER.config
    config.update(**kwargs)

    # Create a new bootstrapper with the updated configuration and run it
    BOOTSTRAPPER = _Bootstrapper(**config)
    BOOTSTRAPPER.run()
Ensure that the astropy_helpers module is available and is importable . This supports automatic submodule initialization if astropy_helpers is included in a project as a git submodule or will download it from PyPI if necessary .
79
49
231,812
def config(self):
    """A dict of the options this _Bootstrapper was configured with."""
    return {optname: getattr(self, optname)
            for optname, _ in CFG_OPTIONS
            if hasattr(self, optname)}
A dict containing the options this _Bootstrapper was configured with .
44
14
231,813
def get_local_directory_dist(self):
    """Handle importing a vendored package from a subdirectory of the source distribution."""
    if not os.path.isdir(self.path):
        return

    log.info('Attempting to import astropy_helpers from {0} {1!r}'.format(
        'submodule' if self.is_submodule else 'directory', self.path))

    dist = self._directory_import()

    if dist is None:
        log.warn('The requested path {0!r} for importing {1} does not '
                 'exist, or does not contain a copy of the {1} '
                 'package.'.format(self.path, PACKAGE_NAME))
    elif self.auto_upgrade and not self.is_submodule:
        # A version of astropy-helpers was found on the available path, but
        # check to see if a bugfix release is available on PyPI
        upgrade = self._do_upgrade(dist)
        if upgrade is not None:
            dist = upgrade

    return dist
Handle importing a vendored package from a subdirectory of the source distribution .
213
15
231,814
def get_local_file_dist(self):
    """Handle importing from a source archive.

    Also uses setup_requires but points easy_install directly at the archive.
    """
    if not os.path.isfile(self.path):
        return

    log.info('Attempting to unpack and import astropy_helpers from '
             '{0!r}'.format(self.path))

    try:
        dist = self._do_download(find_links=[self.path])
    except Exception as e:
        if DEBUG:
            raise
        log.warn('Failed to import {0} from the specified archive {1!r}: '
                 '{2}'.format(PACKAGE_NAME, self.path, str(e)))
        dist = None

    if dist is not None and self.auto_upgrade:
        # A version of astropy-helpers was found on the available path, but
        # check to see if a bugfix release is available on PyPI
        upgrade = self._do_upgrade(dist)
        if upgrade is not None:
            dist = upgrade

    return dist
Handle importing from a source archive ; this also uses setup_requires but points easy_install directly to the source archive .
206
24
231,815
def _directory_import(self):
    """Import astropy_helpers from ``self.path``; returns the distribution or None."""
    path = os.path.abspath(self.path)

    # Use an empty WorkingSet rather than the main
    # pkg_resources.working_set, since on older versions of setuptools this
    # will invoke a VersionConflict when trying to install an upgrade
    ws = pkg_resources.WorkingSet([])
    ws.add_entry(path)
    dist = ws.by_key.get(DIST_NAME)

    if dist is None:
        # We didn't find an egg-info/dist-info in the given path, but if a
        # setup.py exists we can generate it
        setup_py = os.path.join(path, 'setup.py')
        if os.path.isfile(setup_py):
            # We use subprocess instead of run_setup from setuptools to
            # avoid segmentation faults - see the following for more details:
            # https://github.com/cython/cython/issues/2104
            sp.check_output([sys.executable, 'setup.py', 'egg_info'], cwd=path)

            for dist in pkg_resources.find_distributions(path, True):
                # There should be only one...
                return dist

    return dist
Import astropy_helpers from the given path which will be added to sys . path .
295
19
231,816
def _check_submodule ( self ) : if ( self . path is None or ( os . path . exists ( self . path ) and not os . path . isdir ( self . path ) ) ) : return False if self . use_git : return self . _check_submodule_using_git ( ) else : return self . _check_submodule_no_git ( )
Check if the given path is a git submodule .
84
11
231,817
def sdot(U, V):
    """Tensor product contracting the last axis of U with the first axis of V.

    For matrices this equals the ordinary matrix product.
    """
    last_axis = U.ndim - 1
    return np.tensordot(U, V, axes=(last_axis, 0))
Computes the tensor product reducing the last dimension of U with the first dimension of V. For matrices it is equal to the regular matrix product.
43
31
231,818
def set_values(self, x):
    """Update ``self.theta`` from the given values and return the new theta."""
    values = numpy.atleast_2d(x).real  # ahem: silently discard imaginary part
    self.theta = numpy.dot(values, self.__C_inv__)
    return self.theta
Updates self . theta parameter . No returns values
67
11
231,819
def tauchen(N, mu, rho, sigma, m=2):
    """Approximate an AR(1) process by an N-state Markov chain (Tauchen's method).

    Returns [Z, Zprob]: the grid of states and the transition matrix.
    """
    Z = np.zeros((N, 1))
    Zprob = np.zeros((N, N))
    a = (1 - rho) * mu

    # symmetric grid spanning +/- m unconditional standard deviations
    Z[-1] = m * math.sqrt(sigma ** 2 / (1 - (rho ** 2)))
    Z[0] = -1 * Z[-1]
    zstep = (Z[-1] - Z[0]) / (N - 1)
    for i in range(1, N):
        Z[i] = Z[0] + zstep * (i)
    # shift the grid to the unconditional mean
    Z = Z + a / (1 - rho)

    cdf = sp.stats.norm.cdf
    for j in range(0, N):
        for k in range(0, N):
            if k == 0:
                Zprob[j, k] = cdf((Z[0] - a - rho * Z[j] + zstep / 2) / sigma)
            elif k == (N - 1):
                Zprob[j, k] = 1 - cdf((Z[-1] - a - rho * Z[j] - zstep / 2) / sigma)
            else:
                up = cdf((Z[k] - a - rho * Z[j] + zstep / 2) / sigma)
                down = cdf((Z[k] - a - rho * Z[j] - zstep / 2) / sigma)
                Zprob[j, k] = up - down

    return ((Z, Zprob))
Approximate an AR1 process by a finite markov chain using Tauchen s method .
379
19
231,820
def rouwenhorst(rho, sigma, N):
    """Approximate an AR(1) process by an N-state Markov chain (Rouwenhorst's method).

    Returns [nodes, P]: the grid of states and the transition matrix.
    """
    from numpy import sqrt, linspace, array, zeros

    sigma = float(sigma)

    if N == 1:
        nodes = array([0.0])
        transitions = array([[1.0]])
        return [nodes, transitions]

    p = (rho + 1) / 2
    q = p
    # half-width of the symmetric grid
    nu = sqrt((N - 1) / (1 - rho ** 2)) * sigma
    nodes = linspace(-nu, nu, N)

    sig_a = sigma
    n = 1
    # mat0 = array( [[1]] )
    mat0 = array([[p, 1 - p], [1 - q, q]])
    if N == 2:
        return [nodes, mat0]

    # Grow the transition matrix one state at a time: embed the previous
    # matrix in the four corners of an (n x n) zero matrix and combine.
    for n in range(3, N + 1):
        base = zeros((n, n))
        top_left = base.copy()
        top_right = base.copy()
        bottom_left = base.copy()
        bottom_right = base.copy()
        top_left[:-1, :-1] = mat0
        top_right[:-1, 1:] = mat0
        bottom_left[1:, :-1] = mat0
        bottom_right[1:, 1:] = mat0
        mat0 = (p * top_left + (1 - p) * top_right +
                (1 - q) * bottom_left + q * bottom_right)
        # interior rows were counted twice
        mat0[1:-1, :] = mat0[1:-1, :] / 2

    P = mat0
    return [nodes, P]
Approximate an AR1 process by a finite markov chain using Rouwenhorst s method .
356
21
231,821
def tensor_markov(*args):
    """Compute the product of two or more independent Markov chains.

    Each chain is a pair ``(nodes, transitions)`` with nodes given as a 2-D
    (n_states x n_vars) array.  Returns ``[nodes, transitions]`` of the
    product chain.
    """
    if len(args) > 2:
        m1 = args[0]
        m2 = args[1]
        tail = args[2:]
        prod = tensor_markov(m1, m2)
        # BUG FIX: the remaining chains must be unpacked; previously the
        # tuple `tail` was passed as one (malformed) chain argument.
        return tensor_markov(prod, *tail)
    elif len(args) == 2:
        m1, m2 = args
        n1, t1 = m1
        n2, t2 = m2
        n1 = np.array(n1, dtype=float)
        n2 = np.array(n2, dtype=float)
        t1 = np.array(t1, dtype=float)
        t2 = np.array(t2, dtype=float)
        assert n1.shape[0] == t1.shape[0] == t1.shape[1]
        assert n2.shape[0] == t2.shape[0] == t2.shape[1]

        # transitions of independent chains combine as a Kronecker product
        t = np.kron(t1, t2)
        p = t1.shape[0]
        q = t2.shape[0]
        # product state space: every pairing of states of m1 and m2
        n = np.column_stack([
            np.repeat(n1, q, axis=0),
            np.tile(n2, (p, 1)),
        ])
        return [n, t]
    else:
        raise Exception(
            "Incorrect number of arguments. Expected at least 2. "
            "Found {}.".format(len(args)))
Computes the product of two independent markov chains .
365
11
231,822
def dynare_import(filename, full_output=False, debug=False):
    """Import the model defined in the given Dynare file.

    The model's name is set to the file name without its extension.
    The original version never closed the file handle (resource leak);
    a ``with`` block closes it even if parsing raises.
    """
    import os
    basename = os.path.basename(filename)
    # model name = file name stripped of its extension
    fname = re.compile(r'(.*)\.(.*)').match(basename).group(1)
    with open(filename) as f:
        txt = f.read()
    model = parse_dynare_text(txt, full_output=full_output, debug=debug)
    model.name = fname
    return model
Imports model defined in specified file
105
7
231,823
def _shocks_to_epsilons ( model , shocks , T ) : n_e = len ( model . calibration [ 'exogenous' ] ) # if we have a DataFrame, convert it to a dict and rely on the method below if isinstance ( shocks , pd . DataFrame ) : shocks = { k : shocks [ k ] . tolist ( ) for k in shocks . columns } # handle case where shocks might be a dict. Be careful to handle case where # value arrays are not the same length if isinstance ( shocks , dict ) : epsilons = np . zeros ( ( T + 1 , n_e ) ) for ( i , k ) in enumerate ( model . symbols [ "exogenous" ] ) : if k in shocks : this_shock = shocks [ k ] epsilons [ : len ( this_shock ) , i ] = this_shock epsilons [ len ( this_shock ) : , i ] = this_shock [ - 1 ] else : # otherwise set to value in calibration epsilons [ : , i ] = model . calibration [ "exogenous" ] [ i ] return epsilons # read from calibration if not given if shocks is None : shocks = model . calibration [ "exogenous" ] # now we just assume that shocks is array-like and try using the output of # np.asarray(shocks) shocks = np . asarray ( shocks ) shocks = shocks . reshape ( ( - 1 , n_e ) ) # until last period, exogenous shock takes its last value epsilons = np . zeros ( ( T + 1 , n_e ) ) epsilons [ : ( shocks . shape [ 0 ] - 1 ) , : ] = shocks [ 1 : , : ] epsilons [ ( shocks . shape [ 0 ] - 1 ) : , : ] = shocks [ - 1 : , : ] return epsilons
Helper function to support input argument shocks being one of many different data types . Will always return a T n_e matrix .
418
25
231,824
def clear_all():
    """Clear previously declared parameters, variables and shocks from the caller."""
    frame = inspect.currentframe().f_back
    try:
        # we should avoid declaring symbols twice!
        if frame.f_globals.get('variables_order'):
            del frame.f_globals['variables_order']
        if frame.f_globals.get('parameters_order'):
            del frame.f_globals['parameters_order']
    finally:
        # break the reference cycle with the caller's frame
        del frame
Clears all parameters variables and shocks defined previously
113
9
231,825
def nonlinear_system(model, initial_dr=None, maxit=10, tol=1e-8, grid={},
                     distribution={}, verbose=True):
    """Find a global solution for `model` by solving one large system of
    equations with a simple Newton algorithm.
    """
    if verbose:
        headline = '|{0:^4} | {1:10} | {2:8} |'
        headline = headline.format('N', ' Error', 'Time')
        stars = '-' * len(headline)
        print(stars)
        print(headline)
        print(stars)
        # format string for within loop
        fmt_str = '|{0:4} | {1:10.3e} | {2:8.3f} |'

    f = model.functions['arbitrage']
    g = model.functions['transition']
    p = model.calibration['parameters']

    distrib = model.get_distribution(**distribution)
    nodes, weights = distrib.discretize()

    approx = model.get_grid(**grid)
    ms = create_interpolator(approx, approx.interpolation)
    grid = ms.grid

    if initial_dr is None:
        dr = approximate_controls(model)
    else:
        dr = initial_dr

    ms.set_values(dr(grid))
    x = dr(grid)
    x0 = x.copy()

    it = 0
    err = 10
    a0 = x0.copy().reshape((x0.shape[0] * x0.shape[1],))
    a = a0.copy()

    while err > tol and it < maxit:
        it += 1
        t1 = time.time()
        r, da = residuals(f, g, grid, a.reshape(x0.shape), ms, nodes,
                          weights, p, diff=True)[:2]
        r = r.flatten()
        err = abs(r).max()
        t2 = time.time()
        if verbose:
            print(fmt_str.format(it, err, t2 - t1))
        # Newton step on the stacked system
        if err > tol:
            a -= scipy.sparse.linalg.spsolve(da, r)

    if verbose:
        print(stars)

    return ms
Finds a global solution for model by solving one large system of equations using a simple newton algorithm .
480
21
231,826
def gauss_hermite_nodes(orders, sigma, mu=None):
    """Compute nodes and weights for Gauss-Hermite quadrature of a normal law.

    Returns [x, w] where nodes are scaled by the covariance `sigma` and
    shifted by the mean `mu`.
    """
    if isinstance(orders, int):
        orders = [orders]

    import numpy

    if mu is None:
        mu = numpy.array([0] * sigma.shape[0])

    herms = [hermgauss(i) for i in orders]
    # rescale to "probabilists'" convention for a standard normal
    points = [h[0] * numpy.sqrt(2) for h in herms]
    weights = [h[1] / numpy.sqrt(numpy.pi) for h in herms]

    if len(orders) == 1:
        # Note: if sigma is 2D, x will always be 2D, even if sigma is only 1x1.
        x = numpy.array(points[0]) * numpy.sqrt(float(sigma))
        if sigma.ndim == 2:
            x = x[:, None]
        w = weights[0]
        return [x, w]

    x = cartesian(points).T
    from functools import reduce
    w = reduce(numpy.kron, weights)

    # degenerate (zero-variance) dimensions: temporarily set unit variance...
    zero_columns = numpy.where(sigma.sum(axis=0) == 0)[0]
    for i in zero_columns:
        sigma[i, i] = 1.0

    C = numpy.linalg.cholesky(sigma)
    x = numpy.dot(C, x) + mu[:, numpy.newaxis]
    x = numpy.ascontiguousarray(x.T)

    # ...then pin those dimensions back to zero
    for i in zero_columns:
        x[:, i] = 0

    return [x, w]
Computes the weights and nodes for Gauss Hermite quadrature .
367
15
231,827
def newton(f, x, verbose=False, tol=1e-6, maxit=5, jactype='serial'):
    """Solve a nonlinear system with safeguarded (backtracking) Newton iterations.

    `f` must return ``[value, jacobian]``.  Returns ``[x, iterations]``.
    """
    # local `print` shadows the builtin on purpose: no-op unless verbose
    if verbose:
        print = lambda txt: old_print(txt)
    else:
        print = lambda txt: None

    it = 0
    error = 10
    converged = False
    maxbacksteps = 30
    x0 = x

    if jactype == 'sparse':
        from scipy.sparse.linalg import spsolve as solve
    elif jactype == 'full':
        from numpy.linalg import solve
    else:
        solve = serial_solve

    while it < maxit and not converged:
        [v, dv] = f(x)
        # TODO: rewrite starting here
        error_0 = abs(v).max()
        if error_0 < tol:
            if verbose:
                print("> System was solved after iteration {}. Residual={}".format(it, error_0))
            converged = True
        else:
            it += 1
            dx = solve(dv, v)
            # backtracking line search: halve the step until the residual drops
            for bck in range(maxbacksteps):
                xx = x - dx * (2 ** (-bck))
                vm = f(xx)[0]
                err = abs(vm).max()
                if err < error_0:
                    break
            x = xx
            if verbose:
                print("\t> {} | {} | {}".format(it, err, bck))

    if not converged:
        import warnings
        warnings.warn("Did not converge")
    return [x, it]
Solve nonlinear system using safeguarded Newton iterations
361
10
231,828
def qzordered(A, B, crit=1.0):
    """Ordered QZ decomposition: eigenvalues bigger than `crit` sorted top-left."""
    TOL = 1e-10  # kept for interface stability (currently unused)

    def select(alpha, beta):
        return alpha ** 2 > crit * beta ** 2

    [S, T, alpha, beta, U, V] = ordqz(A, B, output='real', sort=select)

    # generalized eigenvalue moduli
    eigval = abs(numpy.diag(S) / numpy.diag(T))

    return [S, T, U, V, eigval]
Eigenvalues bigger than crit are sorted in the top - left .
111
14
231,829
def ordqz(A, B, sort='lhp', output='real', overwrite_a=False,
          overwrite_b=False, check_finite=True):
    """QZ decomposition for a pair of matrices, with reordering.

    Vendored variant of scipy's ordqz built on the private ``_qz`` helper and
    LAPACK ``tgsen``.  Fix: ``np.int`` (removed in NumPy >= 1.24) replaced by
    the builtin ``int``.
    """
    import warnings
    import numpy as np
    from numpy import asarray_chkfinite
    from scipy.linalg.misc import LinAlgError, _datacopied
    from scipy.linalg.lapack import get_lapack_funcs
    from scipy._lib.six import callable
    from scipy.linalg._decomp_qz import _qz, _select_function

    # NOTE: should users be able to set these?
    lwork = None
    result, typ = _qz(A, B, output=output, lwork=lwork, sort=None,
                      overwrite_a=overwrite_a, overwrite_b=overwrite_b,
                      check_finite=check_finite)
    AA, BB, Q, Z = result[0], result[1], result[-4], result[-3]
    if typ not in 'cz':
        alpha, beta = result[3] + result[4] * 1.j, result[5]
    else:
        alpha, beta = result[3], result[4]

    sfunction = _select_function(sort)
    select = sfunction(alpha, beta)

    tgsen, = get_lapack_funcs(('tgsen',), (AA, BB))

    if lwork is None or lwork == -1:
        # workspace query
        result = tgsen(select, AA, BB, Q, Z, lwork=-1)
        # was `.astype(np.int)` — alias removed in NumPy 1.24
        lwork = result[-3][0].real.astype(int)
        # looks like wrong value passed to ZTGSYL if not
        lwork += 1

    liwork = None
    if liwork is None or liwork == -1:
        result = tgsen(select, AA, BB, Q, Z, liwork=-1)
        liwork = result[-2][0]

    result = tgsen(select, AA, BB, Q, Z, lwork=lwork, liwork=liwork)

    info = result[-1]
    if info < 0:
        raise ValueError("Illegal value in argument %d of tgsen" % -info)
    elif info == 1:
        raise ValueError("Reordering of (A, B) failed because the transformed"
                         " matrix pair (A, B) would be too far from "
                         "generalized Schur form; the problem is very "
                         "ill-conditioned. (A, B) may have been partially "
                         "reorded. If requested, 0 is returned in DIF(*), "
                         "PL, and PR.")

    # for real results has a, b, alphar, alphai, beta, q, z, m, pl, pr, dif,
    # work, iwork, info
    if typ in ['f', 'd']:
        alpha = result[2] + result[3] * 1.j
        return (result[0], result[1], alpha, result[4], result[5], result[6])
    # for complex results has a, b, alpha, beta, q, z, m, pl, pr, dif, work,
    # iwork, info
    else:
        return result[0], result[1], result[2], result[3], result[4], result[5]
QZ decomposition for a pair of matrices with reordering .
756
14
231,830
def parameterized_expectations_direct(model, verbose=False, initial_dr=None,
                                      pert_order=1, grid={}, distribution={},
                                      maxit=100, tol=1e-8):
    """Find a global solution for `model` using a parameterized expectations
    function.  Requires the model to be written with controls as a direct
    function of the model objects.
    """
    t1 = time.time()
    g = model.functions['transition']
    d = model.functions['direct_response']
    h = model.functions['expectation']
    parms = model.calibration['parameters']

    if initial_dr is None:
        if pert_order == 1:
            initial_dr = approximate_controls(model)
        if pert_order > 1:
            raise Exception("Perturbation order > 1 not supported (yet).")

    approx = model.get_grid(**grid)
    grid = approx.grid
    interp_type = approx.interpolation
    dr = create_interpolator(approx, interp_type)
    expect = create_interpolator(approx, interp_type)

    distrib = model.get_distribution(**distribution)
    nodes, weights = distrib.discretize()

    N = grid.shape[0]
    z = np.zeros((N, len(model.symbols['expectations'])))

    x_0 = initial_dr(grid)
    x_0 = x_0.real  # just in case ...
    h_0 = h(grid, x_0, parms)

    it = 0
    err = 10
    err_0 = 10

    if verbose:
        headline = '|{0:^4} | {1:10} | {2:8} | {3:8} |'
        headline = headline.format('N', ' Error', 'Gain', 'Time')
        stars = '-' * len(headline)
        print(stars)
        print(headline)
        print(stars)
        # format string for within loop
        fmt_str = '|{0:4} | {1:10.3e} | {2:8.3f} | {3:8.3f} |'

    while err > tol and it <= maxit:
        it += 1
        t_start = time.time()

        # dr.set_values(x_0)
        expect.set_values(h_0)

        z[...] = 0
        for i in range(weights.shape[0]):
            e = nodes[i, :]
            S = g(grid, x_0, e, parms)
            # evaluate expectation over the future state
            z += weights[i] * expect(S)

        # TODO: check that control is admissible
        new_x = d(grid, z, parms)
        new_h = h(grid, new_x, parms)

        # update error
        err = (abs(new_h - h_0).max())

        # Update guess for decision rule and expectations function
        x_0 = new_x
        h_0 = new_h

        # print error information if `verbose`
        err_SA = err / err_0
        err_0 = err
        t_finish = time.time()
        elapsed = t_finish - t_start
        if verbose:
            print(fmt_str.format(it, err, err_SA, elapsed))

    if it == maxit:
        import warnings
        warnings.warn(UserWarning("Maximum number of iterations reached"))

    # compute final time and do final printout if `verbose`
    t2 = time.time()
    if verbose:
        print(stars)
        print('Elapsed: {} seconds.'.format(t2 - t1))
        print(stars)

    # Interpolation for the decision rule
    dr.set_values(x_0)

    return dr
Finds a global solution for model using parameterized expectations function . Requires the model to be written with controls as a direct function of the model objects .
796
30
231,831
def numdiff(fun, args):
    """Vectorized forward-difference numerical differentiation.

    Returns ``[fun(*args)] + [dfun/darg for each arg]``.
    """
    epsilon = 1e-8  # forward-difference step
    args = list(args)
    v0 = fun(*args)
    N = v0.shape[0]
    l_v = len(v0)
    dvs = []
    for i, a in enumerate(args):
        l_a = a.shape[1]
        dv = numpy.zeros((N, l_v, l_a))
        nargs = list(args)
        for j in range(l_a):
            # perturb column j of argument i only
            xx = args[i].copy()
            xx[:, j] += epsilon
            nargs[i] = xx
            dv[:, :, j] = (fun(*nargs) - v0) / epsilon
        dvs.append(dv)
    return [v0] + dvs
Vectorized numerical differentiation
191
4
231,832
def bandpass_filter(data, k, w1, w2):
    """Apply a k-th order bandpass filter to `data`, selecting periods between w1 and w2."""
    data = np.asarray(data)
    low_w = np.pi * 2 / w2
    high_w = np.pi * 2 / w1
    bweights = np.zeros(2 * k + 1)
    bweights[k] = (high_w - low_w) / np.pi   # center weight
    j = np.arange(1, int(k) + 1)
    weights = 1 / (np.pi * j) * (sin(high_w * j) - sin(low_w * j))
    bweights[k + j] = weights                # right wing of the kernel
    bweights[:k] = weights[::-1]             # mirrored left wing
    bweights -= bweights.mean()              # normalize so the kernel sums to zero
    return fftconvolve(bweights, data, mode='valid')
This function will apply a bandpass filter to data . It will be kth order and will select the band between w1 and w2 .
179
29
231,833
def dprint(s):
    """Print `s` (or eval it if a string) with caller-frame debugging information."""
    import inspect
    frameinfo = inspect.stack()[1]
    callerframe = frameinfo.frame
    d = callerframe.f_locals
    # a string argument is evaluated in the caller's local namespace
    if isinstance(s, str):
        val = eval(s, d)
    else:
        val = s
    # recover the literal argument text from the call-site source line
    cc = frameinfo.code_context[0]
    import re
    regex = re.compile(r"dprint\((.*)\)")
    res = regex.search(cc)
    s = res.group(1)
    text = ''
    text += bcolors.OKBLUE + "At <{}>\n".format(str(frameinfo)) + bcolors.ENDC
    text += bcolors.WARNING + "{}: ".format(s) + bcolors.ENDC
    text += str(val)
    text += str()
    print(text)
Prints s with additional debugging informations
180
8
231,834
def non_decreasing_series(n, size):
    """List all non-decreasing sequences of length `size` over 0..n-1, in order."""
    if size == 1:
        return [[v] for v in range(n)]
    result = []
    # extend each shorter sequence with every element >= its last one
    for prefix in non_decreasing_series(n, size - 1):
        for nxt in range(prefix[-1], n):
            result.append(prefix + [nxt])
    return result
Lists all combinations of 0 ... n - 1 in increasing order
93
13
231,835
def higher_order_diff(eqs, syms, order=2):
    """Take derivatives of `eqs` w.r.t. `syms`, up to the given order.

    Returns a list D where D[i] holds the i-th order derivative arrays.
    """
    import numpy
    eqs = list([sympy.sympify(eq) for eq in eqs])
    syms = list([sympy.sympify(s) for s in syms])
    neq = len(eqs)
    p = len(syms)
    D = [numpy.array(eqs)]
    orders = []
    for i in range(1, order + 1):
        par = D[i - 1]
        mat = numpy.empty([neq] + [p] * i, dtype=object)
        # only non-decreasing index combinations are filled (mixed partials
        # are symmetric, so the rest would be redundant)
        for ind in non_decreasing_series(p, i):
            ind_parent = ind[:-1]
            k = ind[-1]
            for line in range(neq):
                ii = [line] + ind
                iid = [line] + ind_parent
                eeq = par[tuple(iid)]
                mat[tuple(ii)] = eeq.diff(syms[k])
        D.append(mat)
    return D
Takes higher order derivatives of a list of equations w . r . t a list of paramters
246
20
231,836
def get_ranked_players():
    """Yield the first 100 ranked players scraped from the rankings page."""
    rankings_page = requests.get(RANKINGS_URL)
    root = etree.HTML(rankings_page.text)
    player_rows = root.xpath('//div[@id="ranked"]//tr')
    # first row is the table header
    for row in player_rows[1:]:
        player_row = row.xpath('td[@class!="country"]//text()')
        yield _Player(
            name=player_row[1],
            country=row[1][0].get('title'),
            triple_crowns=player_row[3],
            monthly_win=player_row[4],
            biggest_cash=player_row[5],
            plb_score=player_row[6],
            biggest_score=player_row[7],
            average_score=player_row[8],
            previous_rank=player_row[9],
        )
Get the list of the first 100 ranked players .
203
10
231,837
def difference(cls, first, second):
    """Return the numerical distance between two ranks."""
    # coerce, so we always get Rank instances even if strings were passed in
    first, second = cls(first), cls(second)
    members = list(cls)
    return abs(members.index(first) - members.index(second))
Tells the numerical difference between two ranks .
70
9
231,838
def make_random(cls):
    """Return a random Card instance (bypasses __init__ on purpose)."""
    card = object.__new__(cls)
    card.rank = Rank.make_random()
    card.suit = Suit.make_random()
    return card
Returns a random Card instance .
44
6
231,839
def twoplustwo_player(username):
    """Show profile information about a Two plus Two forum member."""
    from .website.twoplustwo import ForumMember, AmbiguousUserNameError, UserNotFoundError

    try:
        member = ForumMember(username)
    except UserNotFoundError:
        raise click.ClickException('User "%s" not found!' % username)
    except AmbiguousUserNameError as e:
        # several candidates: let the user pick one interactively
        click.echo('Got multiple users with similar names!', err=True)
        for ind, user in enumerate(e.users):
            click.echo('{}. {}'.format(ind + 1, user.name), err=True)
        number = click.prompt(
            'Which would you like to see [{}-{}]'.format(1, len(e.users)),
            prompt_suffix='? ',
            type=click.IntRange(1, len(e.users)),
            err=True)
        userid = e.users[int(number) - 1].id
        member = ForumMember.from_userid(userid)
        click.echo(err=True)  # empty line after input

    _print_header('Two plus two forum member')
    _print_values(
        ('Username', member.username),
        ('Forum id', member.id),
        ('Location', member.location),
        ('Total posts', member.total_posts),
        ('Posts per day', member.posts_per_day),
        ('Rank', member.rank),
        ('Last activity', member.last_activity),
        ('Join date', member.join_date),
        ('Usergroups', member.public_usergroups),
        ('Profile picture', member.profile_picture),
        ('Avatar', member.avatar),
    )
Get profile information about a Two plus Two Forum member given the username .
390
14
231,840
def p5list(num):
    """List pocketfives ranked players: max 100, or NUM if specified."""
    from .website.pocketfives import get_ranked_players

    format_str = ('{:>4.4} {!s:<15.13}{!s:<18.15}{!s:<9.6}{!s:<10.7}'
                  '{!s:<14.11}{!s:<12.9}{!s:<12.9}{!s:<12.9}{!s:<4.4}')
    click.echo(format_str.format('Rank', 'Player name', 'Country', 'Triple',
                                 'Monthly', 'Biggest cash', 'PLB score',
                                 'Biggest s', 'Average s', 'Prev'))
    # just generate the appropriate number of underlines and cut them with format_str
    underlines = ['-' * 20] * 10
    click.echo(format_str.format(*underlines))

    for ind, player in enumerate(get_ranked_players()):
        click.echo(format_str.format(str(ind + 1) + '.', *player))
        if ind == num - 1:
            break
List pocketfives ranked players max 100 if no NUM or NUM if specified .
262
16
231,841
def psstatus():
    """Show PokerStars status, such as number of players and tournaments.

    Prints overall site figures followed by a per-site breakdown table.
    """
    from .website.pokerstars import get_status
    _print_header('PokerStars status')
    status = get_status()
    _print_values(
        ('Info updated', status.updated),
        ('Tables', status.tables),
        ('Players', status.players),
        ('Active tournaments', status.active_tournaments),
        ('Total tournaments', status.total_tournaments),
        ('Clubs', status.clubs),
        ('Club members', status.club_members),
    )
    # per-site rows: id, tables, players, active tournaments (thousands-separated)
    site_format_str = '{0.id:<12} {0.tables:<7,} {0.players:<8,} {0.active_tournaments:,}'
    click.echo('\nSite Tables Players Tournaments')
    click.echo('----------- ------ ------- -----------')
    for site in status.sites:
        click.echo(site_format_str.format(site))
Shows PokerStars status, such as number of players and tournaments .
217
12
231,842
def notes(self):
    """Tuple of note data built from every ``<note>`` element in the tree."""
    note_elements = self.root.iter('note')
    return tuple(map(self._get_note_data, note_elements))
Tuple of notes ..
34
5
231,843
def labels(self):
    """Tuple of _Label records, one per ``<label>`` element in the tree."""
    collected = []
    for element in self.root.iter('label'):
        collected.append(_Label(element.get('id'), element.get('color'), element.text))
    return tuple(collected)
Tuple of labels .
48
5
231,844
def add_note(self, player, text, label=None, update=None):
    """Add a note to the xml.  If the *update* param is None, it will be the
    current (UTC) time.

    Raises LabelNotFoundError when *label* is given but unknown.
    """
    if label is not None and (label not in self.label_names):
        raise LabelNotFoundError('Invalid label: {}'.format(label))
    if update is None:
        update = datetime.utcnow()
    # converted to timestamp, rounded to ones
    # NOTE(review): '%s' is a non-standard strftime directive (epoch seconds
    # on glibc); it is not portable to all platforms — confirm intended targets.
    update = update.strftime('%s')
    label_id = self._get_label_id(label)
    new_note = etree.Element('note', player=player, label=label_id, update=update)
    new_note.text = text
    self.root.append(new_note)
Add a note to the xml . If update param is None it will be the current time .
149
19
231,845
def append_note(self, player, text):
    """Append *text* to the end of an already existing note of *player*."""
    existing = self._find_note(player)
    existing.text = existing.text + text
Append text to an already existing note .
28
9
231,846
def prepend_note(self, player, text):
    """Prepend *text* to the front of an already existing note of *player*."""
    existing = self._find_note(player)
    existing.text = text + existing.text
Prepend text to an already existing note .
33
9
231,847
def get_label(self, name):
    """Return the label called *name* as a _Label record."""
    tag = self._find_label(name)
    return _Label(tag.get('id'), tag.get('color'), tag.text)
Find the label by name .
55
6
231,848
def add_label(self, name, color):
    """Add a new label; its id is one more than the last existing label's id.

    Raises ValueError when *color* does not match the expected color format.
    """
    normalized = color.upper()
    if not self._color_re.match(normalized):
        raise ValueError('Invalid color: {}'.format(color))
    labels_tag = self.root[0]
    next_id = str(int(labels_tag[-1].get('id')) + 1)
    element = etree.Element('label', id=next_id, color=normalized)
    element.text = name
    labels_tag.append(element)
Add a new label . Its id will automatically be calculated .
138
13
231,849
def del_label(self, name):
    """Delete the label called *name* from the labels container."""
    container = self.root[0]
    victim = self._find_label(name)
    container.remove(victim)
Delete a label by name .
36
6
231,850
def save(self, filename):
    """Write the string representation of this object to *filename*."""
    serialized = str(self)
    with open(filename, 'w') as out:
        out.write(serialized)
Save the note XML to a file .
31
8
231,851
def board(self):
    """Board cards assembled from flop, turn and river; None when no board."""
    cards = list(self.flop.cards) if self.flop else []
    for street in (self.turn, self.river):
        if street:
            cards.append(street)
    return tuple(cards) if cards else None
Calculates board from flop turn and river .
62
11
231,852
def _parse_date(self, date_string):
    """Parse *date_string* (formatted per self._DATE_FORMAT, in the site's
    local timezone self._TZ) and store it on self.date as an aware UTC
    datetime."""
    date = datetime.strptime(date_string, self._DATE_FORMAT)
    # localize the naive datetime to the site timezone, then convert to UTC
    self.date = self._TZ.localize(date).astimezone(pytz.UTC)
Parse the date_string and return a datetime object as UTC .
60
15
231,853
def _split_raw ( self ) : self . _splitted = self . _split_re . split ( self . raw ) # search split locations (basically empty strings) self . _sections = [ ind for ind , elem in enumerate ( self . _splitted ) if not elem ]
Split hand history by sections .
65
6
231,854
def _get_timezone(self, root):
    """Find timezone information at the bottom of the page and return a
    tzoffset built from it."""
    tz_str = root.xpath('//div[@class="smallfont" and @align="center"]')[0].text
    hours = int(self._tz_re.search(tz_str).group(1))
    # NOTE(review): dateutil's tzoffset expects an offset in *seconds*;
    # hours * 60 yields minutes — looks like a factor-of-60 bug unless
    # tzoffset here is a different helper. Confirm against the import.
    return tzoffset(tz_str, hours * 60)
Find timezone information at the bottom of the page .
83
12
231,855
def get_current_tournaments():
    """Yield upcoming tournaments parsed from the pokerstars schedule XML."""
    response = requests.get(TOURNAMENTS_XML_URL)
    root = etree.XML(response.content)
    for node in root.iter('{*}tournament'):
        yield _Tournament(
            start_date=node.findtext('{*}start_date'),
            name=node.findtext('{*}name'),
            game=node.findtext('{*}game'),
            buyin=node.findtext('{*}buy_in_fee'),
            players=node.get('players'),
        )
Get the next 200 tournaments from pokerstars .
142
9
231,856
def _filter_file ( src , dest , subst ) : substre = re . compile ( r'\$(%s)' % '|' . join ( subst . keys ( ) ) ) def repl ( m ) : return subst [ m . group ( 1 ) ] with open ( src , "rt" ) as sf , open ( dest , "wt" ) as df : while True : l = sf . readline ( ) if not l : break df . write ( re . sub ( substre , repl , l ) )
Copy src to dest doing substitutions on the fly .
115
11
231,857
def _fixup_graphql_error(self, data):
    """Given a possible GraphQL error payload, make sure it is in shape:
    ``data['errors']`` is a list of dicts, each with a string ``'message'``.

    Uses copy-on-write: the input is never mutated; copies of *data* /
    the errors list are made only when a fix is actually needed.
    """
    # remember originals so we can detect whether we already copied
    original_data = data
    errors = data.get('errors')
    original_errors = errors
    if not isinstance(errors, list):
        # whole errors payload malformed: wrap it in a single-error list
        self.logger.warning('data["errors"] is not a list! Fix up data=%r', data)
        data = data.copy()
        data['errors'] = [{'message': str(errors)}]
        return data
    for i, error in enumerate(errors):
        if not isinstance(error, dict):
            # non-dict error entry: replace with {'message': str(error)}
            self.logger.warning('Error #%d: is not a dict: %r. Fix up!', i, error)
            if data is original_data:
                data = data.copy()
            if errors is original_errors:
                errors = errors.copy()
                data['errors'] = errors
            errors[i] = {'message': str(error)}
            continue
        message = error.get('message')
        if not isinstance(message, str):
            # message missing or not a string: coerce to str
            if data is original_data:
                data = data.copy()
            if errors is original_errors:
                errors = errors.copy()
                data['errors'] = errors
            message = str(error) if message is None else str(message)
            error = error.copy()
            error['message'] = message
            errors[i] = error
    return data
Given a possible GraphQL error payload , make sure it is in shape .
294
14
231,858
def snippet(code, locations, sep=' | ', colmark=('-', '^'), context=5):
    """Given *code* and a list of error *locations*, return annotated
    snippet lines: numbered source lines followed by a column marker."""
    if not locations:
        return []
    all_lines = code.split('\n')
    width = int(len(all_lines) / 10) + 1
    numfmt = '%{}d'.format(width)
    out = []
    for loc in locations:
        row = max(0, loc.get('line', 1) - 1)
        col = max(0, loc.get('column', 1) - 1)
        first = max(0, row - context)
        # numbered source lines up to and including the error line
        for lineno, text in enumerate(all_lines[first:row + 1], first):
            out.append('{}{}{}'.format(numfmt % lineno, sep, text))
        # marker line pointing at the offending column
        out.append('{}{}{}'.format(' ' * (width + len(sep)), colmark[0] * col, colmark[1]))
    return out
Given a code and list of locations convert to snippet lines .
227
12
231,859
def _create_non_null_wrapper ( name , t ) : def __new__ ( cls , json_data , selection_list = None ) : if json_data is None : raise ValueError ( name + ' received null value' ) return t ( json_data , selection_list ) def __to_graphql_input__ ( value , indent = 0 , indent_string = ' ' ) : return t . __to_graphql_input__ ( value , indent , indent_string ) return type ( name , ( t , ) , { '__new__' : __new__ , '_%s__auto_register' % name : False , '__to_graphql_input__' : __to_graphql_input__ , } )
creates type wrapper for non - null of given type
165
11
231,860
def _create_list_of_wrapper ( name , t ) : def __new__ ( cls , json_data , selection_list = None ) : if json_data is None : return None return [ t ( v , selection_list ) for v in json_data ] def __to_graphql_input__ ( value , indent = 0 , indent_string = ' ' ) : r = [ ] for v in value : r . append ( t . __to_graphql_input__ ( v , indent , indent_string ) ) return '[' + ', ' . join ( r ) + ']' def __to_json_value__ ( value ) : if value is None : return None return [ t . __to_json_value__ ( v ) for v in value ] return type ( name , ( t , ) , { '__new__' : __new__ , '_%s__auto_register' % name : False , '__to_graphql_input__' : __to_graphql_input__ , '__to_json_value__' : __to_json_value__ , } )
creates type wrapper for list of given type
244
9
231,861
def add_query_to_url(url, extra_query):
    """Return *url* with *extra_query* (dict or pair sequence) merged into
    its query string."""
    parts = urllib.parse.urlsplit(url)
    query = urllib.parse.parse_qsl(parts.query)
    if isinstance(extra_query, dict):
        for key, value in extra_query.items():
            if isinstance(value, (tuple, list)):
                # multi-valued key: one pair per element
                for item in value:
                    query.append((key, item))
            else:
                query.append((key, value))
    else:
        query.extend(extra_query)
    rebuilt = urllib.parse.SplitResult(
        parts.scheme,
        parts.netloc,
        parts.path,
        urllib.parse.urlencode(query),
        parts.fragment,
    )
    return rebuilt.geturl()
Adds an extra query to URL returning the new URL .
188
11
231,862
def connection_args(*lst, **mapping):
    """Return the default parameters for a relay connection
    (after/before cursors, first/last counts), merged with any extras."""
    args = ArgDict(*lst, **mapping)
    for key, kind in (('after', String), ('before', String), ('first', Int), ('last', Int)):
        args.setdefault(key, kind)
    return args
Returns the default parameters for connection .
80
7
231,863
def msjd(theta):
    """Mean squared jumping distance of a chain stored as a structured array."""
    total = 0.
    for name in theta.dtype.names:
        steps = np.diff(theta[name], axis=0)
        total += np.sum(steps ** 2)
    return total
Mean squared jumping distance .
49
6
231,864
def loglik(self, theta, t=None):
    """Cumulative log-likelihood at parameter values *theta*, summed over
    observations 0..t (all observations when t is None)."""
    if t is None:
        t = self.T - 1
    total = np.zeros(shape=theta.shape[0])
    for s in range(t + 1):
        total += self.logpyt(theta, s)
    return total
log - likelihood at given parameter values .
68
8
231,865
def logpost(self, theta, t=None):
    """Posterior log-density at *theta*: prior log-pdf plus log-likelihood
    up to time *t*."""
    prior_part = self.prior.logpdf(theta)
    return prior_part + self.loglik(theta, t)
Posterior log - density at given parameter values .
36
11
231,866
def copyto(self, src, where=None):
    """Same syntax and functionality as numpy.copyto: copy entries of
    ``src.l`` into ``self.l`` where the mask is True.

    Bug fix: the original indexed ``where[n]`` unconditionally, so the
    documented default ``where=None`` raised TypeError.  numpy.copyto
    copies everywhere when no mask is given; we now do the same.
    """
    if where is None:
        # no mask: copy every entry (numpy.copyto semantics)
        for n in range(len(self.l)):
            self.l[n] = src.l[n]
    else:
        for n in range(len(self.l)):
            if where[n]:
                self.l[n] = src.l[n]
Same syntax and functionality as numpy . copyto
45
10
231,867
def copy(self):
    """Return a copy of the object: container attributes are copied via
    their own .copy(), shared attributes are deep-copied."""
    kwargs = {}
    for key in self.containers:
        kwargs[key] = self.__dict__[key].copy()
    for key in self.shared:
        kwargs[key] = cp.deepcopy(self.__dict__[key])
    return self.__class__(**kwargs)
Returns a copy of the object .
73
7
231,868
def copyto(self, src, where=None):
    """Emulate numpy.copyto over every container attribute: ndarrays are
    copied in place with np.copyto, other containers delegate to their
    own copyto method."""
    for key in self.containers:
        target = self.__dict__[key]
        if isinstance(target, np.ndarray):
            np.copyto(target, src.__dict__[key], where=where)
        else:
            target.copyto(src.__dict__[key], where=where)
Emulates function copyto in NumPy .
83
9
231,869
def copyto_at(self, n, src, m):
    """Copy entry *m* of every container attribute of *src* into entry *n*
    of the corresponding attribute of self."""
    for key in self.containers:
        self.__dict__[key][n] = src.__dict__[key][m]
Copy to at a given location .
45
7
231,870
def Metropolis(self, compute_target, mh_options):
    """Perform a certain number of Metropolis steps.

    *compute_target* fills in the (log) target density of a candidate;
    *mh_options* may contain 'nsteps' (default 0), 'delta_dist'
    (default 0.1) and extra options passed to choose_proposal.
    Returns a new object with the updated particles; per-step acceptance
    rates are appended to acc_rates.
    """
    opts = mh_options.copy()
    nsteps = opts.pop('nsteps', 0)
    delta_dist = opts.pop('delta_dist', 0.1)
    proposal = self.choose_proposal(**opts)
    xout = self.copy()
    # scratch object holding the proposed particles
    xp = self.__class__(theta=np.empty_like(self.theta))
    step_ars = []
    for _ in self.mcmc_iterate(nsteps, self.arr, xout.arr, delta_dist):
        xp.arr[:, :], delta_lp = proposal.step(xout.arr)
        compute_target(xp)
        # log acceptance ratio (delta_lp corrects for proposal asymmetry)
        lp_acc = xp.lpost - xout.lpost + delta_lp
        accept = (np.log(stats.uniform.rvs(size=self.N)) < lp_acc)
        # keep accepted proposals, leave the rest unchanged
        xout.copyto(xp, where=accept)
        step_ars.append(np.mean(accept))
    xout.acc_rates = self.acc_rates + [step_ars]
    return xout
Performs a certain number of Metropolis steps .
262
10
231,871
def backward(self):
    """Backward recursion: compute the smoothing distributions from the
    filtering distributions (running forward() first if needed).

    Results are stored (in time order) in self.smth.
    """
    if not self.filt:
        self.forward()
    # build self.smth in reverse time order, starting from the last filter
    self.smth = [self.filt[-1]]
    log_trans = np.log(self.hmm.trans_mat)
    ctg = np.zeros(self.hmm.dim)
    # cost to go (log-lik of y_{t+1:T} given x_t=k)
    for filt, next_ft in reversed(list(zip(self.filt[:-1], self.logft[1:]))):
        new_ctg = np.empty(self.hmm.dim)
        for k in range(self.hmm.dim):
            new_ctg[k] = rs.log_sum_exp(log_trans[k, :] + next_ft + ctg)
        ctg = new_ctg
        smth = rs.exp_and_normalise(np.log(filt) + ctg)
        self.smth.append(smth)
    # restore chronological order
    self.smth.reverse()
Backward recursion .
234
5
231,872
def predict_step(F, covX, filt):
    """Predictive step of the Kalman filter: propagate the filtering
    mean/cov through transition matrix F, adding state noise covX."""
    mean = np.matmul(filt.mean, F.T)
    cov = dotdot(F, filt.cov, F.T) + covX
    return MeanAndCov(mean=mean, cov=cov)
Predictive step of Kalman filter .
74
9
231,873
def filter_step(G, covY, pred, yt):
    """Filtering step of the Kalman filter.

    Given observation matrix G, observation noise covY, the predictive
    MeanAndCov *pred* and observation *yt*, returns the filtering
    MeanAndCov and the log-likelihood of yt.
    """
    # data prediction
    data_pred_mean = np.matmul(pred.mean, G.T)
    data_pred_cov = dotdot(G, pred.cov, G.T) + covY
    if covY.shape[0] == 1:
        # univariate observation: cheaper 1D normal
        logpyt = dists.Normal(loc=data_pred_mean, scale=np.sqrt(data_pred_cov)).logpdf(yt)
    else:
        logpyt = dists.MvNormal(loc=data_pred_mean, cov=data_pred_cov).logpdf(yt)
    # filter
    residual = yt - data_pred_mean
    gain = dotdot(pred.cov, G.T, inv(data_pred_cov))
    filt_mean = pred.mean + np.matmul(residual, gain.T)
    filt_cov = pred.cov - dotdot(gain, G, pred.cov)
    return MeanAndCov(mean=filt_mean, cov=filt_cov), logpyt
Filtering step of Kalman filter .
251
8
231,874
def check_shapes(self):
    """Check that all model matrices have dimensions consistent with the
    state dimension dx and observation dimension dy."""
    # state / observation noise covariances
    assert self.covX.shape == (self.dx, self.dx), error_msg
    assert self.covY.shape == (self.dy, self.dy), error_msg
    # transition and observation matrices
    assert self.F.shape == (self.dx, self.dx), error_msg
    assert self.G.shape == (self.dy, self.dx), error_msg
    # initial distribution
    assert self.mu0.shape == (self.dx,), error_msg
    assert self.cov0.shape == (self.dx, self.dx), error_msg
Check all dimensions are correct .
130
6
231,875
def sobol(N, dim, scrambled=1):
    """Sobol sequence of N points in *dim* dimensions.

    Retries with a fresh seed until the scrambled output lies strictly
    inside (0, 1); unscrambled output is returned as-is.
    """
    while (True):
        # NOTE(review): np.random.randint(2 ** 32) can overflow on platforms
        # where the default integer type is 32-bit — confirm.
        seed = np.random.randint(2 ** 32)
        out = lowdiscrepancy.sobol(N, dim, scrambled, seed, 1, 0)
        # no need to test if scrambled==0
        if (scrambled == 0) or ((out < 1.).all() and (out > 0.).all()):
            return out
Sobol sequence .
95
5
231,876
def smoothing_worker(method=None, N=100, seed=None, fk=None, fk_info=None, add_func=None, log_gamma=None):
    """Generic worker for off-line smoothing algorithms.

    Runs the smoothing algorithm selected by *method* (FFBS variants or
    two-filter variants) with N particles on Feynman-Kac model *fk*, and
    estimates E[add_func(t, X_t, X_{t+1})] for each t.  Returns a dict
    with keys 'est' (the estimates) and 'cpu' (elapsed time in seconds).

    Bug fix: ``time.clock`` was removed in Python 3.8; replaced by
    ``time.perf_counter``.
    """
    T = fk.T
    if fk_info is None:
        # information filter: same model run on the reversed data
        fk_info = fk.__class__(ssm=fk.ssm, data=fk.data[::-1])
    if seed:
        random.seed(seed)
    est = np.zeros(T - 1)
    if method == 'FFBS_QMC':
        pf = particles.SQMC(fk=fk, N=N, store_history=True)
    else:
        pf = particles.SMC(fk=fk, N=N, store_history=True)
    tic = time.perf_counter()
    pf.run()
    if method in ['FFBS_ON', 'FFBS_ON2', 'FFBS_QMC']:
        if method.startswith('FFBS_ON'):
            z = pf.hist.backward_sampling(N, linear_cost=(method == 'FFBS_ON'))
        else:
            z = pf.hist.backward_sampling_qmc(N)
        for t in range(T - 1):
            est[t] = np.mean(add_func(t, z[t], z[t + 1]))
    elif method in ['two-filter_ON2', 'two-filter_ON', 'two-filter_ON_prop']:
        infopf = particles.SMC(fk=fk_info, N=N, store_history=True)
        infopf.run()
        for t in range(T - 1):
            psi = lambda x, xf: add_func(t, x, xf)
            if method == 'two-filter_ON2':
                est[t] = pf.hist.twofilter_smoothing(t, infopf, psi, log_gamma)
            else:
                ti = T - 2 - t  # t+1 for info filter
                if method == 'two-filter_ON_prop':
                    # Gaussian proposal modifications based on the other filter
                    modif_fwd = stats.norm.logpdf(pf.hist.X[t], loc=np.mean(infopf.hist.X[ti + 1]), scale=np.std(infopf.hist.X[ti + 1]))
                    modif_info = stats.norm.logpdf(infopf.hist.X[ti], loc=np.mean(pf.hist.X[t + 1]), scale=np.std(pf.hist.X[t + 1]))
                else:
                    modif_fwd, modif_info = None, None
                est[t] = pf.hist.twofilter_smoothing(t, infopf, psi, log_gamma, linear_cost=True, modif_forward=modif_fwd, modif_info=modif_info)
    else:
        print('no such method?')
    cpu_time = time.perf_counter() - tic
    print(method + ' took %.2f s for N=%i' % (cpu_time, N))
    return {'est': est, 'cpu': cpu_time}
Generic worker for off - line smoothing algorithms .
744
10
231,877
def save(self, X=None, w=None, A=None):
    """Record one page of history (particles, weights, ancestor indices)
    at the current time."""
    for store, value in ((self.X, X), (self.wgt, w), (self.A, A)):
        store.append(value)
Save one page of history at a given time .
43
10
231,878
def extract_one_trajectory(self):
    """Extract a single trajectory from the particle history.

    Samples the final particle index from the terminal weights, then
    follows ancestor indices backwards; returns the trajectory in
    chronological order.
    """
    traj = []
    for t in reversed(range(self.T)):
        if t == self.T - 1:
            n = rs.multinomial_once(self.wgt[-1].W)
        else:
            # n from the previous (later-time) iteration is reused here
            n = self.A[t + 1][n]
        traj.append(self.X[t][n])
    # built backwards in time: reverse before returning
    return traj[::-1]
Extract a single trajectory from the particle history .
99
10
231,879
def compute_trajectories(self):
    """Reconstruct (in self.B) the N trajectories that constitute the
    current genealogy, by following ancestor indices backwards."""
    B = np.empty((self.T, self.N), 'int')
    B[-1, :] = self.A[-1]
    for t in reversed(range(self.T - 1)):
        B[t, :] = self.A[t + 1][B[t + 1]]
    self.B = B
Compute the N trajectories that constitute the current genealogy .
91
13
231,880
def twofilter_smoothing ( self , t , info , phi , loggamma , linear_cost = False , return_ess = False , modif_forward = None , modif_info = None ) : ti = self . T - 2 - t # t+1 in reverse if t < 0 or t >= self . T - 1 : raise ValueError ( 'two-filter smoothing: t must be in range 0,...,T-2' ) lwinfo = info . hist . wgt [ ti ] . lw - loggamma ( info . hist . X [ ti ] ) if linear_cost : return self . _twofilter_smoothing_ON ( t , ti , info , phi , lwinfo , return_ess , modif_forward , modif_info ) else : return self . _twofilter_smoothing_ON2 ( t , ti , info , phi , lwinfo )
Two - filter smoothing .
208
6
231,881
def multiSMC(nruns=10, nprocs=0, out_func=None, **args):
    """Run SMC algorithms in parallel, for different combinations of
    parameters; each run's output is post-processed by *out_func*
    (identity when None)."""
    if out_func is None:
        out_func = lambda x: x

    def run_one(**kwargs):
        pf = SMC(**kwargs)
        pf.run()
        return out_func(pf)

    return utils.multiplexer(f=run_one, nruns=nruns, nprocs=nprocs, seeding=True, **args)
Run SMC algorithms in parallel for different combinations of parameters .
109
12
231,882
def reset_weights(self):
    """Reset weights after a resampling step."""
    if self.fk.isAPF:
        # auxiliary particle filter: correct for the auxiliary weights
        # carried by the resampled ancestors
        lw = (rs.log_mean_exp(self.logetat, W=self.W) - self.logetat[self.A])
        self.wgts = rs.Weights(lw=lw)
    else:
        # plain resampling: equal weights
        self.wgts = rs.Weights()
Reset weights after a resampling step .
85
10
231,883
def log_sum_exp(v):
    """Log of the sum of the exponentials of *v*, computed stably by
    factoring out the maximum."""
    vmax = v.max()
    shifted = np.exp(v - vmax)
    return vmax + np.log(shifted.sum())
Log of the sum of the exp of the arguments .
38
11
231,884
def log_sum_exp_ab(a, b):
    """Stable log(exp(a) + exp(b)) for two scalars: factor out the larger."""
    hi, lo = (a, b) if a > b else (b, a)
    return hi + np.log(1. + np.exp(lo - hi))
log_sum_exp for two scalars .
59
10
231,885
def wmean_and_var(W, x):
    """Component-wise weighted mean and variance of *x* under weights *W*,
    returned as {'mean': ..., 'var': ...}."""
    mean = np.average(x, weights=W, axis=0)
    second_moment = np.average(x ** 2, weights=W, axis=0)
    return {'mean': mean, 'var': second_moment - mean ** 2}
Component - wise weighted mean and variance .
70
8
231,886
def wmean_and_var_str_array(W, x):
    """Weighted mean and variance of each field of structured array *x*,
    returned as structured arrays under keys 'mean' and 'var'."""
    mean = np.empty(shape=x.shape[1:], dtype=x.dtype)
    var = np.empty_like(mean)
    for field in x.dtype.names:
        per_field = wmean_and_var(W, x[field])
        mean[field] = per_field['mean']
        var[field] = per_field['var']
    return {'mean': mean, 'var': var}
Weighted mean and variance of each component of a structured array .
103
13
231,887
def wquantiles(W, x, alphas=(0.25, 0.50, 0.75)):
    """Quantiles of weighted data: directly for a 1D array, column by
    column for a 2D array."""
    ndim = len(x.shape)
    if ndim == 1:
        return _wquantiles(W, x, alphas=alphas)
    elif ndim == 2:
        per_column = [_wquantiles(W, x[:, j], alphas=alphas) for j in range(x.shape[1])]
        return np.array(per_column)
Quantiles for weighted data .
104
6
231,888
def wquantiles_str_array(W, x, alphas=(0.25, 0.50, 0.75)):
    """Quantiles for weighted data stored in a structured array, computed
    per field and returned as a dict keyed by field name.

    Bug fix: the default alphas used to be ``(0.25, 0.50, 0, 75)`` — a
    typo turning the single value 0.75 into the two entries 0 and 75.
    """
    return {p: wquantiles(W, x[p], alphas) for p in x.dtype.names}
quantiles for weighted data stored in a structured array .
59
11
231,889
def resampling_scheme(func):
    """Decorator for resampling schemes: defaults M to len(W), registers
    the scheme in rs_funcs, and fills in a standard docstring."""
    @functools.wraps(func)
    def wrapped(W, M=None):
        size = W.shape[0] if M is None else M
        return func(W, size)
    rs_funcs[func.__name__] = wrapped
    wrapped.__doc__ = rs_doc % func.__name__.capitalize()
    return wrapped
Decorator for resampling schemes .
97
9
231,890
def inverse_cdf(su, W):
    """Inverse CDF algorithm for a finite distribution: for each (sorted)
    uniform su[n], return the first index where cumsum(W) exceeds it."""
    size = su.shape[0]
    A = np.empty(size, 'int')
    j = 0
    running = W[0]
    for n, u in enumerate(su):
        while u > running:
            j += 1
            running += W[j]
        A[n] = j
    return A
Inverse CDF algorithm for a finite distribution .
73
10
231,891
def hilbert_array(xint):
    """Hilbert index of each row of the integer array *xint*."""
    npts = xint.shape[0]
    h = np.zeros(npts, int64)
    for row in range(npts):
        h[row] = Hilbert_to_int(xint[row, :])
    return h
Compute Hilbert indices .
60
5
231,892
def mean_sq_jump_dist(self, discard_frac=0.1):
    """Mean squared jumping distance of the chain, after discarding the
    first *discard_frac* fraction of iterations as burn-in."""
    burnin = int(self.niter * discard_frac)
    return msjd(self.chain.theta[burnin:])
Mean squared jumping distance estimated from chain .
49
9
231,893
def update(self, v):
    """Fold point *v* into the running mean and covariance estimates, then
    refresh the Cholesky factor (falling back to L0 when singular)."""
    self.t += 1
    g = self.gamma()
    self.mu = (1. - g) * self.mu + g * v
    centered = v - self.mu
    outer = np.dot(centered[:, np.newaxis], centered[np.newaxis, :])
    self.Sigma = (1. - g) * self.Sigma + g * outer
    try:
        self.L = cholesky(self.Sigma, lower=True)
    except LinAlgError:
        # covariance not positive definite yet: keep the initial factor
        self.L = self.L0
Adds point v
123
3
231,894
def cartesian_lists(d):
    """Turn a dict of lists into the list of dicts that represents the
    cartesian product of the initial lists."""
    keys = list(d.keys())
    combos = itertools.product(*d.values())
    return [dict(zip(keys, combo)) for combo in combos]
turns a dict of lists into a list of dicts that represents the cartesian product of the initial lists
48
22
231,895
def cartesian_args(args, listargs, dictargs):
    """Compute the lists of input and output argument dicts for a function:
    fixed args are wrapped as singletons, list args expand directly, dict
    args expand over their values (inputs) and keys (outputs)."""
    inputs = {k: [v, ] for k, v in args.items()}
    inputs.update(listargs)
    inputs.update({k: v.values() for k, v in dictargs.items()})
    outputs = listargs.copy()
    outputs.update({k: v.keys() for k, v in dictargs.items()})
    return cartesian_lists(inputs), cartesian_lists(outputs)
Compute a list of inputs and outputs for a function with kw arguments .
123
16
231,896
def worker(qin, qout, f):
    """Multiprocessing worker: repeatedly pull (index, kwargs) from *qin*
    and push (index, f(**kwargs)) to *qout* until *qin* is empty."""
    while not qin.empty():
        index, kwargs = qin.get()
        result = f(**kwargs)
        qout.put((index, result))
Worker for multiprocessing . A worker repeatedly picks a dict of arguments in the queue and computes f for this set of arguments until the input queue is empty .
47
36
231,897
def distinct_seeds(k):
    """Return k distinct seeds for random number generation."""
    seeds = []
    for _ in range(k):
        # rejection-sample until a fresh seed is found
        while True:
            # NOTE(review): single-argument randint suggests `random` is
            # numpy's random module (stdlib random.randint needs two
            # arguments) — confirm the file-level import.
            s = random.randint(2 ** 32 - 1)
            if s not in seeds:
                break
        seeds.append(s)
    return seeds
returns k distinct seeds for random number generation
52
9
231,898
def multiplexer(f=None, nruns=1, nprocs=1, seeding=None, **args):
    """Evaluate function *f* for different combinations of parameters,
    optionally in parallel.

    List-valued kwargs are expanded over their elements, dict-valued
    kwargs over their values (with keys used as output labels); other
    kwargs are fixed.  When *seeding* (default: nruns > 1), each run gets
    a distinct 'seed' argument.  Raises ValueError when f is missing.
    """
    if not callable(f):
        raise ValueError('multiplexer: function f missing, or not callable')
    if seeding is None:
        seeding = (nruns > 1)
    # extra arguments (meant to be arguments for f)
    fixedargs, listargs, dictargs = {}, {}, {}
    listargs['run'] = list(range(nruns))
    for k, v in args.items():
        if isinstance(v, list):
            listargs[k] = v
        elif isinstance(v, dict):
            dictargs[k] = v
        else:
            fixedargs[k] = v
    # cartesian product
    inputs, outputs = cartesian_args(fixedargs, listargs, dictargs)
    for ip in inputs:
        ip.pop('run')  # run is not an argument of f, just an id for output
    # distributing different seeds
    if seeding:
        seeds = distinct_seeds(len(inputs))
        for ip, op, s in zip(inputs, outputs, seeds):
            ip['seed'] = s
            op['seed'] = s
    # the actual work happens here
    return distribute_work(f, inputs, outputs, nprocs=nprocs)
Evaluate a function for different parameters optionally in parallel .
296
12
231,899
def simulate(self, T):
    """Simulate the state process for T steps, then the observations given
    those states; returns the pair (states, observations)."""
    x = []
    for t in range(T):
        if t == 0:
            law_x = self.PX0()
        else:
            law_x = self.PX(t, x[-1])
        x.append(law_x.rvs(size=1))
    y = self.simulate_given_x(x)
    return x, y
Simulate state and observation processes .
81
7