idx
int64
0
251k
question
stringlengths
53
3.53k
target
stringlengths
5
1.23k
len_question
int64
20
893
len_target
int64
3
238
21,200
def parse(date, dayfirst=True):
    """Parse a date into a FlexiDate.

    Accepts a FlexiDate (returned unchanged), an int (treated as a year),
    a datetime.datetime, a datetime.date, or anything else (assumed to be
    a string, parsed with dateutil honouring *dayfirst*). Falsy input
    returns None.

    Unparseable strings yield a FlexiDate whose qualifier is the
    ascii-encoded bytes b'UNPARSED: <input>' — NOTE(review): qualifier is
    bytes, not str; confirm against FlexiDate's expected API.
    """
    if not date:
        return None
    if isinstance(date, FlexiDate):
        return date
    if isinstance(date, int):
        return FlexiDate(year=date)
    elif isinstance(date, datetime.datetime):
        parser = PythonDateTimeParser()
        return parser.parse(date)
    elif isinstance(date, datetime.date):
        parser = PythonDateParser()
        return parser.parse(date)
    else:  # assuming its a string
        parser = DateutilDateParser()
        out = parser.parse(date, **{'dayfirst': dayfirst})
        if out is not None:
            return out
        # msg = 'Unable to parse %s' % date
        # raise ValueError(date)
        val = 'UNPARSED: %s' % date
        val = val.encode('ascii', 'ignore')
        return FlexiDate(qualifier=val)
Parse a date into a FlexiDate .
205
10
21,201
def as_datetime(self):
    """Convert this date to a python ``datetime.datetime``.

    A missing month or day defaults to 1; missing time fields default
    to 0. The year must be present (int() of a missing year raises).
    """
    def as_int(value, default):
        return int(value) if value else default

    return datetime.datetime(
        int(self.year),
        as_int(self.month, 1),
        as_int(self.day, 1),
        as_int(self.hour, 0),
        as_int(self.minute, 0),
        as_int(self.second, 0),
        as_int(self.microsecond, 0),
    )
Get as python datetime . datetime .
126
9
21,202
def md5sum(string):
    """Return the hex md5 checksum of *string* (UTF-8 encoded)."""
    digest = hashlib.md5(string.encode('utf-8'))
    return digest.hexdigest()
Generate the md5 checksum for a string
43
10
21,203
def file_md5(filename):
    """Generate the md5 checksum for a file.

    The file is opened with zopen (which transparently handles gzipped
    files); bytes content is decoded to str before hashing.
    """
    with zopen(filename, 'r') as f:
        contents = f.read()
    # gzipped files are read as bytes; plain text files are already str.
    if hasattr(contents, 'decode'):
        contents = contents.decode()
    return md5sum(contents)
Generate the md5 checksum for a file
67
10
21,204
def validate_checksum(filename, md5sum):
    """Compare the md5 checksum of a file with an expected value.

    If *filename* is not found, match_filename() falls back to the
    gzipped variant; the checksum is computed on the unzipped content.
    Note: the *md5sum* parameter shadows the module-level md5sum()
    function, which is harmless here because hashing goes via file_md5().

    Raises:
        ValueError: if the calculated and expected checksums differ.
    """
    resolved = match_filename(filename)
    actual = file_md5(filename=resolved)
    if actual != md5sum:
        raise ValueError('md5 checksums are inconsistent: {}'.format(resolved))
Compares the md5 checksum of a file with an expected value . If the calculated and expected checksum values are not equal ValueError is raised . If the filename foo is not found will try to read a gzipped file named foo . gz . In this case the checksum is calculated for the unzipped file .
65
68
21,205
def to_matrix(xx, yy, zz, xy, yz, xz):
    """Build a symmetric 3x3 matrix from its six independent components.

    Inputs are given in the order xx, yy, zz, xy, yz, xz.
    """
    return np.array([
        [xx, xy, xz],
        [xy, yy, yz],
        [xz, yz, zz],
    ])
Convert a list of matrix components to a symmetric 3x3 matrix . Inputs should be in the order xx yy zz xy yz xz .
66
35
21,206
def absorption_coefficient(dielectric):
    """Calculate the optical absorption coefficient from pymatgen vasprun
    dielectric constant data.

    Args:
        dielectric: 3-element sequence [energies, real_part, imag_part]
            as produced by pymatgen's Vasprun dielectric output.

    Returns:
        np.array of absorption coefficients, one per energy. The real
        and imaginary dielectric tensors are direction-averaged
        (np.mean over axis 1), so this is an isotropic average.
        Units depend on the module-level eV_to_recip_cm constant.
    """
    energies_in_eV = np.array(dielectric[0])
    real_dielectric = parse_dielectric_data(dielectric[1])
    imag_dielectric = parse_dielectric_data(dielectric[2])
    epsilon_1 = np.mean(real_dielectric, axis=1)
    epsilon_2 = np.mean(imag_dielectric, axis=1)
    return (2.0 * np.sqrt(2.0) * pi * eV_to_recip_cm * energies_in_eV
            * np.sqrt(-epsilon_1 + np.sqrt(epsilon_1 ** 2 + epsilon_2 ** 2)))
Calculate the optical absorption coefficient from an input set of pymatgen vasprun dielectric constant data .
181
25
21,207
def dr(self, atom1, atom2):
    """Calculate the distance between two atoms, delegating to the
    cell's dr() on their position vectors."""
    r1 = atom1.r
    r2 = atom2.r
    return self.cell.dr(r1, r2)
Calculate the distance between two atoms .
29
9
21,208
def area_of_a_triangle_in_cartesian_space(a, b, c):
    """Return the area of the triangle with Cartesian vertices a, b, c.

    Computed as half the magnitude of the cross product of two edges.
    """
    edge_cross = np.cross(b - a, c - a)
    return 0.5 * np.linalg.norm(edge_cross)
Returns the area of a triangle defined by three points in Cartesian space .
50
15
21,209
def points_are_in_a_straight_line(points, tolerance=1e-7):
    """Check whether a set of points fall on a straight line.

    Forms triangles from the first two points and each subsequent point;
    returns False as soon as any triangle area exceeds *tolerance*.
    """
    a, b = points[0], points[1]
    return all(
        area_of_a_triangle_in_cartesian_space(a, b, c) <= tolerance
        for c in points[2:]
    )
Check whether a set of points fall on a straight line . Calculates the areas of triangles formed by triplets of the points . Returns False if any of these areas is larger than the tolerance .
74
39
21,210
def two_point_effective_mass(cartesian_k_points, eigenvalues):
    """Finite-difference effective mass from eigenvalues at exactly two
    k-points (reimplemented from Aron Walsh's Fortran code).

    Requires exactly two k-points and two eigenvalues.
    """
    assert cartesian_k_points.shape[0] == 2
    assert eigenvalues.size == 2
    dk = cartesian_k_points[1] - cartesian_k_points[0]
    dk_magnitude = np.sqrt(np.dot(dk, dk))
    # Energy difference converted to Hartree (factor 2 from the original
    # formulation).
    delta_e = (eigenvalues[1] - eigenvalues[0]) * ev_to_hartree * 2.0
    return dk_magnitude * dk_magnitude / delta_e
Calculate the effective mass given eigenvalues at two k - points . Reimplemented from Aron Walsh s original effective mass Fortran code .
138
32
21,211
def least_squares_effective_mass(cartesian_k_points, eigenvalues):
    """Calculate the effective mass using a least-squares quadratic fit.

    Fits E(|k - k0|) with a quadratic; the inverse of the second-order
    coefficient (converted via ev_to_hartree) gives the effective mass.
    Fitting the raw eigenvalues is equivalent to fitting
    eigenvalues - eigenvalues[0]: a constant offset only changes the
    zeroth-order coefficient.

    Args:
        cartesian_k_points: (n, 3) array of collinear k-points.
        eigenvalues: (n,) array of band energies.

    Returns:
        The effective mass.

    Raises:
        ValueError: if the k-points are not collinear.
    """
    if not points_are_in_a_straight_line(cartesian_k_points):
        raise ValueError('k-points are not collinear')
    dk = cartesian_k_points - cartesian_k_points[0]
    mod_dk = np.linalg.norm(dk, axis=1)
    # The original code also computed (eigenvalues - eigenvalues[0]) but
    # never used it; that dead assignment has been removed.
    quadratic_coefficient = np.polyfit(mod_dk, eigenvalues, 2)[0]
    return 1.0 / (quadratic_coefficient * ev_to_hartree * 2.0)
Calculate the effective mass using a least squares quadratic fit .
154
15
21,212
def read_from_file(self, filename, negative_occupancies='warn'):
    """Read the projected wavefunction character of each band from a VASP
    PROCAR file.

    Args:
        filename: path to the PROCAR file.
        negative_occupancies: how to treat negative occupancy values;
            one of 'warn' (default), 'raise', 'ignore', or 'zero'
            (clamps negatives to 0.0).

    Raises:
        ValueError: if negative_occupancies is not a valid option, or if
            occupancies are negative and negative_occupancies == 'raise'.
    """
    valid_negative_occupancies = ['warn', 'raise', 'ignore', 'zero']
    if negative_occupancies not in valid_negative_occupancies:
        raise ValueError('"{}" is not a valid value for the keyword `negative_occupancies`.'.format(negative_occupancies))
    with open(filename, 'r') as file_in:
        file_in.readline()  # skip the header line
        # Second line holds the counts of k-points, bands, and ions.
        self.number_of_k_points, self.number_of_bands, self.number_of_ions = [int(f) for f in get_numbers_from_string(file_in.readline())]
        # Slurp the remainder of the file for the parse_* helpers below.
        self.read_in = file_in.read()
    self.parse_k_points()
    self.parse_bands()
    self.parse_occupancy()
    if np.any(self.occupancy[:, 1] < 0):  # Handle negative occupancies
        if negative_occupancies == 'warn':
            warnings.warn("One or more occupancies in your PROCAR file are negative.")
        elif negative_occupancies == 'raise':
            raise ValueError("One or more occupancies in your PROCAR file are negative.")
        elif negative_occupancies == 'zero':
            self.occupancy[self.occupancy < 0] = 0.0
    self.parse_projections()
    self.sanity_check()
    self.read_in = None  # drop the raw file contents once parsed
    # Column 0 of the ion axis is dropped ([..., 1:]); spin-polarised
    # data is reordered to (k-points, bands, spin, ions, projections).
    if self.calculation['spin_polarised']:
        self.data = self.projection_data.reshape(self.spin_channels, self.number_of_k_points, self.number_of_bands, self.number_of_ions + 1, self.number_of_projections)[:, :, :, :, 1:].swapaxes(0, 1).swapaxes(1, 2)
    else:
        self.data = self.projection_data.reshape(self.number_of_k_points, self.number_of_bands, self.spin_channels, self.number_of_ions + 1, self.number_of_projections)[:, :, :, :, 1:]
Reads the projected wavefunction character of each band from a VASP PROCAR file .
503
19
21,213
def load_vasp_summary(filename):
    """Read a vasp_summary.yaml format file into a dict of dicts.

    Each YAML document in the file becomes one sub-dictionary, keyed at
    the top level by that document's 'title' value.
    """
    with open(filename, 'r') as stream:
        documents = yaml.load_all(stream, Loader=yaml.SafeLoader)
        # load_all is lazy, so consume it while the stream is open.
        summary = {doc['title']: doc for doc in documents}
    return summary
Reads a vasp_summary . yaml format YAML file and returns a dictionary of dictionaries . Each YAML document in the file corresponds to one sub - dictionary with the corresponding top - level key given by the title value .
61
50
21,214
def potcar_spec(filename):
    """Return a dict specifying the pseudopotentials in a POTCAR file.

    The file is split on 'End of Dataset' markers, each section's md5
    hash is computed, and the hash is looked up in the module-level
    potcar_md5sum_data tables for every set in potcar_sets.

    Returns:
        dict mapping pseudopotential name -> potcar set name.

    Raises:
        ValueError: if any section's hash is not recognised.
        NOTE(review): if two sections matched entries with the same
        pseudopotential name, p_spec would have fewer entries than
        potcar_md5sums and this would raise even though every hash
        matched — confirm whether duplicate POTCAR entries are possible.
    """
    p_spec = {}
    with open(filename, 'r') as f:
        # Capturing split keeps the 'End of Dataset\n' delimiters so each
        # (body, delimiter) pair can be re-joined before hashing.
        potcars = re.split('(End of Dataset\n)', f.read())
    potcar_md5sums = [md5sum(''.join(pair)) for pair in zip(potcars[::2], potcars[1:-1:2])]
    for this_md5sum in potcar_md5sums:
        for ps in potcar_sets:
            for p, p_md5sum in potcar_md5sum_data[ps].items():
                if this_md5sum == p_md5sum:
                    p_spec[p] = ps
    if len(p_spec) != len(potcar_md5sums):
        raise ValueError('One or more POTCARs did not have matching md5 hashes')
    return p_spec
Returns a dictionary specifying the pseudopotentials contained in a POTCAR file .
208
16
21,215
def find_vasp_calculations():
    """Return relative paths ('./...') of all subdirectories below the
    current directory containing a vasprun.xml or vasprun.xml.gz file.

    Plain-xml directories are listed before gzipped ones.
    """
    directories = []
    for pattern, filename_regex in (('**/vasprun.xml', r'vasprun\.xml'),
                                    ('**/vasprun.xml.gz', r'vasprun\.xml\.gz')):
        directories.extend(
            './' + re.sub(filename_regex, '', path)
            for path in glob.iglob(pattern, recursive=True)
        )
    return directories
Returns a list of all subdirectories that contain either a vasprun . xml file or a compressed vasprun . xml . gz file .
130
31
21,216
def parse_vasprun(self):
    """Read vasprun.xml (or vasprun.xml.gz) into self.vasprun as a
    pymatgen Vasprun object.

    Sets self.vasprun to None when the file exists but is not valid XML
    (e.g. a truncated run); any other exception propagates unchanged.

    Raises:
        FileNotFoundError: if neither vasprun.xml nor vasprun.xml.gz
            is present.
    """
    self.vasprun_filename = match_filename('vasprun.xml')
    if not self.vasprun_filename:
        raise FileNotFoundError('Could not find vasprun.xml or vasprun.xml.gz file')
    try:
        self.vasprun = Vasprun(self.vasprun_filename, parse_potcar_file=False)
    except ET.ParseError:
        # Malformed XML: record the calculation as unparseable.
        self.vasprun = None
    except:
        raise
Read in vasprun . xml as a pymatgen Vasprun object .
112
18
21,217
def read_projected_dos(self):
    """Read the per-atom projected density of states into self.pdos.

    The resulting array has shape
    (number_of_atoms, number_of_data_points, number_of_channels, ispin).
    """
    frames = [self.read_atomic_dos_as_df(atom + 1)
              for atom in range(self.number_of_atoms)]
    stacked = np.vstack([np.array(frame) for frame in frames])
    self.pdos = stacked.reshape(self.number_of_atoms,
                                self.number_of_data_points,
                                self.number_of_channels,
                                self.ispin)
Read the projected density of states data into self . pdos .
123
8
21,218
def pdos_select(self, atoms=None, spin=None, l=None, m=None):
    """Return a subset of the projected density of states array.

    Args:
        atoms: list of atom indices to include (default: all atoms).
        spin: 'up', 'down', or 'both' (default: both channels).
        l: angular momentum channel 's', 'p', 'd', or 'f'
            (default: all channels).
        m: optional list of m-channel labels within *l*
            (e.g. ['x', 'z'] for l='p').

    Returns:
        np.array sliced to the requested (atoms, points, channels, spins).

    Raises:
        ValueError: for an invalid *spin* or *l* value.
    """
    valid_m_values = {'s': [],
                      'p': ['x', 'y', 'z'],
                      'd': ['xy', 'yz', 'z2-r2', 'xz', 'x2-y2'],
                      'f': ['y(3x2-y2)', 'xyz', 'yz2', 'z3', 'xz2', 'z(x2-y2)', 'x(x2-3y2)']}
    if not atoms:
        atom_idx = list(range(self.number_of_atoms))
    else:
        atom_idx = atoms
    to_return = self.pdos[atom_idx, :, :, :]
    if not spin:
        spin_idx = list(range(self.ispin))
    # Bug fix: the original used `spin is 'up'` etc. — identity comparison
    # on string literals, which is interning-dependent and raises a
    # SyntaxWarning on modern CPython. Use equality.
    elif spin == 'up':
        spin_idx = [0]
    elif spin == 'down':
        spin_idx = [1]
    elif spin == 'both':
        spin_idx = [0, 1]
    else:
        raise ValueError("valid spin values are 'up', 'down', and 'both'. The default is 'both'")
    to_return = to_return[:, :, :, spin_idx]
    if not l:
        channel_idx = list(range(self.number_of_channels))
    elif l == 's':
        channel_idx = [0]
    elif l == 'p':
        if not m:
            channel_idx = [1, 2, 3]
        else:
            # p channels occupy columns 1-3.
            channel_idx = [i + 1 for i, v in enumerate(valid_m_values['p']) if v in m]
    elif l == 'd':
        if not m:
            channel_idx = [4, 5, 6, 7, 8]
        else:
            # d channels occupy columns 4-8.
            channel_idx = [i + 4 for i, v in enumerate(valid_m_values['d']) if v in m]
    elif l == 'f':
        if not m:
            channel_idx = [9, 10, 11, 12, 13, 14, 15]
        else:
            # f channels occupy columns 9-15.
            channel_idx = [i + 9 for i, v in enumerate(valid_m_values['f']) if v in m]
    else:
        raise ValueError
    return to_return[:, :, channel_idx, :]
Returns a subset of the projected density of states array .
603
11
21,219
def scale_stoichiometry(self, scaling):
    """Return the stoichiometry dict with every count multiplied by
    *scaling*; the stored stoichiometry is left untouched."""
    scaled = {}
    for species, count in self.stoichiometry.items():
        scaled[species] = count * scaling
    return scaled
Scale the Calculation stoichiometry Returns the stoichiometry scaled by the argument scaling .
35
18
21,220
def angle(x, y):
    """Calculate the angle between two vectors, in degrees.

    The cosine is clipped to [-1, 1] before np.arccos: floating-point
    round-off can give |cos| marginally above 1 for (anti)parallel
    vectors, which previously made this function return nan.

    Args:
        x, y: vectors (array-like of equal length).

    Returns:
        The angle in degrees as a float.
    """
    cos_angle = np.dot(x, y) / (np.linalg.norm(x) * np.linalg.norm(y))
    return np.degrees(np.arccos(np.clip(cos_angle, -1.0, 1.0)))
Calculate the angle between two vectors in degrees .
77
11
21,221
def minimum_image(self, r1, r2):
    """Find the minimum image vector from point r1 to point r2.

    Works on fractional coordinates: any component of r2 - r1 whose
    magnitude exceeds 0.5 is shifted by one lattice repeat towards zero.
    """
    def wrap(component):
        if abs(component) > 0.5:
            return component - math.copysign(1.0, component)
        return component

    return np.array([wrap(x) for x in r2 - r1])
Find the minimum image vector from point r1 to point r2 .
71
14
21,222
def minimum_image_dr(self, r1, r2, cutoff=None):
    """Shortest distance between two points in the cell, accounting for
    periodic boundary conditions via the minimum-image convention."""
    image_vector = self.minimum_image(r1, r2)
    origin = np.zeros(3)
    return self.dr(origin, image_vector, cutoff)
Calculate the shortest distance between two points in the cell accounting for periodic boundary conditions .
61
18
21,223
def lengths(self):
    """The cell lengths: the Euclidean norm of each row of the cell
    matrix."""
    squared_norms = (sum(row ** 2) for row in self.matrix)
    return np.array([math.sqrt(sq) for sq in squared_norms])
The cell lengths .
34
4
21,224
def inside_cell(self, r):
    """Map fractional coordinate *r* to its equivalent point inside the
    cell, taken as the nearest image to the cell centre."""
    cell_centre = np.array([0.5, 0.5, 0.5])
    return self.nearest_image(cell_centre, r)
Given a fractional - coordinate if this lies outside the cell return the equivalent point inside the cell .
48
20
21,225
def volume(self):
    """The cell volume: the scalar triple product of the three lattice
    (row) vectors."""
    a, b, c = self.matrix
    return np.dot(a, np.cross(b, c))
The cell volume .
37
4
21,226
def from_file(cls, filename):
    """Create a VASPMeta object by reading a vaspmeta.yaml file.

    Required keys: title, description, status. Optional keys: notes,
    type, and track (a string or list of strings; a bare string is
    normalised to a single-element list).
    """
    with open(filename, 'r') as stream:
        data = yaml.load(stream, Loader=yaml.SafeLoader)
    notes = data.get('notes')
    v_type = data.get('type')
    track = data.get('track')
    xargs = {}
    if track:
        # Was `type(track) is str`; isinstance is the idiomatic and
        # subclass-safe type check.
        if isinstance(track, str):
            track = [track]
        xargs['track'] = track
    vaspmeta = VASPMeta(data['title'], data['description'], data['status'],
                        notes=notes, type=v_type, **xargs)
    return vaspmeta
Create a VASPMeta object by reading a vaspmeta . yaml file
152
17
21,227
def vasp_version_from_outcar(filename='OUTCAR'):
    """Return the stripped first line of a VASP OUTCAR file, which holds
    the VASP source version string."""
    with open(filename) as f:
        first_line = f.readline()
    return first_line.strip()
Returns the first line from a VASP OUTCAR file to get the VASP source version string .
42
22
21,228
def potcar_eatom_list_from_outcar(filename='OUTCAR'):
    """Return the list of EATOM values (floats) for the pseudopotentials
    used, scraped from an OUTCAR file.

    Args:
        filename: path to the OUTCAR file (default 'OUTCAR').
    """
    with open(filename) as f:
        outcar = f.read()
    # Raw string: '\s', '\d', '\.' in an ordinary literal are invalid
    # escape sequences and raise SyntaxWarning on Python >= 3.12.
    eatom_re = re.compile(r"energy of atom\s+\d+\s+EATOM=\s*([-\d\.]+)")
    return [float(e) for e in eatom_re.findall(outcar)]
Returns a list of EATOM values for the pseudopotentials used .
103
15
21,229
def build_description(node=None):
    """Return a multi-line string describing a logging_tree.nodes.Node.

    When *node* is None, the live logger tree is captured and described.
    Trailing whitespace is stripped from each line and the result ends
    with a newline.
    """
    if node is None:
        from logging_tree.nodes import tree
        node = tree()
    stripped_lines = [line.rstrip() for line in describe(node)]
    return '\n'.join(stripped_lines) + '\n'
Return a multi - line string describing a logging_tree . nodes . Node .
57
16
21,230
def _describe(node, parent):
    """Generate lines describing the given (name, logger, children) node
    tuple, then recursively its children.

    *parent* is the logger object expected to be this logger's .parent
    (None at the root); a mismatch is reported as a broken parent link.
    """
    name, logger, children = node
    is_placeholder = isinstance(logger, logging.PlaceHolder)
    if is_placeholder:
        # Placeholders have no attributes worth describing.
        yield '<--[%s]' % name
    else:
        parent_is_correct = (parent is None) or (logger.parent is parent)
        # Arrow glyph encodes propagation status and parent correctness.
        if not logger.propagate:
            arrow = ' '
        elif parent_is_correct:
            arrow = '<--'
        else:
            arrow = ' !-'
        yield '%s"%s"' % (arrow, name)
        if not parent_is_correct:
            if logger.parent is None:
                yield (' Broken .parent is None, so messages stop here')
            else:
                yield (' Broken .parent redirects messages to %r instead' % (logger.parent.name,))
        if logger.level == logging.NOTSET:
            yield ' Level NOTSET so inherits level ' + logging.getLevelName(logger.getEffectiveLevel())
        else:
            yield ' Level ' + logging.getLevelName(logger.level)
        if not logger.propagate:
            yield ' Propagate OFF'
        if logger.disabled:
            yield ' Disabled'
        # In case someone has defined a custom logger that lacks a
        # `filters` or `handlers` attribute, we call getattr() and
        # provide an empty sequence as a fallback.
        for f in getattr(logger, 'filters', ()):
            yield ' Filter %s' % describe_filter(f)
        for h in getattr(logger, 'handlers', ()):
            g = describe_handler(h)
            yield ' Handler %s' % next(g)
            for line in g:
                yield ' ' + line
    if children:
        # Placeholder children keep the grandparent as their expected
        # parent; real loggers become the parent for their subtree.
        if not is_placeholder:
            parent = logger
        last_child = children[-1]
        for child in children:
            g = _describe(child, parent)
            yield ' |'
            yield ' o' + next(g)
            # The last child gets a plain indent; earlier children keep
            # the vertical rule so the tree lines up.
            if child is last_child:
                prefix = ' '
            else:
                prefix = ' |'
            for line in g:
                yield prefix + line
Generate lines describing the given node tuple .
437
9
21,231
def describe_filter(f):
    """Return text describing the logging filter *f*.

    A plain logging.Filter is summarised by its name; any subclass or
    other callable falls back to repr(). The exact-class check (rather
    than type()) is kept for Python <= 2.6 compatibility.
    """
    is_plain_filter = f.__class__ is logging.Filter
    return 'name=%r' % f.name if is_plain_filter else repr(f)
Return text describing the logging filter f .
46
8
21,232
def describe_handler ( h ) : t = h . __class__ # using type() breaks in Python <= 2.6 format = handler_formats . get ( t ) if format is not None : yield format % h . __dict__ else : yield repr ( h ) level = getattr ( h , 'level' , logging . NOTSET ) if level != logging . NOTSET : yield ' Level ' + logging . getLevelName ( level ) for f in getattr ( h , 'filters' , ( ) ) : yield ' Filter %s' % describe_filter ( f ) formatter = getattr ( h , 'formatter' , None ) if formatter is not None : if type ( formatter ) is logging . Formatter : yield ' Formatter fmt=%r datefmt=%r' % ( getattr ( formatter , '_fmt' , None ) , getattr ( formatter , 'datefmt' , None ) ) else : yield ' Formatter %r' % ( formatter , ) if t is logging . handlers . MemoryHandler and h . target is not None : yield ' Flushes output to:' g = describe_handler ( h . target ) yield ' Handler ' + next ( g ) for line in g : yield ' ' + line
Yield one or more lines describing the logging handler h .
274
12
21,233
def tree():
    """Return a tree of (name, logger, children) tuples representing the
    current logger layout, rooted at the root logger."""
    root = ('', logging.root, [])
    nodes = {}
    # Sorting guarantees every parent name is processed before its
    # children, so the nodes[...] parent lookup below always succeeds.
    for name, logger in sorted(logging.root.manager.loggerDict.items()):
        node = nodes[name] = (name, logger, [])
        # Parent name is everything before the last '.' — the same
        # formula used inside `logging` itself.
        dot = name.rfind('.', 0, len(name) - 1)
        parent = root if dot == -1 else nodes[name[:dot]]
        parent[2].append(node)
    return root
Return a tree of tuples representing the logger layout .
132
11
21,234
def patched_str(self):
    """Try to pretty-print the exception if this is going on screen.

    When stderr is a TTY, emits an ANSI-coloured, multi-line description
    of the failed HTTP request (method, URI, message, and — when a code
    is set — a docs link). Otherwise falls back to a single plain line,
    which is safe if a caller re-displays the message to an end user.
    """
    def red(words):
        return u("\033[31m\033[49m%s\033[0m") % words
    def white(words):
        return u("\033[37m\033[49m%s\033[0m") % words
    def blue(words):
        return u("\033[34m\033[49m%s\033[0m") % words
    def teal(words):
        return u("\033[36m\033[49m%s\033[0m") % words
    def get_uri(code):
        return "https://www.signalwire.com/docs/errors/{0}".format(code)
    # If it makes sense to print a human readable error message, try to
    # do it. The one problem is that someone might catch this error and
    # try to display the message from it to an end user.
    if hasattr(sys.stderr, 'isatty') and sys.stderr.isatty():
        msg = ("\n{red_error} {request_was}\n\n{http_line}"
               "\n\n{sw_returned}\n\n{message}\n".format(
                   red_error=red("HTTP Error"),
                   request_was=white("Your request was:"),
                   http_line=teal("%s %s" % (self.method, self.uri)),
                   sw_returned=white("Signalwire returned the following information:"),
                   message=blue(str(self.msg))))
        if self.code:
            msg = "".join([msg, "\n{more_info}\n\n{uri}\n\n".format(
                more_info=white("More information may be available here:"),
                uri=blue(get_uri(self.code))), ])
        return msg
    else:
        return "HTTP {0} error: {1}".format(self.status, self.msg)
Try to pretty - print the exception if this is going on screen .
452
14
21,235
def h(self):
    """Step size(s) for numerical differentiation with respect to the
    model parameters.

    Always returned as a vector of length n_modelparams; a scalar _h is
    broadcast across all parameters so each can be weighted
    independently.
    """
    if np.size(self._h) <= 1:
        # Scalar step size: broadcast to one entry per model parameter.
        return self._h * np.ones(self.n_modelparams)
    assert np.size(self._h) == self.n_modelparams
    return self._h
r Returns the step size to be used in numerical differentiation with respect to the model parameters . The step size is given as a vector with length n_modelparams so that each model parameter can be weighted independently .
60
42
21,236
def clear_cache(self):
    """Clear the cache of the underlying serial model and purge any
    results held by the engines seen by the direct view.

    Purging is best-effort: failures while talking to the engines are
    swallowed so that cache clearing never takes down the caller.
    """
    self.underlying_model.clear_cache()
    try:
        logger.info('DirectView results has {} items. Clearing.'.format(len(self._dv.results)))
        self._dv.purge_results('all')
        if self._purge_client:
            self._dv.client.purge_everything()
    except Exception:
        # Was a bare `except:`, which also swallowed KeyboardInterrupt
        # and SystemExit; keep the best-effort behaviour but let
        # BaseException-level signals propagate.
        pass
Clears any cache associated with the serial model and the engines seen by the direct view .
90
18
21,237
def _maybe_resample(self):
    """Check the resample threshold and conditionally resample.

    Emits an ApproximationWarning when the effective sample size is
    critically small (<= 10), since resampling is then likely to fail.
    """
    ess = self.n_ess
    if ess <= 10:
        warnings.warn(
            "Extremely small n_ess encountered ({}). "
            "Resampling is likely to fail. Consider adding particles, or "
            "resampling more often.".format(ess),
            ApproximationWarning
        )
    if ess < self.n_particles * self.resample_thresh:
        self.resample()
Checks the resample threshold and conditionally resamples .
96
12
21,238
def reset(self, n_particles=None, only_params=None, reset_weights=True):
    """Draw all particle locations (and optionally weights) fresh from
    the initial prior.

    Args:
        n_particles: new particle count (default: keep the current
            count). May not be combined with only_params.
        only_params: index/slice of model parameters to re-draw; the
            remaining parameters keep their current values.
        reset_weights: when True, reset weights to uniform.

    Raises:
        ValueError: if both n_particles and only_params are given.
    """
    # Particles are stored using two arrays, particle_locations and
    # particle_weights, such that:
    #
    # particle_locations[idx_particle, idx_modelparam] is the idx_modelparam
    # parameter of the particle idx_particle.
    # particle_weights[idx_particle] is the weight of the particle
    # idx_particle.
    if n_particles is not None and only_params is not None:
        raise ValueError("Cannot set both n_particles and only_params.")
    if n_particles is None:
        n_particles = self.n_particles
    if reset_weights:
        self.particle_weights = np.ones((n_particles,)) / n_particles
    if only_params is None:
        sl = np.s_[:, :]
        # Might as well make a new array if we're resetting everything.
        self.particle_locations = np.zeros((n_particles, self.model.n_modelparams))
    else:
        sl = np.s_[:, only_params]
    self.particle_locations[sl] = self.prior.sample(n=n_particles)[sl]
    # Since this changes particle positions, we must recanonicalize.
    if self._canonicalize:
        self.particle_locations[sl] = self.model.canonicalize(self.particle_locations[sl])
Causes all particle locations and weights to be drawn fresh from the initial prior .
341
16
21,239
def batch_update(self, outcomes, expparams, resample_interval=5):
    """Update the posterior from a batch of outcomes and experiments,
    one pair at a time, checking the resample condition every
    *resample_interval* updates.

    Raises:
        ValueError: if outcomes and expparams have different lengths.
    """
    # TODO: write a faster implementation here using vectorized calls to
    # likelihood.
    n_exps = outcomes.shape[0]
    if expparams.shape[0] != n_exps:
        raise ValueError("The number of outcomes and experiments must match.")
    # Promote a 1D expparams array to a column so each row is one
    # experiment.
    if expparams.ndim == 1:
        expparams = expparams[:, None]
    # Loop over experiments and update one at a time.
    for idx_exp, (outcome, experiment) in enumerate(zip(iter(outcomes), iter(expparams))):
        self.update(outcome, experiment, check_for_resample=False)
        if (idx_exp + 1) % resample_interval == 0:
            self._maybe_resample()
r Updates based on a batch of outcomes and experiments rather than just one .
209
15
21,240
def resample(self):
    """Force the updater to perform a resampling step immediately.

    Replaces particle weights and locations with those produced by
    self.resampler, optionally canonicalizes the new locations, clears
    the model cache (errors demoted to warnings), and records KL
    divergence / debug statistics when those features are enabled.
    """
    if self.just_resampled:
        warnings.warn("Resampling without additional data; this may not perform as "
                      "desired.", ResamplerWarning)
    # Record that we have performed a resampling step.
    self._just_resampled = True
    self._resample_count += 1
    # If we're tracking divergences, make a copy of the weights and
    # locations.
    if self._resampling_divergences is not None:
        old_locs = self.particle_locations.copy()
        old_weights = self.particle_weights.copy()
    # Record the previous mean, cov if needed.
    if self._debug_resampling:
        old_mean = self.est_mean()
        old_cov = self.est_covariance_mtx()
    # Find the new particle locations according to the chosen resampling
    # algorithm.
    # We pass the model so that the resampler can check for validity of
    # newly placed particles.
    # FIXME This feels fishy. If we update particles elsewwhere
    new_distribution = self.resampler(self.model, self)
    self.particle_weights = new_distribution.particle_weights
    self.particle_locations = new_distribution.particle_locations
    # Possibly canonicalize, if we've been asked to do so.
    if self._canonicalize:
        self.particle_locations[:, :] = self.model.canonicalize(self.particle_locations)
    # Instruct the model to clear its cache, demoting any errors to
    # warnings.
    try:
        self.model.clear_cache()
    except Exception as e:
        warnings.warn("Exception raised when clearing model cache: {}. Ignoring.".format(e))
    # Possibly track the new divergence.
    if self._resampling_divergences is not None:
        self._resampling_divergences.append(self._kl_divergence(old_locs, old_weights))
    # Report current and previous mean, cov.
    if self._debug_resampling:
        new_mean = self.est_mean()
        new_cov = self.est_covariance_mtx()
        logger.debug("Resampling changed mean by {}. Norm change in cov: {}.".format(old_mean - new_mean, np.linalg.norm(new_cov - old_cov)))
Forces the updater to perform a resampling step immediately .
545
14
21,241
def expected_information_gain(self, expparams):
    """Calculate the expected information gain (expected KL divergence
    of the hypothetical posterior from the current distribution) for
    each hypothetical experiment in *expparams*.

    Returns:
        np.array of shape (n_eps,) — one expected KLD per experiment.
    """
    # This is a special case of the KL divergence estimator (see below),
    # in which the other distribution is guaranteed to share support.
    # for models whose outcome number changes with experiment, we
    # take the easy way out and for-loop over experiments
    n_eps = expparams.size
    if n_eps > 1 and not self.model.is_n_outcomes_constant:
        risk = np.empty(n_eps)
        for idx in range(n_eps):
            risk[idx] = self.expected_information_gain(expparams[idx, np.newaxis])
        return risk
    # number of outcomes for the first experiment
    os = self.model.domain(expparams[0, np.newaxis])[0].values
    # compute the hypothetical weights, likelihoods and normalizations for
    # every possible outcome and expparam
    # the likelihood over outcomes should sum to 1, so don't compute for last outcome
    w_hyp, L, N = self.hypothetical_update(os[:-1], expparams, return_normalization=True, return_likelihood=True)
    # Reconstruct the final outcome's weights from normalisation: its
    # likelihood is 1 minus the sum of the others.
    w_hyp_last_outcome = (1 - L.sum(axis=0)) * self.particle_weights[np.newaxis, :]
    N = np.concatenate([N[:, :, 0], np.sum(w_hyp_last_outcome[np.newaxis, :, :], axis=2)], axis=0)
    w_hyp_last_outcome = w_hyp_last_outcome / N[-1, :, np.newaxis]
    w_hyp = np.concatenate([w_hyp, w_hyp_last_outcome[np.newaxis, :, :]], axis=0)
    # w_hyp.shape == (n_out, n_eps, n_particles)
    # N.shape == (n_out, n_eps)
    # compute the Kullback-Liebler divergence for every experiment and possible outcome
    # KLD.shape == (n_out, n_eps)
    KLD = np.sum(w_hyp * np.log(w_hyp / self.particle_weights), axis=2)
    # return the expected KLD (ie expected info gain) for every experiment
    return np.sum(N * KLD, axis=0)
r Calculates the expected information gain for each hypothetical experiment .
540
12
21,242
def posterior_marginal(self, idx_param=0, res=100, smoothing=0, range_min=None, range_max=None):
    """Estimate the marginal posterior distribution of one model
    parameter by differentiating the interpolated empirical CDF.

    Args:
        idx_param: index of the model parameter to marginalise onto.
        res: number of grid points on the returned axis.
        smoothing: Gaussian smoothing width, as a fraction of the axis
            range (0 disables smoothing).
        range_min, range_max: axis limits (default: particle extrema).

    Returns:
        (ps, pr): grid locations and estimated density values.
    """
    # We need to sort the particles to get cumsum to make sense.
    # interp1d would do it anyways (using argsort, too), so it's not a waste
    s = np.argsort(self.particle_locations[:, idx_param])
    locs = self.particle_locations[s, idx_param]
    # relevant axis discretization
    r_min = np.min(locs) if range_min is None else range_min
    r_max = np.max(locs) if range_max is None else range_max
    ps = np.linspace(r_min, r_max, res)
    # interpolate the cdf of the marginal distribution using cumsum
    # (one extra point past r_max pins the CDF at 1)
    interp = scipy.interpolate.interp1d(
        np.append(locs, r_max + np.abs(r_max - r_min)),
        np.append(np.cumsum(self.particle_weights[s]), 1),
        #kind='cubic',
        bounds_error=False, fill_value=0, assume_sorted=True
    )
    # get distribution from derivative of cdf, and smooth it
    pr = np.gradient(interp(ps), ps[1] - ps[0])
    if smoothing > 0:
        # in-place smoothing via output=pr
        gaussian_filter1d(pr, res * smoothing / (np.abs(r_max - r_min)), output=pr)
    del interp
    return ps, pr
Returns an estimate of the marginal distribution of a given model parameter based on taking the derivative of the interpolated cdf .
363
24
21,243
def plot_posterior_marginal(self, idx_param=0, res=100, smoothing=0,
                            range_min=None, range_max=None, label_xaxis=True,
                            other_plot_args={}, true_model=None):
    """Plot the marginal posterior of the requested model parameter.

    Args:
        idx_param: index of the model parameter to plot.
        res: resolution of the marginal estimate.
        smoothing: smoothing passed through to posterior_marginal().
        range_min, range_max: axis limits for the marginal estimate.
        label_xaxis: when True, label the x-axis with the parameter name.
        other_plot_args: extra keyword args forwarded to plt.plot.
            (Mutable default, but it is only read here, never mutated.)
        true_model: optional true parameter values; drawn as a dashed
            vertical line.

    Returns:
        The list of Line2D objects produced by plt.plot.
    """
    # Bug fix: the original rebound the `res` (resolution) parameter to
    # the plot result; use a distinct name for the returned lines.
    plotted_lines = plt.plot(
        *self.posterior_marginal(idx_param, res, smoothing, range_min, range_max),
        **other_plot_args
    )
    if label_xaxis:
        plt.xlabel('${}$'.format(self.model.modelparam_names[idx_param]))
    if true_model is not None:
        true_model = true_model[0, idx_param] if true_model.ndim == 2 else true_model[idx_param]
        old_ylim = plt.ylim()
        plt.vlines(true_model, old_ylim[0] - 0.1, old_ylim[1] + 0.1, color='k', linestyles='--')
        plt.ylim(old_ylim)
    return plotted_lines
Plots a marginal of the requested parameter .
255
9
21,244
def plot_covariance(self, corr=False, param_slice=None, tick_labels=None, tick_params=None):
    """Plot the covariance matrix of the posterior as a Hinton diagram.

    Args:
        corr: when True, plot the correlation matrix instead.
        param_slice: numpy slice selecting which model parameters to
            include (default: all).
        tick_labels: custom tick labels (default: '$name$' for each
            model parameter).
        tick_params: extra keyword args forwarded to plt.xticks/yticks.

    Raises:
        ImportError: if mpltools (mpls) is not available.
    """
    if mpls is None:
        raise ImportError("Hinton diagrams require mpltools.")
    if param_slice is None:
        param_slice = np.s_[:]
    # (positions, labels) pair consumed by plt.xticks/yticks below.
    tick_labels = (
        list(range(len(self.model.modelparam_names[param_slice]))),
        tick_labels if tick_labels is not None else list(map(u"${}$".format, self.model.modelparam_names[param_slice]))
    )
    cov = self.est_covariance_mtx(corr=corr)[param_slice, param_slice]
    retval = mpls.hinton(cov)
    plt.xticks(*tick_labels, **(tick_params if tick_params is not None else {}))
    plt.yticks(*tick_labels, **(tick_params if tick_params is not None else {}))
    plt.gca().xaxis.tick_top()
    return retval
Plots the covariance matrix of the posterior as a Hinton diagram .
267
15
21,245
def posterior_mesh(self, idx_param1=0, idx_param2=1, res1=100, res2=100, smoothing=0.01):
    """Return a mesh useful for plotting a kernel density estimate of a
    2D projection of the current posterior distribution.

    Args:
        idx_param1, idx_param2: indices of the two model parameters.
        res1, res2: mesh resolution along each axis.
        smoothing: Gaussian kernel scale.

    Returns:
        (p1s, p2s, pr): meshgrid coordinates and KDE values.
    """
    # WARNING: fancy indexing is used here, which means that a copy is
    # made.
    locs = self.particle_locations[:, [idx_param1, idx_param2]]
    p1s, p2s = np.meshgrid(
        np.linspace(np.min(locs[:, 0]), np.max(locs[:, 0]), res1),
        np.linspace(np.min(locs[:, 1]), np.max(locs[:, 1]), res2)
    )
    plot_locs = np.array([p1s, p2s]).T.reshape((np.prod(p1s.shape), 2))
    pr = np.sum(  # <- sum over the particles in the SMC approximation.
        np.prod(  # <- product over model parameters to get a multinormal
            # Evaluate the PDF at the plotting locations, with a normal
            # located at the particle locations.
            scipy.stats.norm.pdf(plot_locs[:, np.newaxis, :], scale=smoothing, loc=locs),
            axis=-1
        ) * self.particle_weights,
        axis=1
    ).reshape(p1s.shape)  # Finally, reshape back into the same shape as the mesh.
    return p1s, p2s, pr
Returns a mesh useful for plotting of kernel density estimation of a 2D projection of the current posterior distribution .
352
21
21,246
def plot_posterior_contour(self, idx_param1=0, idx_param2=1, res1=100, res2=100, smoothing=0.01):
    """Plot a contour of the kernel density estimate of a 2D projection
    of the current posterior distribution."""
    mesh = self.posterior_mesh(idx_param1, idx_param2, res1, res2, smoothing)
    return plt.contour(*mesh)
Plots a contour of the kernel density estimation of a 2D projection of the current posterior distribution .
83
21
21,247
def plot_rebit_prior(prior, rebit_axes=REBIT_AXES, n_samples=2000, true_state=None, true_size=250, force_mean=None, legend=True, mean_color_index=2):
    """Plot rebit states drawn from a given prior, optionally overlaying
    the true state and the prior mean.

    Args:
        prior: distribution with .sample(); its _mean/_basis are used
            to plot the mean when available.
        rebit_axes: which model-parameter axes form the rebit plane.
        n_samples: number of prior samples to scatter.
        true_state: optional true state to mark with a star.
        true_size: marker size for the true state.
        force_mean: mean to plot instead of prior._mean.
        legend: whether to draw the legend.
        mean_color_index: palette index used for the mean marker.

    NOTE(review): plt.rcParams['axes.color_cycle'] was removed in
    matplotlib 2.0 (replaced by 'axes.prop_cycle'); confirm the pinned
    matplotlib version before relying on this.
    """
    pallette = plt.rcParams['axes.color_cycle']
    plot_rebit_modelparams(prior.sample(n_samples), c=pallette[0], label='Prior', rebit_axes=rebit_axes)
    if true_state is not None:
        plot_rebit_modelparams(true_state, c=pallette[1], label='True', marker='*', s=true_size, rebit_axes=rebit_axes)
    if hasattr(prior, '_mean') or force_mean is not None:
        mean = force_mean if force_mean is not None else prior._mean
        plot_rebit_modelparams(
            prior._basis.state_to_modelparams(mean)[None, :],
            edgecolors=pallette[mean_color_index], s=250, facecolors='none', linewidth=3, label='Mean', rebit_axes=rebit_axes
        )
    plot_decorate_rebits(prior.basis, rebit_axes=rebit_axes)
    if legend:
        plt.legend(loc='lower left', ncol=3, scatterpoints=1)
Plots rebit states drawn from a given prior .
347
11
21,248
def plot_rebit_posterior(updater, prior=None, true_state=None, n_std=3,
                         rebit_axes=REBIT_AXES, true_size=250, legend=True,
                         level=0.95, region_est_method='cov'):
    """
    Plots a posterior distribution over rebits, along with a region estimate:
    either a covariance ellipsoid (``region_est_method='cov'``) or the convex
    hull of a credible particle subset (``region_est_method='hull'``).

    :param updater: SMC updater holding the posterior particles.
    :param prior: If not None, its mean is plotted as "Prior Mean".
    :param true_state: State to highlight as "True".
    :param int n_std: Number of standard deviations for the ellipse.
    :param rebit_axes: Pair of model-parameter indices forming the rebit plane.
    :param float level: Credibility level used by the 'hull' method.
    """
    # FIX: 'axes.color_cycle' was removed in matplotlib 2.0; use the modern
    # 'axes.prop_cycle' and fall back for very old matplotlib versions.
    try:
        pallette = [prop['color'] for prop in plt.rcParams['axes.prop_cycle']]
    except KeyError:
        pallette = plt.rcParams['axes.color_cycle']

    plot_rebit_modelparams(updater.particle_locations,
                           c=pallette[0],
                           label='Posterior',
                           s=12 * np.sqrt(updater.particle_weights * len(updater.particle_weights)),
                           rebit_axes=rebit_axes,
                           zorder=-10)

    plot_rebit_modelparams(true_state,
                           c=pallette[1],
                           label='True', marker='*', s=true_size,
                           rebit_axes=rebit_axes)

    if prior is not None:
        plot_rebit_modelparams(
            prior._basis.state_to_modelparams(prior._mean)[None, :],
            edgecolors=pallette[3], s=250, facecolors='none', linewidth=3,
            label='Prior Mean',
            rebit_axes=rebit_axes)

    plot_rebit_modelparams(
        updater.est_mean()[None, :],
        edgecolors=pallette[2], s=250, facecolors='none', linewidth=3,
        label='Posterior Mean',
        rebit_axes=rebit_axes)

    if region_est_method == 'cov':
        # Multiplying by sqrt(2) rescales to the Bloch ball.
        cov = 2 * updater.est_covariance_mtx()
        # Use fancy indexing to cut out all but the desired submatrix.
        cov = cov[rebit_axes, :][:, rebit_axes]
        plot_cov_ellipse(
            cov, updater.est_mean()[rebit_axes] * np.sqrt(2),
            nstd=n_std,
            edgecolor='k', fill=True, lw=2,
            facecolor=pallette[0],
            alpha=0.4,
            zorder=-9,
            label='Posterior Cov Ellipse ($Z = {}$)'.format(n_std))
    elif region_est_method == 'hull':
        # Convex hull of the credible particles, projected onto the rebit
        # axes.  Only the vertices are needed; the faces are unused here.
        faces, vertices = updater.region_est_hull(level, modelparam_slice=rebit_axes)
        polygon = Polygon(
            vertices * np.sqrt(2), facecolor=pallette[0],
            alpha=0.4, zorder=-9,
            label=r'Credible Region ($\alpha = {}$)'.format(level),
            edgecolor='k', lw=2, fill=True)
        # TODO: consolidate add_patch code with that above.
        plt.gca().add_patch(polygon)

    plot_decorate_rebits(updater.model.base_model._basis,
                         rebit_axes=rebit_axes)

    if legend:
        plt.legend(loc='lower left', ncol=4, scatterpoints=1)
Plots posterior distributions over rebits including covariance ellipsoids
779
14
21,249
def data_to_params(data, expparams_dtype, col_outcomes=(0, 'counts'), cols_expparams=None):
    """
    Given data as a NumPy array, separates out each column either as the
    outcomes or as a field of an expparams array.  Columns may be specified
    either as indices into a two-axis scalar array or as field names for a
    one-axis record array.

    :param data: Two-axis scalar array or one-axis record array of raw data.
    :param expparams_dtype: Scalar dtype, or a structured dtype description.
    :param tuple col_outcomes: ``(index, field_name)`` locating the outcomes.
    :param cols_expparams: ``(index, field_name)`` for a scalar dtype, or a
        dict mapping expparam field names to column specifiers.
    :return: Tuple ``(outcomes, expparams)``.
    """
    BY_IDX, BY_NAME = range(2)

    # FIX: np.issctype was deprecated and removed in NumPy 2.0.  For our
    # purposes a dtype is "scalar" iff it has no named fields.
    is_exp_scalar = np.dtype(expparams_dtype).fields is None
    is_data_scalar = data.dtype.fields is None

    # Choose a slicer appropriate to the data layout: column index for a
    # scalar array, field name for a record array.
    s_ = (
        (lambda idx: np.s_[..., idx[BY_IDX]])
        if is_data_scalar else
        (lambda idx: np.s_[idx[BY_NAME]])
    )

    outcomes = data[s_(col_outcomes)].astype(int)

    expparams = np.empty(outcomes.shape, dtype=expparams_dtype)
    if is_exp_scalar:
        expparams[:] = data[s_(cols_expparams)]
    else:
        for expparams_key, column in cols_expparams.items():
            expparams[expparams_key] = data[s_(column)]

    return outcomes, expparams
Given data as a NumPy array separates out each column either as the outcomes or as a field of an expparams array . Columns may be specified either as indices into a two - axis scalar array or as field names for a one - axis record array .
291
55
21,250
def canonicalize(self, modelparams):
    """
    Truncates negative eigenvalues from each state represented by the given
    model parameter vectors, renormalizing as appropriate.
    """
    modelparams = np.apply_along_axis(self.trunc_neg_eigs, 1, modelparams)
    # Renormalize particles unless subnormalized states are allowed.
    # NOTE(review): '_allow_subnormalied' looks like a typo of
    # '_allow_subnormalized' — confirm against the attribute set in __init__.
    if not self._allow_subnormalied:
        modelparams = self.renormalize(modelparams)
    return modelparams
Truncates negative eigenvalues from each state represented by a tensor of model parameter vectors and renormalizes as appropriate .
75
27
21,251
def trunc_neg_eigs(self, particle):
    """
    Given a state represented as a model parameter vector, returns a model
    parameter vector for the same state with any negative eigenvalues
    clipped to zero.
    """
    matrix = np.tensordot(particle, self._basis.data.conj(), 1)
    eigvals, eigvecs = np.linalg.eig(matrix)
    if np.all(eigvals >= 0):
        # Already positive semidefinite: nothing to do.
        return particle
    eigvals[eigvals < 0] = 0
    clipped = np.dot(eigvecs * eigvals, eigvecs.conj().T)
    fixed = np.real(np.dot(self._basis.flat(), clipped.flatten()))
    # The identity component of a valid state must stay positive.
    assert fixed[0] > 0
    return fixed
Given a state represented as a model parameter vector returns a model parameter vector representing the same state with any negative eigenvalues set to zero .
142
28
21,252
def renormalize(self, modelparams):
    """
    Rescales one or more states represented as model parameter vectors so
    that each state has trace 1.
    """
    # By convention the 0th (identity) basis element has trace sqrt(dim),
    # so a trace-1 state has modelparams[:, 0] == 1 / sqrt(dim).
    traces = modelparams[:, 0] * np.sqrt(self._dim)
    assert not np.sum(traces == 0)
    return modelparams / traces[:, None]
Renormalizes one or more states represented as model parameter vectors such that each state has trace 1 .
93
20
21,253
def values(self):
    """
    Returns an np.array of type ``dtype`` containing values from the product
    domain.  For domains where ``is_finite`` is True, all elements of the
    domain are yielded exactly once.
    """
    per_domain = [domain.values for domain in self._domains]
    # Cartesian product of the factor domains, joined field-wise.
    return np.concatenate([
        join_struct_arrays([np.array(value) for value in combo])
        for combo in product(*per_domain)
    ])
Returns an np . array of type dtype containing some values from the domain . For domains where is_finite is True all elements of the domain will be yielded exactly once .
62
36
21,254
def min(self):
    """Returns the minimum value of the domain (cast to int when finite)."""
    lower = self._min
    return lower if np.isinf(lower) else int(lower)
Returns the minimum value of the domain .
31
8
21,255
def max(self):
    """Returns the maximum value of the domain (cast to int when finite)."""
    upper = self._max
    return upper if np.isinf(upper) else int(upper)
Returns the maximum value of the domain .
31
8
21,256
def is_finite(self):
    """Whether the domain contains a finite number of points."""
    # Finite iff neither bound is (positive or negative) infinity.
    return not (np.isinf(self.min) or np.isinf(self.max))
Whether or not the domain contains a finite number of points .
31
12
21,257
def n_members(self):
    """
    Returns the number of members in the domain: the count of length-
    ``n_elements`` tuples of non-negative integers summing to ``n_meas``
    (the stars-and-bars formula).
    """
    total = self.n_meas + self.n_elements - 1
    return int(binom(total, self.n_elements - 1))
Returns the number of members in the domain if it is_finite otherwise returns None .
40
18
21,258
def to_regular_array(self, A):
    """
    Converts from an array of type ``self.dtype`` to an array of type int,
    with an additional trailing index labeling the tuple indices.
    """
    # NB: `self` is unused; kept as a method for API consistency with
    # from_regular_array.
    n_fields = len(A.dtype.names)
    return A.view((int, n_fields)).reshape(A.shape + (-1,))
Converts from an array of type self . dtype to an array of type int with an additional index labeling the tuple indeces .
66
27
21,259
def from_regular_array(self, A):
    """
    Converts from an array of type int — whose last index is assumed to have
    length ``self.n_elements`` — to an array of type ``self.dtype`` with one
    fewer index.
    """
    lead_shape = A.shape[:-1]
    flat = A.reshape((np.prod(lead_shape), -1))
    return flat.view(dtype=self.dtype).squeeze(-1).reshape(lead_shape)
Converts from an array of type int where the last index is assumed to have length self . n_elements to an array of type self . d_type with one fewer index .
66
38
21,260
def start(self, max):
    """
    Displays the progress bar for a given maximum value.

    Silently does nothing when no IPython display/widget backend is
    available.
    """
    # FIX: a bare ``except:`` also swallowed KeyboardInterrupt/SystemExit;
    # this best-effort display should only ignore ordinary errors.
    try:
        self.widget.max = max
        display(self.widget)
    except Exception:
        pass
Displays the progress bar for a given maximum value .
26
11
21,261
def likelihood(self, outcomes, modelparams, expparams):
    """
    Calculates the likelihood, via the Born rule, of each outcome given the
    hypothesized states (``modelparams``) and the Pauli measurement and
    visibility given by ``expparams``.
    """
    # Delegate to the superclass first so that call counting stays
    # consolidated there.
    super(MultiQubitStatePauliModel, self).likelihood(outcomes, modelparams, expparams)

    # expparams['pauli'] has shape (n_exp,) and selects one Bloch component
    # per experiment.
    pr0 = 0.5 * (1 + modelparams[:, expparams['pauli']])

    # Clip, rather than assuming the particles encode valid states with
    # positive weights.
    pr0[pr0 < 0] = 0
    pr0[pr0 > 1] = 1

    # Fold in the measurement visibility (expparams['vis'], shape (n_exp,)):
    # vis * pr0 + (1 - vis) / 2.
    pr0 = expparams['vis'] * pr0 + (1 - expparams['vis']) * 0.5

    # Concatenate over outcomes.
    return Model.pr0_to_likelihood_array(outcomes, pr0)
Calculates the likelihood function at the states specified by modelparams and measurement specified by expparams . This is given by the Born rule and is the probability of outcomes given the state and measurement operator .
217
42
21,262
def domain(self, expparams):
    """Returns a list of Domains, one for each input expparam."""
    return [
        IntegerDomain(min=0, max=n - 1)
        for n in self.n_outcomes(expparams)
    ]
Returns a list of Domain s one for each input expparam .
46
14
21,263
def underlying_likelihood(self, binary_outcomes, modelparams, expparams):
    """
    Given outcomes hypothesized for the underlying model, returns the
    likelihood with which those outcomes occur.
    """
    # Strip away any derived model parameters before delegating.
    base_modelparams = modelparams[..., self._orig_mps_slice]
    return self.underlying_model.likelihood(binary_outcomes, base_modelparams, expparams)
Given outcomes hypothesized for the underlying model returns the likelihood which which those outcomes occur .
66
16
21,264
def are_expparam_dtypes_consistent(self, expparams):
    """
    Returns True iff all of the given expparams correspond to outcome
    domains with the same dtype.
    """
    if self.is_n_outcomes_constant:
        # All domains are equal in this case, so the dtypes trivially agree.
        return True
    if expparams.size == 0:
        return True
    # Otherwise the dtypes actually have to be compared.
    domains = self.domain(expparams)
    reference = domains[0].dtype
    return all(domain.dtype == reference for domain in domains[1:])
Returns True iff all of the given expparams correspond to outcome domains with the same dtype . For efficiency concrete subclasses should override this method if the result is always True .
119
38
21,265
def simulate_experiment(self, modelparams, expparams, repeat=1):
    """
    Produces data according to the given model parameters and experimental
    parameters, structured as a NumPy array.

    NOTE(review): this row appears truncated — the original implementation
    presumably continues past the assertion.
    """
    # Track how many single-shot simulations this call represents.
    n_sims = modelparams.shape[0] * expparams.shape[0] * repeat
    self._sim_count += n_sims
    assert self.are_expparam_dtypes_consistent(expparams)
Produces data according to the given model parameters and experimental parameters structured as a NumPy array .
69
19
21,266
def likelihood(self, outcomes, modelparams, expparams):
    r"""
    Calculates the probability of each given outcome, conditioned on each
    given model parameter vector and each given experimental control
    setting.

    NOTE(review): this row appears truncated — the original implementation
    presumably continues past the counter update.
    """
    # Tally how many scalar likelihood evaluations the inner-most loop of
    # this call represents.
    self._call_count += (
        safe_shape(outcomes) * safe_shape(modelparams) * safe_shape(expparams)
    )
r Calculates the probability of each given outcome conditioned on each given model parameter vector and each given experimental control setting .
63
23
21,267
def get_qutip_module ( required_version = '3.2' ) : try : import qutip as qt from distutils . version import LooseVersion _qt_version = LooseVersion ( qt . version . version ) if _qt_version < LooseVersion ( required_version ) : return None except ImportError : return None return qt
Attempts to return the qutip module but silently returns None if it can t be imported or doesn t have version at least required_version .
80
29
21,268
def particle_covariance_mtx(weights, locations):
    """
    Returns an estimate of the covariance of a distribution represented by a
    given set of SMC particles.

    :param weights: Particle weights, shape ``(n_particles,)``.
    :param locations: Particle locations, shape ``(n_particles, n_modelparams)``.
    :return: Covariance estimate, shape ``(n_modelparams, n_modelparams)``.
    """
    warnings.warn(
        'particle_covariance_mtx is deprecated, please use distributions.ParticleDistribution',
        DeprecationWarning)

    # Mean model vector, shape (n_modelparams,).
    mu = particle_meanfn(weights, locations)

    # Particle locations transposed to shape (n_modelparams, n_particles),
    # and a short alias for the weights, shape (n_particles,).
    xs = locations.transpose([1, 0])
    ws = weights

    # E[x x^T] - mu mu^T, where the expectation over the particle index is
    # taken with the Einstein summation A_{m,n} = w_i x_{m,i} x_{n,i}: all
    # three factors carry the particle index rightmost, which is reduced
    # while the model-parameter index varies between the two xs factors.
    cov = (
        np.einsum('i,mi,ni', ws, xs, xs)
        - np.dot(mu[..., np.newaxis], mu[np.newaxis, ...])
    )

    assert np.all(np.isfinite(cov))
    # The SMC approximation is not guaranteed to produce a positive-
    # semidefinite covariance matrix; warn the caller if it did not.
    if not np.all(la.eig(cov)[0] >= 0):
        warnings.warn(
            'Numerical error in covariance estimation causing positive semidefinite violation.',
            ApproximationWarning)

    return cov
Returns an estimate of the covariance of a distribution represented by a given set of SMC particle .
463
20
21,269
def ellipsoid_volume(A=None, invA=None):
    """
    Returns the volume of an ellipsoid given either its matrix ``A`` or the
    inverse of its matrix ``invA``.

    :raises ValueError: if neither ``A`` nor ``invA`` is given.
    """
    if invA is None and A is None:
        raise ValueError("Must pass either inverse(A) or A.")
    if invA is None:
        invA = la.inv(A)

    # Volume of the n-dimensional unit ball:
    # http://en.wikipedia.org/wiki/Unit_sphere#General_area_and_volume_formulas
    n = invA.shape[0]
    unit_volume = (np.pi ** (n / 2)) / gamma(1 + (n / 2))

    return unit_volume * la.det(sqrtm(invA))
Returns the volume of an ellipsoid given either its matrix or the inverse of its matrix .
147
20
21,270
def in_ellipsoid(x, A, c):
    """
    Determines which of the points ``x`` lie in the closed ellipsoid with
    shape matrix ``A`` centered at ``c``, i.e. whether
    ``(c - x)^T A^{-1} (c - x) <= 1``.
    """
    invA = np.linalg.inv(A)
    if x.ndim == 1:
        diff = c - x
        return np.einsum('j,jl,l', diff, invA, diff) <= 1
    diff = c[np.newaxis, :] - x
    return np.einsum('ij,jl,il->i', diff, invA, diff) <= 1
Determines which of the points x are in the closed ellipsoid with shape matrix A centered at c . For a single point x this is computed as
109
33
21,271
def assert_sigfigs_equal(x, y, sigfigs=3):
    """
    Tests whether all elements in ``x`` and ``y`` agree up to a certain
    number of significant figures.
    """
    # Power of ten that rescales x into [1, 10).
    exponent = np.floor(np.log10(x))
    scale = 10 ** (-exponent)
    # Rescale both by the same amount, then compare to the requested number
    # of decimal places.
    assert_almost_equal(x * scale, y * scale, sigfigs)
Tests if all elements in x and y agree up to a certain number of significant figures .
122
19
21,272
def format_uncertainty(value, uncertianty, scinotn_break=4):
    """
    Given a value and its uncertainty, format them as a LaTeX string for
    pretty-printing.

    :param int scinotn_break: Order-of-magnitude threshold beyond which
        scientific notation is used.
    """
    if uncertianty == 0:
        # Exact number: print fixed-point, since every digit matters.
        # FIXME: this assumes a precision of 6; need to select that dynamically.
        return "{0:f}".format(value)

    # Orders of magnitude of the uncertainty and the value; zero is printed
    # as a single digit, i.e. as wide as the string "1".
    mag_unc = int(np.log10(np.abs(uncertianty)))
    mag_val = int(np.log10(np.abs(value))) if value != 0 else 0
    n_digits = max(mag_val - mag_unc, 0)

    # NB: the format strings below are built in two stages — the first
    # .format() fills in the precision, the second fills in the numbers.
    if abs(mag_val) < abs(mag_unc) and abs(mag_unc) > scinotn_break:
        # Something close to zero: rescale by the uncertainty's magnitude
        # and use scientific notation.
        scale = 10 ** mag_unc
        return r"({{0:0.{0}f}} \pm {{1:0.{0}f}}) \times 10^{{2}}".format(
            n_digits
        ).format(value / scale, uncertianty / scale, mag_unc)
    if abs(mag_val) <= scinotn_break:
        # Plain "0.00 \pm 0.01" form.
        return r"{{0:0.{n_digits}f}} \pm {{1:0.{n_digits}f}}".format(
            n_digits=n_digits
        ).format(value, uncertianty)
    scale = 10 ** mag_val
    return r"({{0:0.{0}f}} \pm {{1:0.{0}f}}) \times 10^{{2}}".format(
        n_digits
    ).format(value / scale, uncertianty / scale, mag_val)
Given a value and its uncertianty format as a LaTeX string for pretty - printing .
441
20
21,273
def from_simplex(x):
    r"""
    Interprets the last index of ``x`` as unit simplices and returns a real
    array of the same shape in logit space (inverse stick-breaking
    transform).

    :param np.ndarray x: Points on the unit simplex along the last axis.
    :return np.ndarray: Logit-transformed stick-breaking fractions; the last
        component is identically 0.
    """
    n = x.shape[-1]
    # z are the stick-breaking fractions in [0, 1]; the last one is always
    # 1, so it is handled separately below.
    z = np.empty(shape=x.shape)
    z[..., 0] = x[..., 0]
    z[..., 1:-1] = x[..., 1:-1] / (1 - x[..., :-2].cumsum(axis=-1))
    # Logit-transform the breaking fractions, centering so that the uniform
    # simplex maps to the origin.
    # FIX: ``np.float`` was removed in NumPy 1.24; use the builtin ``float``.
    z[..., :-1] = logit(z[..., :-1]) - logit(1 / (n - np.arange(n - 1, dtype=float)))
    # Set this to 0 manually to avoid subtracting inf - inf.
    z[..., -1] = 0
    return z
r Inteprets the last index of x as unit simplices and returns a real array of the sampe shape in logit space .
196
28
21,274
def join_struct_arrays(arrays):
    """
    Takes a list of possibly structured arrays, concatenates their dtypes,
    and returns one big array with that combined dtype.  Inverse of
    ``separate_struct_array``.

    Adapted from
    http://stackoverflow.com/questions/5355744/numpy-joining-structured-arrays
    """
    itemsizes = np.array([a.itemsize for a in arrays])
    offsets = np.r_[0, itemsizes.cumsum()]
    shape = arrays[0].shape
    # Raw byte buffer wide enough for all fields laid side by side.
    raw = np.empty(shape + (offsets[-1],), dtype=np.uint8)
    for a, size, offset in zip(arrays, itemsizes, offsets):
        raw[..., offset:offset + size] = (
            np.atleast_1d(a).view(np.uint8).reshape(shape + (size,))
        )
    combined_dtype = sum((a.dtype.descr for a in arrays), [])
    return raw.ravel().view(combined_dtype)
Takes a list of possibly structured arrays concatenates their dtypes and returns one big array with that dtype . Does the inverse of separate_struct_array .
198
34
21,275
def separate_struct_array(array, dtypes):
    """
    Takes an array with a structured dtype and separates it out into a list
    of arrays whose dtypes come from ``dtypes``.  Inverse of
    ``join_struct_arrays``.

    :param dtypes: Either a sequence of dtype specifiers, or a single dtype
        to be repeated for every equal-width field.
    """
    try:
        offsets = np.cumsum([np.dtype(dtype).itemsize for dtype in dtypes])
    except TypeError:
        # A single dtype was given: deduce how many equal-width fields fit
        # in each record.
        width = np.dtype(dtypes).itemsize
        n_fields = int(array.nbytes / (array.size * width))
        offsets = np.cumsum([width] * n_fields)
        dtypes = [dtypes] * n_fields
    offsets = np.concatenate([[0], offsets]).astype(int)
    raw = array.view(np.uint8).reshape(array.shape + (-1,))
    return [
        raw[..., offsets[idx]:offsets[idx + 1]].flatten().view(dtype)
        for idx, dtype in enumerate(dtypes)
    ]
Takes an array with a structured dtype and separates it out into a list of arrays with dtypes coming from the input dtypes . Does the inverse of join_struct_arrays .
211
39
21,276
def sqrtm_psd(A, est_error=True, check_finite=True):
    """
    Returns the matrix square root of a positive semidefinite matrix,
    truncating any negative eigenvalues.

    :param bool est_error: When True, also return the Frobenius norm of
        ``A_sqrt @ A_sqrt - A`` as an error estimate.
    """
    w, v = eigh(A, check_finite=check_finite)
    # Clip (numerically) negative eigenvalues before the square root.
    w = np.sqrt(np.clip(w, 0, None))
    A_sqrt = (v * w).dot(v.conj().T)
    if not est_error:
        return A_sqrt
    return A_sqrt, np.linalg.norm(np.dot(A_sqrt, A_sqrt) - A, 'fro')
Returns the matrix square root of a positive semidefinite matrix truncating negative eigenvalues .
134
20
21,277
def tensor_product_basis(*bases):
    """
    Returns a TomographyBasis formed by the tensor product of two or more
    factor bases.  Each basis element is the tensor product of basis
    elements from the underlying factors.
    """
    dim = np.prod([basis.data.shape[1] for basis in bases])
    tp_data = np.zeros((dim ** 2, dim, dim), dtype=complex)
    # Each product of factor elements becomes one element of the new basis.
    for idx, factors in enumerate(it.product(*[basis.data for basis in bases])):
        tp_data[idx, :, :] = reduce(np.kron, factors)
    combined_dims = sum((factor.dims for factor in bases), [])
    combined_labels = list(map(
        r"\otimes".join,
        it.product(*[basis.labels for basis in bases])
    ))
    return TomographyBasis(tp_data, combined_dims, combined_labels)
Returns a TomographyBasis formed by the tensor product of two or more factor bases . Each basis element is the tensor product of basis elements from the underlying factors .
175
35
21,278
def state_to_modelparams(self, state):
    """Converts a QuTiP-represented state into a model parameter vector."""
    basis = self.flat()
    flat_state = state.data.todense().view(np.ndarray).flatten()
    # NB: assumes Hermitian state and basis!
    return np.real(np.dot(basis.conj(), flat_state))
Converts a QuTiP - represented state into a model parameter vector .
74
15
21,279
def modelparams_to_state(self, modelparams):
    """
    Converts one or more vectors of model parameters into QuTiP-represented
    states.
    """
    if modelparams.ndim != 1:
        # Vectorize over the leading (particle) index.
        return list(map(self.modelparams_to_state, modelparams))
    qobj = qt.Qobj(
        np.tensordot(modelparams, self.data, 1),
        dims=[self.dims, self.dims]
    )
    if self.superrep is not None:
        qobj.superrep = self.superrep
    return qobj
Converts one or more vectors of model parameters into QuTiP - represented states .
105
17
21,280
def covariance_mtx_to_superop(self, mtx):
    """
    Converts a covariance matrix to the corresponding superoperator,
    represented as a QuTiP Qobj with type="super".
    """
    M = self.flat()
    superop = np.dot(np.dot(M.conj().T, mtx), M)
    return qt.Qobj(superop, dims=[[self.dims] * 2] * 2)
Converts a covariance matrix to the corresponding superoperator represented as a QuTiP Qobj with type = super .
71
24
21,281
def _dist_kw_arg ( self , k ) : if self . _dist_kw_args is not None : return { key : self . _dist_kw_args [ key ] [ k , : ] for key in self . _dist_kw_args . keys ( ) } else : return { }
Returns a dictionary of keyword arguments for the k th distribution .
67
12
21,282
def sample(self, n=1):
    """
    Returns ``n`` random samples from the current particle distribution,
    drawn according to the particle weights.
    """
    cdf = np.cumsum(self.particle_weights)
    # searchsorted can return len(cdf) if a draw lands past the last bin
    # due to floating-point round-off, so clamp to the last valid index.
    idxs = np.minimum(
        cdf.searchsorted(np.random.random((n,)), side='right'),
        len(cdf) - 1
    )
    return self.particle_locations[idxs]
Returns random samples from the current particle distribution according to particle weights .
81
13
21,283
def est_covariance_mtx(self, corr=False):
    """
    Returns the full-rank covariance matrix of the current particle
    distribution; when ``corr`` is True, normalized to a correlation matrix.
    """
    cov = self.particle_covariance_mtx(self.particle_weights, self.particle_locations)
    if corr:
        stds = np.sqrt(np.diag(cov))
        cov /= np.outer(stds, stds)
    return cov
Returns the full - rank covariance matrix of the current particle distribution .
83
14
21,284
def est_credible_region(self, level=0.95, return_outside=False, modelparam_slice=None):
    """
    Returns an array of particles inside a credible region of the given
    level, such that the described region has probability mass no less than
    ``level``.

    :param float level: Desired minimum probability mass of the region.
    :param bool return_outside: When True, also return the particles
        outside the region.
    :param modelparam_slice: Optional slice selecting which model
        parameters to return.
    """
    # Optionally project onto a slice of the model parameters.
    s_ = np.s_[modelparam_slice] if modelparam_slice is not None else np.s_[:]
    mps = self.particle_locations[:, s_]

    # Order the particles by descending weight: `order` permutes
    # particle_weights into descending order.
    order = np.argsort(self.particle_weights)[::-1]
    cumulative = np.cumsum(self.particle_weights[order])

    # Particles whose running mass does not yet exceed the level...
    inside = cumulative <= level
    # ...plus one more, which by construction pushes the region's mass to
    # at least `level`.
    inside[np.sum(inside)] = True

    # Permute the particles by weight, then select the credible ones.
    if return_outside:
        return mps[order][inside], mps[order][np.logical_not(inside)]
    return mps[order][inside]
Returns an array containing particles inside a credible region of a given level such that the described region has probability mass no less than the desired level .
382
28
21,285
def region_est_hull(self, level=0.95, modelparam_slice=None):
    """
    Estimates a credible region over models by taking the convex hull of a
    credible subset of particles.

    :return: Tuple ``(faces, vertices)`` of the hull.
    """
    points = self.est_credible_region(level=level, modelparam_slice=modelparam_slice)
    hull = ConvexHull(points)
    faces = points[hull.simplices]
    vertices = points[u.uniquify(hull.vertices.flatten())]
    return faces, vertices
Estimates a credible region over models by taking the convex hull of a credible subset of particles .
87
20
21,286
def in_credible_region(self, points, level=0.95, modelparam_slice=None,
                       method='hpd-hull', tol=0.0001):
    """
    Decides whether each of ``points`` lies within a credible region of the
    current distribution.

    :param str method: One of 'pce' (posterior covariance ellipsoid),
        'hpd-mvee' (minimum-volume enclosing ellipsoid of an HPD particle
        subset), or 'hpd-hull' (convex hull of an HPD particle subset).
    :param float tol: Tolerance for the MVEE computation.
    :return: Boolean array, one entry per point.
    """
    if method == 'pce':
        s_ = np.s_[modelparam_slice] if modelparam_slice is not None else np.s_[:]
        A = self.est_covariance_mtx()[s_, s_]
        c = self.est_mean()[s_]
        # The chi-squared distribution gives the correct level-curve
        # conversion.
        mult = st.chi2.ppf(level, c.size)
        results = u.in_ellipsoid(points, mult * A, c)

    elif method == 'hpd-mvee':
        tol = 0.0001 if tol is None else tol
        A, c = self.region_est_ellipsoid(level=level, tol=tol,
                                         modelparam_slice=modelparam_slice)
        results = u.in_ellipsoid(points, np.linalg.inv(A), c)

    elif method == 'hpd-hull':
        # It would be more natural to call region_est_hull, but that uses
        # ConvexHull, which has no easy way of testing point membership.
        # Delaunay fills the convex hull with (n_mps+1)-dimensional
        # simplices whose union is an almost-everywhere disjoint cover, so
        # a point is inside iff it lands in some simplex.
        # (http://stackoverflow.com/a/16898636/1082565)
        hull = Delaunay(self.est_credible_region(level=level,
                                                 modelparam_slice=modelparam_slice))
        results = hull.find_simplex(points) >= 0

    return results
Decides whether each of the points lie within a credible region of the current distribution .
442
17
21,287
def sample(self, n=1):
    """
    Returns ``n`` samples from this probability distribution, redrawing any
    samples that the model rejects, up to ``self._maxiters`` rounds.

    :raises RuntimeError: if postselection does not converge.
    """
    samples = np.empty((n, self.n_rvs))
    pending = np.arange(n)
    iters = 0
    while pending.size and iters < self._maxiters:
        samples[pending] = self._dist.sample(len(pending))
        # Keep only the indices whose draws the model rejected.
        invalid = np.logical_not(self._model.are_models_valid(samples[pending, :]))
        pending = pending[np.nonzero(invalid)[0]]
        iters += 1
    if pending.size:
        raise RuntimeError(
            "Did not successfully postselect within {} iterations.".format(self._maxiters))
    return samples
Returns one or more samples from this probability distribution .
187
10
21,288
def iter_actions(self):
    """Yield the service's actions with their arguments.

    Each yielded Action carries the action name plus its "in" and "out"
    Argument lists, with Vartype metadata parsed from the service's SCPD
    state-variable table.
    """
    # pylint: disable=too-many-locals
    # pylint: disable=invalid-name
    ns = '{urn:schemas-upnp-org:service-1-0}'

    # Get the SCPD body as bytes and feed it directly to ElementTree,
    # which likes to receive bytes.
    scpd_body = requests.get(self.base_url + self.scpd_url).content
    tree = XML.fromstring(scpd_body)

    # Parse the state variables to get the relevant variable types.
    vartypes = {}
    srvStateTables = tree.findall('{}serviceStateTable'.format(ns))
    for srvStateTable in srvStateTables:
        statevars = srvStateTable.findall('{}stateVariable'.format(ns))
        for state in statevars:
            name = state.findtext('{}name'.format(ns))
            datatype = state.findtext('{}dataType'.format(ns))
            default = state.findtext('{}defaultValue'.format(ns))
            value_list_elt = state.find('{}allowedValueList'.format(ns))
            if value_list_elt is None:
                value_list_elt = ()
            value_list = [item.text for item in value_list_elt] or None
            value_range_elt = state.find('{}allowedValueRange'.format(ns))
            if value_range_elt is None:
                value_range_elt = ()
            value_range = [item.text for item in value_range_elt] or None
            vartypes[name] = Vartype(datatype, default, value_list, value_range)

    # Find all the actions.
    actionLists = tree.findall('{}actionList'.format(ns))
    for actionList in actionLists:
        actions = actionList.findall('{}action'.format(ns))
        for i in actions:
            action_name = i.findtext('{}name'.format(ns))
            argLists = i.findall('{}argumentList'.format(ns))
            for argList in argLists:
                args_iter = argList.findall('{}argument'.format(ns))
                in_args = []
                out_args = []
                for arg in args_iter:
                    arg_name = arg.findtext('{}name'.format(ns))
                    direction = arg.findtext('{}direction'.format(ns))
                    related_variable = arg.findtext('{}relatedStateVariable'.format(ns))
                    vartype = vartypes[related_variable]
                    if direction == "in":
                        in_args.append(Argument(arg_name, vartype))
                    else:
                        out_args.append(Argument(arg_name, vartype))
                yield Action(action_name, in_args, out_args)
Yield the service s actions with their arguments .
673
10
21,289
def parse_event_xml(xml_event):
    """Parse the body of a UPnP event.

    Returns a dict mapping underscore-cased variable names to values;
    LastChange events are unpacked into their individual variables, and
    DIDL metadata values are converted to music library data structures
    (or SoCoFault on parse errors).
    """
    result = {}
    tree = XML.fromstring(xml_event)
    # Property values sit just under the propertyset, which uses this
    # namespace.
    properties = tree.findall('{urn:schemas-upnp-org:event-1-0}property')
    for prop in properties:
        # pylint: disable=too-many-nested-blocks
        for variable in prop:
            # Special handling for a LastChange event. For details on
            # LastChange events, see
            # http://upnp.org/specs/av/UPnP-av-RenderingControl-v1-Service.pdf
            # and http://upnp.org/specs/av/UPnP-av-AVTransport-v1-Service.pdf
            if variable.tag == "LastChange":
                last_change_tree = XML.fromstring(variable.text.encode('utf-8'))
                # We assume there is only one InstanceID tag. This is true
                # for Sonos, as far as we know.
                # InstanceID can be in one of two namespaces, depending on
                # whether we are looking at an avTransport event, a
                # renderingControl event, or a Queue event (there, it is
                # named QueueID).
                instance = last_change_tree.find(
                    "{urn:schemas-upnp-org:metadata-1-0/AVT/}InstanceID")
                if instance is None:
                    instance = last_change_tree.find(
                        "{urn:schemas-upnp-org:metadata-1-0/RCS/}InstanceID")
                if instance is None:
                    instance = last_change_tree.find(
                        "{urn:schemas-sonos-com:metadata-1-0/Queue/}QueueID")
                # Look at each variable within the LastChange event.
                for last_change_var in instance:
                    tag = last_change_var.tag
                    # Remove any namespaces from the tags.
                    if tag.startswith('{'):
                        tag = tag.split('}', 1)[1]
                    # Un-camel case it.
                    tag = camel_to_underscore(tag)
                    # Now extract the relevant value for the variable.
                    # The UPnP specs suggest that the value of any variable
                    # evented via a LastChange Event will be in the 'val'
                    # attribute, but audio related variables may also have a
                    # 'channel' attribute. In addition, it seems that Sonos
                    # sometimes uses a text value instead: see
                    # http://forums.sonos.com/showthread.php?t=34663
                    value = last_change_var.get('val')
                    if value is None:
                        value = last_change_var.text
                    # If DIDL metadata is returned, convert it to a music
                    # library data structure.
                    if value.startswith('<DIDL-Lite'):
                        # Wrap any parsing exception in a SoCoFault, so the
                        # user can handle it.
                        try:
                            didl = from_didl_string(value)
                            if not didl:
                                continue
                            value = didl[0]
                        except SoCoException as original_exception:
                            log.debug(
                                "Event contains illegal metadata"
                                "for '%s'.\n"
                                "Error message: '%s'\n"
                                "The result will be a SoCoFault.",
                                tag, str(original_exception))
                            event_parse_exception = EventParseException(
                                tag, value, original_exception)
                            value = SoCoFault(event_parse_exception)
                    channel = last_change_var.get('channel')
                    if channel is not None:
                        if result.get(tag) is None:
                            result[tag] = {}
                        result[tag][channel] = value
                    else:
                        result[tag] = value
            else:
                result[camel_to_underscore(variable.tag)] = variable.text
    return result
Parse the body of a UPnP event .
855
11
21,290
def unsubscribe(self):
    """Unsubscribe from the service's events."""
    # Trying to unsubscribe when already unsubscribed, or not yet
    # subscribed, fails silently.
    if self._has_been_unsubscribed or not self.is_subscribed:
        return

    # Cancel any auto-renew thread.
    self._auto_renew_thread_flag.set()

    # Send an unsubscribe request like this:
    #   UNSUBSCRIBE publisher path HTTP/1.1
    #   HOST: publisher host:publisher port
    #   SID: uuid:subscription UUID
    headers = {'SID': self.sid}
    response = None
    try:
        response = requests.request(
            'UNSUBSCRIBE',
            self.service.base_url + self.service.event_subscription_url,
            headers=headers,
            timeout=3)
    except requests.exceptions.RequestException:
        # Best effort: the speaker may be unreachable.
        pass

    self.is_subscribed = False
    self._timestamp = None
    log.info(
        "Unsubscribed from %s, sid: %s",
        self.service.base_url + self.service.event_subscription_url,
        self.sid)

    # Remove queue from event queues and sid-to-service mappings.
    with _subscriptions_lock:
        try:
            del _subscriptions[self.sid]
        except KeyError:
            pass

    self._has_been_unsubscribed = True

    # Ignore "412 Client Error: Precondition Failed for url:" from
    # rebooted speakers.
    if response and response.status_code != 412:
        response.raise_for_status()
Unsubscribe from the service s events .
322
9
21,291
def play_mode(self, playmode):
    """Set the speaker's play mode.

    :raises KeyError: if ``playmode`` is not a recognized play mode.
    """
    playmode = playmode.upper()
    if playmode not in PLAY_MODES:
        raise KeyError("'%s' is not a valid play mode" % playmode)
    self.avTransport.SetPlayMode([
        ('InstanceID', 0),
        ('NewPlayMode', playmode)
    ])
Set the speaker s mode .
85
6
21,292
def repeat(self, repeat):
    """Set the queue's repeat option, preserving the current shuffle state."""
    current_shuffle = self.shuffle
    self.play_mode = PLAY_MODE_BY_MEANING[(current_shuffle, repeat)]
Set the queue s repeat option
35
6
21,293
def join(self, master):
    """Join this speaker to another "master" speaker's group."""
    self.avTransport.SetAVTransportURI([
        ('InstanceID', 0),
        ('CurrentURI', 'x-rincon:{0}'.format(master.uid)),
        ('CurrentURIMetaData', '')
    ])
    # Refresh the cached zone group state.
    self._zgs_cache.clear()
    self._parse_zone_group_state()
Join this speaker to another master speaker .
91
8
21,294
def unjoin(self):
    """Remove this speaker from a group, making it a standalone coordinator."""
    self.avTransport.BecomeCoordinatorOfStandaloneGroup([
        ('InstanceID', 0)
    ])
    # Refresh the cached zone group state.
    self._zgs_cache.clear()
    self._parse_zone_group_state()
Remove this speaker from a group .
57
7
21,295
def set_sleep_timer(self, sleep_time_seconds):
    """Set the sleep timer.

    Args:
        sleep_time_seconds (int or None): Seconds until the speaker
            goes to sleep. ``0`` means sleep now (immediately starts
            the sound tapering); ``None`` cancels the current timer.

    Raises:
        ValueError: If `sleep_time_seconds` is not ``None`` or an
            integer between 0 and 86399 inclusive.
    """
    # Note: A value of None for sleep_time_seconds is valid, and needs
    # to be preserved distinctly separate from 0. 0 means go to sleep
    # now, which will immediately start the sound tapering, and could
    # be a useful feature, while None means cancel the current timer.
    # Single copy of the message previously duplicated (and split by a
    # line-continuation inside the literal) at two raise sites.
    err_msg = ('invalid sleep_time_seconds, must be integer value '
               'between 0 and 86399 inclusive or None')
    try:
        if sleep_time_seconds is None:
            sleep_time = ''
        else:
            # The speaker expects an H:MM:SS duration string.
            sleep_time = format(
                datetime.timedelta(seconds=int(sleep_time_seconds)))
        self.avTransport.ConfigureSleepTimer([
            ('InstanceID', 0),
            ('NewSleepTimerDuration', sleep_time),
        ])
    except ValueError:
        # int() failed: the argument was not integer-like. ValueError
        # is checked first; SoCoUPnPException does not derive from it,
        # so the ordering is behavior-neutral.
        raise ValueError(err_msg)
    except SoCoUPnPException as err:
        # The speaker reports an out-of-range duration as UPnP
        # error 402.
        if 'Error 402 received' in str(err):
            raise ValueError(err_msg)
        raise
Sets the sleep timer .
239
6
21,296
def _restore_coordinator(self):
    """Do the coordinator-only part of the restore.

    Pauses playback first, restores the queue, then reinstates what
    was playing before the snapshot: a position in the queue, a cloud
    queue (cannot be restarted), or a stream/media URI.
    """
    # Start by ensuring that the speaker is paused as we don't want
    # things all rolling back when we are changing them, as this could
    # include things like audio
    transport_info = self.device.get_current_transport_info()
    if transport_info is not None:
        if transport_info['current_transport_state'] == 'PLAYING':
            self.device.pause()
    # Check if the queue should be restored
    self._restore_queue()
    # Reinstate what was playing
    if self.is_playing_queue and self.playlist_position > 0:
        # was playing from playlist
        # NOTE(review): if playlist_position can be None here, the
        # `> 0` comparison above raises TypeError on Python 3; the
        # inner `is not None` check below suggests that case was
        # anticipated — confirm which check is authoritative.
        if self.playlist_position is not None:
            # The position in the playlist returned by
            # get_current_track_info starts at 1, but when
            # playing from playlist, the index starts at 0
            # if position > 0:
            self.playlist_position -= 1
        self.device.play_from_queue(self.playlist_position, False)
        if self.track_position is not None:
            if self.track_position != "":
                self.device.seek(self.track_position)
        # reinstate track, position, play mode, cross fade
        # Need to make sure there is a proper track selected first
        self.device.play_mode = self.play_mode
        self.device.cross_fade = self.cross_fade
    elif self.is_playing_cloud_queue:
        # was playing a cloud queue started by Alexa
        # No way yet to re-start this so prevent it throwing an error!
        pass
    else:
        # was playing a stream (radio station, file, or nothing)
        # reinstate uri and meta data
        if self.media_uri != "":
            self.device.play_uri(
                self.media_uri, self.media_metadata, start=False)
Do the coordinator-only part of the restore.
393
10
21,297
def _restore_volume(self, fade):
    """Reinstate volume.

    Args:
        fade (bool): If True, set the volume to 0 and ramp up to the
            saved volume (non-blocking) instead of setting it directly.
    """
    self.device.mute = self.mute
    # Volume can only be changed when the device's fixed-volume option
    # is off, otherwise a uPnP error is raised. A device with fixed
    # volume always reports volume 100, so the (network) check is only
    # issued when the saved volume is 100.
    if self.volume != 100:
        output_fixed = False
    else:
        output_fixed = self.device.renderingControl.GetOutputFixed(
            [('InstanceID', 0)])['CurrentFixed']
    if output_fixed:
        return
    # Volume is not fixed: tone and level can be restored.
    self.device.bass = self.bass
    self.device.treble = self.treble
    self.device.loudness = self.loudness
    if fade:
        # Start silent, then fade up to the saved volume.
        self.device.volume = 0
        self.device.ramp_to_volume(self.volume)
    else:
        self.device.volume = self.volume
Reinstate volume .
225
5
21,298
def discover_thread(callback, timeout=5, include_invisible=False,
                    interface_addr=None):
    """Return a started thread with a discovery callback.

    Args:
        callback: Callable invoked for discovered devices.
        timeout (int): Discovery timeout, in seconds.
        include_invisible (bool): Whether invisible devices are
            included.
        interface_addr (str): Network interface address to use, or
            None.

    Returns:
        StoppableThread: The already-started discovery thread.
    """
    discovery_args = (callback, timeout, include_invisible,
                      interface_addr)
    worker = StoppableThread(target=_discover_thread,
                             args=discovery_args)
    worker.start()
    return worker
Return a started thread with a discovery callback .
63
9
21,299
def by_name(name):
    """Return a device by name.

    Discovers devices across all households and returns the first one
    whose player name matches `name`, or ``None`` if there is no match
    (or discovery found nothing).
    """
    found = discover(all_households=True)
    if not found:
        return None
    matches = (dev for dev in found if dev.player_name == name)
    return next(matches, None)
Return a device by name .
43
6