idx int64 0 63k | question stringlengths 53 5.28k | target stringlengths 5 805 |
|---|---|---|
21,300 | def serialize ( self ) : commands = [ ] for cmd in self . commands : commands . append ( cmd . serialize ( ) ) out = { 'commands' : commands , 'deviceURL' : self . __device_url } return out | Serialize action . |
21,301 | def factory ( data ) : if data [ 'name' ] is "DeviceStateChangedEvent" : return DeviceStateChangedEvent ( data ) elif data [ 'name' ] is "ExecutionStateChangedEvent" : return ExecutionStateChangedEvent ( data ) elif data [ 'name' ] is "CommandExecutionStateChangedEvent" : return CommandExecutionStateChangedEvent ( data ) else : raise ValueError ( "Unknown event '" + data [ 'name' ] + "' occurred." ) | Tahoma Event factory . |
21,302 | def parse ( date , dayfirst = True ) : if not date : return None if isinstance ( date , FlexiDate ) : return date if isinstance ( date , int ) : return FlexiDate ( year = date ) elif isinstance ( date , datetime . datetime ) : parser = PythonDateTimeParser ( ) return parser . parse ( date ) elif isinstance ( date , datetime . date ) : parser = PythonDateParser ( ) return parser . parse ( date ) else : parser = DateutilDateParser ( ) out = parser . parse ( date , ** { 'dayfirst' : dayfirst } ) if out is not None : return out val = 'UNPARSED: %s' % date val = val . encode ( 'ascii' , 'ignore' ) return FlexiDate ( qualifier = val ) | Parse a date into a FlexiDate . |
21,303 | def as_datetime ( self ) : year = int ( self . year ) month = int ( self . month ) if self . month else 1 day = int ( self . day ) if self . day else 1 hour = int ( self . hour ) if self . hour else 0 minute = int ( self . minute ) if self . minute else 0 second = int ( self . second ) if self . second else 0 microsecond = int ( self . microsecond ) if self . microsecond else 0 return datetime . datetime ( year , month , day , hour , minute , second , microsecond ) | Get as python datetime . datetime . |
21,304 | def md5sum ( string ) : h = hashlib . new ( 'md5' ) h . update ( string . encode ( 'utf-8' ) ) return h . hexdigest ( ) | Generate the md5 checksum for a string |
21,305 | def file_md5 ( filename ) : with zopen ( filename , 'r' ) as f : file_string = f . read ( ) try : file_string = file_string . decode ( ) except AttributeError : pass return ( md5sum ( file_string ) ) | Generate the md5 checksum for a file |
21,306 | def validate_checksum ( filename , md5sum ) : filename = match_filename ( filename ) md5_hash = file_md5 ( filename = filename ) if md5_hash != md5sum : raise ValueError ( 'md5 checksums are inconsistent: {}' . format ( filename ) ) | Compares the md5 checksum of a file with an expected value . If the calculated and expected checksum values are not equal ValueError is raised . If the filename foo is not found will try to read a gzipped file named foo . gz . In this case the checksum is calculated for the unzipped file . |
21,307 | def to_matrix ( xx , yy , zz , xy , yz , xz ) : matrix = np . array ( [ [ xx , xy , xz ] , [ xy , yy , yz ] , [ xz , yz , zz ] ] ) return matrix | Convert a list of matrix components to a symmetric 3x3 matrix . Inputs should be in the order xx yy zz xy yz xz . |
21,308 | def absorption_coefficient ( dielectric ) : energies_in_eV = np . array ( dielectric [ 0 ] ) real_dielectric = parse_dielectric_data ( dielectric [ 1 ] ) imag_dielectric = parse_dielectric_data ( dielectric [ 2 ] ) epsilon_1 = np . mean ( real_dielectric , axis = 1 ) epsilon_2 = np . mean ( imag_dielectric , axis = 1 ) return ( 2.0 * np . sqrt ( 2.0 ) * pi * eV_to_recip_cm * energies_in_eV * np . sqrt ( - epsilon_1 + np . sqrt ( epsilon_1 ** 2 + epsilon_2 ** 2 ) ) ) | Calculate the optical absorption coefficient from an input set of pymatgen vasprun dielectric constant data . |
21,309 | def dr ( self , atom1 , atom2 ) : return self . cell . dr ( atom1 . r , atom2 . r ) | Calculate the distance between two atoms . |
21,310 | def area_of_a_triangle_in_cartesian_space ( a , b , c ) : return 0.5 * np . linalg . norm ( np . cross ( b - a , c - a ) ) | Returns the area of a triangle defined by three points in Cartesian space . |
21,311 | def points_are_in_a_straight_line ( points , tolerance = 1e-7 ) : a = points [ 0 ] b = points [ 1 ] for c in points [ 2 : ] : if area_of_a_triangle_in_cartesian_space ( a , b , c ) > tolerance : return False return True | Check whether a set of points fall on a straight line . Calculates the areas of triangles formed by triplets of the points . Returns False is any of these areas are larger than the tolerance . |
21,312 | def two_point_effective_mass ( cartesian_k_points , eigenvalues ) : assert ( cartesian_k_points . shape [ 0 ] == 2 ) assert ( eigenvalues . size == 2 ) dk = cartesian_k_points [ 1 ] - cartesian_k_points [ 0 ] mod_dk = np . sqrt ( np . dot ( dk , dk ) ) delta_e = ( eigenvalues [ 1 ] - eigenvalues [ 0 ] ) * ev_to_hartree * 2.0 effective_mass = mod_dk * mod_dk / delta_e return effective_mass | Calculate the effective mass given eigenvalues at two k - points . Reimplemented from Aron Walsh s original effective mass Fortran code . |
21,313 | def least_squares_effective_mass ( cartesian_k_points , eigenvalues ) : if not points_are_in_a_straight_line ( cartesian_k_points ) : raise ValueError ( 'k-points are not collinear' ) dk = cartesian_k_points - cartesian_k_points [ 0 ] mod_dk = np . linalg . norm ( dk , axis = 1 ) delta_e = eigenvalues - eigenvalues [ 0 ] effective_mass = 1.0 / ( np . polyfit ( mod_dk , eigenvalues , 2 ) [ 0 ] * ev_to_hartree * 2.0 ) return effective_mass | Calculate the effective mass using a least squares quadratic fit . |
21,314 | def read_from_file ( self , filename , negative_occupancies = 'warn' ) : valid_negative_occupancies = [ 'warn' , 'raise' , 'ignore' , 'zero' ] if negative_occupancies not in valid_negative_occupancies : raise ValueError ( '"{}" is not a valid value for the keyword `negative_occupancies`.' . format ( negative_occupancies ) ) with open ( filename , 'r' ) as file_in : file_in . readline ( ) self . number_of_k_points , self . number_of_bands , self . number_of_ions = [ int ( f ) for f in get_numbers_from_string ( file_in . readline ( ) ) ] self . read_in = file_in . read ( ) self . parse_k_points ( ) self . parse_bands ( ) self . parse_occupancy ( ) if np . any ( self . occupancy [ : , 1 ] < 0 ) : if negative_occupancies == 'warn' : warnings . warn ( "One or more occupancies in your PROCAR file are negative." ) elif negative_occupancies == 'raise' : raise ValueError ( "One or more occupancies in your PROCAR file are negative." ) elif negative_occupancies == 'zero' : self . occupancy [ self . occupancy < 0 ] = 0.0 self . parse_projections ( ) self . sanity_check ( ) self . read_in = None if self . calculation [ 'spin_polarised' ] : self . data = self . projection_data . reshape ( self . spin_channels , self . number_of_k_points , self . number_of_bands , self . number_of_ions + 1 , self . number_of_projections ) [ : , : , : , : , 1 : ] . swapaxes ( 0 , 1 ) . swapaxes ( 1 , 2 ) else : self . data = self . projection_data . reshape ( self . number_of_k_points , self . number_of_bands , self . spin_channels , self . number_of_ions + 1 , self . number_of_projections ) [ : , : , : , : , 1 : ] | Reads the projected wavefunction character of each band from a VASP PROCAR file . |
21,315 | def load_vasp_summary ( filename ) : with open ( filename , 'r' ) as stream : docs = yaml . load_all ( stream , Loader = yaml . SafeLoader ) data = { d [ 'title' ] : d for d in docs } return data | Reads a vasp_summary . yaml format YAML file and returns a dictionary of dictionaries . Each YAML document in the file corresponds to one sub - dictionary with the corresponding top - level key given by the title value . |
21,316 | def potcar_spec ( filename ) : p_spec = { } with open ( filename , 'r' ) as f : potcars = re . split ( '(End of Dataset\n)' , f . read ( ) ) potcar_md5sums = [ md5sum ( '' . join ( pair ) ) for pair in zip ( potcars [ : : 2 ] , potcars [ 1 : - 1 : 2 ] ) ] for this_md5sum in potcar_md5sums : for ps in potcar_sets : for p , p_md5sum in potcar_md5sum_data [ ps ] . items ( ) : if this_md5sum == p_md5sum : p_spec [ p ] = ps if len ( p_spec ) != len ( potcar_md5sums ) : raise ValueError ( 'One or more POTCARs did not have matching md5 hashes' ) return p_spec | Returns a dictionary specifying the pseudopotentials contained in a POTCAR file . |
21,317 | def find_vasp_calculations ( ) : dir_list = [ './' + re . sub ( r'vasprun\.xml' , '' , path ) for path in glob . iglob ( '**/vasprun.xml' , recursive = True ) ] gz_dir_list = [ './' + re . sub ( r'vasprun\.xml\.gz' , '' , path ) for path in glob . iglob ( '**/vasprun.xml.gz' , recursive = True ) ] return dir_list + gz_dir_list | Returns a list of all subdirectories that contain either a vasprun . xml file or a compressed vasprun . xml . gz file . |
21,318 | def parse_vasprun ( self ) : self . vasprun_filename = match_filename ( 'vasprun.xml' ) if not self . vasprun_filename : raise FileNotFoundError ( 'Could not find vasprun.xml or vasprun.xml.gz file' ) try : self . vasprun = Vasprun ( self . vasprun_filename , parse_potcar_file = False ) except ET . ParseError : self . vasprun = None except : raise | Read in vasprun . xml as a pymatgen Vasprun object . |
21,319 | def read_projected_dos ( self ) : pdos_list = [ ] for i in range ( self . number_of_atoms ) : df = self . read_atomic_dos_as_df ( i + 1 ) pdos_list . append ( df ) self . pdos = np . vstack ( [ np . array ( df ) for df in pdos_list ] ) . reshape ( self . number_of_atoms , self . number_of_data_points , self . number_of_channels , self . ispin ) | Read the projected density of states data into |
21,320 | def pdos_select ( self , atoms = None , spin = None , l = None , m = None ) : valid_m_values = { 's' : [ ] , 'p' : [ 'x' , 'y' , 'z' ] , 'd' : [ 'xy' , 'yz' , 'z2-r2' , 'xz' , 'x2-y2' ] , 'f' : [ 'y(3x2-y2)' , 'xyz' , 'yz2' , 'z3' , 'xz2' , 'z(x2-y2)' , 'x(x2-3y2)' ] } if not atoms : atom_idx = list ( range ( self . number_of_atoms ) ) else : atom_idx = atoms to_return = self . pdos [ atom_idx , : , : , : ] if not spin : spin_idx = list ( range ( self . ispin ) ) elif spin is 'up' : spin_idx = [ 0 ] elif spin is 'down' : spin_idx = [ 1 ] elif spin is 'both' : spin_idx = [ 0 , 1 ] else : raise ValueError ( "valid spin values are 'up', 'down', and 'both'. The default is 'both'" ) to_return = to_return [ : , : , : , spin_idx ] if not l : channel_idx = list ( range ( self . number_of_channels ) ) elif l == 's' : channel_idx = [ 0 ] elif l == 'p' : if not m : channel_idx = [ 1 , 2 , 3 ] else : channel_idx = [ i + 1 for i , v in enumerate ( valid_m_values [ 'p' ] ) if v in m ] elif l == 'd' : if not m : channel_idx = [ 4 , 5 , 6 , 7 , 8 ] else : channel_idx = [ i + 4 for i , v in enumerate ( valid_m_values [ 'd' ] ) if v in m ] elif l == 'f' : if not m : channel_idx = [ 9 , 10 , 11 , 12 , 13 , 14 , 15 ] else : channel_idx = [ i + 9 for i , v in enumerate ( valid_m_values [ 'f' ] ) if v in m ] else : raise ValueError return to_return [ : , : , channel_idx , : ] | Returns a subset of the projected density of states array . |
21,321 | def scale_stoichiometry ( self , scaling ) : return { k : v * scaling for k , v in self . stoichiometry . items ( ) } | Scale the Calculation stoichiometry Returns the stoichiometry scaled by the argument scaling . |
21,322 | def angle ( x , y ) : dot = np . dot ( x , y ) x_mod = np . linalg . norm ( x ) y_mod = np . linalg . norm ( y ) cos_angle = dot / ( x_mod * y_mod ) return np . degrees ( np . arccos ( cos_angle ) ) | Calculate the angle between two vectors in degrees . |
21,323 | def minimum_image ( self , r1 , r2 ) : delta_r = r2 - r1 delta_r = np . array ( [ x - math . copysign ( 1.0 , x ) if abs ( x ) > 0.5 else x for x in delta_r ] ) return ( delta_r ) | Find the minimum image vector from point r1 to point r2 . |
21,324 | def minimum_image_dr ( self , r1 , r2 , cutoff = None ) : delta_r_vector = self . minimum_image ( r1 , r2 ) return ( self . dr ( np . zeros ( 3 ) , delta_r_vector , cutoff ) ) | Calculate the shortest distance between two points in the cell accounting for periodic boundary conditions . |
21,325 | def lengths ( self ) : return ( np . array ( [ math . sqrt ( sum ( row ** 2 ) ) for row in self . matrix ] ) ) | The cell lengths . |
21,326 | def inside_cell ( self , r ) : centre = np . array ( [ 0.5 , 0.5 , 0.5 ] ) new_r = self . nearest_image ( centre , r ) return new_r | Given a fractional - coordinate if this lies outside the cell return the equivalent point inside the cell . |
21,327 | def volume ( self ) : return np . dot ( self . matrix [ 0 ] , np . cross ( self . matrix [ 1 ] , self . matrix [ 2 ] ) ) | The cell volume . |
21,328 | def from_file ( cls , filename ) : with open ( filename , 'r' ) as stream : data = yaml . load ( stream , Loader = yaml . SafeLoader ) notes = data . get ( 'notes' ) v_type = data . get ( 'type' ) track = data . get ( 'track' ) xargs = { } if track : if type ( track ) is str : track = [ track ] xargs [ 'track' ] = track vaspmeta = VASPMeta ( data [ 'title' ] , data [ 'description' ] , data [ 'status' ] , notes = notes , type = v_type , ** xargs ) return vaspmeta | Create a VASPMeta object by reading a vaspmeta . yaml file |
21,329 | def vasp_version_from_outcar ( filename = 'OUTCAR' ) : with open ( filename ) as f : line = f . readline ( ) . strip ( ) return line | Returns the first line from a VASP OUTCAR file to get the VASP source version string . |
21,330 | def potcar_eatom_list_from_outcar ( filename = 'OUTCAR' ) : with open ( filename ) as f : outcar = f . read ( ) eatom_re = re . compile ( "energy of atom\s+\d+\s+EATOM=\s*([-\d\.]+)" ) eatom = [ float ( e ) for e in eatom_re . findall ( outcar ) ] return eatom | Returns a list of EATOM values for the pseudopotentials used . |
21,331 | def build_description ( node = None ) : if node is None : from logging_tree . nodes import tree node = tree ( ) return '\n' . join ( [ line . rstrip ( ) for line in describe ( node ) ] ) + '\n' | Return a multi - line string describing a logging_tree . nodes . Node . |
21,332 | def _describe ( node , parent ) : name , logger , children = node is_placeholder = isinstance ( logger , logging . PlaceHolder ) if is_placeholder : yield '<--[%s]' % name else : parent_is_correct = ( parent is None ) or ( logger . parent is parent ) if not logger . propagate : arrow = ' ' elif parent_is_correct : arrow = '<--' else : arrow = ' !-' yield '%s"%s"' % ( arrow , name ) if not parent_is_correct : if logger . parent is None : yield ( ' Broken .parent is None, so messages stop here' ) else : yield ( ' Broken .parent redirects messages to %r instead' % ( logger . parent . name , ) ) if logger . level == logging . NOTSET : yield ' Level NOTSET so inherits level ' + logging . getLevelName ( logger . getEffectiveLevel ( ) ) else : yield ' Level ' + logging . getLevelName ( logger . level ) if not logger . propagate : yield ' Propagate OFF' if logger . disabled : yield ' Disabled' for f in getattr ( logger , 'filters' , ( ) ) : yield ' Filter %s' % describe_filter ( f ) for h in getattr ( logger , 'handlers' , ( ) ) : g = describe_handler ( h ) yield ' Handler %s' % next ( g ) for line in g : yield ' ' + line if children : if not is_placeholder : parent = logger last_child = children [ - 1 ] for child in children : g = _describe ( child , parent ) yield ' |' yield ' o' + next ( g ) if child is last_child : prefix = ' ' else : prefix = ' |' for line in g : yield prefix + line | Generate lines describing the given node tuple . |
21,333 | def describe_filter ( f ) : if f . __class__ is logging . Filter : return 'name=%r' % f . name return repr ( f ) | Return text describing the logging filter f . |
21,334 | def describe_handler ( h ) : t = h . __class__ format = handler_formats . get ( t ) if format is not None : yield format % h . __dict__ else : yield repr ( h ) level = getattr ( h , 'level' , logging . NOTSET ) if level != logging . NOTSET : yield ' Level ' + logging . getLevelName ( level ) for f in getattr ( h , 'filters' , ( ) ) : yield ' Filter %s' % describe_filter ( f ) formatter = getattr ( h , 'formatter' , None ) if formatter is not None : if type ( formatter ) is logging . Formatter : yield ' Formatter fmt=%r datefmt=%r' % ( getattr ( formatter , '_fmt' , None ) , getattr ( formatter , 'datefmt' , None ) ) else : yield ' Formatter %r' % ( formatter , ) if t is logging . handlers . MemoryHandler and h . target is not None : yield ' Flushes output to:' g = describe_handler ( h . target ) yield ' Handler ' + next ( g ) for line in g : yield ' ' + line | Yield one or more lines describing the logging handler h . |
21,335 | def tree ( ) : root = ( '' , logging . root , [ ] ) nodes = { } items = list ( logging . root . manager . loggerDict . items ( ) ) items . sort ( ) for name , logger in items : nodes [ name ] = node = ( name , logger , [ ] ) i = name . rfind ( '.' , 0 , len ( name ) - 1 ) if i == - 1 : parent = root else : parent = nodes [ name [ : i ] ] parent [ 2 ] . append ( node ) return root | Return a tree of tuples representing the logger layout . |
21,336 | def patched_str ( self ) : def red ( words ) : return u ( "\033[31m\033[49m%s\033[0m" ) % words def white ( words ) : return u ( "\033[37m\033[49m%s\033[0m" ) % words def blue ( words ) : return u ( "\033[34m\033[49m%s\033[0m" ) % words def teal ( words ) : return u ( "\033[36m\033[49m%s\033[0m" ) % words def get_uri ( code ) : return "https://www.signalwire.com/docs/errors/{0}" . format ( code ) if hasattr ( sys . stderr , 'isatty' ) and sys . stderr . isatty ( ) : msg = ( "\n{red_error} {request_was}\n\n{http_line}" "\n\n{sw_returned}\n\n{message}\n" . format ( red_error = red ( "HTTP Error" ) , request_was = white ( "Your request was:" ) , http_line = teal ( "%s %s" % ( self . method , self . uri ) ) , sw_returned = white ( "Signalwire returned the following information:" ) , message = blue ( str ( self . msg ) ) ) ) if self . code : msg = "" . join ( [ msg , "\n{more_info}\n\n{uri}\n\n" . format ( more_info = white ( "More information may be available here:" ) , uri = blue ( get_uri ( self . code ) ) ) , ] ) return msg else : return "HTTP {0} error: {1}" . format ( self . status , self . msg ) | Try to pretty - print the exception if this is going on screen . |
21,337 | def h ( self ) : r if np . size ( self . _h ) > 1 : assert np . size ( self . _h ) == self . n_modelparams return self . _h else : return self . _h * np . ones ( self . n_modelparams ) | r Returns the step size to be used in numerical differentiation with respect to the model parameters . The step size is given as a vector with length n_modelparams so that each model parameter can be weighted independently . |
21,338 | def clear_cache ( self ) : self . underlying_model . clear_cache ( ) try : logger . info ( 'DirectView results has {} items. Clearing.' . format ( len ( self . _dv . results ) ) ) self . _dv . purge_results ( 'all' ) if self . _purge_client : self . _dv . client . purge_everything ( ) except : pass | Clears any cache associated with the serial model and the engines seen by the direct view . |
21,339 | def _maybe_resample ( self ) : ess = self . n_ess if ess <= 10 : warnings . warn ( "Extremely small n_ess encountered ({}). " "Resampling is likely to fail. Consider adding particles, or " "resampling more often." . format ( ess ) , ApproximationWarning ) if ess < self . n_particles * self . resample_thresh : self . resample ( ) pass | Checks the resample threshold and conditionally resamples . |
21,340 | def reset ( self , n_particles = None , only_params = None , reset_weights = True ) : if n_particles is not None and only_params is not None : raise ValueError ( "Cannot set both n_particles and only_params." ) if n_particles is None : n_particles = self . n_particles if reset_weights : self . particle_weights = np . ones ( ( n_particles , ) ) / n_particles if only_params is None : sl = np . s_ [ : , : ] self . particle_locations = np . zeros ( ( n_particles , self . model . n_modelparams ) ) else : sl = np . s_ [ : , only_params ] self . particle_locations [ sl ] = self . prior . sample ( n = n_particles ) [ sl ] if self . _canonicalize : self . particle_locations [ sl ] = self . model . canonicalize ( self . particle_locations [ sl ] ) | Causes all particle locations and weights to be drawn fresh from the initial prior . |
21,341 | def batch_update ( self , outcomes , expparams , resample_interval = 5 ) : r n_exps = outcomes . shape [ 0 ] if expparams . shape [ 0 ] != n_exps : raise ValueError ( "The number of outcomes and experiments must match." ) if len ( expparams . shape ) == 1 : expparams = expparams [ : , None ] for idx_exp , ( outcome , experiment ) in enumerate ( zip ( iter ( outcomes ) , iter ( expparams ) ) ) : self . update ( outcome , experiment , check_for_resample = False ) if ( idx_exp + 1 ) % resample_interval == 0 : self . _maybe_resample ( ) | r Updates based on a batch of outcomes and experiments rather than just one . |
21,342 | def resample ( self ) : if self . just_resampled : warnings . warn ( "Resampling without additional data; this may not perform as " "desired." , ResamplerWarning ) self . _just_resampled = True self . _resample_count += 1 if self . _resampling_divergences is not None : old_locs = self . particle_locations . copy ( ) old_weights = self . particle_weights . copy ( ) if self . _debug_resampling : old_mean = self . est_mean ( ) old_cov = self . est_covariance_mtx ( ) new_distribution = self . resampler ( self . model , self ) self . particle_weights = new_distribution . particle_weights self . particle_locations = new_distribution . particle_locations if self . _canonicalize : self . particle_locations [ : , : ] = self . model . canonicalize ( self . particle_locations ) try : self . model . clear_cache ( ) except Exception as e : warnings . warn ( "Exception raised when clearing model cache: {}. Ignoring." . format ( e ) ) if self . _resampling_divergences is not None : self . _resampling_divergences . append ( self . _kl_divergence ( old_locs , old_weights ) ) if self . _debug_resampling : new_mean = self . est_mean ( ) new_cov = self . est_covariance_mtx ( ) logger . debug ( "Resampling changed mean by {}. Norm change in cov: {}." . format ( old_mean - new_mean , np . linalg . norm ( new_cov - old_cov ) ) ) | Forces the updater to perform a resampling step immediately . |
21,343 | def expected_information_gain ( self , expparams ) : r n_eps = expparams . size if n_eps > 1 and not self . model . is_n_outcomes_constant : risk = np . empty ( n_eps ) for idx in range ( n_eps ) : risk [ idx ] = self . expected_information_gain ( expparams [ idx , np . newaxis ] ) return risk os = self . model . domain ( expparams [ 0 , np . newaxis ] ) [ 0 ] . values w_hyp , L , N = self . hypothetical_update ( os [ : - 1 ] , expparams , return_normalization = True , return_likelihood = True ) w_hyp_last_outcome = ( 1 - L . sum ( axis = 0 ) ) * self . particle_weights [ np . newaxis , : ] N = np . concatenate ( [ N [ : , : , 0 ] , np . sum ( w_hyp_last_outcome [ np . newaxis , : , : ] , axis = 2 ) ] , axis = 0 ) w_hyp_last_outcome = w_hyp_last_outcome / N [ - 1 , : , np . newaxis ] w_hyp = np . concatenate ( [ w_hyp , w_hyp_last_outcome [ np . newaxis , : , : ] ] , axis = 0 ) KLD = np . sum ( w_hyp * np . log ( w_hyp / self . particle_weights ) , axis = 2 ) return np . sum ( N * KLD , axis = 0 ) | r Calculates the expected information gain for each hypothetical experiment . |
21,344 | def posterior_marginal ( self , idx_param = 0 , res = 100 , smoothing = 0 , range_min = None , range_max = None ) : s = np . argsort ( self . particle_locations [ : , idx_param ] ) locs = self . particle_locations [ s , idx_param ] r_min = np . min ( locs ) if range_min is None else range_min r_max = np . max ( locs ) if range_max is None else range_max ps = np . linspace ( r_min , r_max , res ) interp = scipy . interpolate . interp1d ( np . append ( locs , r_max + np . abs ( r_max - r_min ) ) , np . append ( np . cumsum ( self . particle_weights [ s ] ) , 1 ) , bounds_error = False , fill_value = 0 , assume_sorted = True ) pr = np . gradient ( interp ( ps ) , ps [ 1 ] - ps [ 0 ] ) if smoothing > 0 : gaussian_filter1d ( pr , res * smoothing / ( np . abs ( r_max - r_min ) ) , output = pr ) del interp return ps , pr | Returns an estimate of the marginal distribution of a given model parameter based on taking the derivative of the interpolated cdf . |
21,345 | def plot_posterior_marginal ( self , idx_param = 0 , res = 100 , smoothing = 0 , range_min = None , range_max = None , label_xaxis = True , other_plot_args = { } , true_model = None ) : res = plt . plot ( * self . posterior_marginal ( idx_param , res , smoothing , range_min , range_max ) , ** other_plot_args ) if label_xaxis : plt . xlabel ( '${}$' . format ( self . model . modelparam_names [ idx_param ] ) ) if true_model is not None : true_model = true_model [ 0 , idx_param ] if true_model . ndim == 2 else true_model [ idx_param ] old_ylim = plt . ylim ( ) plt . vlines ( true_model , old_ylim [ 0 ] - 0.1 , old_ylim [ 1 ] + 0.1 , color = 'k' , linestyles = '--' ) plt . ylim ( old_ylim ) return res | Plots a marginal of the requested parameter . |
21,346 | def plot_covariance ( self , corr = False , param_slice = None , tick_labels = None , tick_params = None ) : if mpls is None : raise ImportError ( "Hinton diagrams require mpltools." ) if param_slice is None : param_slice = np . s_ [ : ] tick_labels = ( list ( range ( len ( self . model . modelparam_names [ param_slice ] ) ) ) , tick_labels if tick_labels is not None else list ( map ( u"${}$" . format , self . model . modelparam_names [ param_slice ] ) ) ) cov = self . est_covariance_mtx ( corr = corr ) [ param_slice , param_slice ] retval = mpls . hinton ( cov ) plt . xticks ( * tick_labels , ** ( tick_params if tick_params is not None else { } ) ) plt . yticks ( * tick_labels , ** ( tick_params if tick_params is not None else { } ) ) plt . gca ( ) . xaxis . tick_top ( ) return retval | Plots the covariance matrix of the posterior as a Hinton diagram . |
21,347 | def posterior_mesh ( self , idx_param1 = 0 , idx_param2 = 1 , res1 = 100 , res2 = 100 , smoothing = 0.01 ) : locs = self . particle_locations [ : , [ idx_param1 , idx_param2 ] ] p1s , p2s = np . meshgrid ( np . linspace ( np . min ( locs [ : , 0 ] ) , np . max ( locs [ : , 0 ] ) , res1 ) , np . linspace ( np . min ( locs [ : , 1 ] ) , np . max ( locs [ : , 1 ] ) , res2 ) ) plot_locs = np . array ( [ p1s , p2s ] ) . T . reshape ( ( np . prod ( p1s . shape ) , 2 ) ) pr = np . sum ( np . prod ( scipy . stats . norm . pdf ( plot_locs [ : , np . newaxis , : ] , scale = smoothing , loc = locs ) , axis = - 1 ) * self . particle_weights , axis = 1 ) . reshape ( p1s . shape ) return p1s , p2s , pr | Returns a mesh useful for plotting of kernel density estimation of a 2D projection of the current posterior distribution . |
21,348 | def plot_posterior_contour ( self , idx_param1 = 0 , idx_param2 = 1 , res1 = 100 , res2 = 100 , smoothing = 0.01 ) : return plt . contour ( * self . posterior_mesh ( idx_param1 , idx_param2 , res1 , res2 , smoothing ) ) | Plots a contour of the kernel density estimation of a 2D projection of the current posterior distribution . |
21,349 | def plot_rebit_prior ( prior , rebit_axes = REBIT_AXES , n_samples = 2000 , true_state = None , true_size = 250 , force_mean = None , legend = True , mean_color_index = 2 ) : pallette = plt . rcParams [ 'axes.color_cycle' ] plot_rebit_modelparams ( prior . sample ( n_samples ) , c = pallette [ 0 ] , label = 'Prior' , rebit_axes = rebit_axes ) if true_state is not None : plot_rebit_modelparams ( true_state , c = pallette [ 1 ] , label = 'True' , marker = '*' , s = true_size , rebit_axes = rebit_axes ) if hasattr ( prior , '_mean' ) or force_mean is not None : mean = force_mean if force_mean is not None else prior . _mean plot_rebit_modelparams ( prior . _basis . state_to_modelparams ( mean ) [ None , : ] , edgecolors = pallette [ mean_color_index ] , s = 250 , facecolors = 'none' , linewidth = 3 , label = 'Mean' , rebit_axes = rebit_axes ) plot_decorate_rebits ( prior . basis , rebit_axes = rebit_axes ) if legend : plt . legend ( loc = 'lower left' , ncol = 3 , scatterpoints = 1 ) | Plots rebit states drawn from a given prior . |
21,350 | def plot_rebit_posterior ( updater , prior = None , true_state = None , n_std = 3 , rebit_axes = REBIT_AXES , true_size = 250 , legend = True , level = 0.95 , region_est_method = 'cov' ) : pallette = plt . rcParams [ 'axes.color_cycle' ] plot_rebit_modelparams ( updater . particle_locations , c = pallette [ 0 ] , label = 'Posterior' , s = 12 * np . sqrt ( updater . particle_weights * len ( updater . particle_weights ) ) , rebit_axes = rebit_axes , zorder = - 10 ) plot_rebit_modelparams ( true_state , c = pallette [ 1 ] , label = 'True' , marker = '*' , s = true_size , rebit_axes = rebit_axes ) if prior is not None : plot_rebit_modelparams ( prior . _basis . state_to_modelparams ( prior . _mean ) [ None , : ] , edgecolors = pallette [ 3 ] , s = 250 , facecolors = 'none' , linewidth = 3 , label = 'Prior Mean' , rebit_axes = rebit_axes ) plot_rebit_modelparams ( updater . est_mean ( ) [ None , : ] , edgecolors = pallette [ 2 ] , s = 250 , facecolors = 'none' , linewidth = 3 , label = 'Posterior Mean' , rebit_axes = rebit_axes ) if region_est_method == 'cov' : cov = 2 * updater . est_covariance_mtx ( ) cov = cov [ rebit_axes , : ] [ : , rebit_axes ] plot_cov_ellipse ( cov , updater . est_mean ( ) [ rebit_axes ] * np . sqrt ( 2 ) , nstd = n_std , edgecolor = 'k' , fill = True , lw = 2 , facecolor = pallette [ 0 ] , alpha = 0.4 , zorder = - 9 , label = 'Posterior Cov Ellipse ($Z = {}$)' . format ( n_std ) ) elif region_est_method == 'hull' : faces , vertices = updater . region_est_hull ( level , modelparam_slice = rebit_axes ) polygon = Polygon ( vertices * np . sqrt ( 2 ) , facecolor = pallette [ 0 ] , alpha = 0.4 , zorder = - 9 , label = r'Credible Region ($\alpha = {}$)' . format ( level ) , edgecolor = 'k' , lw = 2 , fill = True ) plt . gca ( ) . add_patch ( polygon ) plot_decorate_rebits ( updater . model . base_model . _basis , rebit_axes = rebit_axes ) if legend : plt . 
legend ( loc = 'lower left' , ncol = 4 , scatterpoints = 1 ) | Plots posterior distributions over rebits including covariance ellipsoids |
21,351 | def data_to_params ( data , expparams_dtype , col_outcomes = ( 0 , 'counts' ) , cols_expparams = None ) : BY_IDX , BY_NAME = range ( 2 ) is_exp_scalar = np . issctype ( expparams_dtype ) is_data_scalar = np . issctype ( data . dtype ) and not data . dtype . fields s_ = ( ( lambda idx : np . s_ [ ... , idx [ BY_IDX ] ] ) if is_data_scalar else ( lambda idx : np . s_ [ idx [ BY_NAME ] ] ) ) outcomes = data [ s_ ( col_outcomes ) ] . astype ( int ) expparams = np . empty ( outcomes . shape , dtype = expparams_dtype ) if is_exp_scalar : expparams [ : ] = data [ s_ ( cols_expparams ) ] else : for expparams_key , column in cols_expparams . items ( ) : expparams [ expparams_key ] = data [ s_ ( column ) ] return outcomes , expparams | Given data as a NumPy array separates out each column either as the outcomes or as a field of an expparams array . Columns may be specified either as indices into a two - axis scalar array or as field names for a one - axis record array . |
21,352 | def canonicalize ( self , modelparams ) : modelparams = np . apply_along_axis ( self . trunc_neg_eigs , 1 , modelparams ) if not self . _allow_subnormalied : modelparams = self . renormalize ( modelparams ) return modelparams | Truncates negative eigenvalues and from each state represented by a tensor of model parameter vectors and renormalizes as appropriate . |
21,353 | def trunc_neg_eigs ( self , particle ) : arr = np . tensordot ( particle , self . _basis . data . conj ( ) , 1 ) w , v = np . linalg . eig ( arr ) if np . all ( w >= 0 ) : return particle else : w [ w < 0 ] = 0 new_arr = np . dot ( v * w , v . conj ( ) . T ) new_particle = np . real ( np . dot ( self . _basis . flat ( ) , new_arr . flatten ( ) ) ) assert new_particle [ 0 ] > 0 return new_particle | Given a state represented as a model parameter vector returns a model parameter vector representing the same state with any negative eigenvalues set to zero . |
def renormalize(self, modelparams):
    """Rescale each model-parameter vector so the represented state has
    trace 1."""
    # The leading parameter is proportional to the trace.
    norms = np.sqrt(self._dim) * modelparams[:, 0]
    assert not np.sum(norms == 0)
    return modelparams / norms[:, None]
def values(self):
    """Concatenate the Cartesian product of the factor domains' values
    into one structured array; finite domains are covered exactly once."""
    factor_values = [domain.values for domain in self._domains]
    combined = [
        join_struct_arrays([np.array(v) for v in combo])
        for combo in product(*factor_values)
    ]
    return np.concatenate(combined)
def min(self):
    """Return the domain's minimum value, as an int when finite."""
    if np.isinf(self._min):
        return self._min
    return int(self._min)
def max(self):
    """Return the domain's maximum value, as an int when finite."""
    if np.isinf(self._max):
        return self._max
    return int(self._max)
def is_finite(self):
    """True when both endpoints of the domain are finite."""
    return not (np.isinf(self.min) or np.isinf(self.max))
def n_members(self):
    """Number of outcome tuples in the domain (None semantics handled
    by the caller for infinite domains)."""
    # Stars-and-bars count of n_meas draws over n_elements bins.
    k = self.n_elements - 1
    return int(binom(self.n_meas + k, k))
def to_regular_array(self, A):
    """View a structured array as a plain int array, adding a trailing
    axis that indexes the tuple fields."""
    n_fields = len(A.dtype.names)
    return A.view((int, n_fields)).reshape(A.shape + (-1,))
def from_regular_array(self, A):
    """Inverse of to_regular_array: collapse the trailing axis (assumed
    of length n_elements) into the structured dtype self.dtype."""
    lead_shape = A.shape[:-1]
    flat = A.reshape((np.prod(lead_shape), -1))
    return flat.view(dtype=self.dtype).squeeze(-1).reshape(lead_shape)
def start(self, max):
    """Display the progress-bar widget configured for a maximum value.

    :param max: value at which the progress bar reads 100%.
    """
    try:
        self.widget.max = max
        display(self.widget)
    except:
        # Best-effort: silently degrade when no rich display backend
        # (e.g. IPython widgets) is available.
        pass
def likelihood(self, outcomes, modelparams, expparams):
    """Born-rule likelihood of Pauli-measurement outcomes on the
    multiqubit states described by ``modelparams``.

    :param outcomes: measurement outcomes to evaluate.
    :param modelparams: state vectors in the model parameterization.
    :param expparams: record array with 'pauli' (measured parameter
        index) and 'vis' (visibility) fields.
    """
    # Superclass call only performs call-count bookkeeping.
    super(MultiQubitStatePauliModel, self).likelihood(outcomes, modelparams, expparams)
    # Born rule: Pr(0) = (1 + <P>) / 2 for the selected Pauli component.
    pr0 = 0.5 * (1 + modelparams[:, expparams['pauli']])
    # Clip numerical noise outside [0, 1].
    pr0[pr0 < 0] = 0
    pr0[pr0 > 1] = 1
    # Fold in visibility: vis=1 is ideal, vis=0 is a fair coin flip.
    pr0 = expparams['vis'] * pr0 + (1 - expparams['vis']) * 0.5
    return Model.pr0_to_likelihood_array(outcomes, pr0)
def domain(self, expparams):
    """Return one IntegerDomain per input expparam, covering outcomes
    0 .. n_outcomes-1."""
    domains = []
    for n_o in self.n_outcomes(expparams):
        domains.append(IntegerDomain(min=0, max=n_o - 1))
    return domains
def underlying_likelihood(self, binary_outcomes, modelparams, expparams):
    """Likelihood of hypothesized outcomes under the underlying model."""
    # Strip off any extra parameters added by this wrapper model.
    base_params = modelparams[..., self._orig_mps_slice]
    return self.underlying_model.likelihood(
        binary_outcomes, base_params, expparams)
def are_expparam_dtypes_consistent(self, expparams):
    """True iff every given expparam has an outcome domain of the same
    dtype; trivially true when the outcome count is constant or the
    input is empty."""
    if self.is_n_outcomes_constant or expparams.size == 0:
        return True
    domains = self.domain(expparams)
    reference = domains[0].dtype
    return all(d.dtype == reference for d in domains[1:])
def simulate_experiment(self, modelparams, expparams, repeat=1):
    """Produce simulated data for the given model/experiment parameters.

    NOTE(review): only the bookkeeping common to all simulators appears
    here; concrete subclasses extend this method to generate outcomes.
    """
    # Track how many single-shot simulations have been requested.
    self._sim_count += modelparams.shape[0] * expparams.shape[0] * repeat
    assert (self.are_expparam_dtypes_consistent(expparams))
def likelihood(self, outcomes, modelparams, expparams):
    r"""Probability of each outcome conditioned on each model parameter
    vector and each experimental control setting.

    NOTE(review): this base implementation only records call statistics;
    subclasses extend it to compute the actual likelihood array.
    """
    # Tally the number of scalar likelihood evaluations requested.
    self._call_count += (
        safe_shape(outcomes) * safe_shape(modelparams) * safe_shape(expparams))
def get_qutip_module(required_version='3.2'):
    """Import and return qutip when available at ``required_version`` or
    newer; return None (never raise) otherwise."""
    try:
        import qutip as qt
        # NOTE(review): distutils is deprecated on modern Pythons; its
        # ImportError is caught below, yielding None.
        from distutils.version import LooseVersion
        if LooseVersion(qt.version.version) < LooseVersion(required_version):
            return None
    except ImportError:
        return None
    return qt
def particle_covariance_mtx(weights, locations):
    """Estimate the covariance of an SMC particle distribution.

    .. deprecated:: use ``distributions.ParticleDistribution`` instead.

    :param weights: 1D array of particle weights (assumed normalized).
    :param locations: particle locations of shape
        ``(n_particles, n_modelparams)``.
    :return: covariance matrix, shape ``(n_modelparams, n_modelparams)``.
    """
    warnings.warn(
        'particle_covariance_mtx is deprecated, please use distributions.ParticleDistribution',
        DeprecationWarning)
    mu = particle_meanfn(weights, locations)
    xs = locations.transpose([1, 0])
    ws = weights
    # E[x x^T] - mu mu^T, with the expectation weighted by ws.
    cov = (
        np.einsum('i,mi,ni', ws, xs, xs)
        - np.dot(mu[..., np.newaxis], mu[np.newaxis, ...]))
    assert np.all(np.isfinite(cov))
    # Warn (rather than fail) when roundoff yields negative eigenvalues.
    if not np.all(la.eig(cov)[0] >= 0):
        warnings.warn(
            'Numerical error in covariance estimation causing positive semidefinite violation.',
            ApproximationWarning)
    return cov
def ellipsoid_volume(A=None, invA=None):
    """Volume of an ellipsoid given either its matrix ``A`` or the
    inverse ``invA`` (at least one must be supplied)."""
    if invA is None:
        if A is None:
            raise ValueError("Must pass either inverse(A) or A.")
        invA = la.inv(A)
    n = invA.shape[0]
    # Volume of the n-dimensional unit ball.
    unit_ball = (np.pi ** (n / 2)) / gamma(1 + (n / 2))
    return unit_ball * la.det(sqrtm(invA))
def in_ellipsoid(x, A, c):
    """Test which points ``x`` lie inside the closed ellipsoid
    (c - x)^T A^{-1} (c - x) <= 1 with shape matrix ``A``, center ``c``."""
    Ainv = np.linalg.inv(A)
    if x.ndim == 1:
        d = c - x
        return np.einsum('j,jl,l', d, Ainv, d) <= 1
    # Batch form: one quadratic form per row of x.
    d = c[np.newaxis, :] - x
    return np.einsum('ij,jl,il->i', d, Ainv, d) <= 1
def assert_sigfigs_equal(x, y, sigfigs=3):
    """Assert that ``x`` and ``y`` agree elementwise to ``sigfigs``
    significant figures (scale taken from ``x``)."""
    # Rescale both arrays so x lies in [1, 10); comparing to `sigfigs`
    # decimal places then approximates significant-figure agreement.
    scale = 10 ** (-np.floor(np.log10(x)))
    assert_almost_equal(x * scale, y * scale, sigfigs)
def format_uncertainty(value, uncertianty, scinotn_break=4):
    """Format ``value ± uncertainty`` as a LaTeX string, switching to
    scientific notation for extreme magnitudes."""
    if uncertianty == 0:
        # Exact value: no ± clause needed.
        return "{0:f}".format(value)
    # Orders of magnitude of the uncertainty and of the value.
    # NOTE(review): int() truncates toward zero rather than flooring;
    # preserved as-is to keep existing formatting behavior.
    mag_unc = int(np.log10(np.abs(uncertianty)))
    mag_val = int(np.log10(np.abs(value))) if value != 0 else 0
    n_digits = max(mag_val - mag_unc, 0)
    if abs(mag_val) < abs(mag_unc) and abs(mag_unc) > scinotn_break:
        # Uncertainty dominates: scale both by its magnitude.
        scale = 10 ** mag_unc
        return r"({{0:0.{0}f}} \pm {{1:0.{0}f}}) \times 10^{{2}}".format(
            n_digits).format(value / scale, uncertianty / scale, mag_unc)
    if abs(mag_val) <= scinotn_break:
        # Moderate magnitudes: plain fixed-point formatting.
        return r"{{0:0.{n_digits}f}} \pm {{1:0.{n_digits}f}}".format(
            n_digits=n_digits).format(value, uncertianty)
    # Large value: scale both by the value's magnitude.
    scale = 10 ** mag_val
    return r"({{0:0.{0}f}} \pm {{1:0.{0}f}}) \times 10^{{2}}".format(
        n_digits).format(value / scale, uncertianty / scale, mag_val)
def from_simplex(x):
    r"""Map unit-simplex coordinates (last axis of ``x``) into
    unconstrained logit space via the stick-breaking transform.

    :param x: array whose last axis lies on the unit simplex.
    :return: real array of the same shape; the last component is 0, and
        the simplex barycenter maps to the origin.
    """
    n = x.shape[-1]
    z = np.empty(shape=x.shape)
    # Stick-breaking fractions: share of the remaining mass taken by
    # each coordinate.
    z[..., 0] = x[..., 0]
    z[..., 1:-1] = x[..., 1:-1] / (1 - x[..., :-2].cumsum(axis=-1))
    # Center by subtracting the logit of the uniform fraction so that
    # the barycenter maps to zero.
    # Fix: np.float was removed in NumPy 1.24; use the builtin float.
    z[..., :-1] = logit(z[..., :-1]) - logit(
        1 / (n - np.arange(n - 1, dtype=float)))
    z[..., -1] = 0
    return z
def join_struct_arrays(arrays):
    """Concatenate the fields of several same-shape structured arrays
    into one array whose dtype merges all fields. Inverse of
    separate_struct_array."""
    itemsizes = np.array([a.itemsize for a in arrays])
    offsets = np.r_[0, itemsizes.cumsum()]
    shape = arrays[0].shape
    # Lay the raw bytes of each input side by side.
    raw = np.empty(shape + (offsets[-1],), dtype=np.uint8)
    for arr, size, offset in zip(arrays, itemsizes, offsets):
        raw[..., offset:offset + size] = (
            np.atleast_1d(arr).view(np.uint8).reshape(shape + (size,)))
    merged_dtype = sum((a.dtype.descr for a in arrays), [])
    return raw.ravel().view(merged_dtype)
def separate_struct_array(array, dtypes):
    """Split a structured array into a list of arrays, one per dtype in
    ``dtypes`` (or per equal-size field when a single dtype is given).
    Inverse of join_struct_arrays."""
    try:
        offsets = np.cumsum([np.dtype(dt).itemsize for dt in dtypes])
    except TypeError:
        # A single dtype was passed: infer how many equal-size fields
        # the array holds and replicate the dtype accordingly.
        field_size = np.dtype(dtypes).itemsize
        num_fields = int(array.nbytes / (array.size * field_size))
        offsets = np.cumsum([field_size] * num_fields)
        dtypes = [dtypes] * num_fields
    offsets = np.concatenate([[0], offsets]).astype(int)
    raw = array.view(np.uint8).reshape(array.shape + (-1,))
    return [
        raw[..., offsets[k]:offsets[k + 1]].flatten().view(dt)
        for k, dt in enumerate(dtypes)
    ]
def sqrtm_psd(A, est_error=True, check_finite=True):
    """Matrix square root of a positive-semidefinite matrix, truncating
    numerically negative eigenvalues to zero.

    Returns the root alone, or ``(root, frobenius_error)`` when
    ``est_error`` is True."""
    w, v = eigh(A, check_finite=check_finite)
    # Clip negative eigenvalues, then take elementwise square roots.
    w = np.sqrt(np.where(w > 0, w, 0))
    A_sqrt = (v * w).dot(v.conj().T)
    if not est_error:
        return A_sqrt
    return A_sqrt, np.linalg.norm(np.dot(A_sqrt, A_sqrt) - A, 'fro')
def tensor_product_basis(*bases):
    """TomographyBasis whose elements are all tensor products of
    elements drawn from the given factor bases."""
    dim = np.prod([basis.data.shape[1] for basis in bases])
    tp_basis = np.zeros((dim ** 2, dim, dim), dtype=complex)
    # One product element per combination of factor elements.
    element_iter = it.product(*[basis.data for basis in bases])
    for idx_factors, factors in enumerate(element_iter):
        tp_basis[idx_factors, :, :] = reduce(np.kron, factors)
    combined_dims = sum((factor.dims for factor in bases), [])
    combined_labels = list(map(
        r"\otimes".join,
        it.product(*[basis.labels for basis in bases])))
    return TomographyBasis(tp_basis, combined_dims, combined_labels)
def state_to_modelparams(self, state):
    """Expand a QuTiP-represented state in this basis, returning the
    (real) vector of expansion coefficients."""
    flat_basis = self.flat()
    dense = state.data.todense().view(np.ndarray).flatten()
    # Hilbert-Schmidt inner products with each basis element.
    return np.real(np.dot(flat_basis.conj(), dense))
def modelparams_to_state(self, modelparams):
    """Convert one model-parameter vector (or a batch of them) into
    QuTiP-represented states."""
    if modelparams.ndim != 1:
        # Batch input: convert each row independently.
        return list(map(self.modelparams_to_state, modelparams))
    qobj = qt.Qobj(
        np.tensordot(modelparams, self.data, 1),
        dims=[self.dims, self.dims])
    if self.superrep is not None:
        qobj.superrep = self.superrep
    return qobj
def covariance_mtx_to_superop(self, mtx):
    """Conjugate a covariance matrix by the flattened basis, giving the
    corresponding superoperator as a QuTiP Qobj with type='super'."""
    M = self.flat()
    superop = np.dot(M.conj().T, mtx).dot(M)
    return qt.Qobj(superop, dims=[[self.dims] * 2] * 2)
21,383 | def _dist_kw_arg ( self , k ) : if self . _dist_kw_args is not None : return { key : self . _dist_kw_args [ key ] [ k , : ] for key in self . _dist_kw_args . keys ( ) } else : return { } | Returns a dictionary of keyword arguments for the k th distribution . |
def sample(self, n=1):
    """Draw ``n`` particle locations at random according to the
    particle weights."""
    cdf = np.cumsum(self.particle_weights)
    draws = np.random.random((n,))
    # searchsorted inverts the CDF; the minimum guards against roundoff
    # pushing a draw past the final cumulative weight.
    idxs = np.minimum(cdf.searchsorted(draws, side='right'), len(cdf) - 1)
    return self.particle_locations[idxs]
def est_covariance_mtx(self, corr=False):
    """Full-rank covariance matrix of the particle distribution,
    normalized to a correlation matrix when ``corr`` is True."""
    cov = self.particle_covariance_mtx(
        self.particle_weights, self.particle_locations)
    if corr:
        stds = np.sqrt(np.diag(cov))
        cov /= np.outer(stds, stds)
    return cov
def est_credible_region(self, level=0.95, return_outside=False, modelparam_slice=None):
    """Particles inside a highest-posterior-density credible region with
    probability mass no less than ``level``.

    :param float level: desired credibility level.
    :param bool return_outside: also return the excluded particles.
    :param modelparam_slice: optional slice restricting which model
        parameters are returned.
    """
    # Which subset of model parameters to report.
    s_ = np.s_[modelparam_slice] if modelparam_slice is not None else np.s_[:]
    mps = self.particle_locations[:, s_]
    # Rank particles by weight, heaviest first, and accumulate mass.
    id_sort = np.argsort(self.particle_weights)[::-1]
    cumsum_weights = np.cumsum(self.particle_weights[id_sort])
    id_cred = cumsum_weights <= level
    # Include one more particle so the region's mass reaches >= level.
    id_cred[np.sum(id_cred)] = True
    if return_outside:
        return (mps[id_sort][id_cred],
                mps[id_sort][np.logical_not(id_cred)])
    else:
        return mps[id_sort][id_cred]
def region_est_hull(self, level=0.95, modelparam_slice=None):
    """Convex hull of a credible subset of particles: returns the hull
    simplices and its unique vertices."""
    credible_points = self.est_credible_region(
        level=level, modelparam_slice=modelparam_slice)
    hull = ConvexHull(credible_points)
    vertex_ids = u.uniquify(hull.vertices.flatten())
    return credible_points[hull.simplices], credible_points[vertex_ids]
def in_credible_region(self, points, level=0.95, modelparam_slice=None, method='hpd-hull', tol=0.0001):
    """Decide which of ``points`` lie within a credible region of the
    current distribution.

    :param points: points to classify.
    :param float level: credibility level of the region.
    :param modelparam_slice: optional restriction to a subset of model
        parameters.
    :param str method: 'pce' (posterior covariance ellipsoid),
        'hpd-mvee' (minimum-volume ellipsoid around HPD particles), or
        'hpd-hull' (convex hull of HPD particles).
    :param float tol: tolerance for the MVEE solver ('hpd-mvee' only).
    :return: boolean array, one entry per point.
    """
    if method == 'pce':
        # Ellipsoid from the posterior covariance, scaled by the
        # chi-squared quantile at the requested level.
        s_ = np.s_[modelparam_slice] if modelparam_slice is not None else np.s_[:]
        A = self.est_covariance_mtx()[s_, s_]
        c = self.est_mean()[s_]
        mult = st.chi2.ppf(level, c.size)
        results = u.in_ellipsoid(points, mult * A, c)
    elif method == 'hpd-mvee':
        tol = 0.0001 if tol is None else tol
        A, c = self.region_est_ellipsoid(level=level, tol=tol, modelparam_slice=modelparam_slice)
        results = u.in_ellipsoid(points, np.linalg.inv(A), c)
    elif method == 'hpd-hull':
        # Membership via Delaunay triangulation of the credible points.
        hull = Delaunay(self.est_credible_region(level=level, modelparam_slice=modelparam_slice))
        results = hull.find_simplex(points) >= 0
    return results
def sample(self, n=1):
    """Draw ``n`` samples from the underlying distribution, rejecting
    and redrawing any that the model deems invalid."""
    samples = np.empty((n, self.n_rvs))
    pending = np.arange(n)
    attempts = 0
    while pending.size and attempts < self._maxiters:
        samples[pending] = self._dist.sample(len(pending))
        # Keep only the indices whose draws are still invalid.
        invalid = np.logical_not(
            self._model.are_models_valid(samples[pending, :]))
        pending = pending[np.nonzero(invalid)[0]]
        attempts += 1
    if pending.size:
        raise RuntimeError(
            "Did not successfully postselect within {} iterations.".format(
                self._maxiters))
    return samples
def iter_actions(self):
    """Yield the service's actions, each with typed in/out arguments.

    Parses the service's SCPD (service description) XML in two passes:
    first the state-variable table, to learn each variable's data type,
    default and allowed values; then the action list, resolving each
    argument's type via its related state variable.
    """
    ns = '{urn:schemas-upnp-org:service-1-0}'
    scpd_body = requests.get(self.base_url + self.scpd_url).content
    tree = XML.fromstring(scpd_body)
    # Pass 1: collect a Vartype for every declared state variable.
    vartypes = {}
    srvStateTables = tree.findall('{}serviceStateTable'.format(ns))
    for srvStateTable in srvStateTables:
        statevars = srvStateTable.findall('{}stateVariable'.format(ns))
        for state in statevars:
            name = state.findtext('{}name'.format(ns))
            datatype = state.findtext('{}dataType'.format(ns))
            default = state.findtext('{}defaultValue'.format(ns))
            # Missing lists/ranges become None (empty tuple iterates to []).
            value_list_elt = state.find('{}allowedValueList'.format(ns))
            if value_list_elt is None:
                value_list_elt = ()
            value_list = [item.text for item in value_list_elt] or None
            value_range_elt = state.find('{}allowedValueRange'.format(ns))
            if value_range_elt is None:
                value_range_elt = ()
            value_range = [item.text for item in value_range_elt] or None
            vartypes[name] = Vartype(datatype, default, value_list, value_range)
    # Pass 2: walk the actions, typing each argument through its
    # related state variable.
    actionLists = tree.findall('{}actionList'.format(ns))
    for actionList in actionLists:
        actions = actionList.findall('{}action'.format(ns))
        for i in actions:
            action_name = i.findtext('{}name'.format(ns))
            argLists = i.findall('{}argumentList'.format(ns))
            for argList in argLists:
                args_iter = argList.findall('{}argument'.format(ns))
                in_args = []
                out_args = []
                for arg in args_iter:
                    arg_name = arg.findtext('{}name'.format(ns))
                    direction = arg.findtext('{}direction'.format(ns))
                    related_variable = arg.findtext(
                        '{}relatedStateVariable'.format(ns))
                    vartype = vartypes[related_variable]
                    if direction == "in":
                        in_args.append(Argument(arg_name, vartype))
                    else:
                        out_args.append(Argument(arg_name, vartype))
                yield Action(action_name, in_args, out_args)
def parse_event_xml(xml_event):
    """Parse the body of a UPnP event into a dict of variable values.

    Handles both plain property elements and the nested ``LastChange``
    payload used by AVTransport / RenderingControl / Queue services.
    DIDL metadata values are parsed into objects; when parsing fails the
    value is replaced with a SoCoFault so one bad item does not break
    the whole event.
    """
    result = {}
    tree = XML.fromstring(xml_event)
    properties = tree.findall('{urn:schemas-upnp-org:event-1-0}property')
    for prop in properties:
        for variable in prop:
            if variable.tag == "LastChange":
                # LastChange carries its own XML document as text; the
                # instance element's namespace depends on the service.
                last_change_tree = XML.fromstring(
                    variable.text.encode('utf-8'))
                instance = last_change_tree.find(
                    "{urn:schemas-upnp-org:metadata-1-0/AVT/}InstanceID")
                if instance is None:
                    instance = last_change_tree.find(
                        "{urn:schemas-upnp-org:metadata-1-0/RCS/}InstanceID")
                if instance is None:
                    instance = last_change_tree.find(
                        "{urn:schemas-sonos-com:metadata-1-0/Queue/}QueueID")
                for last_change_var in instance:
                    tag = last_change_var.tag
                    # Strip the namespace prefix, if present.
                    if tag.startswith('{'):
                        tag = tag.split('}', 1)[1]
                    tag = camel_to_underscore(tag)
                    value = last_change_var.get('val')
                    if value is None:
                        value = last_change_var.text
                    if value.startswith('<DIDL-Lite'):
                        # Embedded track metadata: parse, or wrap the
                        # failure in a SoCoFault placeholder.
                        try:
                            didl = from_didl_string(value)
                            if not didl:
                                continue
                            value = didl[0]
                        except SoCoException as original_exception:
                            log.debug(
                                "Event contains illegal metadata"
                                "for '%s'.\n"
                                "Error message: '%s'\n"
                                "The result will be a SoCoFault.",
                                tag, str(original_exception))
                            event_parse_exception = EventParseException(
                                tag, value, original_exception)
                            value = SoCoFault(event_parse_exception)
                    # Channel-qualified variables (e.g. per-channel
                    # volume) are stored as nested dicts keyed by channel.
                    channel = last_change_var.get('channel')
                    if channel is not None:
                        if result.get(tag) is None:
                            result[tag] = {}
                        result[tag][channel] = value
                    else:
                        result[tag] = value
            else:
                result[camel_to_underscore(variable.tag)] = variable.text
    return result
def unsubscribe(self):
    """Unsubscribe from the service's events.

    Safe to call repeatedly: does nothing if already unsubscribed. The
    network call is best-effort — local bookkeeping is cleaned up even
    if the UNSUBSCRIBE request fails.
    """
    if self._has_been_unsubscribed or not self.is_subscribed:
        return
    # Signal the auto-renewal thread, if any, to stop.
    self._auto_renew_thread_flag.set()
    headers = {'SID': self.sid}
    response = None
    try:
        response = requests.request(
            'UNSUBSCRIBE',
            self.service.base_url + self.service.event_subscription_url,
            headers=headers,
            timeout=3)
    except requests.exceptions.RequestException:
        # Network failure: continue with local cleanup regardless.
        pass
    self.is_subscribed = False
    self._timestamp = None
    log.info(
        "Unsubscribed from %s, sid: %s",
        self.service.base_url + self.service.event_subscription_url,
        self.sid)
    # Drop this subscription from the global registry.
    with _subscriptions_lock:
        try:
            del _subscriptions[self.sid]
        except KeyError:
            pass
    self._has_been_unsubscribed = True
    # 412 (precondition failed) means the SID was already gone; any
    # other error status is raised to the caller.
    if response and response.status_code != 412:
        response.raise_for_status()
def play_mode(self, playmode):
    """Set the speaker's play mode.

    :raises KeyError: if ``playmode`` is not a recognized mode.
    """
    mode = playmode.upper()
    if mode not in PLAY_MODES.keys():
        raise KeyError("'%s' is not a valid play mode" % mode)
    self.avTransport.SetPlayMode([
        ('InstanceID', 0),
        ('NewPlayMode', mode),
    ])
def repeat(self, repeat):
    """Set the queue's repeat option, preserving the current shuffle
    setting."""
    current_shuffle = self.shuffle
    self.play_mode = PLAY_MODE_BY_MEANING[(current_shuffle, repeat)]
def join(self, master):
    """Group this speaker under ``master`` as its coordinator."""
    self.avTransport.SetAVTransportURI([
        ('InstanceID', 0),
        ('CurrentURI', 'x-rincon:{0}'.format(master.uid)),
        ('CurrentURIMetaData', ''),
    ])
    # Group topology changed: drop the cached state and re-read it.
    self._zgs_cache.clear()
    self._parse_zone_group_state()
def unjoin(self):
    """Detach this speaker from its group, making it standalone."""
    self.avTransport.BecomeCoordinatorOfStandaloneGroup([
        ('InstanceID', 0),
    ])
    # Topology changed: invalidate and refresh the cached group state.
    self._zgs_cache.clear()
    self._parse_zone_group_state()
def set_sleep_timer(self, sleep_time_seconds):
    """Set (or cancel) the speaker's sleep timer.

    :param sleep_time_seconds: seconds until sleep, in [0, 86399], or
        None to cancel the timer.
    :raises ValueError: if the value is out of range or not an integer.
    """
    try:
        if sleep_time_seconds is None:
            # An empty duration cancels the timer.
            sleep_time = ''
        else:
            # Format as H:MM:SS, as the UPnP call expects.
            sleep_time = format(datetime.timedelta(seconds=int(sleep_time_seconds)))
        self.avTransport.ConfigureSleepTimer([
            ('InstanceID', 0),
            ('NewSleepTimerDuration', sleep_time),
        ])
    except SoCoUPnPException as err:
        # The device rejects out-of-range durations with error 402;
        # translate that to a ValueError for callers.
        if 'Error 402 received' in str(err):
            raise ValueError('invalid sleep_time_seconds, must be integer \
value between 0 and 86399 inclusive or None')
        raise
    except ValueError:
        # int() failed on a non-numeric input.
        raise ValueError('invalid sleep_time_seconds, must be integer \
value between 0 and 86399 inclusive or None')
def _restore_coordinator(self):
    """Restore playback state that only the group coordinator owns:
    queue, playlist/track position, transport settings or direct URI."""
    # Pause first so playback does not continue while the queue is
    # being restored.
    transport_info = self.device.get_current_transport_info()
    if transport_info is not None:
        if transport_info['current_transport_state'] == 'PLAYING':
            self.device.pause()
    self._restore_queue()
    if self.is_playing_queue and self.playlist_position > 0:
        if self.playlist_position is not None:
            # NOTE(review): the stored position appears to be 1-based
            # (hence the decrement before play_from_queue) — confirm.
            self.playlist_position -= 1
        self.device.play_from_queue(self.playlist_position, False)
        if self.track_position is not None:
            if self.track_position != "":
                self.device.seek(self.track_position)
        # Reinstate repeat/shuffle and cross-fade settings.
        self.device.play_mode = self.play_mode
        self.device.cross_fade = self.cross_fade
    elif self.is_playing_cloud_queue:
        # Cloud queues cannot be re-cued programmatically; skip.
        pass
    else:
        # Direct URI playback: re-cue without starting.
        if self.media_uri != "":
            self.device.play_uri(self.media_uri, self.media_metadata, start=False)
def _restore_volume(self, fade):
    """Reinstate saved mute/EQ/volume settings, optionally fading the
    volume back in."""
    self.device.mute = self.mute
    # A saved volume of 100 may indicate fixed-output mode; ask the
    # device to find out.
    if self.volume == 100:
        fixed_vol = self.device.renderingControl.GetOutputFixed(
            [('InstanceID', 0)])['CurrentFixed']
    else:
        fixed_vol = False
    if not fixed_vol:
        # EQ and level only apply when the output level is adjustable.
        self.device.bass = self.bass
        self.device.treble = self.treble
        self.device.loudness = self.loudness
        if fade:
            # Start silent, then ramp up to the stored level.
            self.device.volume = 0
            self.device.ramp_to_volume(self.volume)
        else:
            self.device.volume = self.volume
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.