idx
int64
0
63k
question
stringlengths
61
4.03k
target
stringlengths
6
1.23k
20,300
def _GetUsers(self, key_path_suffix):
    """Virtual key callback to determine the users sub keys.

    Args:
        key_path_suffix (str): key path suffix, whose first segment is the
            user key name (a SID string, '.DEFAULT' or a '<SID>_Classes'
            name).

    Returns:
        WinRegistryKey: the resolved key inside the matching user profile
            Registry file, or None if it cannot be resolved.
    """
    user_key_name, _, key_path_suffix = key_path_suffix.partition(
        definitions.KEY_PATH_SEPARATOR)
    # '.DEFAULT' maps to the LocalSystem account SID in the profile list.
    if user_key_name == '.DEFAULT':
        search_key_name = 'S-1-5-18'
    else:
        search_key_name = user_key_name
    user_profile_list_key = self.GetKeyByPath(
        self._USER_PROFILE_LIST_KEY_PATH)
    if not user_profile_list_key:
        return None
    for user_profile_key in user_profile_list_key.GetSubkeys():
        if search_key_name == user_profile_key.name:
            profile_path_value = user_profile_key.GetValueByName(
                'ProfileImagePath')
            if not profile_path_value:
                break
            profile_path = profile_path_value.GetDataAsObject()
            if not profile_path:
                break
            key_name_upper = user_profile_key.name.upper()
            # '<SID>_Classes' keys are backed by UsrClass.dat; other user
            # keys are backed by NTUSER.DAT in the profile directory.
            if key_name_upper.endswith('_CLASSES'):
                profile_path = '\\'.join([
                    profile_path, 'AppData', 'Local', 'Microsoft',
                    'Windows', 'UsrClass.dat'])
            else:
                profile_path = '\\'.join([profile_path, 'NTUSER.DAT'])
            profile_path_upper = profile_path.upper()
            registry_file = self._GetCachedUserFileByPath(profile_path_upper)
            if not registry_file:
                break
            key_path_prefix = definitions.KEY_PATH_SEPARATOR.join([
                'HKEY_USERS', user_key_name])
            key_path = ''.join([key_path_prefix, key_path_suffix])
            registry_file.SetKeyPathPrefix(key_path_prefix)
            return registry_file.GetKeyByPath(key_path)
    return None
Virtual key callback to determine the users sub keys .
20,301
def _GetFileByPath(self, key_path_upper):
    """Retrieves a Windows Registry file for a specific path.

    Args:
        key_path_upper (str): key path in upper case.

    Returns:
        tuple[str, WinRegistryFile]: key path prefix (upper case) and the
            corresponding Registry file, either of which may be None.
    """
    key_path_prefix, registry_file = self._GetCachedFileByPath(key_path_upper)
    if not registry_file:
        # Cache miss: try each candidate mapping until one opens.
        for mapping in self._GetFileMappingsByPath(key_path_upper):
            try:
                registry_file = self._OpenFile(mapping.windows_path)
            except IOError:
                registry_file = None
            if not registry_file:
                continue
            if not key_path_prefix:
                key_path_prefix = mapping.key_path_prefix
            # MapFile caches the file and sets its key path prefix.
            self.MapFile(key_path_prefix, registry_file)
            key_path_prefix = key_path_prefix.upper()
            break
    return key_path_prefix, registry_file
Retrieves a Windows Registry file for a specific path .
20,302
def _GetFileMappingsByPath ( self , key_path_upper ) : candidate_mappings = [ ] for mapping in self . _REGISTRY_FILE_MAPPINGS_NT : if key_path_upper . startswith ( mapping . key_path_prefix . upper ( ) ) : candidate_mappings . append ( mapping ) candidate_mappings . sort ( key = lambda mapping : len ( mapping . key_path_prefix ) , reverse = True ) for mapping in candidate_mappings : yield mapping
Retrieves the Windows Registry file mappings for a specific path .
20,303
def _OpenFile ( self , path ) : if not self . _registry_file_reader : return None return self . _registry_file_reader . Open ( path , ascii_codepage = self . _ascii_codepage )
Opens a Windows Registry file .
20,304
def GetRegistryFileMapping(self, registry_file):
    """Determines the Registry file mapping based on the content of the file.

    Args:
        registry_file (WinRegistryFile): Registry file to probe.

    Returns:
        str: key path prefix of the matching mapping, or '' if no (or no
            usable) mapping matched.

    Raises:
        RuntimeError: if multiple mappings match and they are not the known
            HKEY_CURRENT_USER pair.
    """
    if not registry_file:
        return ''
    candidates = []
    for file_mapping in self._REGISTRY_FILE_MAPPINGS_NT:
        if not file_mapping.unique_key_paths:
            continue
        # Probe every unique key path (no short-circuit, matching the
        # original behavior) and require all of them to exist.
        lookups = [
            registry_file.GetKeyByPath(unique_key_path)
            for unique_key_path in file_mapping.unique_key_paths]
        if all(lookups):
            candidates.append(file_mapping)
    if not candidates:
        return ''
    if len(candidates) == 1:
        return candidates[0].key_path_prefix
    prefixes = frozenset(
        file_mapping.key_path_prefix for file_mapping in candidates)
    expected = frozenset([
        'HKEY_CURRENT_USER',
        'HKEY_CURRENT_USER\\Software\\Classes'])
    # NTUSER.DAT legitimately matches both HKCU mappings.
    if prefixes == expected:
        return 'HKEY_CURRENT_USER'
    raise RuntimeError('Unable to resolve Windows Registry file mapping.')
Determines the Registry file mapping based on the content of the file .
20,305
def GetRootKey(self):
    """Retrieves the Windows Registry root key.

    Builds a virtual key tree containing every path in self._MAPPED_KEYS,
    creating intermediate virtual keys as needed.

    Returns:
        VirtualWinRegistryKey: virtual root key ('').
    """
    root_registry_key = virtual.VirtualWinRegistryKey('')
    for mapped_key in self._MAPPED_KEYS:
        key_path_segments = key_paths.SplitKeyPath(mapped_key)
        if not key_path_segments:
            continue
        # Walk/create the intermediate virtual keys for all but the last
        # path segment.
        registry_key = root_registry_key
        for name in key_path_segments[:-1]:
            sub_registry_key = registry_key.GetSubkeyByName(name)
            if not sub_registry_key:
                sub_registry_key = virtual.VirtualWinRegistryKey(name)
                registry_key.AddSubkey(sub_registry_key)
            registry_key = sub_registry_key
        # The leaf key is bound to this registry so lookups resolve lazily.
        sub_registry_key = registry_key.GetSubkeyByName(key_path_segments[-1])
        if (not sub_registry_key and
                isinstance(registry_key, virtual.VirtualWinRegistryKey)):
            sub_registry_key = virtual.VirtualWinRegistryKey(
                key_path_segments[-1], registry=self)
            registry_key.AddSubkey(sub_registry_key)
    return root_registry_key
Retrieves the Windows Registry root key .
20,306
def MapFile(self, key_path_prefix, registry_file):
    """Maps the Windows Registry file to a specific key path prefix.

    Args:
        key_path_prefix (str): key path prefix.
        registry_file (WinRegistryFile): Registry file to map.
    """
    # The cache is keyed on the upper-cased prefix; the file itself keeps
    # the original casing.
    cache_key = key_path_prefix.upper()
    self._registry_files[cache_key] = registry_file
    registry_file.SetKeyPathPrefix(key_path_prefix)
Maps the Windows Registry file to a specific key path prefix .
20,307
def GetRootKey(self):
    """Retrieves the root key.

    Returns:
        REGFWinRegistryKey: the root key of the REGF file, or None when the
            file has no root key.
    """
    root_regf_key = self._regf_file.get_root_key()
    if not root_regf_key:
        return None
    return REGFWinRegistryKey(root_regf_key, key_path=self._key_path_prefix)
Retrieves the root key .
20,308
def Open(self, file_object):
    """Opens the Windows Registry file using a file-like object.

    Args:
        file_object (file): file-like object of the REGF data.

    Returns:
        bool: always True.
    """
    # Keep a reference so the file object outlives this call.
    self._file_object = file_object
    self._regf_file.open_file_object(file_object)
    return True
Opens the Windows Registry file using a file - like object .
20,309
def SplitKeyPath(key_path, path_separator=definitions.KEY_PATH_SEPARATOR):
    """Splits the key path into path segments.

    Args:
        key_path (str): key path.
        path_separator (Optional[str]): separator between segments.

    Returns:
        list[str]: non-empty path segments (empty segments from leading,
            trailing or doubled separators are dropped).
    """
    segments = key_path.split(path_separator)
    return [segment for segment in segments if segment]
Splits the key path into path segments .
20,310
def _GetKeyFromRegistry(self):
    """Determines the key from the Windows Registry.

    Resolves self._key_path against the bound registry, copies the resolved
    key's subkeys into this virtual key and then detaches the registry so
    the resolution happens at most once.
    """
    if not self._registry:
        return
    try:
        self._registry_key = self._registry.GetKeyByPath(self._key_path)
    except RuntimeError:
        # Resolution failures leave self._registry_key unset; handled below.
        pass
    if not self._registry_key:
        return
    for sub_registry_key in self._registry_key.GetSubkeys():
        self.AddSubkey(sub_registry_key)
    # HKLM\System gets a synthetic CurrentControlSet virtual subkey.
    if self._key_path == 'HKEY_LOCAL_MACHINE\\System':
        sub_registry_key = VirtualWinRegistryKey(
            'CurrentControlSet', registry=self._registry)
        self.AddSubkey(sub_registry_key)
    # Drop the registry reference so this lookup is not repeated.
    self._registry = None
Determines the key from the Windows Registry .
20,311
def GetValues(self):
    """Retrieves all values within the key.

    Returns:
        iterator: values of the resolved key, or an empty iterator when the
            key cannot be resolved.
    """
    # Resolve lazily on first access.
    if self._registry and not self._registry_key:
        self._GetKeyFromRegistry()
    resolved_key = self._registry_key
    if not resolved_key:
        return iter([])
    return resolved_key.GetValues()
Retrieves all values within the key .
20,312
def perform(self):
    """Converts the payload into args and calls ``perform`` on the payload class.

    Runs the optional ``before_perform``/``after_perform`` hooks around the
    call, recording timing and failure information in the metadata dict that
    is passed to both hooks.

    Returns:
        The return value of ``payload_class.perform(*args)``.

    Raises:
        Exception: re-raises the job's exception when no retry could be
            scheduled.
    """
    payload_class_str = self._payload["class"]
    payload_class = self.safe_str_to_class(payload_class_str)
    # The class temporarily carries a reference to the queue backend; it is
    # removed again in the finally block below.
    payload_class.resq = self.resq
    args = self._payload.get("args")
    metadata = dict(args=args)
    if self.enqueue_timestamp:
        metadata["enqueue_timestamp"] = self.enqueue_timestamp
    before_perform = getattr(payload_class, "before_perform", None)
    metadata["failed"] = False
    metadata["perform_timestamp"] = time.time()
    # NOTE(review): check_after is assigned but never read.
    check_after = True
    try:
        if before_perform:
            payload_class.before_perform(metadata)
        return payload_class.perform(*args)
    except Exception as e:
        metadata["failed"] = True
        metadata["exception"] = e
        if not self.retry(payload_class, args):
            metadata["retried"] = False
            # No retry possible: propagate so the worker records a failure.
            raise
        else:
            metadata["retried"] = True
            logging.exception("Retry scheduled after error in %s",
                              self._payload)
    finally:
        after_perform = getattr(payload_class, "after_perform", None)
        if after_perform:
            payload_class.after_perform(metadata)
        delattr(payload_class, 'resq')
This method converts payload into args and calls the perform method on the payload class .
20,313
def fail(self, exception):
    """Fail this job through the configured failure backend.

    Args:
        exception: the exception that caused the failure.

    Returns:
        The saved failure object (RedisBackend by default).
    """
    failed_job = failure.create(
        exception, self._queue, self._payload, self._worker)
    failed_job.save(self.resq)
    return failed_job
This method provides a way to fail a job and will use whatever failure backend you've provided. The default is the RedisBackend.
20,314
def retry(self, payload_class, args):
    """Schedule a retry of a failed job when the payload class allows it.

    A job is retried when ``payload_class.retry_every`` is set and the next
    attempt would still fall within ``retry_timeout`` seconds of the first
    attempt.

    Returns:
        bool: True when a retry was enqueued, False otherwise.
    """
    retry_every = getattr(payload_class, 'retry_every', None)
    if not retry_every:
        return False
    retry_timeout = getattr(payload_class, 'retry_timeout', 0)
    now = ResQ._current_time()
    first_attempt = self._payload.get("first_attempt", now)
    retry_until = first_attempt + timedelta(seconds=retry_timeout)
    retry_at = now + timedelta(seconds=retry_every)
    if retry_at >= retry_until:
        return False
    self.resq.enqueue_at(retry_at, payload_class, *args,
                         **{'first_attempt': first_attempt})
    return True
This method provides a way to retry a job after a failure. If the job class defined by the payload contains a retry_every attribute then pyres will attempt to retry the job until successful or until the timeout defined by retry_timeout on the payload class.
20,315
def reserve(cls, queues, res, worker=None, timeout=10):
    """Reserve a job on one of the queues.

    Popping the payload removes it from the queue, so other workers will not
    pick the job up.

    Returns:
        A job instance when a payload was popped, otherwise None.
    """
    if isinstance(queues, string_types):
        queues = [queues]
    queue, payload = res.pop(queues, timeout=timeout)
    if not payload:
        return None
    return cls(queue, payload, res, worker)
Reserve a job on one of the queues . This marks this job so that other workers will not pick it up .
20,316
def my_import(name):
    """Import a dotted module path and return the leaf module.

    ``__import__`` returns the top-level package, so walk the remaining
    dotted components with getattr to reach the requested submodule.
    """
    module = __import__(name)
    for component in name.split('.')[1:]:
        module = getattr(module, component)
    return module
Helper function for walking import calls when searching for classes by string names .
20,317
def safe_str_to_class(s):
    """Map a dotted string name to a class object.

    Raises:
        ImportError: when the module can be imported but does not define the
            named attribute.
    """
    parts = s.split(".")
    klass = parts[-1]
    # A dotless name is treated as both module and class name.
    module = ".".join(parts[:-1]) or klass
    mod = my_import(module)
    if not hasattr(mod, klass):
        raise ImportError('')
    return getattr(mod, klass)
Helper function to map string class names to module classes .
20,318
def str_to_class(s):
    """Map a dotted string class name to the class object, or None.

    Args:
        s (str): dotted path such as ``'package.module.ClassName'``.

    Returns:
        The attribute named by the last component, or None when the module
        cannot be imported or lacks the attribute.
    """
    parts = s.split(".")
    klass = parts[-1]
    module = ".".join(parts[:-1])
    if not module:
        # A dotless name has no module part; ``__import__('')`` raises
        # ValueError, which the original failed to handle. Treat the single
        # token as the module name, mirroring safe_str_to_class.
        module = klass
    try:
        mod = __import__(module)
    except ImportError:
        return None
    if hasattr(mod, klass):
        return getattr(mod, klass)
    return None
Alternate helper function to map string class names to module classes .
20,319
def info(self):
    """Return a status summary of the Resque instance.

    Returns:
        dict: pending job count, processed count, queue count, worker count,
            failed count and the server address list.
    """
    pending = sum(self.size(queue_name) for queue_name in self.queues())
    return {
        'pending': pending,
        'processed': Stat('processed', self).get(),
        'queues': len(self.queues()),
        'workers': len(self.workers()),
        'failed': Stat('failed', self).get(),
        'servers': ['%s:%s' % (self.host, self.port)],
    }
Returns a dictionary of the current status: the number of pending jobs, processed jobs, queues, workers, and failed jobs.
20,320
def _shutdown_minions(self):
    """Terminate each pooled minion process and wait for it to exit."""
    setproctitle('pyres_manager: Waiting on children to shutdown.')
    # Terminate and join one minion at a time.
    for child in self._workers.values():
        child.terminate()
        child.join()
Terminate each worker process in the pool and wait for it to exit.
20,321
def work(self, interval=5):
    """Listen on the queues and process jobs until shutdown.

    Invoked by the run method. Repeatedly reserves a job (blocking up to
    ``interval`` seconds) and forks a worker for it; an ``interval`` of 0
    makes a single non-blocking pass.
    """
    self._setproctitle("Starting")
    logger.info("starting")
    self.startup()
    while True:
        if self._shutdown:
            logger.info('shutdown scheduled')
            break
        # Re-register each cycle so the worker stays visible to monitoring.
        self.register_worker()
        job = self.reserve(interval)
        if job:
            self.fork_worker(job)
        else:
            # interval == 0 means "drain once and exit".
            if interval == 0:
                break
            self._setproctitle("Waiting")
    self.unregister_worker()
Invoked by run method . work listens on a list of queues and sleeps for interval time .
20,322
def fork_worker(self, job):
    """Fork a child to process ``job`` and monitor it from the parent.

    Invoked by the work method. The parent polls the child with waitpid,
    raising CrashError on abnormal exits and TimeoutError (after SIGKILL)
    when ``self.timeout`` is exceeded. The child processes the job and
    exits with ``os._exit(0)``.
    """
    logger.debug('picked up job')
    logger.debug('job details: %s' % job)
    self.before_fork(job)
    self.child = os.fork()
    if self.child:
        # Parent process: monitor the forked child.
        self._setproctitle("Forked %s at %s" % (
            self.child, datetime.datetime.now()))
        logger.info('Forked %s at %s' % (
            self.child, datetime.datetime.now()))
        try:
            start = datetime.datetime.now()
            while True:
                # Non-blocking wait so the timeout check below can run.
                pid, status = os.waitpid(self.child, os.WNOHANG)
                if pid != 0:
                    if os.WIFEXITED(status) and os.WEXITSTATUS(status) == 0:
                        break
                    if os.WIFSTOPPED(status):
                        logger.warning("Process stopped by signal %d" %
                                       os.WSTOPSIG(status))
                    else:
                        if os.WIFSIGNALED(status):
                            raise CrashError("Unexpected exit by signal %d" %
                                             os.WTERMSIG(status))
                        raise CrashError("Unexpected exit status %d" %
                                         os.WEXITSTATUS(status))
                time.sleep(0.5)
                now = datetime.datetime.now()
                if self.timeout and ((now - start).seconds > self.timeout):
                    os.kill(self.child, signal.SIGKILL)
                    os.waitpid(-1, os.WNOHANG)
                    raise TimeoutError("Timed out after %d seconds" %
                                       self.timeout)
        except OSError as ose:
            import errno
            # EINTR from waitpid is benign; anything else propagates.
            if ose.errno != errno.EINTR:
                raise ose
        except JobError:
            self._handle_job_exception(job)
        finally:
            if self.job():
                self.done_working(job)
        logger.debug('done waiting')
    else:
        # Child process: run the job and exit without cleanup handlers.
        self._setproctitle("Processing %s since %s" % (
            job, datetime.datetime.now()))
        logger.info('Processing %s since %s' % (
            job, datetime.datetime.now()))
        self.after_fork(job)
        # Re-seed so forked children do not share the parent's RNG state.
        random.seed()
        self.process(job)
        os._exit(0)
    self.child = None
Invoked by work method . fork_worker does the actual forking to create the child process that will process the job . It s also responsible for monitoring the child process and handling hangs and crashes .
20,323
def save(self, resq=None):
    """Save the failed job into the 'resque:failed' Redis list.

    Preserves the original payload, queue, worker, exception name, parsed
    error message and traceback together with the failure timestamp.

    Args:
        resq: ResQ connection to use; a new one is created when omitted.
    """
    if not resq:
        resq = ResQ()
    data = {
        'failed_at': datetime.datetime.now().strftime('%Y/%m/%d %H:%M:%S'),
        'payload': self._payload,
        'exception': self._exception.__class__.__name__,
        'error': self._parse_message(self._exception),
        'backtrace': self._parse_traceback(self._traceback),
        'queue': self._queue
    }
    if self._worker:
        data['worker'] = self._worker
    data = ResQ.encode(data)
    resq.redis.rpush('resque:failed', data)
Saves the failed Job into a failed Redis queue, preserving all its original enqueued info.
20,324
def make_csr(A):
    """Convert A to CSR if A is not a CSR or BSR matrix already.

    Parameters
    ----------
    A : array-like or sparse matrix
        Matrix to convert; must be square.

    Returns
    -------
    csr_matrix or bsr_matrix
        A in CSR form (BSR input is passed through), upcast to a
        floating-point dtype.

    Raises
    ------
    TypeError
        If A cannot be converted to CSR, or is not square.
    """
    if not (isspmatrix_csr(A) or isspmatrix_bsr(A)):
        try:
            A = csr_matrix(A)
            print('Implicit conversion of A to CSR in pyamg.blackbox.make_csr')
        except Exception as error:
            # Catch Exception, not BaseException: KeyboardInterrupt and
            # SystemExit should propagate instead of becoming a TypeError.
            raise TypeError('Argument A must have type csr_matrix or '
                            'bsr_matrix, or be convertible to csr_matrix'
                            ) from error
    if A.shape[0] != A.shape[1]:
        raise TypeError('Argument A must be a square')
    # Upcast integer data to floating point for the solvers.
    A = A.asfptype()
    return A
Convert A to CSR if A is not a CSR or BSR matrix already .
20,325
def solver(A, config):
    """Generate an SA solver given matrix A and a configuration.

    Args:
        A: matrix, converted to CSR by make_csr.
        config (dict): smoothed_aggregation_solver keyword values (B, BH,
            smooth, strength, max_levels, max_coarse, coarse_solver,
            symmetry, aggregate, presmoother, postsmoother, keep).

    Returns:
        The multilevel solver built by smoothed_aggregation_solver.

    Raises:
        TypeError: when solver construction fails for any reason.
    """
    A = make_csr(A)
    try:
        return smoothed_aggregation_solver(
            A,
            B=config['B'],
            BH=config['BH'],
            smooth=config['smooth'],
            strength=config['strength'],
            max_levels=config['max_levels'],
            max_coarse=config['max_coarse'],
            coarse_solver=config['coarse_solver'],
            symmetry=config['symmetry'],
            aggregate=config['aggregate'],
            presmoother=config['presmoother'],
            postsmoother=config['postsmoother'],
            keep=config['keep'])
    except BaseException:
        # NOTE(review): BaseException is very broad here; any construction
        # failure is reported as this TypeError.
        raise TypeError('Failed generating smoothed_aggregation_solver')
Generate an SA solver given matrix A and a configuration .
20,326
def solve(A, b, x0=None, tol=1e-5, maxiter=400, return_solver=False,
          existing_solver=None, verb=True, residuals=None):
    """Solve Ax = b with a (possibly reused) smoothed-aggregation solver.

    Args:
        A: system matrix, converted to CSR by make_csr.
        b: right-hand side.
        x0: initial guess; random when None.
        tol, maxiter: Krylov stopping parameters.
        return_solver (bool): also return the solver for reuse.
        existing_solver: previously built solver to reuse; must match A's
            size on level 0.
        verb (bool): print iteration and residual information.
        residuals: optional list to collect residual history.

    Returns:
        x reshaped to b's shape, or (x, solver) when return_solver is True.
    """
    A = make_csr(A)
    if existing_solver is None:
        # Build a solver from an automatic configuration.
        config = solver_configuration(A, B=None, verb=verb)
        existing_solver = solver(A, config)
    else:
        if existing_solver.levels[0].A.shape[0] != A.shape[0]:
            raise TypeError('Argument existing_solver must have level 0 '
                            'matrix of same size as A')
    # CG requires a Hermitian operator; otherwise fall back to GMRES.
    if existing_solver.levels[0].A.symmetry == 'hermitian':
        accel = 'cg'
    else:
        accel = 'gmres'
    if x0 is None:
        # NOTE(review): sp.rand is the removed scipy.rand alias — confirm
        # the module-level import before running on modern SciPy.
        x0 = np.array(sp.rand(A.shape[0],), dtype=A.dtype)
    if verb:
        iteration = np.zeros((1,))
        print(" maxiter = %d" % maxiter)

        def callback(x, iteration):
            iteration[0] = iteration[0] + 1
            print(" iteration %d" % iteration[0])

        def callback2(x):
            return callback(x, iteration)
    else:
        callback2 = None
    x = existing_solver.solve(b, x0=x0, accel=accel, tol=tol,
                              maxiter=maxiter, callback=callback2,
                              residuals=residuals)
    if verb:
        # Report preconditioned residual norms before and after.
        r0 = b - A * x0
        rk = b - A * x
        M = existing_solver.aspreconditioner()
        nr0 = np.sqrt(np.inner(np.conjugate(M * r0), r0))
        nrk = np.sqrt(np.inner(np.conjugate(M * rk), rk))
        print(" Residuals ||r_k||_M, ||r_0||_M = %1.2e, %1.2e" % (nrk, nr0))
        if np.abs(nr0) > 1e-15:
            print(" Residual reduction ||r_k||_M/||r_0||_M = %1.2e" %
                  (nrk / nr0))
    if return_solver:
        return (x.reshape(b.shape), existing_solver)
    else:
        return x.reshape(b.shape)
Solve Ax = b .
20,327
def find_comments(fname, ch):
    """Collect the comment block immediately preceding each function.

    Args:
        fname (str): path of the source file to read.
        ch: object with a ``functions`` sequence of dicts, each carrying
            'line_number' (1-based), 'template' (bool) and 'name'.

    Returns:
        dict: function name -> comment text, with the 3-character comment
            prefix ('// ', '/* ', ' * ') stripped from each line.
    """
    with open(fname, 'r') as inf:
        fdata = inf.readlines()
    comments = {}
    for func in ch.functions:
        lineno = func['line_number'] - 1
        lineptr = lineno - 1
        if func['template']:
            # The template declaration occupies the line just above.
            lineptr -= 1
        start = lineptr
        # Walk upward over contiguous comment lines. The lineptr >= 0 guard
        # fixes the original, which could wrap to index -1 (the file's last
        # line) when the comment block reached the top of the file.
        while lineptr >= 0 and (fdata[lineptr].startswith('//')
                                or fdata[lineptr].startswith('/*')
                                or fdata[lineptr].startswith(' *')):
            lineptr -= 1
        lineptr += 1
        comment = fdata[lineptr:(start + 1)]
        comment = [c[3:].rstrip() for c in comment]
        comments[func['name']] = '\n'.join(comment).strip()
    return comments
Find the comments for a function .
20,328
def fit_candidates(AggOp, B, tol=1e-10):
    """Fit near-nullspace candidates to form the tentative prolongator.

    Args:
        AggOp (csr_matrix): aggregation operator (N_fine x N_coarse).
        B (array): 2d array of candidate vectors; row count must be a
            multiple of AggOp's row count.
        tol (float): drop tolerance passed to the amg_core kernel.

    Returns:
        tuple: (Q, R) where Q is the BSR tentative prolongator and R holds
            the coarse-level candidate representation.
    """
    if not isspmatrix_csr(AggOp):
        raise TypeError('expected csr_matrix for argument AggOp')
    B = np.asarray(B)
    # Kernel only supports these dtypes; upcast anything else to float64.
    if B.dtype not in ['float32', 'float64', 'complex64', 'complex128']:
        B = np.asarray(B, dtype='float64')
    if len(B.shape) != 2:
        raise ValueError('expected 2d array for argument B')
    if B.shape[0] % AggOp.shape[0] != 0:
        raise ValueError('dimensions of AggOp %s and B %s are '
                         'incompatible' % (AggOp.shape, B.shape))
    N_fine, N_coarse = AggOp.shape
    # K1: dofs per node; K2: number of candidate vectors.
    K1 = int(B.shape[0] / N_fine)
    K2 = B.shape[1]
    R = np.empty((N_coarse, K2, K2), dtype=B.dtype)
    Qx = np.empty((AggOp.nnz, K1, K2), dtype=B.dtype)
    # CSC layout groups entries by aggregate (column), as the kernel expects.
    AggOp_csc = AggOp.tocsc()
    fn = amg_core.fit_candidates
    fn(N_fine, N_coarse, K1, K2,
       AggOp_csc.indptr, AggOp_csc.indices,
       Qx.ravel(), B.ravel(), R.ravel(), tol)
    Q = bsr_matrix((Qx.swapaxes(1, 2).copy(), AggOp_csc.indices,
                    AggOp_csc.indptr),
                   shape=(K2 * N_coarse, K1 * N_fine))
    # Transpose back to (fine x coarse) orientation.
    Q = Q.T.tobsr()
    R = R.reshape(-1, K2)
    return Q, R
Fit near - nullspace candidates to form the tentative prolongator .
20,329
def _rand_sparse ( m , n , density , format = 'csr' ) : nnz = max ( min ( int ( m * n * density ) , m * n ) , 0 ) row = np . random . randint ( low = 0 , high = m - 1 , size = nnz ) col = np . random . randint ( low = 0 , high = n - 1 , size = nnz ) data = np . ones ( nnz , dtype = float ) return sp . sparse . csr_matrix ( ( data , ( row , col ) ) , shape = ( m , n ) )
Construct base function for sprand sprandn .
20,330
def sprand(m, n, density, format='csr'):
    """Return a random sparse matrix.

    Args:
        m, n: matrix dimensions (cast to int).
        density (float): target fraction of nonzero entries.
        format (str): sparse format of the returned matrix.

    Returns:
        Sparse matrix with uniformly random values in [0, 1).
    """
    m, n = int(m), int(n)
    A = _rand_sparse(m, n, density, format='csr')
    # scipy.rand was deprecated and removed from SciPy; draw the values
    # directly from NumPy's RNG instead.
    A.data = np.random.rand(A.nnz)
    return A.asformat(format)
Return a random sparse matrix .
20,331
def linear_elasticity(grid, spacing=None, E=1e5, nu=0.3, format=None):
    """Linear elasticity problem with Q1 finite elements on a regular grid.

    Args:
        grid: grid dimensions; only 2d grids are supported.
        spacing: element spacing, forwarded to q12d.
        E (float): Young's modulus.
        nu (float): Poisson's ratio.
        format: sparse matrix format, forwarded to q12d.

    Raises:
        NotImplementedError: for grids that are not two-dimensional.
    """
    if len(grid) == 2:
        return q12d(grid, spacing=spacing, E=E, nu=nu, format=format)
    # The original raised NotImplemented, which is a value (not an
    # exception class) and therefore produced a confusing TypeError.
    raise NotImplementedError('no support for grid=%s' % str(grid))
Linear elasticity problem discretized with Q1 finite elements on a regular rectangular grid.
20,332
def q12d_local(vertices, lame, mu):
    """Local stiffness matrix for 2d elasticity on a square Q1 element.

    Args:
        vertices: element vertex coordinates (rows indexable 0..3).
        lame: first Lame parameter.
        mu: shear modulus.

    Returns:
        K: 8x8 local stiffness matrix (2 dofs per vertex, interleaved).
    """
    M = lame + 2 * mu
    # Reference-element integral tables for the bilinear basis.
    R_11 = np.matrix([[2, -2, -1, 1], [-2, 2, 1, -1],
                      [-1, 1, 2, -2], [1, -1, -2, 2]]) / 6.0
    R_12 = np.matrix([[1, 1, -1, -1], [-1, -1, 1, 1],
                      [-1, -1, 1, 1], [1, 1, -1, -1]]) / 4.0
    R_22 = np.matrix([[2, 1, -1, -2], [1, 2, -2, -1],
                      [-1, -2, 2, 1], [-2, -1, 1, 2]]) / 6.0
    # Inverse Jacobian of the map from the reference square.
    F = inv(np.vstack((vertices[1] - vertices[0],
                       vertices[3] - vertices[0])))
    K = np.zeros((8, 8))
    # x-x dof coupling.
    E = F.T * np.matrix([[M, 0], [0, mu]]) * F
    K[0::2, 0::2] = E[0, 0] * R_11 + E[0, 1] * R_12 + \
        E[1, 0] * R_12.T + E[1, 1] * R_22
    # y-y dof coupling.
    E = F.T * np.matrix([[mu, 0], [0, M]]) * F
    K[1::2, 1::2] = E[0, 0] * R_11 + E[0, 1] * R_12 + \
        E[1, 0] * R_12.T + E[1, 1] * R_22
    # x-y cross coupling; the transpose fills the symmetric part.
    E = F.T * np.matrix([[0, mu], [lame, 0]]) * F
    K[1::2, 0::2] = E[0, 0] * R_11 + E[0, 1] * R_12 + \
        E[1, 0] * R_12.T + E[1, 1] * R_22
    K[0::2, 1::2] = K[1::2, 0::2].T
    K /= det(F)
    return K
Local stiffness matrix for two dimensional elasticity on a square element .
20,333
def p12d_local(vertices, lame, mu):
    """Local stiffness matrix for P1 elements in 2d.

    Args:
        vertices: (3, 2) array of triangle vertex coordinates.
        lame: first Lame parameter.
        mu: shear modulus.

    Returns:
        K: 6x6 local stiffness matrix.
    """
    assert(vertices.shape == (3, 2))
    # Gradients of the three P1 basis functions via the affine map.
    A = np.vstack((np.ones((1, 3)), vertices.T))
    PhiGrad = inv(A)[:, 1:]
    # Strain-displacement matrix (3 strain components x 6 dofs).
    R = np.zeros((3, 6))
    R[[[0], [2]], [0, 2, 4]] = PhiGrad.T
    R[[[2], [1]], [1, 3, 5]] = PhiGrad.T
    # Isotropic elasticity tensor in Voigt notation.
    C = mu * np.array([[2, 0, 0], [0, 2, 0], [0, 0, 1]]) + \
        lame * np.array([[1, 1, 0], [1, 1, 0], [0, 0, 0]])
    # det(A)/2 equals the (signed) triangle area.
    K = det(A) / 2.0 * np.dot(np.dot(R.T, C), R)
    return K
Local stiffness matrix for P1 elements in 2d .
20,334
def write_basic_mesh(Verts, E2V=None, mesh_type='tri', pdata=None,
                     pvdata=None, cdata=None, cvdata=None,
                     fname='output.vtk'):
    """Write mesh file for basic types of elements.

    Args:
        Verts: vertex coordinate array.
        E2V: element-to-vertex connectivity; None writes a vertex mesh.
        mesh_type (str): 'vertex', 'tri', 'quad', 'tet' or 'hex'.
        pdata, pvdata: point data / point vector data.
        cdata, cvdata: cell data / cell vector data.
        fname (str): output file name.

    Raises:
        ValueError: for an unknown mesh_type.
    """
    if E2V is None:
        mesh_type = 'vertex'
    # VTK cell-type ids for the supported element types.
    map_type_to_key = {'vertex': 1, 'tri': 5, 'quad': 9, 'tet': 10,
                       'hex': 12}
    if mesh_type not in map_type_to_key:
        raise ValueError('unknown mesh_type=%s' % mesh_type)
    key = map_type_to_key[mesh_type]
    if mesh_type == 'vertex':
        # Each vertex is its own cell.
        uidx = np.arange(0, Verts.shape[0]).reshape((Verts.shape[0], 1))
        E2V = {key: uidx}
    else:
        E2V = {key: E2V}
    # write_vtu expects per-cell-type dictionaries.
    if cdata is not None:
        cdata = {key: cdata}
    if cvdata is not None:
        cvdata = {key: cvdata}
    write_vtu(Verts=Verts, Cells=E2V, pdata=pdata, pvdata=pvdata,
              cdata=cdata, cvdata=cvdata, fname=fname)
Write mesh file for basic types of elements .
20,335
def set_attributes(d, elm):
    """Set attributes on ``elm`` from a dictionary of values."""
    for name, value in d.items():
        elm.setAttribute(name, value)
Set attributes from dictionary of values .
20,336
def eliminate_local_candidates(x, AggOp, A, T, Ca=1.0, **kwargs):
    """Eliminate candidates locally.

    Zeroes the entries of candidate vector ``x`` (in place) on aggregates
    where either x itself or its component orthogonal to range(T) has a
    small aggregate-wise energy relative to a weight derived from A.

    Args:
        x: candidate vector (modified in place).
        AggOp: CSR/CSC aggregation operator.
        A: system matrix.
        T: tentative prolongator.
        Ca (float): scaling applied to the aggregate weights.
    """
    if not (isspmatrix_csr(AggOp) or isspmatrix_csc(AggOp)):
        raise TypeError('AggOp must be a CSR or CSC matrix')
    else:
        AggOp = AggOp.tocsc()
    ndof = max(x.shape)
    # Degrees of freedom per node.
    nPDEs = int(ndof / AggOp.shape[0])

    def aggregate_wise_inner_product(z, AggOp, nPDEs, ndof):
        # Sum of squared entries of z per aggregate.
        z = np.ravel(z) * np.ravel(z)
        innerp = np.zeros((1, AggOp.shape[1]), dtype=z.dtype)
        for j in range(nPDEs):
            innerp += z[slice(j, ndof, nPDEs)].reshape(1, -1) * AggOp
        return innerp.reshape(-1, 1)

    def get_aggregate_weights(AggOp, A, z, nPDEs, ndof):
        # Weight ~ aggregate cardinality * z'Az / (n * rho(A)).
        rho = approximate_spectral_radius(A)
        zAz = np.dot(z.reshape(1, -1), A * z.reshape(-1, 1))
        card = nPDEs * (AggOp.indptr[1:] - AggOp.indptr[:-1])
        weights = (np.ravel(card) * zAz) / (A.shape[0] * rho)
        return weights.reshape(-1, 1)

    weights = Ca * get_aggregate_weights(AggOp, A, x, nPDEs, ndof)
    mask1 = aggregate_wise_inner_product(x, AggOp, nPDEs, ndof) <= weights
    # Component of x orthogonal to the current coarse space.
    projected_x = x - T * (T.T * x)
    mask2 = aggregate_wise_inner_product(
        projected_x, AggOp, nPDEs, ndof) <= weights
    # Aggregates flagged by either criterion.
    mask = np.ravel(mask1 + mask2).nonzero()[0]
    if mask.shape[0] > 0:
        mask = nPDEs * AggOp[:, mask].indices
        for j in range(nPDEs):
            x[mask + j] = 0.0
Eliminate candidates locally.
20,337
def poisson(grid, spacing=None, dtype=float, format=None, type='FD'):
    """Return a sparse matrix for the N-dimensional Poisson problem.

    Args:
        grid: tuple of grid dimensions (all >= 1).
        spacing: unused here; kept for interface compatibility.
        dtype: stencil dtype.
        format: sparse format forwarded to stencil_grid.
        type (str): 'FD' finite-difference or 'FE' finite-element stencil.

    Raises:
        ValueError: for an empty or non-positive grid shape.
    """
    grid = tuple(grid)
    N = len(grid)
    if N < 1 or min(grid) < 1:
        raise ValueError('invalid grid shape: %s' % str(grid))
    if type == 'FD':
        # Finite difference: -1 on each axis neighbor, 2N at the center.
        stencil = np.zeros((3,) * N, dtype=dtype)
        for i in range(N):
            stencil[(1,) * i + (0,) + (1,) * (N - i - 1)] = -1
            stencil[(1,) * i + (2,) + (1,) * (N - i - 1)] = -1
        stencil[(1,) * N] = 2 * N
    if type == 'FE':
        # Finite element: -1 everywhere, 3^N - 1 at the center.
        stencil = -np.ones((3,) * N, dtype=dtype)
        stencil[(1,) * N] = 3 ** N - 1
    return stencil_grid(stencil, grid, format=format)
Return a sparse matrix for the N - dimensional Poisson problem .
20,338
def norm(x, pnorm='2'):
    """Return the 2-norm (default) or infinity-norm of vector x.

    Args:
        x: array-like, flattened before the norm is taken.
        pnorm (str): '2' or 'inf'.

    Raises:
        ValueError: for any other pnorm value.
    """
    flat = np.ravel(x)
    if pnorm == '2':
        # .real discards the zero imaginary part for complex input.
        return np.sqrt(np.inner(flat.conj(), flat).real)
    if pnorm == 'inf':
        return np.max(np.abs(flat))
    raise ValueError('Only the 2-norm and infinity-norm are supported')
2 - norm of a vector .
20,339
def approximate_spectral_radius(A, tol=0.01, maxiter=15, restart=5,
                                symmetric=None, initial_guess=None,
                                return_vector=False):
    """Approximate the spectral radius of a matrix.

    Uses restarted Arnoldi/Lanczos (via _approximate_eigenvalues) and caches
    the result on sparse A as ``A.rho``; a cached value is returned directly
    unless return_vector is requested.

    Args:
        A: square float (real or complex) matrix.
        tol (float): relative eigenvalue tolerance.
        maxiter (int): iterations per restart cycle (> 0).
        restart (int): number of restarts (>= 0).
        symmetric: ignored on the computed path (forced to False).
        initial_guess: optional (n, 1) or (n,) starting vector.
        return_vector (bool): also return the approximate eigenvector.

    Returns:
        float, or (float, ndarray) when return_vector is True.
    """
    if not hasattr(A, 'rho') or return_vector:
        symmetric = False
        if maxiter < 1:
            raise ValueError('expected maxiter > 0')
        if restart < 0:
            raise ValueError('expected restart >= 0')
        if A.dtype == int:
            raise ValueError('expected A to be float (complex or real)')
        if A.shape[0] != A.shape[1]:
            raise ValueError('expected square A')
        if initial_guess is None:
            # NOTE(review): sp.rand is the removed scipy.rand alias —
            # confirm the module-level import on modern SciPy.
            v0 = sp.rand(A.shape[1], 1)
            if A.dtype == complex:
                v0 = v0 + 1.0j * sp.rand(A.shape[1], 1)
        else:
            if initial_guess.shape[0] != A.shape[0]:
                raise ValueError('initial_guess and A must have same shape')
            if (len(initial_guess.shape) > 1) and \
                    (initial_guess.shape[1] > 1):
                raise ValueError('initial_guess must be an (n,1) or '
                                 '(n,) vector')
            v0 = initial_guess.reshape(-1, 1)
            v0 = np.array(v0, dtype=A.dtype)
        for j in range(restart + 1):
            [evect, ev, H, V, breakdown_flag] = _approximate_eigenvalues(
                A, tol, maxiter, symmetric, initial_guess=v0)
            nvecs = ev.shape[0]
            # Track the eigenvalue of largest magnitude.
            max_index = np.abs(ev).argmax()
            # Standard Arnoldi residual estimate for that eigenpair.
            error = H[nvecs, nvecs - 1] * evect[-1, max_index]
            if (np.abs(error) / np.abs(ev[max_index]) < tol) or \
                    breakdown_flag:
                # Converged (or breakdown): build the Ritz vector and stop.
                v0 = np.dot(np.hstack(V[:-1]),
                            evect[:, max_index].reshape(-1, 1))
                break
            else:
                # Restart from the current best Ritz vector.
                v0 = np.dot(np.hstack(V[:-1]),
                            evect[:, max_index].reshape(-1, 1))
        rho = np.abs(ev[max_index])
        if sparse.isspmatrix(A):
            # Cache on the matrix for subsequent calls.
            A.rho = rho
        if return_vector:
            return (rho, v0)
        else:
            return rho
    else:
        return A.rho
Approximate the spectral radius of a matrix .
20,340
def condest(A, tol=0.1, maxiter=25, symmetric=False):
    """Estimate the condition number of A.

    Uses the extreme approximate eigenvalues from a short Arnoldi/Lanczos
    run: the ratio of the largest to the smallest eigenvalue magnitude.

    Args:
        A: square matrix.
        tol (float): eigenvalue tolerance.
        maxiter (int): iteration count for the eigenvalue approximation.
        symmetric (bool): use the symmetric (Lanczos) variant.

    Returns:
        float: estimated condition number.
    """
    # The flattened source carried a stray ``r`` token (the prefix of a
    # lost raw docstring), which referenced an undefined name at runtime;
    # it is removed here.
    [evect, ev, H, V, breakdown_flag] = _approximate_eigenvalues(
        A, tol, maxiter, symmetric)
    return np.max([norm(x) for x in ev]) / min([norm(x) for x in ev])
Estimates the condition number of A.
20,341
def cond(A):
    """Return the 2-norm condition number of square matrix A.

    Computed exactly as the ratio of the largest to the smallest singular
    value; sparse input is densified first.
    """
    if A.shape[0] != A.shape[1]:
        raise ValueError('expected square matrix')
    if sparse.isspmatrix(A):
        A = A.todense()
    from scipy.linalg import svd
    singular_values = svd(A)[1]
    return np.max(singular_values) / min(singular_values)
Return condition number of A .
20,342
def ishermitian(A, fast_check=True, tol=1e-6, verbose=False):
    """Return True if A is Hermitian to within tol.

    Args:
        A: dense array/matrix or sparse matrix.
        fast_check (bool): use a cheap randomized test comparing
            <Ax, y> with <x, Ay>; otherwise compare A with A.H entrywise.
        tol (float): tolerance on the (relative) difference.
        verbose (bool): print the measured difference on failure.

    Returns:
        bool: True when the measured difference is below tol.
    """
    # (A stray ``r`` token — the prefix of a lost raw docstring — was
    # removed; it referenced an undefined name at runtime.)
    if not sparse.isspmatrix(A):
        A = np.asmatrix(A)
    if fast_check:
        # Randomized check: for Hermitian A, (Ax)* y == x* (Ay).
        # scipy.rand was removed from SciPy; use NumPy's RNG directly.
        x = np.random.rand(A.shape[0], 1)
        y = np.random.rand(A.shape[0], 1)
        if A.dtype == complex:
            x = x + 1.0j * np.random.rand(A.shape[0], 1)
            y = y + 1.0j * np.random.rand(A.shape[0], 1)
        xAy = np.dot((A * x).conjugate().T, y)
        xAty = np.dot(x.conjugate().T, A * y)
        diff = float(np.abs(xAy - xAty) / np.sqrt(np.abs(xAy * xAty)))
    else:
        # Exact check on the entries of A - A.H.
        if sparse.isspmatrix(A):
            diff = np.ravel((A - A.H).data)
        else:
            diff = np.ravel(A - A.H)
        if np.max(diff.shape) == 0:
            diff = 0
        else:
            diff = np.max(np.abs(diff))
    if diff < tol:
        return True
    if verbose:
        print(diff)
    return False
    # (An unreachable trailing ``return diff`` was removed.)
Return True if A is Hermitian to within tol.
20,343
def pinv_array(a, cond=None):
    """In-place Moore-Penrose pseudoinverse of each block of a 3d array.

    Args:
        a: (n, m, m) array; each m-by-m block a[k] is replaced by its
            pseudoinverse.
        cond: singular-value cutoff passed to LAPACK gelss; derived from
            the dtype's machine epsilon when None.
    """
    n = a.shape[0]
    m = a.shape[1]
    if m == 1:
        # Scalar blocks: pseudoinverse is the reciprocal, with 0 -> 0.
        zero_entries = (a == 0.0).nonzero()[0]
        a[zero_entries] = 1.0
        a[:] = 1.0 / a
        a[zero_entries] = 0.0
        del zero_entries
    else:
        gelss, gelss_lwork = get_lapack_funcs(
            ('gelss', 'gelss_lwork'), (np.ones((1,), dtype=a.dtype)))
        RHS = np.eye(m, dtype=a.dtype)
        lwork = _compute_lwork(gelss_lwork, m, m, m)
        if cond is None:
            t = a.dtype.char
            # np.float and np.longfloat were removed from NumPy; use the
            # concrete scalar types instead.
            eps = np.finfo(np.float64).eps
            feps = np.finfo(np.single).eps
            geps = np.finfo(np.longdouble).eps
            _array_precision = {'f': 0, 'd': 1, 'g': 2,
                                'F': 0, 'D': 1, 'G': 2}
            cond = {0: feps * 1e3, 1: eps * 1e6,
                    2: geps * 1e6}[_array_precision[t]]
        for kk in range(n):
            # Solving a[kk] X = I via least squares yields the
            # pseudoinverse in gelssoutput[1].
            gelssoutput = gelss(a[kk], RHS, cond=cond, lwork=lwork,
                                overwrite_a=True, overwrite_b=False)
            a[kk] = gelssoutput[1]
Calculate the Moore - Penrose pseudo inverse of each block of the three dimensional array a .
20,344
def distance_strength_of_connection(A, V, theta=2.0, relative_drop=True):
    """Distance-based strength of connection.

    Args:
        A: sparse matrix (BSR is amalgamated to one entry per block; other
            formats are converted to CSR).
        V: vertex coordinate array, one row per node.
        theta (float): drop tolerance for the distance filter.
        relative_drop (bool): use the relative filter; otherwise absolute.

    Returns:
        csr_matrix: strength matrix, row-scaled so the largest entry per
            row is 1.
    """
    if sparse.isspmatrix_bsr(A):
        # Collapse each block to a single unit entry.
        sn = int(A.shape[0] / A.blocksize[0])
        u = np.ones((A.data.shape[0],))
        A = sparse.csr_matrix((u, A.indices, A.indptr), shape=(sn, sn))
    if not sparse.isspmatrix_csr(A):
        warn("Implicit conversion of A to csr",
             sparse.SparseEfficiencyWarning)
        A = sparse.csr_matrix(A)
    dim = V.shape[1]
    cols = A.indices
    # Row index of every stored entry.
    rows = np.repeat(np.arange(A.shape[0]),
                     A.indptr[1:] - A.indptr[0:-1])
    # Euclidean distance between the endpoints of each entry.
    C = (V[rows, 0] - V[cols, 0]) ** 2
    for d in range(1, dim):
        C += (V[rows, d] - V[cols, d]) ** 2
    C = np.sqrt(C)
    # Avoid division by (near-)zero distances later.
    C[C < 1e-6] = 1e-6
    C = sparse.csr_matrix((C, A.indices.copy(), A.indptr.copy()),
                          shape=A.shape)
    if relative_drop is True:
        if theta != np.inf:
            amg_core.apply_distance_filter(C.shape[0], theta, C.indptr,
                                           C.indices, C.data)
    else:
        amg_core.apply_absolute_distance_filter(C.shape[0], theta,
                                                C.indptr, C.indices,
                                                C.data)
    C.eliminate_zeros()
    # Ensure a nonzero diagonal, then invert so small distance = strong.
    C = C + sparse.eye(C.shape[0], C.shape[1], format='csr')
    C.data = 1.0 / C.data
    C = scale_rows_by_largest_entry(C)
    return C
Distance based strength - of - connection .
20,345
def classical_strength_of_connection(A, theta=0.0, norm='abs'):
    """Classical strength measure.

    Args:
        A: sparse matrix; BSR input is amalgamated after the computation,
            other non-CSR formats are converted to CSR.
        theta (float): strength threshold in [0, 1].
        norm (str): 'abs' or 'min' variant of the classical measure.

    Returns:
        csr_matrix: strength matrix, row-scaled so the largest entry per
            row is 1.
    """
    if sparse.isspmatrix_bsr(A):
        blocksize = A.blocksize[0]
    else:
        blocksize = 1
    if not sparse.isspmatrix_csr(A):
        warn("Implicit conversion of A to csr",
             sparse.SparseEfficiencyWarning)
        A = sparse.csr_matrix(A)
    if (theta < 0 or theta > 1):
        raise ValueError('expected theta in [0,1]')
    # Output arrays filled by the amg_core kernel.
    Sp = np.empty_like(A.indptr)
    Sj = np.empty_like(A.indices)
    Sx = np.empty_like(A.data)
    if norm == 'abs':
        amg_core.classical_strength_of_connection_abs(
            A.shape[0], theta, A.indptr, A.indices, A.data, Sp, Sj, Sx)
    elif norm == 'min':
        amg_core.classical_strength_of_connection_min(
            A.shape[0], theta, A.indptr, A.indices, A.data, Sp, Sj, Sx)
    else:
        raise ValueError('Unknown norm')
    S = sparse.csr_matrix((Sx, Sj, Sp), shape=A.shape)
    if blocksize > 1:
        # Collapse back to one entry per block for BSR input.
        S = amalgamate(S, blocksize)
    S.data = np.abs(S.data)
    S = scale_rows_by_largest_entry(S)
    return S
Classical Strength Measure .
20,346
def symmetric_strength_of_connection(A, theta=0):
    """Symmetric strength-of-connection measure.

    Parameters
    ----------
    A : csr_matrix or bsr_matrix
        Matrix graph; BSR blocks must be square.
    theta : float
        Nonnegative threshold parameter passed to the amg_core kernel.

    Returns
    -------
    csr_matrix
        Strength matrix scaled so the largest entry in each row is 1.0.
    """
    if theta < 0:
        raise ValueError('expected a positive theta')

    if sparse.isspmatrix_csr(A):
        # Output arrays; the C kernel fills Sp/Sj/Sx in place.
        Sp = np.empty_like(A.indptr)
        Sj = np.empty_like(A.indices)
        Sx = np.empty_like(A.data)

        fn = amg_core.symmetric_strength_of_connection
        fn(A.shape[0], theta, A.indptr, A.indices, A.data, Sp, Sj, Sx)

        S = sparse.csr_matrix((Sx, Sj, Sp), shape=A.shape)
    elif sparse.isspmatrix_bsr(A):
        M, N = A.shape
        R, C = A.blocksize

        if R != C:
            raise ValueError('matrix must have square blocks')

        if theta == 0:
            # theta == 0: every stored block is a connection.
            data = np.ones(len(A.indices), dtype=A.dtype)
            S = sparse.csr_matrix((data, A.indices.copy(), A.indptr.copy()),
                                  shape=(int(M / R), int(N / C)))
        else:
            # Reduce each block to its squared Frobenius norm, then recurse
            # on the resulting scalar (CSR) matrix.
            data = (np.conjugate(A.data) * A.data).reshape(-1, R * C)
            data = data.sum(axis=1)
            A = sparse.csr_matrix((data, A.indices, A.indptr),
                                  shape=(int(M / R), int(N / C)))
            return symmetric_strength_of_connection(A, theta)
    else:
        raise TypeError('expected csr_matrix or bsr_matrix')

    # Standardize so the largest entry of each row is 1.0.
    S.data = np.abs(S.data)
    S = scale_rows_by_largest_entry(S)
    return S
Symmetric Strength Measure .
20,347
def relaxation_vectors(A, R, k, alpha):
    """Generate R test vectors by applying k weighted-Jacobi sweeps to Ax=0.

    Parameters
    ----------
    A : sparse matrix
        System matrix.
    R : int
        Number of random test vectors.
    k : int
        Number of relaxation sweeps per vector.
    alpha : float
        Jacobi damping weight.

    Returns
    -------
    array
        n x R array whose columns are the relaxed vectors.
    """
    n = A.shape[0]
    # Random initial guesses in [-0.5, 0.5), one column per test vector.
    x = np.reshape(np.random.rand(n * R) - 0.5, (n, R), order='F')
    rhs = np.zeros((n, 1))
    # Relax each column toward the (near) null space of A, in place.
    for col in range(R):
        jacobi(A, x[:, col], rhs, iterations=k, omega=alpha)
    return x
Generate test vectors by relaxing on Ax = 0 for some random vectors x .
20,348
def affinity_distance(A, alpha=0.5, R=5, k=20, epsilon=4.0):
    """Affinity-distance strength measure.

    Parameters
    ----------
    A : sparse matrix
        Converted to CSR if necessary.
    alpha : float
        Relaxation weight, > 0.
    R : int
        Number of random test vectors, > 0.
    k : int
        Number of relaxation sweeps, > 0.
    epsilon : float
        Drop tolerance, >= 1.

    Returns
    -------
    csr_matrix
        Strength matrix.
    """
    if not sparse.isspmatrix_csr(A):
        A = sparse.csr_matrix(A)

    # Validate parameters up front.
    if alpha < 0:
        raise ValueError('expected alpha>0')
    if R <= 0 or not isinstance(R, int):
        raise ValueError('expected integer R>0')
    if k <= 0 or not isinstance(k, int):
        raise ValueError('expected integer k>0')
    if epsilon < 1:
        raise ValueError('expected epsilon>1.0')

    def distance(x):
        # 1 - cos^2 of the angle between rows x[i] and x[j] of the
        # test-vector array, one value per stored entry of A.
        (rows, cols) = A.nonzero()
        return 1 - np.sum(x[rows] * x[cols], axis=1)**2 / \
            (np.sum(x[rows]**2, axis=1) * np.sum(x[cols]**2, axis=1))

    return distance_measure_common(A, distance, alpha, R, k, epsilon)
Affinity Distance Strength Measure .
20,349
def algebraic_distance(A, alpha=0.5, R=5, k=20, epsilon=2.0, p=2):
    """Algebraic-distance strength measure.

    Parameters
    ----------
    A : sparse matrix
        Converted to CSR if necessary.
    alpha : float
        Relaxation weight, > 0.
    R : int
        Number of random test vectors, > 0.
    k : int
        Number of relaxation sweeps, > 0.
    epsilon : float
        Drop tolerance, >= 1.
    p : float
        Norm exponent; use numpy.inf for the max norm.

    Returns
    -------
    csr_matrix
        Strength matrix.
    """
    if not sparse.isspmatrix_csr(A):
        A = sparse.csr_matrix(A)

    # Validate parameters up front.
    if alpha < 0:
        raise ValueError('expected alpha>0')
    if R <= 0 or not isinstance(R, int):
        raise ValueError('expected integer R>0')
    if k <= 0 or not isinstance(k, int):
        raise ValueError('expected integer k>0')
    if epsilon < 1:
        raise ValueError('expected epsilon>1.0')
    if p < 1:
        raise ValueError('expected p>1 or equal to numpy.inf')

    def distance(x):
        # p-norm (or max norm) of x[i] - x[j] over the R test vectors,
        # one value per stored entry of A.
        (rows, cols) = A.nonzero()
        if p != np.inf:
            avg = np.sum(np.abs(x[rows] - x[cols])**p, axis=1) / R
            return (avg)**(1.0 / p)
        return np.abs(x[rows] - x[cols]).max(axis=1)

    return distance_measure_common(A, distance, alpha, R, k, epsilon)
Algebraic Distance Strength Measure .
20,350
def distance_measure_common(A, func, alpha, R, k, epsilon):
    """Create a strength matrix from a function applied to relaxation vectors.

    Parameters
    ----------
    A : csr_matrix
        Sparse matrix.
    func : callable
        Maps the n x R relaxation-vector array to one distance per stored
        entry of A.
    alpha, R, k :
        Parameters forwarded to relaxation_vectors.
    epsilon : float
        Drop tolerance for the distance filter.

    Returns
    -------
    csr_matrix
        Strength matrix scaled so the largest entry in each row is 1.0.
    """
    # Create test vectors by relaxing on Ax = 0.
    x = relaxation_vectors(A, R, k, alpha)

    # Distance for each stored connection; diagonal entries are zeroed out.
    d = func(x)
    (rows, cols) = A.nonzero()
    weak = np.where(rows == cols)[0]
    d[weak] = 0
    C = sparse.csr_matrix((d, (rows, cols)), shape=A.shape)
    C.eliminate_zeros()

    # Drop weak connections, then invert distance so close => strong.
    amg_core.apply_distance_filter(C.shape[0], epsilon, C.indptr, C.indices,
                                   C.data)
    C.eliminate_zeros()
    C.data = 1.0 / C.data

    # Put the diagonal back and standardize rows.
    C = C + sparse.eye(C.shape[0], C.shape[1], format='csr')
    C = scale_rows_by_largest_entry(C)
    return C
Create a strength-of-connection matrix from a function applied to relaxation vectors.
20,351
def jacobi_prolongation_smoother(S, T, C, B, omega=4.0/3.0, degree=1,
                                 filter=False, weighting='diagonal'):
    """Jacobi prolongation smoother.

    Smooth the tentative prolongator T with damped-Jacobi sweeps of the
    (optionally filtered) operator S; when filter is True the update is
    projected so the candidates in B remain interpolated.

    Parameters
    ----------
    S : csr_matrix or bsr_matrix
        Matrix used for smoothing.
    T : sparse matrix
        Tentative prolongator.
    C : sparse matrix
        Strength pattern used to filter S (only when filter is True).
    B : array
        Near-nullspace candidate vectors (used only when filter is True).
    omega : float
        Damping parameter; scaled by the estimated spectral radius for
        'diagonal' and 'block' weighting, used as-is for 'local'.
    degree : int
        Number of smoothing passes.
    filter : bool
        If True, restrict S to the pattern of C and enforce the B
        constraints on each update.
    weighting : str
        'diagonal', 'block', or 'local'.

    Returns
    -------
    Smoothed prolongator P.
    """
    # Block weighting only makes sense for BSR with block size > 1.
    if weighting == 'block':
        if sparse.isspmatrix_csr(S):
            weighting = 'diagonal'
        elif sparse.isspmatrix_bsr(S):
            if S.blocksize[0] == 1:
                weighting = 'diagonal'

    if filter:
        # Expand the (amalgamated) strength pattern to the PDE block
        # structure and mask S with it.
        if sparse.isspmatrix_bsr(S):
            numPDEs = S.blocksize[0]
        else:
            numPDEs = 1
        C = UnAmal(C, numPDEs, numPDEs)
        S = S.multiply(C)
        S.eliminate_zeros()

    if weighting == 'diagonal':
        # Scalar diagonal of S, with omega scaled by rho(D^{-1} S).
        D_inv = get_diagonal(S, inv=True)
        D_inv_S = scale_rows(S, D_inv, copy=True)
        D_inv_S = (omega / approximate_spectral_radius(D_inv_S)) * D_inv_S
    elif weighting == 'block':
        # Block diagonal of S, with omega scaled by rho(D^{-1} S).
        D_inv = get_block_diag(S, blocksize=S.blocksize[0], inv_flag=True)
        D_inv = sparse.bsr_matrix((D_inv,
                                   np.arange(D_inv.shape[0]),
                                   np.arange(D_inv.shape[0] + 1)),
                                  shape=S.shape)
        D_inv_S = D_inv * S
        D_inv_S = (omega / approximate_spectral_radius(D_inv_S)) * D_inv_S
    elif weighting == 'local':
        # Row-sum weighting; omega is used directly (no spectral radius).
        D = np.abs(S) * np.ones((S.shape[0], 1), dtype=S.dtype)
        D_inv = np.zeros_like(D)
        D_inv[D != 0] = 1.0 / np.abs(D[D != 0])
        D_inv_S = scale_rows(S, D_inv, copy=True)
        D_inv_S = omega * D_inv_S
    else:
        raise ValueError('Incorrect weighting option')

    if filter:
        # Constrained smoothing: project each update U so the candidates
        # B are still interpolated exactly.
        P = T
        for i in range(degree):
            U = (D_inv_S * P).tobsr(blocksize=P.blocksize)
            BtBinv = compute_BtBinv(B, U)
            Satisfy_Constraints(U, B, BtBinv)
            P = P - U
    else:
        # Unconstrained damped-Jacobi smoothing.
        P = T
        for i in range(degree):
            P = P - (D_inv_S * P)

    return P
Jacobi prolongation smoother .
20,352
def richardson_prolongation_smoother(S, T, omega=4.0/3.0, degree=1):
    """Richardson prolongation smoother.

    Apply ``degree`` Richardson sweeps, P <- (I - weight*S) * P, where
    weight is omega divided by an estimate of the spectral radius of S.

    Parameters
    ----------
    S : sparse matrix
        Smoothing operator (e.g. the system matrix A).
    T : sparse matrix
        Tentative prolongator.
    omega : float
        Damping parameter.
    degree : int
        Number of sweeps.

    Returns
    -------
    Smoothed prolongator.
    """
    weight = omega / approximate_spectral_radius(S)
    smoothed = T
    for _ in range(degree):
        smoothed = smoothed - weight * (S * smoothed)
    return smoothed
Richardson prolongation smoother .
20,353
def matrix_asformat(lvl, name, format, blocksize=None):
    """Return lvl.<name> converted to ``format``, caching the result on lvl.

    The converted matrix is stored on ``lvl`` under the attribute
    ``name + format`` (with the block dimensions appended for 'bsr'), so
    repeated requests return the same object.

    Parameters
    ----------
    lvl : object
        Level object holding the source matrix as attribute ``name``.
    name : str
        Attribute name of the source matrix.
    format : str
        Target sparse format, e.g. 'csr', 'csc', 'bsr'.
    blocksize : tuple or None
        Block dimensions; required when format is 'bsr'.

    Returns
    -------
    The cached matrix in the requested format.
    """
    cache_attr = name + format
    src = getattr(lvl, name)
    if format == 'bsr':
        # Distinguish caches for different block sizes.
        cache_attr += str(blocksize[0]) + str(blocksize[1])

    if not hasattr(lvl, cache_attr):
        if src.format == format and format != 'bsr':
            setattr(lvl, cache_attr, src)
        elif src.format == format and format == 'bsr':
            # Re-block even when already BSR, to honor blocksize.
            setattr(lvl, cache_attr, src.tobsr(blocksize=blocksize))
        else:
            setattr(lvl, cache_attr, getattr(src, 'to' + format)())

    return getattr(lvl, cache_attr)
Set a matrix to a specific format .
20,354
def regular_triangle_mesh(nx, ny):
    """Construct a regular triangular mesh of the unit square.

    Parameters
    ----------
    nx, ny : int
        Number of vertices in the x and y directions (each >= 2).

    Returns
    -------
    Vert : array
        (nx*ny) x 2 vertex coordinates in [0, 1]^2.
    E2V : array
        2*(nx-1)*(ny-1) x 3 element-to-vertex connectivity.
    """
    nx, ny = int(nx), int(ny)
    if nx < 2 or ny < 2:
        raise ValueError('minimum mesh dimension is 2: %s' % ((nx, ny),))

    # Lower-left corner index of each grid cell, then the other corners.
    lower_left = (np.tile(np.arange(0, nx - 1), ny - 1) +
                  np.repeat(np.arange(0, nx * (ny - 1), nx), nx - 1))
    upper_left = lower_left + nx
    upper_right = upper_left + 1
    lower_right = lower_left + 1

    # Vertex coordinates on a uniform grid scaled into the unit square.
    xx, yy = np.meshgrid(np.arange(0, nx, dtype='float'),
                         np.arange(0, ny, dtype='float'))
    Vert = np.vstack((xx.ravel(), yy.ravel())).transpose()
    Vert[:, 0] *= 1.0 / (nx - 1)
    Vert[:, 1] *= 1.0 / (ny - 1)

    # Two triangles per grid cell.
    tri_lower = np.vstack((lower_left, upper_right, upper_left)).transpose()
    tri_upper = np.vstack((lower_left, lower_right, upper_right)).transpose()
    E2V = np.vstack((tri_lower, tri_upper))

    return Vert, E2V
Construct a regular triangular mesh in the unit square .
20,355
def check_input(Verts=None, E2V=None, Agg=None, A=None, splitting=None,
                mesh_type=None):
    """Validate arguments for the local visualization helpers.

    Each argument is checked only when it is not None; a ValueError is
    raised for inconsistent input, and a warning is issued for suspicious
    (but legal) element numbering.
    """
    if Verts is not None and not np.issubdtype(Verts.dtype, np.floating):
        raise ValueError('Verts should be of type float')

    if E2V is not None:
        if not np.issubdtype(E2V.dtype, np.integer):
            raise ValueError('E2V should be of type integer')
        if E2V.min() != 0:
            # Not an error: numbering may simply not start at zero.
            warnings.warn('element indices begin at %d' % E2V.min())

    if Agg is not None and Agg.shape[1] > Agg.shape[0]:
        raise ValueError('Agg should be of size Npts x Nagg')

    if A is not None:
        # A is only checkable against Agg, so Agg must accompany A.
        if Agg is None:
            raise ValueError('problem with check_input')
        if (A.shape[0] != A.shape[1]) or (A.shape[0] != Agg.shape[0]):
            raise ValueError('expected square matrix A\
 and compatible with Agg')

    if splitting is not None:
        splitting = splitting.ravel()
        # splitting is only checkable against Verts, so Verts must be given.
        if Verts is None:
            raise ValueError('problem with check_input')
        if (len(splitting) % Verts.shape[0]) != 0:
            raise ValueError('splitting must be a multiple of N')

    if mesh_type is not None:
        valid_mesh_types = ('vertex', 'tri', 'quad', 'tet', 'hex')
        if mesh_type not in valid_mesh_types:
            raise ValueError('mesh_type should be %s' %
                             ' or '.join(valid_mesh_types))
Check input for local functions .
20,356
def MIS(G, weights, maxiter=None):
    """Compute a maximal independent set of a graph in parallel.

    Parameters
    ----------
    G : csr_matrix
        Graph; the diagonal is removed internally.
    weights : array
        Per-vertex weights used by the parallel kernel to break ties.
    maxiter : int or None
        Maximum number of iterations; None iterates to completion.

    Returns
    -------
    array
        Membership flags as filled in by
        amg_core.maximal_independent_set_parallel.
    """
    if not isspmatrix_csr(G):
        raise TypeError('expected csr_matrix')

    G = remove_diagonal(G)

    # -1 marks "undecided"; the kernel updates mis in place.
    mis = np.empty(G.shape[0], dtype='intc')
    mis[:] = -1

    fn = amg_core.maximal_independent_set_parallel

    if maxiter is None:
        # -1 => no iteration limit.
        fn(G.shape[0], G.indptr, G.indices, -1, 1, 0, mis, weights, -1)
    else:
        if maxiter < 0:
            raise ValueError('maxiter must be >= 0')
        fn(G.shape[0], G.indptr, G.indices, -1, 1, 0, mis, weights, maxiter)

    return mis
Compute a maximal independent set of a graph in parallel .
20,357
def preprocess(S, coloring_method=None):
    """Preprocess splitting functions.

    Parameters
    ----------
    S : csr_matrix
        Strength-of-connection matrix (must be square).
    coloring_method : str or None
        If given, vertices are colored first and the random weights are
        offset by color so same-color vertices are grouped together.

    Returns
    -------
    (weights, G, S, T) : tuple
        Tie-breaking weights, symmetrized unit graph G = pattern(S + S^T),
        the 0/1 pattern of S, and its transpose T.
    """
    if not isspmatrix_csr(S):
        raise TypeError('expected csr_matrix')
    if S.shape[0] != S.shape[1]:
        raise ValueError('expected square matrix, shape=%s' % (S.shape,))

    N = S.shape[0]
    # Keep only the pattern of S (int8 ones).
    S = csr_matrix((np.ones(S.nnz, dtype='int8'), S.indices, S.indptr),
                   shape=(N, N))
    T = S.T.tocsr()

    G = S + T
    G.data[:] = 1

    # Base weight: number of strong transpose connections per vertex.
    weights = np.ravel(T.sum(axis=1))

    if coloring_method is None:
        # Random perturbation breaks ties between equal-degree vertices.
        weights = weights + sp.rand(len(weights))
    else:
        coloring = vertex_coloring(G, coloring_method)
        num_colors = coloring.max() + 1
        # Offset weights by color so that same-color vertices cluster.
        weights = weights + (sp.rand(len(weights)) + coloring) / num_colors

    return (weights, G, S, T)
Preprocess splitting functions .
20,358
def load_example(name):
    """Load an example problem by name from the bundled .mat files.

    Parameters
    ----------
    name : str
        Example name; must be one of ``example_names``.

    Returns
    -------
    dict
        Contents of the MATLAB file as returned by scipy.io.loadmat.
    """
    if name not in example_names:
        raise ValueError('no example with name (%s)' % name)
    return loadmat(os.path.join(example_dir, name + '.mat'),
                   struct_as_record=True)
Load an example problem by name .
20,359
def stencil_grid(S, grid, dtype=None, format=None):
    """Construct a sparse matrix from a local matrix stencil.

    Parameters
    ----------
    S : array
        Stencil with an odd size in every dimension.
    grid : tuple
        Grid dimensions; len(grid) must equal S.ndim.
    dtype : dtype, optional
        Data type of the result.
    format : str, optional
        Sparse format of the result; default is DIA.

    Returns
    -------
    sparse matrix
        prod(grid) x prod(grid) matrix applying the stencil with
        zero boundary conditions.
    """
    S = np.asarray(S, dtype=dtype)
    grid = tuple(grid)

    if not (np.asarray(S.shape) % 2 == 1).all():
        raise ValueError('all stencil dimensions must be odd')
    if len(grid) != np.ndim(S):
        raise ValueError('stencil dimension must equal number of grid\
 dimensions')
    if min(grid) < 1:
        raise ValueError('grid dimensions must be positive')

    N_v = np.prod(grid)     # number of vertices in the mesh
    N_s = (S != 0).sum()    # number of nonzero stencil entries

    # DIA diagonal offset for each nonzero stencil entry.
    diags = np.zeros(N_s, dtype=int)

    # Row strides: the last grid axis varies fastest.
    strides = np.cumprod([1] + list(reversed(grid)))[:-1]
    indices = tuple(i.copy() for i in S.nonzero())
    for i, s in zip(indices, S.shape):
        i -= s // 2   # recenter the stencil offsets around zero
    for stride, coords in zip(strides, reversed(indices)):
        diags += stride * coords

    # One full diagonal of data per nonzero stencil entry.
    data = S[S != 0].repeat(N_v).reshape(N_s, N_v)

    indices = np.vstack(indices).T

    # Zero boundary conditions: zero the parts of each diagonal that
    # would wrap around the grid edges.
    for index, diag in zip(indices, data):
        diag = diag.reshape(grid)
        for n, i in enumerate(index):
            if i > 0:
                s = [slice(None)] * len(grid)
                s[n] = slice(0, i)
                s = tuple(s)
                diag[s] = 0
            elif i < 0:
                s = [slice(None)] * len(grid)
                s[n] = slice(i, None)
                s = tuple(s)
                diag[s] = 0

    # Remove diagonals that lie entirely outside the matrix.
    mask = abs(diags) < N_v
    if not mask.all():
        diags = diags[mask]
        data = data[mask]

    # Sum duplicate diagonals (distinct stencil entries can share an
    # offset when a grid dimension equals 1).
    if len(np.unique(diags)) != len(diags):
        new_diags = np.unique(diags)
        new_data = np.zeros((len(new_diags), data.shape[1]),
                            dtype=data.dtype)
        for dia, dat in zip(diags, data):
            n = np.searchsorted(new_diags, dia)
            new_data[n, :] += dat
        diags = new_diags
        data = new_data

    return sparse.dia_matrix((data, diags),
                             shape=(N_v, N_v)).asformat(format)
Construct a sparse matrix from a local matrix stencil.
20,360
def _CRsweep(A, B, Findex, Cindex, nu, thetacr, method):
    """Perform compatible-relaxation (CR) sweeps on a target vector.

    Relax on Ae = 0 over the F-points and monitor the per-sweep
    convergence factor rho; stop once rho is small relative to thetacr
    or has stagnated after at least nu sweeps.

    Parameters
    ----------
    A : sparse matrix
        System matrix.
    B : array
        Target vectors; column 0 seeds the error vector.
    Findex, Cindex : arrays
        Fine- and coarse-point indices.
    nu : int
        Minimum number of sweeps before the stagnation test applies.
    thetacr : float
        Convergence-factor tolerance.
    method : str
        'habituated' (relax everywhere, then zero C-points) or
        'concurrent' (relax only on F-points).

    Returns
    -------
    (rhok, e) : tuple
        Final convergence-factor estimate and error vector.
    """
    n = A.shape[0]   # problem size
    numax = nu       # NOTE(review): assigned but never used
    z = np.zeros((n,))
    e = deepcopy(B[:, 0])
    e[Cindex] = 0.0
    enorm = norm(e)
    rhok = 1
    it = 0

    while True:
        if method == 'habituated':
            gauss_seidel(A, e, z, iterations=1)
            e[Cindex] = 0.0
        elif method == 'concurrent':
            gauss_seidel_indexed(A, e, z, indices=Findex, iterations=1)
        else:
            raise NotImplementedError('method not recognized: need habituated '
                                      'or concurrent')

        enorm_old = enorm
        enorm = norm(e)
        rhok_old = rhok
        rhok = enorm / enorm_old
        it += 1

        # Stop on fast convergence, or on stagnation after enough sweeps.
        if rhok < 0.1 * thetacr:
            break
        elif ((abs(rhok - rhok_old) / rhok) < 0.1) and (it >= nu):
            break

    return rhok, e
Perform CR sweeps on a target vector .
20,361
def binormalize(A, tol=1e-5, maxiter=10):
    """Binormalize matrix A; attempt to create unit l_1-norm rows.

    Computes a diagonal scaling D*A*D whose rows have (approximately)
    equal norm, then rescales the result to unit norm.

    Parameters
    ----------
    A : sparse matrix
        Real sparse matrix (complex is not implemented).
    tol : float
        Relative standard-deviation tolerance on the scaled row sums.
    maxiter : int
        Maximum number of outer sweeps.

    Returns
    -------
    csr_matrix
        Scaled matrix, or A unchanged if it is nearly un-binormalizable.
    """
    if not isspmatrix(A):
        raise TypeError('expecting sparse matrix A')
    if A.dtype == complex:
        raise NotImplementedError('complex A not implemented')

    n = A.shape[0]
    it = 0
    x = np.ones((n, 1)).ravel()

    # B = A (Hadamard) A, so (B x)_i is the weighted squared row sum.
    B = A.multiply(A).tocsc()
    d = B.diagonal().ravel()

    beta = B * x
    betabar = (1.0 / n) * np.dot(x, beta)
    stdev = rowsum_stdev(x, beta)

    while stdev > tol and it < maxiter:
        for i in range(0, n):
            # Local quadratic c2*t^2 + c1*t + c0 = 0 for the new scaling
            # of row i.
            c2 = (n - 1) * d[i]
            c1 = (n - 2) * (beta[i] - d[i] * x[i])
            c0 = -d[i] * x[i] * x[i] + 2 * beta[i] * x[i] - n * betabar
            if (-c0 < 1e-14):
                print('warning: A nearly un-binormalizable...')
                return A
            else:
                # Numerically stable form of the quadratic formula.
                xnew = (2 * c0) / (-c1 - np.sqrt(c1 * c1 - 4 * c0 * c2))
                dx = xnew - x[i]

                # Incrementally update betabar and beta using column i
                # of B (B is CSC, so indptr slices a column).
                ii = B.indptr[i]
                iii = B.indptr[i + 1]
                dot_Bcol = np.dot(x[B.indices[ii:iii]], B.data[ii:iii])
                betabar = betabar + (1.0 / n) * dx * \
                    (dot_Bcol + beta[i] + d[i] * dx)
                beta[B.indices[ii:iii]] += dx * B.data[ii:iii]
                x[i] = xnew

        stdev = rowsum_stdev(x, beta)
        it += 1

    # Scale A symmetrically by sqrt(x) and normalize to unit row norm.
    d = np.sqrt(x)
    D = spdiags(d.ravel(), [0], n, n)
    C = D * A * D
    C = C.tocsr()
    beta = C.multiply(C).sum(axis=1)
    scale = np.sqrt((1.0 / n) * np.sum(beta))
    return (1 / scale) * C
Binormalize matrix A . Attempt to create unit l_1 norm rows .
20,362
def rowsum_stdev(x, beta):
    """Compute the normalized standard deviation of the scaled row sums.

    Returns std(x*beta) / mean(x*beta); used as the convergence measure
    in ``binormalize``.

    Parameters
    ----------
    x : array
        Current diagonal scaling.
    beta : array
        Row sums B*x.

    Returns
    -------
    float
        Relative standard deviation of the entries of x*beta.
    """
    n = x.size
    rowsums = np.multiply(x, beta)
    mean = np.dot(x, beta) / n
    variance = np.sum(np.power(rowsums - mean, 2)) / n
    return np.sqrt(variance) / mean
Compute the row-sum standard deviation.
20,363
def mls_polynomial_coefficients(rho, degree):
    """Determine the coefficients for an MLS polynomial smoother.

    Parameters
    ----------
    rho : float
        Spectral radius of the iteration matrix.
    degree : int
        Degree of the polynomial.

    Returns
    -------
    (coeffs, roots) : tuple of arrays
        Polynomial coefficients and the inverted Chebyshev-like roots.
    """
    # Chebyshev-like roots on the interval, then inverted.
    k = np.arange(degree, dtype='float64') + 1
    angles = 2 * np.pi * k / (2.0 * degree + 1.0)
    roots = rho / 2.0 * (1.0 - np.cos(angles))
    roots = 1.0 / roots

    # Low-order-first coefficients of the polynomial with those roots.
    S = np.poly(roots)[::-1]

    # Theoretical bound on the spectral radius of S(A)*A.
    SSA_max = rho / ((2.0 * degree + 1.0)**2)

    S_hat = np.polymul(S, S)
    S_hat = np.hstack(((-1.0 / SSA_max) * S_hat, [1]))

    coeffs = np.polymul(S_hat, S)
    coeffs = -coeffs[:-1]

    return (coeffs, roots)
Determine the coefficients for a MLS polynomial smoother .
20,364
def steepest_descent(A, b, x0=None, tol=1e-5, maxiter=None, xtype=None,
                     M=None, callback=None, residuals=None):
    """Steepest descent algorithm for Ax = b with preconditioner M.

    Parameters
    ----------
    A : matrix or operator
        SPD system matrix.
    b : array
        Right-hand side.
    x0 : array, optional
        Initial guess.
    tol : float
        Relative convergence tolerance (rescaled by the initial residual
        norm in the M-inner product).
    maxiter : int, optional
        Maximum iterations; defaults to len(b).
    xtype : deprecated/unused, kept for interface compatibility.
    M : matrix or operator, optional
        Preconditioner.
    callback : callable, optional
        Called as callback(x) after each iteration.
    residuals : list, optional
        Filled with the residual history in the sqrt(r' M r) norm.

    Returns
    -------
    (x, info) : tuple
        Solution and flag: 0 converged, -1 breakdown (indefinite or
        singular operator detected), otherwise the iteration count.
    """
    A, M, x, b, postprocess = make_system(A, M, x0, b)

    # Ensure repeated warnings from this module are always shown.
    # FIX: raw string -- '\.' in a plain string is an invalid escape on
    # modern Python.
    import warnings
    warnings.filterwarnings('always',
                            module=r'pyamg\.krylov\._steepest_descent')

    if maxiter is None:
        maxiter = int(len(b))
    elif maxiter < 1:
        raise ValueError('Number of iterations must be positive')

    # Initial residual in the preconditioned inner product.
    r = b - A * x
    z = M * r
    rz = np.inner(r.conjugate(), z)

    normr = np.sqrt(rz)
    if residuals is not None:
        residuals[:] = [normr]

    # Check initial convergence against ||b||, then rescale tol.
    normb = norm(b)
    if normb == 0.0:
        normb = 1.0
    if normr < tol * normb:
        return (postprocess(x), 0)
    if normr != 0.0:
        tol = tol * normr

    # Recompute the true residual every recompute_r iterations to
    # limit accumulated drift (same idiom as cr()).
    recompute_r = 50

    iter = 0
    while True:
        iter = iter + 1

        q = A * z
        zAz = np.inner(z.conjugate(), q)

        if zAz < 0.0:
            warn("\nIndefinite matrix detected in steepest descent,\
                aborting\n")
            return (postprocess(x), -1)

        alpha = rz / zAz    # step size along the search direction z
        x = x + alpha * z

        # FIX: branches were inverted relative to the recompute_r idiom
        # used by cr(): the cheap update belongs on the non-multiple
        # iterations, the expensive true residual on the multiples.
        if np.mod(iter, recompute_r) and iter > 0:
            r = r - alpha * q
        else:
            r = b - A * x

        z = M * r
        rz = np.inner(r.conjugate(), z)

        if rz < 0.0:
            warn("\nIndefinite preconditioner detected in steepest descent,\
                aborting\n")
            return (postprocess(x), -1)

        normr = np.sqrt(rz)
        if residuals is not None:
            residuals.append(normr)
        if callback is not None:
            callback(x)

        if normr < tol:
            return (postprocess(x), 0)
        elif rz == 0.0:
            warn("\nSingular preconditioner detected in steepest descent,\
                ceasing iterations\n")
            return (postprocess(x), -1)

        if iter == maxiter:
            return (postprocess(x), iter)
Steepest descent algorithm .
20,365
def demo():
    """Outline a basic demo: solve a 2D Poisson problem with SA-AMG,
    with and without CG acceleration, and plot the convergence."""
    A = poisson((100, 100), format='csr')   # 2D Poisson problem
    B = None                                # no candidate vectors supplied
    b = sp.rand(A.shape[0], 1)              # random right-hand side

    mls = smoothed_aggregation_solver(A, B=B)
    print(mls)

    # Standalone multigrid cycling.
    standalone_residuals = []
    x = mls.solve(b, tol=1e-10, accel=None, residuals=standalone_residuals)

    # Multigrid as a preconditioner for CG.
    accelerated_residuals = []
    x = mls.solve(b, tol=1e-10, accel='cg', residuals=accelerated_residuals)
    del x

    # Normalize the histories and compute geometric-mean convergence
    # factors.
    standalone_residuals = \
        np.array(standalone_residuals) / standalone_residuals[0]
    accelerated_residuals = \
        np.array(accelerated_residuals) / accelerated_residuals[0]
    factor1 = standalone_residuals[-1]**(1.0 / len(standalone_residuals))
    factor2 = accelerated_residuals[-1]**(1.0 / len(accelerated_residuals))

    print(" MG convergence factor: %g" % (factor1))
    print("MG with CG acceleration convergence factor: %g" % (factor2))

    # Plot the convergence histories if matplotlib is available.
    try:
        import matplotlib.pyplot as plt
        plt.figure()
        plt.title('Convergence History')
        plt.xlabel('Iteration')
        plt.ylabel('Relative Residual')
        plt.semilogy(standalone_residuals, label='Standalone',
                     linestyle='-', marker='o')
        plt.semilogy(accelerated_residuals, label='Accelerated',
                     linestyle='-', marker='s')
        plt.legend()
        plt.show()
    except ImportError:
        print("\n\nNote: pylab not available on your system.")
Outline basic demo .
20,366
def lloyd_aggregation(C, ratio=0.03, distance='unit', maxiter=10):
    """Aggregate nodes using Lloyd clustering.

    Parameters
    ----------
    C : csr_matrix or csc_matrix
        Strength-of-connection matrix.
    ratio : float
        Fraction of nodes used as seeds, in (0, 1].
    distance : str
        Edge-weight transform: 'unit', 'abs', 'inv', 'same', or 'min'.
    maxiter : int
        Maximum number of Lloyd iterations.

    Returns
    -------
    (AggOp, seeds) : tuple
        CSR aggregation operator (nodes x aggregates) and seed indices.
    """
    if ratio <= 0 or ratio > 1:
        raise ValueError('ratio must be > 0.0 and <= 1.0')
    if not (isspmatrix_csr(C) or isspmatrix_csc(C)):
        raise TypeError('expected csr_matrix or csc_matrix')

    # Build nonnegative edge weights from C according to `distance`.
    # FIX: compare strings with ==, not `is` -- identity of string
    # literals is an interning implementation detail.
    if distance == 'unit':
        data = np.ones_like(C.data).astype(float)
    elif distance == 'abs':
        data = abs(C.data)
    elif distance == 'inv':
        data = 1.0 / abs(C.data)
    elif distance == 'same':
        data = C.data
    elif distance == 'min':
        data = C.data - C.data.min()
    else:
        raise ValueError('unrecognized value distance=%s' % distance)

    if C.dtype == complex:
        data = np.real(data)

    assert (data.min() >= 0)

    G = C.__class__((data, C.indices, C.indptr), shape=C.shape)

    num_seeds = int(min(max(ratio * G.shape[0], 1), G.shape[0]))

    distances, clusters, seeds = lloyd_cluster(G, num_seeds, maxiter=maxiter)

    # Build the aggregation operator: row i has a 1 in column clusters[i].
    row = (clusters >= 0).nonzero()[0]
    col = clusters[row]
    data = np.ones(len(row), dtype='int8')
    AggOp = coo_matrix((data, (row, col)),
                       shape=(G.shape[0], num_seeds)).tocsr()
    return AggOp, seeds
Aggregate nodes using Lloyd Clustering .
20,367
def cr(A, b, x0=None, tol=1e-5, maxiter=None, xtype=None, M=None,
       callback=None, residuals=None):
    """Conjugate Residual algorithm for Ax = b.

    Parameters
    ----------
    A : matrix or operator
        Symmetric (possibly indefinite) system matrix.
    b : array
        Right-hand side.
    x0 : array, optional
        Initial guess.
    tol : float
        Relative convergence tolerance on ||M r||.
    maxiter : int, optional
        Maximum iterations; defaults to int(1.3*len(b)) + 2.
    xtype : deprecated/unused, kept for interface compatibility.
    M : matrix or operator, optional
        Preconditioner.
    callback : callable, optional
        Called as callback(x) each iteration.
    residuals : list, optional
        Filled with the preconditioned residual-norm history.

    Returns
    -------
    (x, info) : tuple
        Solution and flag: 0 converged, -1 breakdown, otherwise the
        iteration count reached at maxiter.
    """
    A, M, x, b, postprocess = make_system(A, M, x0, b)

    # Ensure repeated warnings from this module are always shown.
    # NOTE(review): not a raw string, so '\.' triggers an
    # invalid-escape warning on modern Python.
    import warnings
    warnings.filterwarnings('always', module='pyamg\.krylov\._cr')

    if maxiter is None:
        maxiter = int(1.3 * len(b)) + 2
    elif maxiter < 1:
        raise ValueError('Number of iterations must be positive')

    # Initial residual, preconditioned residual, and search direction.
    r = b - A * x
    z = M * r
    p = z.copy()
    zz = np.inner(z.conjugate(), z)

    normr = np.sqrt(zz)
    if residuals is not None:
        residuals[:] = [normr]

    # Check initial convergence against ||b||, then rescale tol.
    normb = norm(b)
    if normb == 0.0:
        normb = 1.0
    if normr < tol * normb:
        return (postprocess(x), 0)
    if normr != 0.0:
        tol = tol * normr

    # Recompute the true residual every recompute_r iterations.
    recompute_r = 8

    iter = 0
    Az = A * z
    rAz = np.inner(r.conjugate(), Az)
    Ap = A * p

    while True:
        rAz_old = rAz

        # alpha minimizes the residual along the search direction p.
        alpha = rAz / np.inner(Ap.conjugate(), Ap)
        x += alpha * p

        # Cheap residual update, with a periodic exact recompute.
        if np.mod(iter, recompute_r) and iter > 0:
            r -= alpha * Ap
        else:
            r = b - A * x

        z = M * r
        Az = A * z
        rAz = np.inner(r.conjugate(), Az)

        # Update the search direction and its image under A.
        beta = rAz / rAz_old
        p *= beta
        p += z
        Ap *= beta
        Ap += Az

        iter += 1

        zz = np.inner(z.conjugate(), z)
        normr = np.sqrt(zz)
        if residuals is not None:
            residuals.append(normr)
        if callback is not None:
            callback(x)

        if normr < tol:
            return (postprocess(x), 0)
        elif zz == 0.0:
            warn("\nSingular preconditioner detected in CR, ceasing \
                iterations\n")
            return (postprocess(x), -1)

        if iter == maxiter:
            return (postprocess(x), iter)
Conjugate Residual algorithm .
20,368
def BSR_Get_Row(A, i):
    """Return the nonzero values and column indices of row i of BSR matrix A.

    Parameters
    ----------
    A : bsr_matrix
        Matrix with square blocks.
    i : int
        Global row index.

    Returns
    -------
    (z, colindx) : tuple
        Column vector (numpy matrix) of the nonzero entries in row i and
        the corresponding global column indices.
    """
    blocksize = A.blocksize[0]
    BlockIndx = int(i / blocksize)
    rowstart = A.indptr[BlockIndx]
    rowend = A.indptr[BlockIndx + 1]
    localRowIndx = i % blocksize

    # Nonzero entries of row i across the stored blocks of this block row.
    indys = A.data[rowstart:rowend, localRowIndx, :].nonzero()
    z = A.data[rowstart:rowend, localRowIndx, :][indys[0], indys[1]]

    # Translate (block, local column) pairs to global column indices.
    colindx = np.zeros((1, z.__len__()), dtype=np.int32)
    counter = 0
    for j in range(rowstart, rowend):
        coloffset = blocksize * A.indices[j]
        indys = A.data[j, localRowIndx, :].nonzero()[0]
        increment = indys.shape[0]
        colindx[0, counter:(counter + increment)] = coloffset + indys
        counter += increment

    # FIX: np.mat was removed in NumPy 2.0; np.asmatrix returns the same
    # np.matrix type, preserving the interface.
    return np.asmatrix(z).T, colindx[0, :]
Return row i in BSR matrix A .
20,369
def BSR_Row_WriteScalar(A, i, x):
    """Overwrite every stored nonzero in row i of BSR matrix A with x.

    Only entries already stored as nonzero are modified; the sparsity
    pattern is unchanged.  A is modified in place.

    Parameters
    ----------
    A : bsr_matrix
        Matrix with square blocks.
    i : int
        Global row index.
    x : scalar
        Value written to each nonzero location in row i.
    """
    bsize = A.blocksize[0]
    block_row = int(i / bsize)
    first = A.indptr[block_row]
    last = A.indptr[block_row + 1]
    local_row = i % bsize

    # Slicing A.data yields a view, so assigning through it updates A.
    row_slab = A.data[first:last, local_row, :]
    nz = row_slab.nonzero()
    row_slab[nz[0], nz[1]] = x
Write a scalar at each nonzero location in row i of BSR matrix A .
20,370
def BSR_Row_WriteVect(A, i, x):
    """Overwrite the stored nonzeros in row i of BSR matrix A with vector x.

    A is modified in place; the sparsity pattern is unchanged.

    Parameters
    ----------
    A : bsr_matrix
        Matrix with square blocks.
    i : int
        Global row index.
    x : array-like
        One value per stored nonzero in row i, in column order.
    """
    bsize = A.blocksize[0]
    block_row = int(i / bsize)
    first = A.indptr[block_row]
    last = A.indptr[block_row + 1]
    local_row = i % bsize

    # Flatten x so it can be scattered into the nonzero slots.
    x = x.__array__().reshape((max(x.shape),))

    # Slicing A.data yields a view, so assigning through it updates A.
    row_slab = A.data[first:last, local_row, :]
    nz = row_slab.nonzero()
    row_slab[nz[0], nz[1]] = x
Overwrite the nonzeros in row i of BSR matrix A with the vector x .
20,371
def direct_interpolation(A, C, splitting):
    """Create a prolongator using direct interpolation.

    Parameters
    ----------
    A : csr_matrix
        System matrix.
    C : csr_matrix
        Strength-of-connection pattern; values of A are used on it.
    splitting : array
        C/F splitting flags for each node.

    Returns
    -------
    csr_matrix
        Prolongation operator P.
    """
    if not isspmatrix_csr(A):
        raise TypeError('expected csr_matrix for A')
    if not isspmatrix_csr(C):
        raise TypeError('expected csr_matrix for C')

    # Use the values of A restricted to the pattern of C.
    C = C.copy()
    C.data[:] = 1.0
    C = C.multiply(A)

    # Pass 1 computes the row pointer (and thus total nnz) of P.
    Pp = np.empty_like(A.indptr)
    amg_core.rs_direct_interpolation_pass1(A.shape[0], C.indptr, C.indices,
                                           splitting, Pp)

    nnz = Pp[-1]
    Pj = np.empty(nnz, dtype=Pp.dtype)
    Px = np.empty(nnz, dtype=A.dtype)

    # Pass 2 fills in the column indices and values.
    amg_core.rs_direct_interpolation_pass2(A.shape[0], A.indptr, A.indices,
                                           A.data, C.indptr, C.indices,
                                           C.data, splitting, Pp, Pj, Px)

    return csr_matrix((Px, Pj, Pp))
Create prolongator using direct interpolation .
20,372
def apply_givens(Q, v, k):
    """Apply the first k Givens rotations in Q to the vector v, in place.

    Parameters
    ----------
    Q : list
        List of 2x2 rotation matrices.
    v : array
        Vector whose leading entries are rotated in place.
    k : int
        Number of rotations to apply.
    """
    for idx in range(k):
        rotation = Q[idx]
        v[idx:idx + 2] = np.dot(rotation, v[idx:idx + 2])
Apply the first k Givens rotations in Q to v .
20,373
def diffusion_stencil_2d(epsilon=1.0, theta=0.0, type='FE'):
    """Rotated anisotropic diffusion stencil in 2D.

    Parameters
    ----------
    epsilon : float
        Anisotropy ratio.
    theta : float
        Rotation angle in radians.
    type : str
        'FE' (finite element) or 'FD' (finite difference).

    Returns
    -------
    array
        3x3 stencil.

    Raises
    ------
    ValueError
        If ``type`` is not 'FE' or 'FD'.
    """
    eps = float(epsilon)
    theta = float(theta)

    C = np.cos(theta)
    S = np.sin(theta)
    CS = C * S
    CC = C**2
    SS = S**2

    if (type == 'FE'):
        # Bilinear finite elements on a uniform mesh.
        a = (-1 * eps - 1) * CC + (-1 * eps - 1) * SS + (3 * eps - 3) * CS
        b = (2 * eps - 4) * CC + (-4 * eps + 2) * SS
        c = (-1 * eps - 1) * CC + (-1 * eps - 1) * SS + (-3 * eps + 3) * CS
        d = (-4 * eps + 2) * CC + (2 * eps - 4) * SS
        e = (8 * eps + 8) * CC + (8 * eps + 8) * SS

        stencil = np.array([[a, b, c],
                            [d, e, d],
                            [c, b, a]]) / 6.0
    elif type == 'FD':
        # Second-order centered finite differences.
        a = 0.5 * (eps - 1) * CS
        b = -(eps * SS + CC)
        c = -a
        d = -(eps * CC + SS)
        e = 2.0 * (eps + 1)

        stencil = np.array([[a, b, c],
                            [d, e, d],
                            [c, b, a]])
    else:
        # FIX: previously an unknown `type` fell through and raised an
        # opaque NameError on `stencil`.
        raise ValueError("type must be 'FE' or 'FD'")

    return stencil
Rotated Anisotropic diffusion in 2d of the form .
20,374
def _symbolic_rotation_helper():
    """Use SymPy to print the entries of D = Q A Q^T, the rotated 3D
    diffusion tensor used by diffusion_stencil_3d."""
    from sympy import symbols, Matrix

    cpsi, spsi = symbols('cpsi, spsi')
    cth, sth = symbols('cth, sth')
    cphi, sphi = symbols('cphi, sphi')

    # Elementary rotations (Euler angles psi, theta, phi).
    Rpsi = Matrix([[cpsi, spsi, 0], [-spsi, cpsi, 0], [0, 0, 1]])
    Rth = Matrix([[1, 0, 0], [0, cth, sth], [0, -sth, cth]])
    Rphi = Matrix([[cphi, sphi, 0], [-sphi, cphi, 0], [0, 0, 1]])

    Q = Rpsi * Rth * Rphi

    # Anisotropic diagonal diffusion tensor.
    epsy, epsz = symbols('epsy, epsz')
    A = Matrix([[1, 0, 0], [0, epsy, 0], [0, 0, epsz]])

    D = Q * A * Q.T

    for row in range(3):
        for col in range(3):
            print('D[%d, %d] = %s' % (row, col, D[row, col]))
Use SymPy to generate the 3D rotation matrix and products for diffusion_stencil_3d .
20,375
def _symbolic_product_helper():
    """Use SymPy to print grad^T D grad for diffusion_stencil_3d."""
    from sympy import symbols, Matrix

    D11, D12, D13, D21, D22, D23, D31, D32, D33 = \
        symbols('D11, D12, D13, D21, D22, D23, D31, D32, D33')

    # Symbolic diffusion tensor and gradient/divergence operators.
    D = Matrix([[D11, D12, D13], [D21, D22, D23], [D31, D32, D33]])
    grad = Matrix([['dx', 'dy', 'dz']]).T
    div = grad.T

    a = div * D * grad
    print(a[0])
Use SymPy to generate the 3D products for diffusion_stencil_3d .
20,376
def make_system(A, x, b, formats=None):
    """Return A, x, b suitable for relaxation, or raise an exception.

    Parameters
    ----------
    A : sparse matrix
        Square system matrix; converted to an accepted format if needed.
    x : ndarray
        Initial guess, shape (N,) or (N, 1), contiguous.
    b : ndarray
        Right-hand side, shape (N,) or (N, 1).
    formats : list of str or None
        Accepted sparse formats; None accepts A as-is.

    Returns
    -------
    (A, x, b) : tuple
        Matrix in an accepted format, with x and b raveled to 1-D.

    Raises
    ------
    ValueError, TypeError
        On shape, dtype, or contiguity mismatches.
    """
    # Coerce A into one of the accepted sparse formats.
    if formats is not None:
        if formats == ['csr']:
            # Fast path: most relaxation kernels want CSR.
            if sparse.isspmatrix_csr(A):
                pass
            elif sparse.isspmatrix_bsr(A):
                A = A.tocsr()
            else:
                warn('implicit conversion to CSR',
                     sparse.SparseEfficiencyWarning)
                A = sparse.csr_matrix(A)
        elif not (sparse.isspmatrix(A) and A.format in formats):
            A = sparse.csr_matrix(A).asformat(formats[0])

    if not isinstance(x, np.ndarray):
        raise ValueError('expected numpy array for argument x')
    if not isinstance(b, np.ndarray):
        raise ValueError('expected numpy array for argument b')

    M, N = A.shape
    if M != N:
        raise ValueError('expected square matrix')

    if x.shape not in [(M,), (M, 1)]:
        raise ValueError('x has invalid dimensions')
    if b.shape not in [(M,), (M, 1)]:
        raise ValueError('b has invalid dimensions')

    if A.dtype != x.dtype or A.dtype != b.dtype:
        raise TypeError('arguments A, x, and b must have the same dtype')

    if not x.flags.carray:
        raise ValueError('x must be contiguous in memory')

    return A, np.ravel(x), np.ravel(b)
Return A x b suitable for relaxation or raise an exception .
20,377
def sor(A, x, b, omega, iterations=1, sweep='forward'):
    """Perform SOR iteration on the linear system Ax = b (in place on x).

    Each sweep blends one Gauss-Seidel sweep with the previous iterate:
    x <- omega*GS(x) + (1 - omega)*x_old.

    Parameters
    ----------
    A : csr_matrix or bsr_matrix
        System matrix.
    x : ndarray
        Initial guess, updated in place.
    b : ndarray
        Right-hand side.
    omega : float
        Relaxation factor.
    iterations : int
        Number of sweeps.
    sweep : str
        'forward', 'backward', or 'symmetric'.
    """
    A, x, b = make_system(A, x, b, formats=['csr', 'bsr'])

    previous = np.empty_like(x)
    for _ in range(iterations):
        previous[:] = x
        gauss_seidel(A, x, b, iterations=1, sweep=sweep)
        # Blend the Gauss-Seidel result with the previous iterate.
        x *= omega
        previous *= (1 - omega)
        x += previous
Perform SOR iteration on the linear system Ax = b .
20,378
def schwarz(A, x, b, iterations=1, subdomain=None, subdomain_ptr=None,
            inv_subblock=None, inv_subblock_ptr=None, sweep='forward'):
    """Perform overlapping multiplicative Schwarz on Ax = b (in place on x).

    Parameters
    ----------
    A : csr_matrix
        System matrix.
    x : ndarray
        Initial guess, updated in place.
    b : ndarray
        Right-hand side.
    iterations : int
        Number of sweeps.
    subdomain, subdomain_ptr : arrays or None
        CSR-style description of the subdomains; computed if None.
    inv_subblock, inv_subblock_ptr : arrays or None
        Precomputed inverses of the subdomain blocks; computed if None.
    sweep : str
        'forward', 'backward', or 'symmetric'.
    """
    A, x, b = make_system(A, x, b, formats=['csr'])
    A.sort_indices()

    if subdomain is None and inv_subblock is not None:
        raise ValueError("inv_subblock must be None if subdomain is None")

    # Fill in any missing Schwarz parameters (subdomains and inverses).
    (subdomain, subdomain_ptr, inv_subblock, inv_subblock_ptr) = \
        schwarz_parameters(A, subdomain, subdomain_ptr, inv_subblock,
                           inv_subblock_ptr)

    if sweep == 'forward':
        row_start, row_stop, row_step = 0, subdomain_ptr.shape[0] - 1, 1
    elif sweep == 'backward':
        row_start, row_stop, row_step = subdomain_ptr.shape[0] - 2, -1, -1
    elif sweep == 'symmetric':
        # One forward followed by one backward sweep per iteration.
        for iter in range(iterations):
            schwarz(A, x, b, iterations=1, subdomain=subdomain,
                    subdomain_ptr=subdomain_ptr, inv_subblock=inv_subblock,
                    inv_subblock_ptr=inv_subblock_ptr, sweep='forward')
            schwarz(A, x, b, iterations=1, subdomain=subdomain,
                    subdomain_ptr=subdomain_ptr, inv_subblock=inv_subblock,
                    inv_subblock_ptr=inv_subblock_ptr, sweep='backward')
        return
    else:
        raise ValueError("valid sweep directions are 'forward',\
 'backward', and 'symmetric'")

    # The C kernel performs the actual overlapping subdomain solves.
    for iter in range(iterations):
        amg_core.overlapping_schwarz_csr(A.indptr, A.indices, A.data,
                                         x, b, inv_subblock,
                                         inv_subblock_ptr, subdomain,
                                         subdomain_ptr,
                                         subdomain_ptr.shape[0] - 1,
                                         A.shape[0], row_start, row_stop,
                                         row_step)
Perform Overlapping multiplicative Schwarz on the linear system Ax = b .
20,379
def gauss_seidel(A, x, b, iterations=1, sweep='forward'):
    """Perform Gauss-Seidel iteration on Ax = b (in place on x).

    Parameters
    ----------
    A : csr_matrix or bsr_matrix
        System matrix; BSR blocks must be square.
    x : ndarray
        Initial guess, updated in place.
    b : ndarray
        Right-hand side.
    iterations : int
        Number of sweeps.
    sweep : str
        'forward', 'backward', or 'symmetric'.
    """
    A, x, b = make_system(A, x, b, formats=['csr', 'bsr'])

    if sparse.isspmatrix_csr(A):
        blocksize = 1
    else:
        R, C = A.blocksize
        if R != C:
            raise ValueError('BSR blocks must be square')
        blocksize = R

    if sweep == 'forward':
        row_start, row_stop, row_step = 0, int(len(x) / blocksize), 1
    elif sweep == 'backward':
        row_start, row_stop, row_step = int(len(x) / blocksize) - 1, -1, -1
    elif sweep == 'symmetric':
        # One forward followed by one backward sweep per iteration.
        for iter in range(iterations):
            gauss_seidel(A, x, b, iterations=1, sweep='forward')
            gauss_seidel(A, x, b, iterations=1, sweep='backward')
        return
    else:
        raise ValueError("valid sweep directions are 'forward',\
 'backward', and 'symmetric'")

    # Dispatch to the scalar or block C kernel.
    if sparse.isspmatrix_csr(A):
        for iter in range(iterations):
            amg_core.gauss_seidel(A.indptr, A.indices, A.data, x, b,
                                  row_start, row_stop, row_step)
    else:
        for iter in range(iterations):
            amg_core.bsr_gauss_seidel(A.indptr, A.indices, np.ravel(A.data),
                                      x, b, row_start, row_stop, row_step,
                                      R)
Perform Gauss - Seidel iteration on the linear system Ax = b .
20,380
def jacobi(A, x, b, iterations=1, omega=1.0):
    """Perform weighted-Jacobi iteration on Ax = b (in place on x).

    Parameters
    ----------
    A : csr_matrix or bsr_matrix
        System matrix; BSR blocks must be square.
    x : ndarray
        Initial guess, updated in place.
    b : ndarray
        Right-hand side.
    iterations : int
        Number of sweeps.
    omega : float
        Damping parameter.
    """
    A, x, b = make_system(A, x, b, formats=['csr', 'bsr'])

    # Full-range sweep over all rows.
    sweep = slice(None)
    (row_start, row_stop, row_step) = sweep.indices(A.shape[0])

    if (row_stop - row_start) * row_step <= 0:  # no work to do
        return

    temp = np.empty_like(x)

    # Cast omega to the matrix dtype.
    [omega] = type_prep(A.dtype, [omega])

    if sparse.isspmatrix_csr(A):
        for iter in range(iterations):
            amg_core.jacobi(A.indptr, A.indices, A.data, x, b, temp,
                            row_start, row_stop, row_step, omega)
    else:
        R, C = A.blocksize
        if R != C:
            raise ValueError('BSR blocks must be square')
        # Convert the row range to block rows.
        row_start = int(row_start / R)
        row_stop = int(row_stop / R)
        for iter in range(iterations):
            amg_core.bsr_jacobi(A.indptr, A.indices, np.ravel(A.data),
                                x, b, temp, row_start, row_stop,
                                row_step, R, omega)
Perform Jacobi iteration on the linear system Ax = b .
20,381
def block_jacobi(A, x, b, Dinv=None, blocksize=1, iterations=1, omega=1.0):
    """Perform block Jacobi iteration on the linear system Ax = b.

    Updates ``x`` in place.  ``Dinv`` (optional) holds the inverted
    block diagonal of A with shape (n/blocksize, blocksize, blocksize);
    it is computed when not supplied.
    """
    A, x, b = make_system(A, x, b, formats=['csr', 'bsr'])
    A = A.tobsr(blocksize=(blocksize, blocksize))

    n_blocks = int(A.shape[0] / blocksize)
    if Dinv is None:
        Dinv = get_block_diag(A, blocksize=blocksize, inv_flag=True)
    elif Dinv.shape[0] != n_blocks:
        raise ValueError('Dinv and A have incompatible dimensions')
    elif (Dinv.shape[1] != blocksize) or (Dinv.shape[2] != blocksize):
        raise ValueError('Dinv and blocksize are incompatible')

    # Full sweep over every block row.
    sweep = slice(None)
    (row_start, row_stop, row_step) = sweep.indices(n_blocks)
    if (row_stop - row_start) * row_step <= 0:
        return

    temp = np.empty_like(x)
    [omega] = type_prep(A.dtype, [omega])

    for _ in range(iterations):
        amg_core.block_jacobi(A.indptr, A.indices, np.ravel(A.data),
                              x, b, np.ravel(Dinv), temp,
                              row_start, row_stop, row_step,
                              omega, blocksize)
Perform block Jacobi iteration on the linear system Ax = b .
20,382
def block_gauss_seidel(A, x, b, iterations=1, sweep='forward', blocksize=1,
                       Dinv=None):
    """Perform block Gauss-Seidel iteration on the linear system Ax = b.

    Updates ``x`` in place.  ``Dinv`` (optional) holds the inverted block
    diagonal of A; ``sweep`` selects 'forward', 'backward', or 'symmetric'.
    """
    A, x, b = make_system(A, x, b, formats=['csr', 'bsr'])
    A = A.tobsr(blocksize=(blocksize, blocksize))

    if Dinv is None:
        Dinv = get_block_diag(A, blocksize=blocksize, inv_flag=True)
    elif Dinv.shape[0] != int(A.shape[0] / blocksize):
        raise ValueError('Dinv and A have incompatible dimensions')
    elif (Dinv.shape[1] != blocksize) or (Dinv.shape[2] != blocksize):
        raise ValueError('Dinv and blocksize are incompatible')

    n_blocks = int(len(x) / blocksize)
    if sweep == 'forward':
        row_start, row_stop, row_step = 0, n_blocks, 1
    elif sweep == 'backward':
        row_start, row_stop, row_step = n_blocks - 1, -1, -1
    elif sweep == 'symmetric':
        # One symmetric iteration = one forward pass + one backward pass.
        for _ in range(iterations):
            block_gauss_seidel(A, x, b, iterations=1, sweep='forward',
                               blocksize=blocksize, Dinv=Dinv)
            block_gauss_seidel(A, x, b, iterations=1, sweep='backward',
                               blocksize=blocksize, Dinv=Dinv)
        return
    else:
        raise ValueError("valid sweep directions are 'forward', 'backward', and 'symmetric'")

    for _ in range(iterations):
        amg_core.block_gauss_seidel(A.indptr, A.indices, np.ravel(A.data),
                                    x, b, np.ravel(Dinv),
                                    row_start, row_stop, row_step, blocksize)
Perform block Gauss-Seidel iteration on the linear system Ax = b.
20,383
def polynomial(A, x, b, coefficients, iterations=1):
    """Apply a polynomial smoother to the system Ax = b.

    The smoother is x <- x + p(A) r, where r is the current residual and
    p is the polynomial defined by ``coefficients`` (highest order first),
    evaluated with Horner's scheme.  Updates ``x`` in place.
    """
    A, x, b = make_system(A, x, b, formats=None)

    # FIX: the import was inside the iteration loop; it is loop-invariant,
    # so hoist it out of the loop.
    from pyamg.util.linalg import norm

    for i in range(iterations):
        # With a zero initial guess the residual is simply b.
        if norm(x) == 0:
            residual = b
        else:
            residual = (b - A * x)

        # Horner's scheme: h = p(A) * residual.
        h = coefficients[0] * residual
        for c in coefficients[1:]:
            h = c * residual + A * h

        x += h
Apply a polynomial smoother to the system Ax = b .
20,384
def gauss_seidel_indexed(A, x, b, indices, iterations=1, sweep='forward'):
    """Perform indexed Gauss-Seidel iteration on the linear system Ax = b.

    Relaxes only the rows listed in ``indices``, in the order given by
    ``sweep``.  Updates ``x`` in place; ``A`` must be CSR.
    """
    A, x, b = make_system(A, x, b, formats=['csr'])

    indices = np.asarray(indices, dtype='intc')
    num_indices = len(indices)

    if sweep == 'forward':
        row_start, row_stop, row_step = 0, num_indices, 1
    elif sweep == 'backward':
        row_start, row_stop, row_step = num_indices - 1, -1, -1
    elif sweep == 'symmetric':
        # One symmetric iteration = one forward pass + one backward pass.
        for _ in range(iterations):
            gauss_seidel_indexed(A, x, b, indices, iterations=1,
                                 sweep='forward')
            gauss_seidel_indexed(A, x, b, indices, iterations=1,
                                 sweep='backward')
        return
    else:
        raise ValueError("valid sweep directions are 'forward', 'backward', and 'symmetric'")

    for _ in range(iterations):
        amg_core.gauss_seidel_indexed(A.indptr, A.indices, A.data,
                                      x, b, indices,
                                      row_start, row_stop, row_step)
Perform indexed Gauss-Seidel iteration on the linear system Ax = b.
20,385
def jacobi_ne(A, x, b, iterations=1, omega=1.0):
    """Perform Jacobi iterations on the normal equations A A.H x = A.H b.

    Updates ``x`` in place; ``A`` must be CSR.  ``omega`` is the damping
    parameter.
    """
    A, x, b = make_system(A, x, b, formats=['csr'])

    # Full sweep over every row of A.
    sweep = slice(None)
    (row_start, row_stop, row_step) = sweep.indices(A.shape[0])

    temp = np.zeros_like(x)
    # Inverse diagonal for the normal-equations form (norm_eq=2);
    # exact semantics defined by get_diagonal — see pyamg.util.utils.
    Dinv = get_diagonal(A, norm_eq=2, inv=True)
    [omega] = type_prep(A.dtype, [omega])

    for _ in range(iterations):
        # Scaled residual drives the update inside the kernel.
        delta = (np.ravel(b - A * x) * np.ravel(Dinv)).astype(A.dtype)
        amg_core.jacobi_ne(A.indptr, A.indices, A.data, x, b, delta,
                           temp, row_start, row_stop, row_step, omega)
Perform Jacobi iterations on the linear system A A.H x = A.H b.
20,386
def gauss_seidel_ne(A, x, b, iterations=1, sweep='forward', omega=1.0,
                    Dinv=None):
    """Perform Gauss-Seidel iterations on the normal equations A A.H x = b.

    Updates ``x`` in place; ``A`` must be CSR.  ``Dinv`` (optional) is the
    inverse diagonal for the normal-equations form and is computed when
    not supplied.
    """
    A, x, b = make_system(A, x, b, formats=['csr'])

    if Dinv is None:
        Dinv = np.ravel(get_diagonal(A, norm_eq=2, inv=True))

    n = len(x)
    if sweep == 'forward':
        row_start, row_stop, row_step = 0, n, 1
    elif sweep == 'backward':
        row_start, row_stop, row_step = n - 1, -1, -1
    elif sweep == 'symmetric':
        # One symmetric iteration = one forward pass + one backward pass.
        for _ in range(iterations):
            gauss_seidel_ne(A, x, b, iterations=1, sweep='forward',
                            omega=omega, Dinv=Dinv)
            gauss_seidel_ne(A, x, b, iterations=1, sweep='backward',
                            omega=omega, Dinv=Dinv)
        return
    else:
        raise ValueError("valid sweep directions are 'forward', 'backward', and 'symmetric'")

    for _ in range(iterations):
        amg_core.gauss_seidel_ne(A.indptr, A.indices, A.data,
                                 x, b, row_start, row_stop, row_step,
                                 Dinv, omega)
Perform Gauss-Seidel iterations on the linear system A A.H x = b.
20,387
def gauss_seidel_nr(A, x, b, iterations=1, sweep='forward', omega=1.0,
                    Dinv=None):
    """Perform Gauss-Seidel iterations on the normal equations A.H A x = A.H b.

    Updates ``x`` in place; ``A`` must be CSC (the kernel sweeps columns).
    ``Dinv`` (optional) is the inverse diagonal for the NR form.
    """
    A, x, b = make_system(A, x, b, formats=['csc'])

    if Dinv is None:
        Dinv = np.ravel(get_diagonal(A, norm_eq=1, inv=True))

    n = len(x)
    if sweep == 'forward':
        col_start, col_stop, col_step = 0, n, 1
    elif sweep == 'backward':
        col_start, col_stop, col_step = n - 1, -1, -1
    elif sweep == 'symmetric':
        # One symmetric iteration = one forward pass + one backward pass.
        for _ in range(iterations):
            gauss_seidel_nr(A, x, b, iterations=1, sweep='forward',
                            omega=omega, Dinv=Dinv)
            gauss_seidel_nr(A, x, b, iterations=1, sweep='backward',
                            omega=omega, Dinv=Dinv)
        return
    else:
        raise ValueError("valid sweep directions are 'forward', 'backward', and 'symmetric'")

    # The kernel maintains the residual across iterations, so it is
    # computed only once here.
    r = b - A * x

    for _ in range(iterations):
        amg_core.gauss_seidel_nr(A.indptr, A.indices, A.data,
                                 x, r, col_start, col_stop, col_step,
                                 Dinv, omega)
Perform Gauss-Seidel iterations on the linear system A.H A x = A.H b.
20,388
def schwarz_parameters(A, subdomain=None, subdomain_ptr=None,
                       inv_subblock=None, inv_subblock_ptr=None):
    """Set Schwarz parameters for A.

    Computes (or validates) the subdomain decomposition and the
    pseudo-inverted subdomain blocks used by Schwarz relaxation, caches
    them on ``A.schwarz_parameters``, and returns the 4-tuple
    (subdomain, subdomain_ptr, inv_subblock, inv_subblock_ptr).
    """
    # Return cached parameters when they match the requested subdomains.
    if hasattr(A, 'schwarz_parameters'):
        if subdomain is not None and subdomain_ptr is not None:
            if np.array(A.schwarz_parameters[0] == subdomain).all() and \
               np.array(A.schwarz_parameters[1] == subdomain_ptr).all():
                return A.schwarz_parameters
        else:
            return A.schwarz_parameters

    # Default subdomains: the sparsity pattern of each row of A.
    if subdomain is None or subdomain_ptr is None:
        subdomain_ptr = A.indptr.copy()
        subdomain = A.indices.copy()

    # Extract each subdomain block and replace it with its pseudo-inverse.
    if inv_subblock is None or inv_subblock_ptr is None:
        inv_subblock_ptr = np.zeros(subdomain_ptr.shape,
                                    dtype=A.indices.dtype)
        blocksize = (subdomain_ptr[1:] - subdomain_ptr[:-1])
        inv_subblock_ptr[1:] = np.cumsum(blocksize * blocksize)
        inv_subblock = np.zeros((inv_subblock_ptr[-1],), dtype=A.dtype)
        amg_core.extract_subblocks(A.indptr, A.indices, A.data, inv_subblock,
                                   inv_subblock_ptr, subdomain, subdomain_ptr,
                                   int(subdomain_ptr.shape[0] - 1),
                                   A.shape[0])
        # Rank-determination cutoff for gelss, chosen per precision class.
        # FIX: np.float and np.longfloat were removed/deprecated in NumPy;
        # the builtin float and np.longdouble are the exact equivalents.
        t = A.dtype.char
        eps = np.finfo(float).eps
        feps = np.finfo(np.single).eps
        geps = np.finfo(np.longdouble).eps
        _array_precision = {'f': 0, 'd': 1, 'g': 2, 'F': 0, 'D': 1, 'G': 2}
        cond = {0: feps * 1e3, 1: eps * 1e6,
                2: geps * 1e6}[_array_precision[t]]
        # Least-squares pseudo-inverse of each block via LAPACK gelss.
        my_pinv, = la.get_lapack_funcs(['gelss'],
                                       (np.ones((1,), dtype=A.dtype)))
        for i in range(subdomain_ptr.shape[0] - 1):
            m = blocksize[i]
            # FIX: scipy no longer re-exports eye at the top level
            # (sp.eye); np.eye builds the same dense identity RHS.
            rhs = np.eye(m, m, dtype=A.dtype)
            j0 = inv_subblock_ptr[i]
            j1 = inv_subblock_ptr[i + 1]
            gelssoutput = my_pinv(inv_subblock[j0:j1].reshape(m, m),
                                  rhs, cond=cond,
                                  overwrite_a=True, overwrite_b=True)
            inv_subblock[j0:j1] = np.ravel(gelssoutput[1])

    A.schwarz_parameters = (subdomain, subdomain_ptr, inv_subblock,
                            inv_subblock_ptr)
    return A.schwarz_parameters
Set Schwarz parameters .
20,389
def cg(A, b, x0=None, tol=1e-5, maxiter=None, xtype=None, M=None,
       callback=None, residuals=None):
    """Conjugate Gradient algorithm for Ax = b.

    Parameters
    ----------
    A, b : system operator and right-hand side
    x0 : initial guess (default zero via make_system)
    tol : relative convergence tolerance
    maxiter : maximum iterations (default 1.3*len(b) + 2)
    xtype : deprecated, unused
    M : preconditioner
    callback : called with x after each iteration
    residuals : if given, filled with the residual-norm history

    Returns
    -------
    (x, info) : info == 0 on convergence, -1 on breakdown, iter count
        when maxiter is reached.
    """
    A, M, x, b, postprocess = make_system(A, M, x0, b)

    import warnings
    # FIX: raw string — '\.' is an invalid escape in a plain string
    # literal (SyntaxWarning on modern Python, future SyntaxError).
    warnings.filterwarnings('always', module=r'pyamg\.krylov\._cg')

    if maxiter is None:
        maxiter = int(1.3 * len(b)) + 2
    elif maxiter < 1:
        raise ValueError('Number of iterations must be positive')

    # Initial residual and its preconditioned counterpart.
    r = b - A * x
    z = M * r
    p = z.copy()
    rz = np.inner(r.conjugate(), z)
    normr = np.sqrt(rz)

    if residuals is not None:
        residuals[:] = [normr]

    # Scale tolerance relative to ||b|| (guarding b == 0), then make it
    # absolute relative to the initial residual norm.
    normb = norm(b)
    if normb == 0.0:
        normb = 1.0
    if normr < tol * normb:
        return (postprocess(x), 0)
    if normr != 0.0:
        tol = tol * normr

    # Recompute the true residual periodically to limit FP drift.
    recompute_r = 8
    iter = 0
    while True:
        Ap = A * p
        rz_old = rz
        pAp = np.inner(Ap.conjugate(), p)
        # pAp <= 0 means A is not SPD; CG theory no longer applies.
        if pAp < 0.0:
            warn("\nIndefinite matrix detected in CG, aborting\n")
            return (postprocess(x), -1)

        alpha = rz / pAp  # step size
        x += alpha * p

        if np.mod(iter, recompute_r) and iter > 0:
            r -= alpha * Ap  # cheap recursive residual update
        else:
            r = b - A * x    # exact residual every recompute_r steps

        z = M * r
        rz = np.inner(r.conjugate(), z)
        if rz < 0.0:
            warn("\nIndefinite preconditioner detected in CG, aborting\n")
            return (postprocess(x), -1)

        beta = rz / rz_old  # search-direction update
        p *= beta
        p += z

        iter += 1
        normr = np.sqrt(rz)
        if residuals is not None:
            residuals.append(normr)
        if callback is not None:
            callback(x)
        if normr < tol:
            return (postprocess(x), 0)
        elif rz == 0.0:
            # rz == 0 with nonzero residual: preconditioner is singular.
            warn("\nSingular preconditioner detected in CG, ceasing iterations\n")
            return (postprocess(x), -1)
        if iter == maxiter:
            return (postprocess(x), iter)
Conjugate Gradient algorithm .
20,390
def bicgstab(A, b, x0=None, tol=1e-5, maxiter=None, xtype=None, M=None,
             callback=None, residuals=None):
    """Biconjugate Gradient Algorithm with Stabilization for Ax = b.

    Parameters
    ----------
    A, b : system operator and right-hand side
    x0 : initial guess (default zero via make_system)
    tol : relative convergence tolerance
    maxiter : maximum iterations (default len(x) + 5)
    xtype : deprecated; used only for the trivial 1x1 fast path
    M : preconditioner
    callback : called with x after each iteration
    residuals : if given, filled with the residual-norm history

    Returns
    -------
    (x, info) : info == 0 on convergence, iter count when maxiter is
        reached.
    """
    A, M, x, b, postprocess = make_system(A, M, x0, b)

    import warnings
    # FIX: raw string — '\.' is an invalid escape in a plain string
    # literal (SyntaxWarning on modern Python, future SyntaxError).
    warnings.filterwarnings('always', module=r'pyamg\.krylov\._bicgstab')

    if maxiter is None:
        maxiter = len(x) + 5
    elif maxiter < 1:
        raise ValueError('Number of iterations must be positive')

    r = b - A * x
    normr = norm(r)
    if residuals is not None:
        residuals[:] = [normr]

    # Scale tolerance relative to ||b|| (guarding b == 0), then make it
    # absolute relative to the initial residual norm.
    normb = norm(b)
    if normb == 0.0:
        normb = 1.0
    if normr < tol * normb:
        return (postprocess(x), 0)
    if normr != 0.0:
        tol = tol * normr

    # Trivial 1x1 system: solve directly.
    if A.shape[0] == 1:
        entry = np.ravel(A * np.array([1.0], dtype=xtype))
        return (postprocess(b / entry), 0)

    # Shadow residual and initial search direction.
    rstar = r.copy()
    p = r.copy()
    rrstarOld = np.inner(rstar.conjugate(), r)

    iter = 0
    while True:
        Mp = M * p
        AMp = A * Mp
        alpha = rrstarOld / np.inner(rstar.conjugate(), AMp)
        # Intermediate residual s and its stabilization step omega.
        s = r - alpha * AMp
        Ms = M * s
        AMs = A * Ms
        omega = np.inner(AMs.conjugate(), s) / np.inner(AMs.conjugate(), AMs)
        x = x + alpha * Mp + omega * Ms
        r = s - omega * AMs
        rrstarNew = np.inner(rstar.conjugate(), r)
        beta = (rrstarNew / rrstarOld) * (alpha / omega)
        rrstarOld = rrstarNew
        p = r + beta * (p - omega * AMp)

        iter += 1
        normr = norm(r)
        if residuals is not None:
            residuals.append(normr)
        if callback is not None:
            callback(x)
        if normr < tol:
            return (postprocess(x), 0)
        if iter == maxiter:
            return (postprocess(x), iter)
Biconjugate Gradient Algorithm with Stabilization.
20,391
def operator_complexity(self):
    """Operator complexity of this multigrid hierarchy.

    Defined as the total number of nonzeros across all level operators
    divided by the number of nonzeros in the finest-level operator.
    """
    total_nnz = sum(level.A.nnz for level in self.levels)
    return total_nnz / float(self.levels[0].A.nnz)
Operator complexity of this multigrid hierarchy .
20,392
def grid_complexity(self):
    """Grid complexity of this multigrid hierarchy.

    Defined as the total number of unknowns across all levels divided
    by the number of unknowns on the finest level.
    """
    total_rows = sum(level.A.shape[0] for level in self.levels)
    return total_rows / float(self.levels[0].A.shape[0])
Grid complexity of this multigrid hierarchy .
20,393
def aspreconditioner(self, cycle='V'):
    """Create a preconditioner using this multigrid cycle.

    Returns a scipy LinearOperator whose matvec applies a single
    multigrid cycle of the requested type.
    """
    from scipy.sparse.linalg import LinearOperator

    finest = self.levels[0].A

    def matvec(b):
        # One cycle with a tight tolerance: the iteration count (1),
        # not the tolerance, limits the work per application.
        return self.solve(b, maxiter=1, cycle=cycle, tol=1e-12)

    return LinearOperator(finest.shape, matvec, dtype=finest.dtype)
Create a preconditioner using this multigrid cycle .
20,394
def __solve(self, lvl, x, b, cycle):
    """Multigrid cycling.

    Recursively applies one multigrid cycle at level ``lvl`` to the
    system with right-hand side ``b``, updating ``x`` in place.
    ``cycle`` is one of 'V', 'W', 'F', or 'AMLI'.
    """
    A = self.levels[lvl].A
    # Pre-smooth on this level.
    self.levels[lvl].presmoother(A, x, b)
    # Restrict the residual to the next-coarser level.
    residual = b - A * x
    coarse_b = self.levels[lvl].R * residual
    coarse_x = np.zeros_like(coarse_b)
    if lvl == len(self.levels) - 2:
        # Next level is the coarsest: solve it directly.
        coarse_x[:] = self.coarse_solver(self.levels[-1].A, coarse_b)
    else:
        if cycle == 'V':
            # One recursive visit per level.
            self.__solve(lvl + 1, coarse_x, coarse_b, 'V')
        elif cycle == 'W':
            # Two recursive visits per level.
            self.__solve(lvl + 1, coarse_x, coarse_b, cycle)
            self.__solve(lvl + 1, coarse_x, coarse_b, cycle)
        elif cycle == 'F':
            # F-cycle: one F recursion followed by a V recursion.
            self.__solve(lvl + 1, coarse_x, coarse_b, cycle)
            self.__solve(lvl + 1, coarse_x, coarse_b, 'V')
        elif cycle == "AMLI":
            # AMLI: build nAMLI Ac-conjugate search directions from
            # recursive coarse solves and combine them.
            nAMLI = 2
            Ac = self.levels[lvl + 1].A
            p = np.zeros((nAMLI, coarse_b.shape[0]), dtype=coarse_b.dtype)
            beta = np.zeros((nAMLI, nAMLI), dtype=coarse_b.dtype)
            for k in range(nAMLI):
                # Start from ones, then overwrite with the recursive solve.
                p[k, :] = 1
                self.__solve(lvl + 1, p[k, :].reshape(coarse_b.shape),
                             coarse_b, cycle)
                # Orthogonalize against previous directions (w.r.t. Ac).
                for j in range(k):
                    beta[k, j] = np.inner(p[j, :].conj(), Ac * p[k, :]) / \
                        np.inner(p[j, :].conj(), Ac * p[j, :])
                    p[k, :] -= beta[k, j] * p[j, :]
                # Line search along p[k] and update solution and RHS.
                Ap = Ac * p[k, :]
                alpha = np.inner(p[k, :].conj(), np.ravel(coarse_b)) / \
                    np.inner(p[k, :].conj(), Ap)
                coarse_x += alpha * p[k, :].reshape(coarse_x.shape)
                coarse_b -= alpha * Ap.reshape(coarse_b.shape)
        else:
            raise TypeError('Unrecognized cycle type (%s)' % cycle)
    # Interpolate the coarse correction and post-smooth.
    x += self.levels[lvl].P * coarse_x
    self.levels[lvl].postsmoother(A, x, b)
Multigrid cycling .
20,395
def maximal_independent_set(G, algo='serial', k=None):
    """Compute a maximal independent vertex set for a graph.

    Parameters
    ----------
    G : sparse graph (converted via asgraph)
    algo : 'serial' or 'parallel' (ignored when k is given)
    k : if given, compute a maximal independent set of distance-k
        vertices using the parallel algorithm.

    Returns
    -------
    mis : intc array; membership flags per vertex as set by amg_core.
    """
    G = asgraph(G)
    N = G.shape[0]

    mis = np.empty(N, dtype='intc')
    mis[:] = -1  # initialize all vertices to "undecided"

    if k is None:
        if algo == 'serial':
            fn = amg_core.maximal_independent_set_serial
            fn(N, G.indptr, G.indices, -1, 1, 0, mis)
        elif algo == 'parallel':
            fn = amg_core.maximal_independent_set_parallel
            # FIX: sp.rand (scipy.rand) was removed from the scipy
            # namespace; it was an alias of numpy.random.rand, which is
            # used here directly with identical behavior.
            fn(N, G.indptr, G.indices, -1, 1, 0, mis, np.random.rand(N), -1)
        else:
            raise ValueError('unknown algorithm (%s)' % algo)
    else:
        fn = amg_core.maximal_independent_set_k_parallel
        fn(N, G.indptr, G.indices, k, mis, np.random.rand(N), -1)

    return mis
Compute a maximal independent vertex set for a graph .
20,396
def vertex_coloring(G, method='MIS'):
    """Compute a vertex coloring of a graph.

    Parameters
    ----------
    G : sparse graph (converted via asgraph)
    method : 'MIS' (maximal independent set), 'JP' (Jones-Plassmann),
        or 'LDF' (largest degree first)

    Returns
    -------
    coloring : intc array assigning a color index to each vertex.
    """
    G = asgraph(G)
    N = G.shape[0]

    coloring = np.empty(N, dtype='intc')

    if method == 'MIS':
        fn = amg_core.vertex_coloring_mis
        fn(N, G.indptr, G.indices, coloring)
    elif method == 'JP':
        fn = amg_core.vertex_coloring_jones_plassmann
        # FIX: sp.rand (scipy.rand) was removed from the scipy
        # namespace; it was an alias of numpy.random.rand, which is
        # used here directly with identical behavior.
        fn(N, G.indptr, G.indices, coloring, np.random.rand(N))
    elif method == 'LDF':
        fn = amg_core.vertex_coloring_LDF
        fn(N, G.indptr, G.indices, coloring, np.random.rand(N))
    else:
        raise ValueError('unknown method (%s)' % method)

    return coloring
Compute a vertex coloring of a graph .
20,397
def bellman_ford(G, seeds, maxiter=None):
    """Bellman-Ford iteration.

    Computes shortest distances from each vertex to its nearest seed,
    iterating until the distances stop changing or ``maxiter``
    iterations have run.

    Returns
    -------
    (distances, nearest_seed) : per-vertex distance and seed index
        (-1 where unreachable).
    """
    G = asgraph(G)
    N = G.shape[0]

    if maxiter is not None and maxiter < 0:
        raise ValueError('maxiter must be positive')
    if G.dtype == complex:
        raise ValueError('Bellman-Ford algorithm only defined for real weights')

    seeds = np.asarray(seeds, dtype='intc')

    distances = np.empty(N, dtype=G.dtype)
    distances[:] = max_value(G.dtype)  # "infinity" for this dtype
    distances[seeds] = 0

    nearest_seed = np.empty(N, dtype='intc')
    nearest_seed[:] = -1
    nearest_seed[seeds] = seeds

    old_distances = np.empty_like(distances)

    iter = 0
    while maxiter is None or iter < maxiter:
        old_distances[:] = distances

        amg_core.bellman_ford(N, G.indptr, G.indices, G.data, distances,
                              nearest_seed)

        # Converged: no distance changed this sweep.
        if (old_distances == distances).all():
            break

        # FIX: the counter was never incremented, so maxiter was
        # silently ignored and the loop was bounded only by convergence.
        iter += 1

    return (distances, nearest_seed)
Bellman - Ford iteration .
20,398
def lloyd_cluster(G, seeds, maxiter=10):
    """Perform Lloyd clustering on a graph with weighted edges.

    ``seeds`` is either a scalar count (random seed vertices are drawn)
    or an explicit array of seed vertex indices.  Iterates until the
    seeds stop moving or ``maxiter`` passes have run.

    Returns
    -------
    (distances, clusters, seeds)
    """
    G = asgraph(G)
    N = G.shape[0]

    # For complex weights, cluster on the edge-weight magnitudes.
    if G.dtype.kind == 'c':
        G = np.abs(G)

    if np.isscalar(seeds):
        # Scalar: draw that many distinct random vertices as seeds.
        seeds = np.random.permutation(N)[:seeds]
        seeds = seeds.astype('intc')
    else:
        seeds = np.array(seeds, dtype='intc')

    if len(seeds) < 1:
        raise ValueError('at least one seed is required')
    if seeds.min() < 0:
        raise ValueError('invalid seed index (%d)' % seeds.min())
    if seeds.max() >= N:
        raise ValueError('invalid seed index (%d)' % seeds.max())

    clusters = np.empty(N, dtype='intc')
    distances = np.empty(N, dtype=G.dtype)

    for _ in range(maxiter):
        last_seeds = seeds.copy()

        amg_core.lloyd_cluster(N, G.indptr, G.indices, G.data,
                               len(seeds), distances, clusters, seeds)

        # Converged: the seed set is unchanged.
        if (seeds == last_seeds).all():
            break

    return (distances, clusters, seeds)
Perform Lloyd clustering on graph with weighted edges .
20,399
def breadth_first_search(G, seed):
    """Breadth-first search of a graph.

    Returns
    -------
    order : vertex indices in the order they were visited
    level : BFS depth of each vertex from ``seed`` (-1 set initially,
        left in place by the kernel for unvisited vertices)
    """
    G = asgraph(G)
    num_nodes = G.shape[0]

    order = np.empty(num_nodes, G.indptr.dtype)
    level = np.empty(num_nodes, G.indptr.dtype)
    level[:] = -1  # mark every vertex unvisited

    bfs = amg_core.breadth_first_search
    bfs(G.indptr, G.indices, int(seed), order, level)

    return order, level
Breadth First search of a graph .