text
stringlengths
81
112k
def is_period_alias(period):
    """Check if a given period is possibly an alias.

    Parameters
    ----------
    period : float
        A period to test if it is a possible alias or not.

    Returns
    -------
    is_alias : boolean
        True if the given period is in a range of period alias.
    """
    # Based on the period vs periodSN plot of EROS-2 dataset (Kim+ 2014).
    # Period alias occurs mostly at ~1 and ~30 (days). Each base range is
    # tested at its 1/1 .. 1/5 harmonics, exactly as the original chain of
    # explicit comparisons did.
    alias_ranges = (
        # One-day and one-month aliases.
        (.99, 1.004), (1.03, 1.04), (29.2, 29.9),
        # From candidates from the two fields 01, 08.
        # All of them are close to one day (or sidereal) alias.
        (0.96465, 0.96485), (0.96725, 0.96745), (0.98190, 0.98230),
        (1.01034, 1.01076), (1.01568, 1.01604), (1.01718, 1.01742),
        # From the all candidates from the entire LMC fields.
        # Some of these could be overlapped with the above cuts.
        (0.50776, 0.50861), (0.96434, 0.9652), (0.96688, 0.96731),
        (1.0722, 1.0729), (27.1, 27.5),
    )
    for i in range(1, 6):
        for low, high in alias_ranges:
            if low / i < period < high / i:
                return True
    # Not in the range of any alias.
    return False
def save(filepath, obj, on_overwrite='ignore'):
    """
    Serialize `object` to a file denoted by `filepath`.

    Parameters
    ----------
    filepath : str
        A filename. If the suffix is `.joblib` and joblib can be imported,
        `joblib.dump` is used in place of the regular pickling mechanisms;
        this results in much faster saves by saving arrays as separate .npy
        files on disk. If the file suffix is `.npy` then `numpy.save` is
        attempted on `obj`. Otherwise, (c)pickle is used.

    obj : object
        A Python object to be serialized.

    on_overwrite : str
        A string specifying what to do if the file already exists.
        ignore: just overwrite it
        backup: make a copy of the file (<filepath>.bak) and delete it
            when done saving the new copy. This allows recovery of the old
            version of the file if saving the new one fails.
    """
    filepath = preprocess(filepath)

    if os.path.exists(filepath):
        if on_overwrite == 'backup':
            backup = filepath + '.bak'
            shutil.move(filepath, backup)
            save(filepath, obj)
            try:
                os.remove(backup)
            except Exception as e:
                # Best-effort cleanup: the new file was saved successfully,
                # so a failure to delete the backup is only worth a warning.
                warnings.warn("Got an error while trying to remove " +
                              backup + ":" + str(e))
            return
        else:
            assert on_overwrite == 'ignore'

    try:
        _save(filepath, obj)
    except RuntimeError as e:
        # Sometimes for large theano graphs, pickle/cPickle exceed the
        # maximum recursion depth. The workaround employed here is the one
        # recommended on stackexchange:
        # http://stackoverflow.com/questions/2134706/hitting-maximum-recursion-depth-using-pythons-pickle-cpickle
        # Obviously this does not scale and could cause a crash
        # but I don't see another solution short of writing our
        # own implementation of pickle.
        if str(e).find('recursion') != -1:
            warnings.warn('pylearn2.utils.save encountered the following '
                          'error: ' + str(e) +
                          '\nAttempting to resolve this error by calling ' +
                          'sys.setrecursionlimit and retrying')
            old_limit = sys.getrecursionlimit()
            try:
                sys.setrecursionlimit(50000)
                _save(filepath, obj)
            finally:
                sys.setrecursionlimit(old_limit)
        else:
            # Previously, non-recursion RuntimeErrors were silently
            # swallowed; propagate them so failures are visible.
            raise
def get_pickle_protocol():
    """
    Allow configuration of the pickle protocol on a per-machine basis.

    This way, if you use multiple platforms with different versions of
    pickle, you can configure each of them to use the highest protocol
    supported by all of the machines that you want to be able to
    communicate.
    """
    # Default to '0' when the variable is undefined: protocol 0 is the
    # cPickle default and maximally portable.
    protocol_str = os.environ.get('PYLEARN2_PICKLE_PROTOCOL', '0')
    if protocol_str == 'pickle.HIGHEST_PROTOCOL':
        return pickle.HIGHEST_PROTOCOL
    return int(protocol_str)
def load_train_file(config_file_path):
    """Loads and parses a yaml file for a Train object.

    Publishes the relevant training environment variables before handing
    the file to the yaml parser.
    """
    from pylearn2.config import yaml_parse

    suffix_to_strip = '.yaml'
    if config_file_path.endswith(suffix_to_strip):
        config_file_full_stem = config_file_path[:-len(suffix_to_strip)]
    else:
        config_file_full_stem = config_file_path

    # Publish under both the deprecated and the accepted variable name.
    for varname in ("PYLEARN2_TRAIN_FILE_NAME",
                    "PYLEARN2_TRAIN_FILE_FULL_STEM"):
        environ.putenv(varname, config_file_full_stem)

    # Note: paths are split on '/' (not os.sep), matching the original.
    parts = config_file_path.split('/')
    directory = '/'.join(parts[:-1])
    if directory != '':
        directory += '/'
    environ.putenv("PYLEARN2_TRAIN_DIR", directory)
    environ.putenv("PYLEARN2_TRAIN_BASE_NAME", parts[-1])
    environ.putenv("PYLEARN2_TRAIN_FILE_STEM",
                   config_file_full_stem.split('/')[-1])

    return yaml_parse.load_path(config_file_path)
def feed_forward(self, input_data, prediction=False):
    """Propagate forward through the dropout layer.

    **Parameters:**

    input_data : ``GPUArray``
        Input data to perform dropout on.

    prediction : bool, optional
        Whether to use prediction mode. If true, no units are dropped;
        instead the data is scaled by ``1 - dropout_probability``.

    **Returns:**

    dropout_data : ``GPUArray``
        The data after performing dropout.
    """
    if input_data.shape[1] != self.n_in:
        raise ValueError('Number of outputs from previous layer (%d) '
                         'does not match number of inputs to this layer (%d)'
                         % (input_data.shape[1], self.n_in))

    if prediction:
        # Rescale activations instead of dropping units.
        return (input_data * (1 - self.dropout_probability),)

    masked_input = gpuarray.empty_like(input_data)
    mask = sample_dropout_mask(input_data, self.dropout_probability,
                               target=masked_input)
    return masked_input, mask
def backprop(self, input_data, df_output, cache=None):
    """ Backpropagate through the hidden layer

    **Parameters:**

    input_data : ``GPUArray``
        Inpute data to perform dropout on.

    df_output : ``GPUArray``
        Gradients with respect to the output of this layer
        (received from the layer above).

    cache : list of ``GPUArray``
        Cache obtained from forward pass. If the cache is
        provided, then the activations are not recalculated.

    **Returns:**

    gradients : empty tuple
        Gradients are empty since this layer has no parameters.

    df_input : ``GPUArray``
        Gradients with respect to the input.
    """
    if self.compute_input_gradients:
        # NOTE(review): `dropout_mask` is not defined anywhere in this
        # scope, so this branch raises NameError when
        # compute_input_gradients is True. Presumably the mask should be
        # extracted from `cache` (the forward pass returns the mask as its
        # second element) — confirm and fix upstream. `cache` is otherwise
        # unused here.
        apply_dropout_mask(df_output, dropout_mask)

    # df_output is masked in place (when the branch runs) and returned as
    # the input gradient; this layer has no parameters, hence the empty
    # gradients tuple.
    return tuple(), df_output
def POINTER(obj):
    """
    Create ctypes pointer to object.

    Notes
    -----
    This function converts None to a real NULL pointer because of bug
    in how ctypes handles None on 64-bit platforms.
    """
    ptr_type = ctypes.POINTER(obj)
    if isinstance(ptr_type.from_param, classmethod):
        # Already patched; nothing to do.
        return ptr_type

    def _from_param(cls, value):
        # Map None to a genuine NULL pointer instance of this type.
        return cls() if value is None else value

    ptr_type.from_param = classmethod(_from_param)
    return ptr_type
def gpuarray_ptr(g):
    """
    Return ctypes pointer to data in GPUArray object.
    """
    addr = int(g.gpudata)
    # Ordered (numpy type, ctypes type) pairs; first match wins, exactly
    # mirroring the original if/elif chain.
    type_pairs = (
        (np.int8, ctypes.c_byte),
        (np.uint8, ctypes.c_ubyte),
        (np.int16, ctypes.c_short),
        (np.uint16, ctypes.c_ushort),
        (np.int32, ctypes.c_int),
        (np.uint32, ctypes.c_uint),
        (np.int64, ctypes.c_long),
        (np.uint64, ctypes.c_ulong),
        (np.float32, ctypes.c_float),
        (np.float64, ctypes.c_double),
        (np.complex64, cuFloatComplex),
        (np.complex128, cuDoubleComplex),
    )
    for np_type, c_type in type_pairs:
        if g.dtype == np_type:
            return ctypes.cast(addr, POINTER(c_type))
    raise ValueError('unrecognized type')
def cudaMalloc(count, ctype=None):
    """
    Allocate device memory.

    Allocate memory on the device associated with the current active
    context.

    Parameters
    ----------
    count : int
        Number of bytes of memory to allocate
    ctype : _ctypes.SimpleType, optional
        ctypes type to cast returned pointer.

    Returns
    -------
    ptr : ctypes pointer
        Pointer to allocated device memory.
    """
    ptr = ctypes.c_void_p()
    status = _libcudart.cudaMalloc(ctypes.byref(ptr), count)
    cudaCheckStatus(status)
    # Compare against None with `is not` (PEP 8); `!= None` invokes __ne__
    # and can misbehave for objects with custom comparison.
    if ctype is not None:
        ptr = ctypes.cast(ptr, ctypes.POINTER(ctype))
    return ptr
def cudaMallocPitch(pitch, rows, cols, elesize):
    """
    Allocate pitched device memory.

    Allocate pitched memory on the device associated with the current active
    context.

    Parameters
    ----------
    pitch : int
        Pitch for allocation.
    rows : int
        Requested pitched allocation height.
    cols : int
        Requested pitched allocation width.
    elesize : int
        Size of memory element.

    Returns
    -------
    ptr : ctypes pointer
        Pointer to allocated device memory.
    """
    ptr = ctypes.c_void_p()
    # NOTE(review): the CUDA runtime's cudaMallocPitch writes the chosen
    # pitch through a size_t* out-parameter; here `pitch` is passed by
    # value (ctypes.c_size_t(pitch)) and the caller-supplied value is
    # returned unchanged. Confirm against the CUDA runtime API — this
    # looks like it should be ctypes.byref of an output c_size_t.
    status = _libcudart.cudaMallocPitch(ctypes.byref(ptr),
                                        ctypes.c_size_t(pitch),
                                        cols*elesize, rows)
    cudaCheckStatus(status)
    return ptr, pitch
def cudaMemcpy_htod(dst, src, count):
    """
    Copy memory from host to device.

    Copy data from host memory to device memory.

    Parameters
    ----------
    dst : ctypes pointer
        Device memory pointer.
    src : ctypes pointer
        Host memory pointer.
    count : int
        Number of bytes to copy.
    """
    nbytes = ctypes.c_size_t(count)
    status = _libcudart.cudaMemcpy(dst, src, nbytes, cudaMemcpyHostToDevice)
    cudaCheckStatus(status)
def cudaMemcpy_dtoh(dst, src, count):
    """
    Copy memory from device to host.

    Copy data from device memory to host memory.

    Parameters
    ----------
    dst : ctypes pointer
        Host memory pointer.
    src : ctypes pointer
        Device memory pointer.
    count : int
        Number of bytes to copy.
    """
    nbytes = ctypes.c_size_t(count)
    status = _libcudart.cudaMemcpy(dst, src, nbytes, cudaMemcpyDeviceToHost)
    cudaCheckStatus(status)
def cudaMemGetInfo():
    """
    Return the amount of free and total device memory.

    Returns
    -------
    free : long
        Free memory in bytes.
    total : long
        Total memory in bytes.
    """
    free_bytes = ctypes.c_size_t()
    total_bytes = ctypes.c_size_t()
    status = _libcudart.cudaMemGetInfo(ctypes.byref(free_bytes),
                                       ctypes.byref(total_bytes))
    cudaCheckStatus(status)
    return free_bytes.value, total_bytes.value
def cudaGetDevice():
    """
    Get current CUDA device.

    Return the identifying number of the device currently used to
    process CUDA operations.

    Returns
    -------
    dev : int
        Device number.
    """
    device = ctypes.c_int()
    status = _libcudart.cudaGetDevice(ctypes.byref(device))
    cudaCheckStatus(status)
    return device.value
def cudaDriverGetVersion():
    """
    Get installed CUDA driver version.

    Return the version of the installed CUDA driver as an integer. If
    no driver is detected, 0 is returned.

    Returns
    -------
    version : int
        Driver version.
    """
    driver_version = ctypes.c_int()
    status = _libcudart.cudaDriverGetVersion(ctypes.byref(driver_version))
    cudaCheckStatus(status)
    return driver_version.value
def cudaPointerGetAttributes(ptr):
    """
    Get memory pointer attributes.

    Returns attributes of the specified pointer.

    Parameters
    ----------
    ptr : ctypes pointer
        Memory pointer to examine.

    Returns
    -------
    memory_type : int
        Memory type; 1 indicates host memory, 2 indicates device memory.
    device : int
        Number of device associated with pointer.

    Notes
    -----
    This function only works with CUDA 4.0 and later.
    """
    attributes = cudaPointerAttributes()
    status = _libcudart.cudaPointerGetAttributes(ctypes.byref(attributes),
                                                 ptr)
    cudaCheckStatus(status)
    return attributes.memoryType, attributes.device
def eval(thunk, env):
    """Evaluate a thunk in an environment.

    Will defer the actual evaluation to the thunk itself, but adds two
    things: caching and recursion detection.

    Since we have to use a global evaluation stack (because there is a
    variety of functions that may be invoked, not just eval() but also
    __getitem__, and not all of them can pass along a context object), GCL
    evaluation is not thread safe.

    With regard to schemas:

    - A schema can be passed in from outside. The returned object will be
      validated to see that it conforms to the schema. The schema will be
      attached to the value if possible.
    - Some objects may contain their own schema, such as tuples. This would
      be out of scope of the eval() function, were it not for:
    - Schema validation can be disabled in an evaluation call stack. This is
      useful if we're evaluating a tuple only for its schema information. At
      that point, we're not interested if the object is value-complete.
    """
    activation_key = Activation.key(thunk, env)
    # A key that is already active means we re-entered evaluation of the
    # same (thunk, env) pair: a reference cycle.
    if Activation.activated(activation_key):
        raise exceptions.RecursionError('Reference cycle')
    with Activation(activation_key):
        return eval_cache.get(activation_key, thunk.eval, env)
def get_node(self, key):
    """Delegate to our current "value provider" for the node belonging to
    this key."""
    if key not in self.names:
        # Not one of ours -- defer to the parent scope.
        return self.parent.get_node(key)
    if hasattr(self.values, 'get_member_node'):
        return self.values.get_member_node(key)
    return None
def create_table(cls):
    """
    create_table

    Manually create a temporary table for model in test data base.
    :return:
    """
    editor_factory = getattr(connection, 'schema_editor', None)
    if editor_factory:
        # Modern Django: use the schema editor.
        with editor_factory() as editor:
            editor.create_model(cls)
        return
    # Legacy fallback: build and execute raw SQL.
    raw_sql, _ = connection.creation.sql_create_model(cls, no_style(), [])
    cls.delete_table()
    cursor = connection.cursor()
    try:
        cursor.execute(*raw_sql)
    finally:
        cursor.close()
def delete_table(cls):
    """
    delete_table

    Manually delete a temporary table for model in test data base.
    :return:
    """
    if getattr(connection, 'schema_editor', None):
        # Modern Django: use the schema editor.
        with connection.schema_editor() as editor:
            editor.delete_model(cls)
        return
    # Legacy fallback: drop the table directly, silencing the
    # "unknown table" warning when it does not exist.
    cursor = connection.cursor()
    try:
        with warnings.catch_warnings():
            warnings.filterwarnings('ignore', 'unknown table')
            cursor.execute('DROP TABLE IF EXISTS {0}'.format(cls._meta.db_table))
    finally:
        cursor.close()
def fake_me(cls, source):
    """
    fake_me

    Class or method decorator.

    Class decorator: create temporary table for all tests in SimpleTestCase.
    Method decorator: create temporary model only for given test method.

    :param source: SimpleTestCase or test function
    :return: the patched class or the wrapped test function
    :raises AttributeError: if source is neither of the above
    """
    # isinstance(source, type) also accepts classes built with a custom
    # metaclass, which the previous `type(source) == type` check would
    # wrongly reject.
    if source and isinstance(source, type) and issubclass(source, SimpleTestCase):
        return cls._class_extension(source)
    elif callable(source):
        return cls._decorator(source)
    else:
        raise AttributeError('source - must be a SimpleTestCase subclass or a function')
def vcr(decorated_func=None, debug=False, overwrite=False, disabled=False,
        playback_only=False, tape_name=None):
    """
    Decorator for capturing and simulating network communication

    ``debug`` : bool, optional
        Enables debug mode.
    ``overwrite`` : bool, optional
        Will run vcr in recording mode - overwrites any existing vcrtapes.
    ``playback_only`` : bool, optional
        Will run vcr in playback mode - will not create missing vcrtapes.
    ``disabled`` : bool, optional
        Completely disables vcr - same effect as removing the decorator.
    ``tape_name`` : str, optional
        Use given custom file name instead of an auto-generated name for the
        tape file.
    """
    def _vcr_outer(func):
        """
        Wrapper around _vcr_inner allowing optional arguments on decorator
        """
        def _vcr_inner(*args, **kwargs):
            """
            The actual decorator doing a lot of monkey patching and auto
            magic
            """
            if disabled or VCRSystem.disabled:
                # execute decorated function without VCR
                return func(*args, **kwargs)
            # prepare VCR tape
            if func.__module__ == 'doctest':
                # Doctests carry their origin in func.__self__._dt_test.
                source_filename = func.__self__._dt_test.filename
                file_name = os.path.splitext(
                    os.path.basename(source_filename))[0]
                # check if a tests directory exists
                path = os.path.join(os.path.dirname(source_filename),
                                    'tests')
                if os.path.exists(path):
                    # ./test/vcrtapes/tape_name.vcr
                    path = os.path.join(os.path.dirname(source_filename),
                                        'tests', 'vcrtapes')
                else:
                    # ./vcrtapes/tape_name.vcr
                    path = os.path.join(os.path.dirname(source_filename),
                                        'vcrtapes')
                func_name = func.__self__._dt_test.name.split('.')[-1]
            else:
                # Regular test function: derive everything from its code
                # object.
                source_filename = func.__code__.co_filename
                file_name = os.path.splitext(
                    os.path.basename(source_filename))[0]
                path = os.path.join(
                    os.path.dirname(source_filename), 'vcrtapes')
                func_name = func.__name__
            if tape_name:
                # tape file name is given - either full path is given or use
                # 'vcrtapes' directory
                if os.sep in tape_name:
                    temp = os.path.abspath(tape_name)
                    path = os.path.dirname(temp)
                if not os.path.isdir(path):
                    os.makedirs(path)
                tape = os.path.join(path, '%s' % (tape_name))
            else:
                # make sure 'vcrtapes' directory exists
                if not os.path.isdir(path):
                    os.makedirs(path)
                # auto-generated file name
                tape = os.path.join(path,
                                    '%s.%s.vcr' % (file_name, func_name))
            # enable VCR
            with VCRSystem(debug=debug):
                # check for tape file and determine mode
                if not (playback_only or VCRSystem.playback_only) and (
                        not os.path.isfile(tape) or overwrite or
                        VCRSystem.overwrite):
                    # record mode
                    if PY2:
                        # NOTE(review): recording is intentionally skipped
                        # on Python 2; tapes are written by PY3 only.
                        msg = 'VCR records only in PY3 to be backward ' + \
                              'compatible with PY2 - skipping VCR ' + \
                              'mechanics for %s'
                        warnings.warn(msg % (func.__name__))
                        # disable VCR
                        VCRSystem.stop()
                        # execute decorated function without VCR
                        return func(*args, **kwargs)
                    if VCRSystem.debug:
                        print('\nVCR RECORDING (%s) ...' % (func_name))
                    VCRSystem.status = VCR_RECORD
                    # execute decorated function
                    value = func(*args, **kwargs)
                    # check if vcr is actually used at all
                    if len(VCRSystem.playlist) == 0:
                        msg = 'no socket activity - @vcr unneeded for %s'
                        msg = msg % (func.__name__)
                        if VCRSystem.raise_if_not_needed:
                            raise Exception(msg)
                        else:
                            warnings.warn(msg)
                    else:
                        # remove existing tape
                        try:
                            os.remove(tape)
                        except OSError:
                            pass
                        # write playlist to file
                        with gzip.open(tape, 'wb') as fh:
                            pickle.dump(VCRSystem.playlist, fh, protocol=2)
                else:
                    # playback mode
                    if VCRSystem.debug:
                        print('\nVCR PLAYBACK (%s) ...' % (func_name))
                    VCRSystem.status = VCR_PLAYBACK
                    # if playback is requested and tape is missing: raise!
                    if not os.path.exists(tape):
                        msg = 'Missing VCR tape file for playback: {}'
                        raise IOError(msg.format(tape))
                    # load playlist
                    try:
                        with gzip.open(tape, 'rb') as fh:
                            VCRSystem.playlist = pickle.load(fh)
                    except OSError:
                        # support for older uncompressed tapes
                        with open(tape, 'rb') as fh:
                            VCRSystem.playlist = pickle.load(fh)
                    if VCRSystem.debug:
                        print('Loaded playlist:')
                        for i, item in enumerate(VCRSystem.playlist):
                            print('{:3d}: {} {} {}'.format(i, *item))
                        print()
                    # execute decorated function
                    value = func(*args, **kwargs)
            return value
        return _vcr_inner
    if decorated_func is None:
        # without arguments
        return _vcr_outer
    else:
        # with arguments
        return _vcr_outer(decorated_func)
def reset(cls):
    """
    Reset to default settings
    """
    # The dict (and the empty list inside it) is rebuilt on every call, so
    # each reset installs a fresh recv_endmarkers list.
    defaults = {
        'debug': False,
        'disabled': False,
        'overwrite': False,
        'playback_only': False,
        'recv_timeout': 5,
        'recv_endmarkers': [],
        'recv_size': None,
    }
    for name, value in defaults.items():
        setattr(cls, name, value)
def to_python(value, seen=None):
    """Reify values to their Python equivalents.

    Does recursion detection, failing when that happens.
    """
    seen = seen or set()
    if isinstance(value, framework.TupleLike):
        # Tuple-likes carry an identity; refuse to reify a cycle.
        if value.ident in seen:
            raise RecursionException('to_python: infinite recursion while evaluating %r' % value)
        branch_seen = seen.union([value.ident])
        return {key: to_python(value[key], seen=branch_seen)
                for key in value.exportable_keys()}
    if isinstance(value, dict):
        return {key: to_python(value[key], seen=seen) for key in value.keys()}
    if isinstance(value, list):
        return [to_python(item, seen=seen) for item in value]
    # Scalars pass through untouched.
    return value
def walk(value, walker, path=None, seen=None):
    """Walks the _evaluated_ tree of the given GCL tuple.

    The appropriate methods of walker will be invoked for every element in
    the tree.
    """
    seen = seen or set()
    path = path or []

    # Recursion: object ids currently on the traversal stack are tracked in
    # `seen`; revisiting one means a cycle.
    if id(value) in seen:
        walker.visitRecursion(path)
        return

    # Error
    if isinstance(value, Exception):
        walker.visitError(path, value)
        return

    # List
    if isinstance(value, list):
        # Not actually a tuple, but okay
        # enterList may return False (skip), True (recurse with the same
        # walker) or a replacement walker to use for the children.
        recurse = walker.enterList(value, path)
        if not recurse:
            return
        next_walker = walker if recurse is True else recurse
        with TempSetAdd(seen, id(value)):
            for i, x in enumerate(value):
                walk(x, next_walker, path=path + ['[%d]' % i], seen=seen)
        walker.leaveList(value, path)
        return

    # Scalar
    if not isinstance(value, framework.TupleLike):
        walker.visitScalar(path, value)
        return

    # Tuple: same enter/leave protocol as lists, with keys visited in
    # sorted order for deterministic traversal.
    recurse = walker.enterTuple(value, path)
    if not recurse:
        return
    next_walker = walker if recurse is True else recurse
    with TempSetAdd(seen, id(value)):
        keys = sorted(value.keys())
        for key in keys:
            key_path = path + [key]
            # get_or_error turns evaluation failures into Exception values,
            # which the recursive call reports via visitError.
            elm = get_or_error(value, key)
            walk(elm, next_walker, path=key_path, seen=seen)
    walker.leaveTuple(value, path)
def fingerprint(value):
    """Return a hash value that uniquely identifies the GCL value."""
    h = hashlib.sha256()
    _digest(value, h)
    # hexdigest() produces the same hex string as the Python-2-only
    # h.digest().encode('hex'), and also works on Python 3 (where bytes
    # has no encode()).
    return h.hexdigest()
def compact_error(err):
    """Return the last 2 error messages from an error stack.

    These error messages turn out to be the most descriptive.
    """
    def summarize(e):
        # Returns (message, depth counted from the innermost error).
        if isinstance(e, exceptions.EvaluationError) and e.inner:
            message, depth = summarize(e.inner)
            if depth == 1:
                # Innermost pair: combine this message with its cause.
                message = ', '.join([e.args[0], str(e.inner)])
            return message, depth + 1
        return str(e), 1

    return summarize(err)[0]
def backprop(self, input_data, targets, cache=None):
    """ Backpropagate through the logistic layer.

    **Parameters:**

    input_data : ``GPUArray``
        Inpute data to compute activations for.

    targets : ``GPUArray``
        The target values of the units.

    cache : list of ``GPUArray``
        Cache obtained from forward pass. If the cache is
        provided, then the activations are not recalculated.

    **Returns:**

    gradients : tuple of ``GPUArray``
        Gradients with respect to the weights and biases in the
        form ``(df_weights, df_biases)``.

    df_input : ``GPUArray``
        Gradients with respect to the input.
    """
    if cache is not None:
        activations = cache
    else:
        activations = self.feed_forward(input_data, prediction=False)

    if activations.shape != targets.shape:
        raise ValueError('Activations (shape = %s) and targets (shape = %s) are different sizes' % (activations.shape, targets.shape))

    # delta = activations - targets (the usual cross-entropy/sigmoid
    # simplification; `substract_matrix` spelling is the external API).
    delta = substract_matrix(activations, targets)
    # Replace NaNs in delta with zeros — presumably to tolerate missing
    # target entries encoded as NaN; confirm against the data pipeline.
    nan_to_zeros(delta, delta)

    # Gradient wrt weights
    df_W = linalg.dot(input_data, delta, transa='T')
    # Gradient wrt bias
    df_b = matrix_sum_out_axis(delta, 0)
    # Gradient wrt input
    df_input = linalg.dot(delta, self.W, transb='T')

    # L1 penalty
    if self.l1_penalty_weight:
        df_W += self.l1_penalty_weight * sign(self.W)

    # L2 penalty
    if self.l2_penalty_weight:
        df_W += self.l2_penalty_weight * self.W

    return (df_W, df_b), df_input
def cross_entropy_error(self, input_data, targets, average=True,
                        cache=None, prediction=False):
    """ Return the cross entropy error
    """
    # Reuse cached activations when available to avoid a second forward
    # pass.
    activations = cache if cache is not None else \
        self.feed_forward(input_data, prediction=prediction)

    loss = cross_entropy_logistic(activations, targets)
    if average:
        loss /= targets.shape[0]
    return loss.get()
def stylize_comment_block(lines):
    """Parse comment lines and make subsequent indented lines into a code
    block block.

    Yields the input lines, inserting a '.. code-block:: javascript'
    directive (surrounded by blank lines) before each indented run that
    follows a blank line.
    """
    normal, sep, in_code = range(3)
    state = normal
    for line in lines:
        indented = line.startswith(' ')
        empty_line = line.strip() == ''
        if state == normal and empty_line:
            # Blank line after normal text: a code block may follow.
            state = sep
        elif state in (sep, normal) and indented:
            # Indented line: open a code block.
            yield ''
            yield '.. code-block:: javascript'
            yield ''
            yield line
            state = in_code
        elif state == sep and not empty_line:
            # Non-indented text after a blank line: re-emit the swallowed
            # blank and continue normally.
            yield ''
            yield line
            state = normal
        else:
            yield line
            if state == in_code and not (indented or empty_line):
                # Bug fix: was `sep = normal`, which clobbered the `sep`
                # state constant instead of leaving code-block state.
                state = normal
def sort_members(tup, names):
    """Return two pairs of members, scalar and tuple members.

    The scalars will be sorted s.t. the unbound members are at the top.
    """
    def is_scalar(name):
        return not is_tuple_node(tup.member[name].value)

    scalars, tuples = partition(is_scalar, names)
    unbound, bound = partition(lambda n: tup.member[n].value.is_unbound(),
                               scalars)
    return usorted(unbound) + usorted(bound), usorted(tuples)
def resolve_file(fname, paths):
    """Resolve filename relatively against one of the given paths, if
    possible.

    Returns the path of `fname` relative to the first path in `paths` that
    contains it, or `fname` unchanged when no path does.
    """
    fpath = path.abspath(fname)
    for candidate in paths:
        spath = path.abspath(candidate)
        # Anchor the prefix test at a separator boundary; a bare
        # startswith(spath) would wrongly treat '/a/barbaz' as being
        # inside '/a/bar'.
        prefix = spath if spath.endswith(path.sep) else spath + path.sep
        if fpath.startswith(prefix):
            return fpath[len(prefix):]
    return fname
def generate(self):
    """Generate a list of strings representing the table in RST format."""
    widths = self.width
    # The '=' rule doubles as both the top and bottom border.
    rule = ' '.join('=' * widths[col] for col in range(self.w))
    body = [' '.join(row[col].ljust(widths[col]) for col in range(self.w))
            for row in self.rows]
    return [rule] + body + [rule]
def partition(pred, iterable):
    """Use a predicate to partition entries into false entries and true
    entries.

    Example: partition(is_odd, range(10)) --> [0, 2, 4, 6, 8], [1, 3, 5, 7, 9]
    """
    t1, t2 = itertools.tee(iterable)
    # itertools.filterfalse replaces the hand-rolled negate() helper.
    return list(itertools.filterfalse(pred, t1)), list(filter(pred, t2))
def select(self, model):
    """Select nodes according to the input selector.

    This can ALWAYS return multiple root elements.
    """
    res = []

    # Walk the model recursively, matching each selector segment in turn.
    # 'pre' accumulates the key path to the current value.
    def doSelect(value, pre, remaining):
        if not remaining:
            # Selector exhausted: record (path, value) as a hit.
            res.append((pre, value))
        else:
            # For the other selectors to work, value must be a Tuple or a list at this point.
            if not is_tuple(value) and not isinstance(value, list):
                return
            qhead, qtail = remaining[0], remaining[1:]
            if isinstance(qhead, tuple) and is_tuple(value):
                # Alternation: descend into every alternative key present.
                for alt in qhead:
                    if alt in value:
                        doSelect(value[alt], pre + [alt], qtail)
            elif qhead == '*':
                # Wildcard: descend into every element/key.
                if isinstance(value, list):
                    indices = range(len(value))
                    reprs = [listKey(i) for i in indices]
                else:
                    indices = value.keys()
                    reprs = indices
                for key, rep in zip(indices, reprs):
                    doSelect(value[key], pre + [rep], qtail)
            elif isinstance(qhead, int) and isinstance(value, list):
                # Numeric index into a list.
                doSelect(value[qhead], pre + [listKey(qhead)], qtail)
            elif is_tuple(value):
                # Plain key lookup into a tuple.
                if qhead in value:
                    doSelect(value[qhead], pre + [qhead], qtail)

    for selector in self.selectors:
        doSelect(model, [], selector)

    return QueryResult(res)
def deep(self):
    """Return a deep dict of the values selected.

    The leaf values may still be gcl Tuples. Use util.to_python() if you
    want to reify everything to real Python values.
    """
    # Reset the list registry used for missing-value cleanup.
    self.lists = {}
    result = {}
    for keypath, value in self.paths_values():
        self.recursiveSet(result, keypath, value)
    self.removeMissingValuesFromLists()
    return result
def ldSet(self, what, key, value):
    """List/dictionary-aware set."""
    if not isListKey(key):
        what[key] = value
        return value
    # Remember the list so missing values can be removed after all
    # selectors have inserted their values.
    self.lists[id(what)] = what
    index = listKeyIndex(key)
    # Pad with missing_value markers to keep indexes consistent.
    while len(what) <= index:
        what.append(missing_value)
    what[index] = value
    return value
def ldGet(self, what, key):
    """List-aware get."""
    return what[listKeyIndex(key)] if isListKey(key) else what[key]
def ldContains(self, what, key):
    """List/dictinary/missing-aware contains.

    If the value is a "missing_value", we'll treat it as non-existent so it
    will be overwritten by an empty list/dict when necessary to assign
    child keys.
    """
    if isListKey(key):
        index = listKeyIndex(key)
        return index < len(what) and what[index] != missing_value
    return key in what and what[key] != missing_value
def find_recursive_dependency(self):
    """Return a list of nodes that have a recursive dependency."""
    # Depth-first walk over self.deps; nodes_on_path holds the current DFS
    # path and, on exit, the path leading into the detected cycle.
    nodes_on_path = []
    def helper(nodes):
        for node in nodes:
            # Revisiting a node already on the path means a cycle; the node
            # is still appended so the returned path includes it.
            cycle = node in nodes_on_path
            nodes_on_path.append(node)
            if cycle or helper(self.deps.get(node, [])):
                # Keep the path intact while unwinding.
                return True
            nodes_on_path.pop()
        return False
    helper(self.unordered)
    # Empty when no cycle was found (every append was popped).
    return nodes_on_path
def enterTuple(self, tuple, path):
    """Called for every tuple.

    If this returns False, the elements of the tuple will not be recursed
    over and leaveTuple() will not be called.
    """
    if skip_name(path):
        return False

    node = Node(path, tuple)
    if not self.condition.matches(node):
        # No match: keep recursing into the tuple's children.
        return True

    # Matched: collect the node and stop descending into it.
    self.unordered.append(node)
    return False
def convertAndMake(converter, handler):
    """Convert with location."""
    def action(loc, value):
        converted = converter(value)
        return handler(loc, converted)
    return action
def mkApplications(location, *atoms):
    """Make a sequence of applications from a list of tokens.

    atoms is a list of atoms, which will be handled left-associatively.
    E.g: ['foo', [], []] == foo()() ==> Application(Application('foo', []), [])
    """
    # Left fold: apply each successive atom to the accumulated expression.
    result = atoms[0]
    for arg in atoms[1:]:
        result = Application(location, result, arg)
    return result
def call_fn(fn, arglist, env):
    """Call a function, respecting all the various types of functions that exist."""
    if isinstance(fn, framework.LazyFunction):
        # The following looks complicated, but this is necessary because you can't
        # construct closures over the loop variable directly: the outer lambda
        # binds each thunk 'th' immediately, so every inner lambda evaluates
        # its own argument rather than the last one in the list.
        thunks = [(lambda thunk: lambda: framework.eval(thunk, env))(th) for th in arglist.values]
        return fn(*thunks)

    # Non-lazy functions receive fully evaluated arguments.
    evaled_args = framework.eval(arglist, env)
    if isinstance(fn, framework.EnvironmentFunction):
        # Environment-aware functions also get the current env as a keyword.
        return fn(*evaled_args, env=env)
    return fn(*evaled_args)
def schema_spec_from_tuple(tup):
    """Return the schema spec from a run-time tuple."""
    if not hasattr(tup, 'get_schema_spec'):
        return schema.AnySchema()
    # Tuples have a TupleSchema field that contains a model of the schema
    spec = {'fields': TupleSchemaAccess(tup),
            'required': tup.get_required_fields()}
    return schema.from_spec(spec)
def make_schema_from(value, env):
    """Make a Schema object from the given spec.

    The input and output types of this function are super unclear, and are
    held together by ponies, wishes, duct tape, and a load of tests. See the
    comments for horrific entertainment.
    """
    # So this thing may not need to evaluate anything[0]
    if isinstance(value, framework.Thunk):
        value = framework.eval(value, env)

    # In general the value has already evaluated to a Schema object, but
    # tuples and lists still need to be treated as specs.
    if isinstance(value, schema.Schema):
        return value

    if framework.is_tuple(value):
        # Tuples are passed to the schema factory in a bit of a different
        # way (a dictionary with {fields, required} keys).
        return schema_spec_from_tuple(value)

    if framework.is_list(value):
        # [0] This list may contain tuples, which oughta be treated as specs,
        # or already-resolved schema objects (as returned by 'int' and
        # 'string' literals). make_schema_from deals with both.
        return schema.from_spec([make_schema_from(element, env) for element in value])

    raise exceptions.EvaluationError('Can\'t make a schema from %r' % value)
def bracketedList(l, r, sep, expr, allow_missing_close=False):
    """Parse bracketed list.

    Empty list is possible, as is a trailing separator.
    """
    closer = p.Optional(sym(r)) if allow_missing_close else sym(r)
    # We may need to backtrack for lists, because of list comprehension, but
    # not for any of the other lists.
    if l == '[':
        return sym(l) + listMembers(sep, expr) + closer
    return sym(l) - listMembers(sep, expr) - closer
def unquote(s):
    """Unquote the indicated string."""
    # Ignore the left- and rightmost chars (which should be quotes).
    # Use the translation map to decode each escape sequence.
    last = len(s) - 1
    pos = 1
    chars = []
    while pos < last:
        ch = s[pos]
        if ch == '\\' and pos < last - 1:
            chars.append(UNQUOTE_MAP.get(s[pos + 1], s[pos + 1]))
            pos += 2
        else:
            chars.append(ch)
            pos += 1
    return ''.join(chars)
def pattern(name, pattern):
    """Function to put a name on a pyparsing pattern.

    Just for ease of debugging/tracing parse errors. Returns the pattern
    unchanged (aside from the attached name and optional tracing hook).
    """
    pattern.setName(name)
    # Hook the pattern into the AST tracer when tracing is enabled.
    astracing.maybe_trace(pattern)
    return pattern
def make_grammar(allow_errors):
    """Make the part of the grammar that depends on whether we swallow errors or not."""
    if allow_errors in GRAMMAR_CACHE:
        return GRAMMAR_CACHE[allow_errors]

    tuple = p.Forward()

    # Error-recovery production: scan up to a balanced tuple or a statement
    # terminator so parsing can resynchronize after a bad member.
    catch_errors = p.Forward()
    catch_errors << (p.Regex('[^{};]*') - p.Optional(tuple) - p.Regex('[^;}]*'))

    def swallow_remainder():
        # Consume leftover garbage after a member when error-tolerant.
        if allow_errors:
            return pattern('swallow_remainder', p.Suppress(catch_errors))
        return p.Empty()

    def swallow_errors(rule):
        """Extend the production rule by potentially eating errors.

        This does not return a p.NoMatch() because that messes up the error messages.
        """
        ret = rule
        if allow_errors:
            # Synchronize on the first semicolon or the first unbalanced closing curly
            ret = rule | pattern('catch_errors', parseWithLocation(p.Suppress(catch_errors), UnparseableNode))
        return ret

    class Grammar:
        keywords = ['and', 'or', 'not', 'if', 'then', 'else', 'include',
                    'inherit', 'null', 'true', 'false', 'for', 'in']

        # This is a hack: this condition helps uselessly recursing into the grammar for
        # juxtapositions.
        early_abort_scan = ~p.oneOf([';', ',', ']', '}', 'for' ])

        expression = pattern('expression', p.Forward())

        # '#' starts a comment, unless followed by '.' (doc comment marker).
        comment = p.Regex('#') + ~p.FollowedBy(sym('.')) + p.restOfLine
        doc_comment = pattern('doc_comment', (sym('#.') - p.restOfLine))

        quotedIdentifier = pattern('quotedIdentifier', p.QuotedString('`', multiline=False))

        # - Must start with an alphascore
        # - May contain alphanumericscores and special characters such as : and -
        # - Must not end in a special character
        identifier = pattern('identifier', parseWithLocation(quotedIdentifier | p.Regex(r'[a-zA-Z_]([a-zA-Z0-9_:-]*[a-zA-Z0-9_])?'), Identifier))

        # Variable identifier (can't be any of the keywords, which may have lower matching priority)
        variable = pattern('variable', ~p.MatchFirst(p.oneOf(keywords)) + pattern('identifier', parseWithLocation(identifier.copy(), Var)))

        # Contants
        integer = pattern('integer', parseWithLocation(p.Word(p.nums), convertAndMake(int, Literal)))
        floating = pattern('floating', parseWithLocation(p.Regex(r'\d*\.\d+'), convertAndMake(float, Literal)))
        dq_string = pattern('dq_string', parseWithLocation(p.QuotedString('"', escChar='\\', unquoteResults=False, multiline=True), convertAndMake(unquote, Literal)))
        sq_string = pattern('sq_string', parseWithLocation(p.QuotedString("'", escChar='\\', unquoteResults=False, multiline=True), convertAndMake(unquote, Literal)))
        boolean = pattern('boolean', parseWithLocation(p.Keyword('true') | p.Keyword('false'), convertAndMake(mkBool, Literal)))
        null = pattern('null', parseWithLocation(p.Keyword('null'), Null))

        # List
        list_ = pattern('list', parseWithLocation(bracketedList('[', ']', ',', expression), List))

        # Tuple
        inherit = pattern('inherit', (kw('inherit') - p.ZeroOrMore(variable)).setParseAction(inheritNodes))
        schema_spec = pattern('schema_spec', parseWithLocation(p.Optional(p.Keyword('private').setParseAction(lambda: True), default=False) - p.Optional(p.Keyword('required').setParseAction(lambda: True), default=False) - p.Optional(expression, default=any_schema_expr), MemberSchemaNode))
        optional_schema = pattern('optional_schema', p.Optional(p.Suppress(':') - schema_spec, default=no_schema))

        expression_value = pattern('expression_value', sym('=') - swallow_errors(expression))
        # A member without '= value' (followed directly by ';' or '}') is Void.
        void_value = pattern('void_value', parseWithLocation(p.FollowedBy(sym(';') | sym('}')), lambda loc: Void(loc, 'nonameyet')))
        member_value = pattern('member_value', swallow_errors(expression_value | void_value))
        named_member = pattern('named_member', parseWithLocation(identifier - optional_schema - member_value - swallow_remainder(), TupleMemberNode))
        documented_member = pattern('documented_member', parseWithLocation(parseWithLocation(p.ZeroOrMore(doc_comment), DocComment) + named_member, attach_doc_comment))
        tuple_member = early_abort_scan + pattern('tuple_member', swallow_errors(inherit | documented_member) - swallow_remainder())

        ErrorAwareTupleNode = functools.partial(TupleNode, allow_errors)
        tuple_members = pattern('tuple_members', parseWithLocation(listMembers(';', tuple_member), ErrorAwareTupleNode))
        tuple << pattern('tuple', parseWithLocation(bracketedList('{', '}', ';', tuple_member, allow_missing_close=allow_errors), ErrorAwareTupleNode))

        # Argument list will live by itself as a atom. Actually, it's a tuple, but we
        # don't call it that because we use that term for something else already :)
        arg_list = pattern('arg_list', bracketedList('(', ')', ',', expression).setParseAction(ArgList))

        parenthesized_expr = pattern('parenthesized_expr', (sym('(') - expression - ')').setParseAction(head))

        unary_op = pattern('unary_op', (p.oneOf(' '.join(functions.unary_operators.keys())) - expression).setParseAction(mkUnOp))

        if_then_else = pattern('if_then_else', parseWithLocation(kw('if') + expression + kw('then') + expression + kw('else') + expression, Condition))

        list_comprehension = pattern('list_comprehension', parseWithLocation(sym('[') + expression + kw('for') + variable + kw('in') + expression + p.Optional(kw('if') + expression) + sym(']'), ListComprehension))

        # We don't allow space-application here
        # Now our grammar is becoming very dirty and hackish
        deref = pattern('deref', p.Forward())
        include = pattern('include', parseWithLocation(kw('include') - deref, Include))

        atom = pattern('atom', (tuple
                                | sq_string
                                | dq_string
                                | variable
                                | floating
                                | integer
                                | boolean
                                | list_
                                | null
                                | unary_op
                                | parenthesized_expr
                                | if_then_else
                                | include
                                | list_comprehension
                                ))

        # We have two different forms of function application, so they can have 2
        # different precedences. This one: fn(args), which binds stronger than
        # dereferencing (fn(args).attr == (fn(args)).attr)
        applic1 = pattern('applic1', parseWithLocation(atom - p.ZeroOrMore(arg_list), mkApplications))

        # Dereferencing of an expression (obj.bar)
        deref << parseWithLocation(applic1 - p.ZeroOrMore(p.Suppress('.') - swallow_errors(identifier)), mkDerefs)

        # All binary operators at various precedence levels go here:
        # This piece of code does the moral equivalent of:
        #
        #     T = F*F | F/F | F
        #     E = T+T | T-T | T
        #
        # etc.
        term = deref
        for op_level in functions.binary_operators_before_juxtaposition:
            operator_syms = list(op_level.keys())
            term = (term - p.ZeroOrMore(p.oneOf(operator_syms) - term)).setParseAction(mkBinOps)

        # Juxtaposition function application (fn arg), must be 1-arg every time
        applic2 = pattern('applic2', parseWithLocation(term - p.ZeroOrMore(early_abort_scan + term), mkApplications))
        term = applic2

        for op_level in functions.binary_operators_after_juxtaposition:
            operator_syms = list(op_level.keys())
            term = (term - p.ZeroOrMore(p.oneOf(operator_syms) - term)).setParseAction(mkBinOps)

        expression << term

        # Two entry points: start at an arbitrary expression, or expect the top-level
        # scope to be a tuple.
        start = pattern('start', expression.copy().ignore(comment))
        start_tuple = tuple_members.ignore(comment)

    GRAMMAR_CACHE[allow_errors] = Grammar
    return Grammar
def reads(s, filename, loader, implicit_tuple, allow_errors):
    """Load but don't evaluate a GCL expression from a string."""
    try:
        the_context.filename = filename
        the_context.loader = loader

        grammar = make_grammar(allow_errors=allow_errors)
        # Top-level scope is an implicit tuple unless an expression was asked for.
        entry_point = grammar.start_tuple if implicit_tuple else grammar.start
        return entry_point.parseWithTabs().parseString(s, parseAll=True)[0]
    except (p.ParseException, p.ParseSyntaxException) as e:
        loc = SourceLocation(s, find_offset(s, e.lineno, e.col))
        raise exceptions.ParseError(the_context.filename, loc, e.msg)
def find_tokens(self, q):
    """Find all AST nodes at the given filename, line and column."""
    matches = []
    if hasattr(self, 'location'):
        if self.location.contains(q):
            matches = [self]
    elif self._found_by(q):
        matches = [self]

    child_hits = [child.find_tokens(q) for child in self._children()]
    return matches + list(itertools.chain(*child_hits))
def _make_tuple(self, env):
    """Instantiate the Tuple based on this TupleNode."""
    tup = runtime.Tuple(self, env, dict2tuple)
    # A tuple also provides its own schema spec
    tup.attach_schema(schema_spec_from_tuple(tup))
    return tup
def applyTuple(self, tuple, right, env):
    """Apply a tuple to something else."""
    if len(right) != 1:
        raise exceptions.EvaluationError('Tuple (%r) can only be applied to one argument, got %r' % (self.left, self.right))
    return tuple(right[0])
def applyIndex(self, lst, right):
    """Apply a list to something else."""
    if len(right) != 1:
        raise exceptions.EvaluationError('%r can only be applied to one argument, got %r' % (self.left, self.right))
    index = right[0]
    if not isinstance(index, int):
        raise exceptions.EvaluationError("Can't apply %r to argument (%r): integer expected, got %r" % (self.left, self.right, index))
    return lst[index]
def pre_gradient_update(self):
    """First step of Nesterov momentum method:
    take step in direction of accumulated gradient
    """
    # Pair each velocity entry with a step size of 1.0; presumably
    # update_parameters applies 'param += 1. * velocity' -- TODO confirm
    # against the model's update_parameters contract.
    updates = zip(self.velocity, self.model.n_parameters * [1.])
    self.model.update_parameters(updates)
def class_error(self, input_data, targets, average=True, cache=None,
                prediction=False):
    """Return the classification error rate.

    NOTE(review): argmax along axis 1 implies both activations and targets
    are 2D one-hot style device arrays -- confirm against callers.
    """
    if cache is not None:
        activations = cache
    else:
        activations = \
            self.feed_forward(input_data, prediction=prediction)

    # Pull arrays to the host via .get() and count mismatched argmaxes.
    targets = targets.get().argmax(1)
    class_error = np.sum(activations.get().argmax(1) != targets)

    if average:
        # Normalize to an error rate over the batch.
        class_error = float(class_error) / targets.shape[0]

    return class_error
def kl_error(self, input_data, targets, average=True,
             cache=None, prediction=True):
    """The KL divergence error."""
    if cache is not None:
        activations = cache
    else:
        activations = \
            self.feed_forward(input_data, prediction=prediction)

    # Replace NaNs in the targets with zeros so they don't poison the sum.
    targets_non_nan = gpuarray.empty_like(targets)
    nan_to_zeros(targets, targets_non_nan)
    # KL(targets || activations) summed over all elements; eps guards log(0).
    kl_error = gpuarray.sum(targets_non_nan *
                            (cumath.log(targets_non_nan + eps) -
                             cumath.log(activations + eps)))
    if average:
        kl_error /= targets.shape[0]
    return kl_error.get()
def dot(x_gpu, y_gpu, transa='N', transb='N', handle=None, target=None):
    """
    Dot product of two arrays.

    For 1D arrays, this function computes the inner product. For 2D
    arrays of shapes `(m, k)` and `(k, n)`, it computes the matrix
    product; the result has shape `(m, n)`.

    Parameters
    ----------
    x_gpu : pycuda.gpuarray.GPUArray
        Input array.
    y_gpu : pycuda.gpuarray.GPUArray
        Input array.
    transa : char
        If 'T', compute the product of the transpose of `x_gpu`.
        If 'C', compute the product of the Hermitian of `x_gpu`.
    transb : char
        If 'T', compute the product of the transpose of `y_gpu`.
        If 'C', compute the product of the Hermitian of `y_gpu`.
    handle : int
        CUBLAS context. If no context is specified, the default handle from
        `scikits.cuda.misc._global_cublas_handle` is used.
    target : pycuda.gpuarray.GPUArray, optional
        Preallocated output array; a new one is allocated when omitted.

    Returns
    -------
    c_gpu : pycuda.gpuarray.GPUArray, float{32,64}, or complex{64,128}
        Inner product of `x_gpu` and `y_gpu`. When the inputs are 1D
        arrays, the result will be returned as a scalar.

    Notes
    -----
    The input matrices must all contain elements of the same data type.

    Examples
    --------
    >>> import pycuda.gpuarray as gpuarray
    >>> import pycuda.autoinit
    >>> import numpy as np
    >>> import linalg
    >>> import misc
    >>> linalg.init()
    >>> a = np.asarray(np.random.rand(4, 2), np.float32)
    >>> b = np.asarray(np.random.rand(2, 2), np.float32)
    >>> a_gpu = gpuarray.to_gpu(a)
    >>> b_gpu = gpuarray.to_gpu(b)
    >>> c_gpu = linalg.dot(a_gpu, b_gpu)
    >>> np.allclose(np.dot(a, b), c_gpu.get())
    True
    >>> d = np.asarray(np.random.rand(5), np.float32)
    >>> e = np.asarray(np.random.rand(5), np.float32)
    >>> d_gpu = gpuarray.to_gpu(d)
    >>> e_gpu = gpuarray.to_gpu(e)
    >>> f = linalg.dot(d_gpu, e_gpu)
    >>> np.allclose(np.dot(d, e), f)
    True
    """
    if handle is None:
        handle = _global_cublas_handle

    if len(x_gpu.shape) == 1 and len(y_gpu.shape) == 1:
        if x_gpu.size != y_gpu.size:
            raise ValueError('arrays must be of same length: '
                             'x_gpu.size = %d, y_gpu.size = %d' %
                             (x_gpu.size, y_gpu.size))

        # Compute inner product for 1D arrays:
        if (x_gpu.dtype == np.complex64 and y_gpu.dtype == np.complex64):
            cublas_func = cublas.cublasCdotu
        elif (x_gpu.dtype == np.float32 and y_gpu.dtype == np.float32):
            cublas_func = cublas.cublasSdot
        elif (x_gpu.dtype == np.complex128 and y_gpu.dtype == np.complex128):
            cublas_func = cublas.cublasZdotu
        elif (x_gpu.dtype == np.float64 and y_gpu.dtype == np.float64):
            cublas_func = cublas.cublasDdot
        else:
            raise ValueError('unsupported combination of input types: '
                             'x_gpu.dtype = %s, y_gpu.dtype = %s' %
                             (str(x_gpu.dtype), str(y_gpu.dtype)))

        return cublas_func(handle, x_gpu.size, x_gpu.gpudata, 1,
                           y_gpu.gpudata, 1)
    else:
        # Get the shapes of the arguments (accounting for the
        # possibility that one of them may only have one dimension):
        x_shape = x_gpu.shape
        y_shape = y_gpu.shape
        if len(x_shape) == 1:
            x_shape = (1, x_shape[0])
        if len(y_shape) == 1:
            y_shape = (1, y_shape[0])

        # Perform matrix multiplication for 2D arrays:
        if (x_gpu.dtype == np.complex64 and y_gpu.dtype == np.complex64):
            cublas_func = cublas.cublasCgemm
            alpha = np.complex64(1.0)
            beta = np.complex64(0.0)
        elif (x_gpu.dtype == np.float32 and y_gpu.dtype == np.float32):
            cublas_func = cublas.cublasSgemm
            alpha = np.float32(1.0)
            beta = np.float32(0.0)
        elif (x_gpu.dtype == np.complex128 and y_gpu.dtype == np.complex128):
            cublas_func = cublas.cublasZgemm
            alpha = np.complex128(1.0)
            beta = np.complex128(0.0)
        elif (x_gpu.dtype == np.float64 and y_gpu.dtype == np.float64):
            cublas_func = cublas.cublasDgemm
            alpha = np.float64(1.0)
            beta = np.float64(0.0)
        else:
            raise ValueError('unsupported combination of input types: '
                             'x_gpu.dtype = %s, y_gpu.dtype = %s' %
                             (str(x_gpu.dtype), str(y_gpu.dtype)))

        transa = lower(transa)
        transb = lower(transb)

        # CUBLAS is column-major, so the operands are swapped below and the
        # m/n/k dimensions are derived accordingly.
        if transb in ['t', 'c']:
            m, k = y_shape
        elif transb in ['n']:
            k, m = y_shape
        else:
            raise ValueError('invalid value "%s" for transb' % transb)

        if transa in ['t', 'c']:
            l, n = x_shape
        elif transa in ['n']:
            n, l = x_shape
        else:
            raise ValueError('invalid value "%s" for transa' % transa)

        if l != k:
            raise ValueError('objects are not aligned: x_shape = %s, y_shape = %s' %
                             (x_shape, y_shape))

        if transb == 'n':
            lda = max(1, m)
        else:
            lda = max(1, k)

        if transa == 'n':
            ldb = max(1, k)
        else:
            ldb = max(1, n)

        ldc = max(1, m)

        # Note that the desired shape of the output matrix is the transpose
        # of what CUBLAS assumes:
        if target is None:
            target = gpuarray.empty((n, ldc), x_gpu.dtype,
                                    allocator=memory_pool.allocate)

        cublas_func(handle, transb, transa, m, n, k, alpha, y_gpu.gpudata,
                    lda, x_gpu.gpudata, ldb, beta, target.gpudata, ldc)

        return target
def make_tempfile(data=None):
    "Create a temp file, write our PID into it."
    # Fall back to the current PID when no explicit data was supplied.
    content = data if data is not None else os.getpid()
    with tempfile.NamedTemporaryFile(mode='w', delete=False) as temp:
        temp.write(six.text_type(content))
    return temp.name
def parameters(self):
    """Return a list where each element contains the parameters for a task."""
    return [param for task in self.tasks for param in task.parameters]
def parameters(self, value):
    """Update the parameters.

    ``value`` must be a list/tuple of length
    ``MultitaskTopLayer.n_tasks``, each element of which must have the
    correct number of parameters for the task.
    """
    assert len(value) == self.n_parameters
    offset = 0
    for task in self.tasks:
        # Hand each task its own slice of the flat parameter list.
        task.parameters = value[offset:offset + task.n_parameters]
        offset += task.n_parameters
def feed_forward(self, input_data, prediction=False):
    """Call ``feed_forward`` for each task and combine the activations.

    Passes ``input_data`` to all tasks and returns the activations as a
    list.

    **Parameters:**

    input_data : ``GPUArray``
        Inpute data to compute activations for.

    prediction : bool, optional
        Whether to use prediction model. Only relevant when using dropout.
        If true, then weights are multiplied by 1 - dropout if the layer
        uses dropout.

    **Returns:**

    activations : list of ``GPUArray``
        The activations of the output units, one element for each task.
    """
    return [task.feed_forward(input_data, prediction)
            for task in self.tasks]
def backprop(self, input_data, targets, cache=None):
    """Compute gradients for each task and combine the results.

    **Parameters:**

    input_data : ``GPUArray``
        Inpute data to compute activations for.

    targets : ``GPUArray``
        The target values of the units.

    cache : list of ``GPUArray``
        Cache obtained from forward pass. If the cache is provided, then
        the activations are not recalculated.

    **Returns:**

    gradients : list
        Gradients with respect to the weights and biases for each task

    df_input : ``GPUArray``
        Gradients with respect to the input, obtained by adding the
        gradients with respect to the inputs from each task, weighted by
        ``MultitaskTopLayer.task_weights``.
    """
    df_input = gpuarray.zeros_like(input_data)

    if cache is None:
        cache = self.n_tasks * [None]

    gradients = []
    # Accumulate per-task input gradients, weighted by task_weights.
    for targets_task, cache_task, task, task_weight in \
            izip(targets, cache, self.tasks, self.task_weights):
        gradients_task, df_input_task = \
            task.backprop(input_data, targets_task, cache_task)
        df_input = df_input.mul_add(1., df_input_task, task_weight)
        gradients.extend(gradients_task)

    return gradients, df_input
def cross_entropy_error(self, input_data, targets, average=True,
                        cache=None, prediction=False,
                        sum_errors=True):
    """Computes the cross-entropy error for all tasks."""
    if cache is None:
        cache = self.n_tasks * [None]

    # One loss entry per task, each computed on its own targets and cache.
    loss = [task.cross_entropy_error(input_data, targets_task,
                                     average=average,
                                     cache=cache_task,
                                     prediction=prediction)
            for targets_task, cache_task, task
            in izip(targets, cache, self.tasks)]

    return sum(loss) if sum_errors else loss
def parameters(self, value):
    """Update the parameters.

    ``value`` must have the shape ``(weights, biases)``; each element may
    be either a device ``GPUArray`` (stored as-is) or a host array
    (uploaded with ``gpuarray.to_gpu``).
    """
    self.W = value[0] if isinstance(value[0], GPUArray) else \
        gpuarray.to_gpu(value[0])
    # BUG FIX: the bias branch previously tested isinstance(value[0], ...),
    # so a host-array bias paired with a device-array weight was stored
    # without being uploaded (and vice versa). Test the bias value itself.
    self.b = value[1] if isinstance(value[1], GPUArray) else \
        gpuarray.to_gpu(value[1])
def architecture(self):
    """Returns a dictionary describing the architecture of the layer."""
    activation = getattr(self, 'activation_function', None)
    return {
        'class': self.__class__,
        'n_in': self.n_in,
        'n_units': self.n_units,
        'activation_function': activation,
    }
def feed_forward(self, input_data, prediction=False):
    """Propagate forward through the layer

    **Parameters:**

    input_data : ``GPUArray``
        Input data to compute activations for.

    prediction : bool, optional
        Whether to use prediction model. Only relevant when using dropout.
        If true, then weights are multiplied by 1 - dropout if the layer
        uses dropout.

    **Returns:**

    activations : ``GPUArray``
        The activations of the hidden units.
    """
    if input_data.shape[1] != self.W.shape[0]:
        raise ValueError('Number of outputs from previous layer (%d) '
                         'does not match number of inputs to this layer (%d)' %
                         (input_data.shape[1], self.W.shape[0]))

    # Affine transform followed by in-place activation: f(x.W + b).
    activations = linalg.dot(input_data, self.W)
    activations = add_vec_to_mat(activations, self.b, inplace=True)

    self.f(activations)

    if self.dropout > 0:
        if prediction:
            # At prediction time scale activations instead of dropping units.
            activations *= 1 - self.dropout
        else:
            # At training time sample a mask and return it for backprop.
            dropout_mask = sample_dropout_mask(activations, self.dropout)
            return activations, dropout_mask

    # Always return a tuple so callers can unpack a uniform cache shape.
    return (activations,)
def backprop(self, input_data, df_output, cache=None):
    """Backpropagate through the hidden layer

    **Parameters:**

    input_data : ``GPUArray``
        Input data to compute activations for.

    df_output : ``GPUArray``
        Gradients with respect to the activations of this layer (received
        from the layer above).

    cache : list of ``GPUArray``
        Cache obtained from forward pass. If the cache is provided, then
        the activations are not recalculated.

    **Returns:**

    gradients : tuple of ``GPUArray``
        Gradients with respect to the weights and biases in the form
        ``(df_weights, df_biases)``.

    df_input : ``GPUArray``
        Gradients with respect to the input.
    """
    # Get cache if it wasn't provided
    if cache is None:
        cache = self.feed_forward(input_data,
                                  prediction=False)

    if len(cache) == 2:
        activations, dropout_mask = cache
    else:
        activations = cache[0]

    # Multiply the binary mask with the incoming gradients
    # NOTE(review): if self.dropout > 0 but the supplied cache has only one
    # element, 'dropout_mask' is unbound here and this raises — confirm
    # callers always pass the 2-element cache when dropout is enabled.
    if self.dropout > 0 and dropout_mask is not None:
        apply_dropout_mask(df_output, dropout_mask)

    # Get gradient wrt activation function
    df_activations = self.df(activations)
    delta = mult_matrix(df_activations, df_output)

    # Gradient wrt weights
    df_W = linalg.dot(input_data, delta, transa='T')
    # Gradient wrt bias
    df_b = matrix_sum_out_axis(delta, 0)
    # Gradient wrt inputs
    df_input = linalg.dot(delta, self.W, transb='T')

    # L1 weight decay
    if self.l1_penalty_weight:
        df_W += self.l1_penalty_weight * sign(self.W)

    # L2 weight decay
    if self.l2_penalty_weight:
        df_W += self.l2_penalty_weight * self.W

    return (df_W, df_b), df_input
def cublasCreate():
    """
    Initialize CUBLAS.

    Initializes CUBLAS and creates a handle to a structure holding
    the CUBLAS library context.

    Returns
    -------
    handle : void_p
        CUBLAS context.
    """
    ctx = ctypes.c_void_p()
    cublasCheckStatus(_libcublas.cublasCreate_v2(ctypes.byref(ctx)))
    return ctx.value
def cublasDestroy(handle):
    """
    Release CUBLAS resources.

    Releases hardware resources used by CUBLAS.

    Parameters
    ----------
    handle : void_p
        CUBLAS context.
    """
    cublasCheckStatus(
        _libcublas.cublasDestroy_v2(ctypes.c_void_p(handle)))
def cublasGetVersion(handle):
    """
    Get CUBLAS version.

    Returns version number of installed CUBLAS libraries.

    Parameters
    ----------
    handle : void_p
        CUBLAS context.

    Returns
    -------
    version : int
        CUBLAS version.
    """
    v = ctypes.c_int()
    status = _libcublas.cublasGetVersion_v2(handle, ctypes.byref(v))
    cublasCheckStatus(status)
    return v.value
def cublasSetStream(handle, id):
    """
    Set current CUBLAS library stream.

    Parameters
    ----------
    handle : void_p
        CUBLAS context.
    id : int
        Stream ID.
    """
    cublasCheckStatus(_libcublas.cublasSetStream_v2(handle, id))
def cublasGetStream(handle):
    """
    Get current CUBLAS library stream.

    Parameters
    ----------
    handle : void_p
        CUBLAS context.

    Returns
    -------
    id : int
        Stream ID.
    """
    id = ctypes.c_int()
    status = _libcublas.cublasGetStream_v2(handle, ctypes.byref(id))
    cublasCheckStatus(status)
    return id.value
def cublasSgbmv(handle, trans, m, n, kl, ku, alpha, A, lda,
                x, incx, beta, y, incy):
    """
    Matrix-vector product for real general banded matrix.
    """
    # NOTE(review): `trans` is forwarded untranslated here, unlike the
    # gemv wrappers which map it through _CUBLAS_OP — confirm callers
    # pass a raw CUBLAS operation enum value to the gbmv routines.
    a_ref = ctypes.byref(ctypes.c_float(alpha))
    b_ref = ctypes.byref(ctypes.c_float(beta))
    status = _libcublas.cublasSgbmv_v2(handle, trans, m, n, kl, ku,
                                       a_ref, int(A), lda, int(x), incx,
                                       b_ref, int(y), incy)
    cublasCheckStatus(status)
def cublasCgbmv(handle, trans, m, n, kl, ku, alpha, A, lda,
                x, incx, beta, y, incy):
    """
    Matrix-vector product for complex general banded matrix.
    """
    # NOTE(review): `trans` is forwarded untranslated here, unlike the
    # gemv wrappers which map it through _CUBLAS_OP — confirm callers
    # pass a raw CUBLAS operation enum value to the gbmv routines.
    a_ref = ctypes.byref(cuda.cuFloatComplex(alpha.real, alpha.imag))
    b_ref = ctypes.byref(cuda.cuFloatComplex(beta.real, beta.imag))
    status = _libcublas.cublasCgbmv_v2(handle, trans, m, n, kl, ku,
                                       a_ref, int(A), lda, int(x), incx,
                                       b_ref, int(y), incy)
    cublasCheckStatus(status)
def cublasZgbmv(handle, trans, m, n, kl, ku, alpha, A, lda,
                x, incx, beta, y, incy):
    """
    Matrix-vector product for double-precision complex general banded
    matrix.
    """
    # NOTE(review): `trans` is forwarded untranslated here, unlike the
    # gemv wrappers which map it through _CUBLAS_OP — confirm callers
    # pass a raw CUBLAS operation enum value to the gbmv routines.
    a_ref = ctypes.byref(cuda.cuDoubleComplex(alpha.real, alpha.imag))
    b_ref = ctypes.byref(cuda.cuDoubleComplex(beta.real, beta.imag))
    status = _libcublas.cublasZgbmv_v2(handle, trans, m, n, kl, ku,
                                       a_ref, int(A), lda, int(x), incx,
                                       b_ref, int(y), incy)
    cublasCheckStatus(status)
def cublasSgemv(handle, trans, m, n, alpha, A, lda, x, incx, beta, y, incy):
    """
    Matrix-vector product for real general matrix (single precision).
    """
    a_ref = ctypes.byref(ctypes.c_float(alpha))
    b_ref = ctypes.byref(ctypes.c_float(beta))
    cublasCheckStatus(
        _libcublas.cublasSgemv_v2(handle, _CUBLAS_OP[trans], m, n,
                                  a_ref, int(A), lda, int(x), incx,
                                  b_ref, int(y), incy))
def cublasDgemv(handle, trans, m, n, alpha, A, lda, x, incx, beta, y, incy):
    """
    Matrix-vector product for real general matrix (double precision).
    """
    a_ref = ctypes.byref(ctypes.c_double(alpha))
    b_ref = ctypes.byref(ctypes.c_double(beta))
    cublasCheckStatus(
        _libcublas.cublasDgemv_v2(handle, _CUBLAS_OP[trans], m, n,
                                  a_ref, int(A), lda, int(x), incx,
                                  b_ref, int(y), incy))
def cublasCgemv(handle, trans, m, n, alpha, A, lda, x, incx, beta, y, incy):
    """
    Matrix-vector product for complex general matrix (single precision).
    """
    a_ref = ctypes.byref(cuda.cuFloatComplex(alpha.real, alpha.imag))
    b_ref = ctypes.byref(cuda.cuFloatComplex(beta.real, beta.imag))
    cublasCheckStatus(
        _libcublas.cublasCgemv_v2(handle, _CUBLAS_OP[trans], m, n,
                                  a_ref, int(A), lda, int(x), incx,
                                  b_ref, int(y), incy))
def cublasZgemv(handle, trans, m, n, alpha, A, lda, x, incx, beta, y, incy):
    """
    Matrix-vector product for complex general matrix (double precision).
    """
    a_ref = ctypes.byref(cuda.cuDoubleComplex(alpha.real, alpha.imag))
    b_ref = ctypes.byref(cuda.cuDoubleComplex(beta.real, beta.imag))
    cublasCheckStatus(
        _libcublas.cublasZgemv_v2(handle, _CUBLAS_OP[trans], m, n,
                                  a_ref, int(A), lda, int(x), incx,
                                  b_ref, int(y), incy))
def cublasSger(handle, m, n, alpha, x, incx, y, incy, A, lda):
    """
    Rank-1 operation on real general matrix (single precision).
    """
    cublasCheckStatus(
        _libcublas.cublasSger_v2(handle, m, n,
                                 ctypes.byref(ctypes.c_float(alpha)),
                                 int(x), incx, int(y), incy,
                                 int(A), lda))
def cublasDger(handle, m, n, alpha, x, incx, y, incy, A, lda):
    """
    Rank-1 operation on real general matrix (double precision).
    """
    cublasCheckStatus(
        _libcublas.cublasDger_v2(handle, m, n,
                                 ctypes.byref(ctypes.c_double(alpha)),
                                 int(x), incx, int(y), incy,
                                 int(A), lda))
def cublasCgeru(handle, m, n, alpha, x, incx, y, incy, A, lda):
    """
    Rank-1 operation on complex general matrix (unconjugated,
    single precision).
    """
    a_ref = ctypes.byref(cuda.cuFloatComplex(alpha.real, alpha.imag))
    cublasCheckStatus(
        _libcublas.cublasCgeru_v2(handle, m, n, a_ref,
                                  int(x), incx, int(y), incy,
                                  int(A), lda))
def cublasZgerc(handle, m, n, alpha, x, incx, y, incy, A, lda):
    """
    Rank-1 operation on complex general matrix (conjugated,
    double precision).
    """
    a_ref = ctypes.byref(cuda.cuDoubleComplex(alpha.real, alpha.imag))
    cublasCheckStatus(
        _libcublas.cublasZgerc_v2(handle, m, n, a_ref,
                                  int(x), incx, int(y), incy,
                                  int(A), lda))
def cublasSsbmv(handle, uplo, n, k, alpha, A, lda, x, incx, beta, y, incy):
    """
    Matrix-vector product for real symmetric-banded matrix
    (single precision).
    """
    a_ref = ctypes.byref(ctypes.c_float(alpha))
    b_ref = ctypes.byref(ctypes.c_float(beta))
    cublasCheckStatus(
        _libcublas.cublasSsbmv_v2(handle, _CUBLAS_FILL_MODE[uplo], n, k,
                                  a_ref, int(A), lda, int(x), incx,
                                  b_ref, int(y), incy))
def cublasDsbmv(handle, uplo, n, k, alpha, A, lda, x, incx, beta, y, incy):
    """
    Matrix-vector product for real symmetric-banded matrix
    (double precision).
    """
    a_ref = ctypes.byref(ctypes.c_double(alpha))
    b_ref = ctypes.byref(ctypes.c_double(beta))
    cublasCheckStatus(
        _libcublas.cublasDsbmv_v2(handle, _CUBLAS_FILL_MODE[uplo], n, k,
                                  a_ref, int(A), lda, int(x), incx,
                                  b_ref, int(y), incy))
def cublasSspmv(handle, uplo, n, alpha, AP, x, incx, beta, y, incy):
    """
    Matrix-vector product for real symmetric-packed matrix
    (single precision). `AP` is the device pointer to the packed
    matrix data.
    """
    # Bug fix: AP is a device pointer and must be passed as an integer
    # address (as in cublasSspr/cublasSspr2), not wrapped in
    # ctypes.byref(ctypes.c_float(AP)), which coerces the address to a
    # 32-bit float and hands CUBLAS a pointer to garbage.
    status = _libcublas.cublasSspmv_v2(handle,
                                       _CUBLAS_FILL_MODE[uplo], n,
                                       ctypes.byref(ctypes.c_float(alpha)),
                                       int(AP), int(x), incx,
                                       ctypes.byref(ctypes.c_float(beta)),
                                       int(y), incy)
    cublasCheckStatus(status)
def cublasDspmv(handle, uplo, n, alpha, AP, x, incx, beta, y, incy):
    """
    Matrix-vector product for real symmetric-packed matrix
    (double precision). `AP` is the device pointer to the packed
    matrix data.
    """
    # Bug fix: AP is a device pointer and must be passed as an integer
    # address (as in cublasDspr/cublasDspr2), not wrapped in
    # ctypes.byref(ctypes.c_double(AP)), which coerces the address to a
    # double and hands CUBLAS a pointer to garbage.
    status = _libcublas.cublasDspmv_v2(handle,
                                       _CUBLAS_FILL_MODE[uplo], n,
                                       ctypes.byref(ctypes.c_double(alpha)),
                                       int(AP), int(x), incx,
                                       ctypes.byref(ctypes.c_double(beta)),
                                       int(y), incy)
    cublasCheckStatus(status)
def cublasSspr(handle, uplo, n, alpha, x, incx, AP):
    """
    Rank-1 operation on real symmetric-packed matrix
    (single precision).
    """
    cublasCheckStatus(
        _libcublas.cublasSspr_v2(handle, _CUBLAS_FILL_MODE[uplo], n,
                                 ctypes.byref(ctypes.c_float(alpha)),
                                 int(x), incx, int(AP)))
def cublasDspr(handle, uplo, n, alpha, x, incx, AP):
    """
    Rank-1 operation on real symmetric-packed matrix
    (double precision).
    """
    cublasCheckStatus(
        _libcublas.cublasDspr_v2(handle, _CUBLAS_FILL_MODE[uplo], n,
                                 ctypes.byref(ctypes.c_double(alpha)),
                                 int(x), incx, int(AP)))
def cublasSspr2(handle, uplo, n, alpha, x, incx, y, incy, AP):
    """
    Rank-2 operation on real symmetric-packed matrix
    (single precision).
    """
    cublasCheckStatus(
        _libcublas.cublasSspr2_v2(handle, _CUBLAS_FILL_MODE[uplo], n,
                                  ctypes.byref(ctypes.c_float(alpha)),
                                  int(x), incx, int(y), incy,
                                  int(AP)))
def cublasDspr2(handle, uplo, n, alpha, x, incx, y, incy, AP):
    """
    Rank-2 operation on real symmetric-packed matrix
    (double precision).
    """
    cublasCheckStatus(
        _libcublas.cublasDspr2_v2(handle, _CUBLAS_FILL_MODE[uplo], n,
                                  ctypes.byref(ctypes.c_double(alpha)),
                                  int(x), incx, int(y), incy,
                                  int(AP)))
def cublasSsymv(handle, uplo, n, alpha, A, lda, x, incx, beta, y, incy):
    """
    Matrix-vector product for real symmetric matrix
    (single precision).
    """
    a_ref = ctypes.byref(ctypes.c_float(alpha))
    b_ref = ctypes.byref(ctypes.c_float(beta))
    cublasCheckStatus(
        _libcublas.cublasSsymv_v2(handle, _CUBLAS_FILL_MODE[uplo], n,
                                  a_ref, int(A), lda, int(x), incx,
                                  b_ref, int(y), incy))
def cublasDsymv(handle, uplo, n, alpha, A, lda, x, incx, beta, y, incy):
    """
    Matrix-vector product for real symmetric matrix
    (double precision).
    """
    a_ref = ctypes.byref(ctypes.c_double(alpha))
    b_ref = ctypes.byref(ctypes.c_double(beta))
    cublasCheckStatus(
        _libcublas.cublasDsymv_v2(handle, _CUBLAS_FILL_MODE[uplo], n,
                                  a_ref, int(A), lda, int(x), incx,
                                  b_ref, int(y), incy))
def cublasCsymv(handle, uplo, n, alpha, A, lda, x, incx, beta, y, incy):
    """
    Matrix-vector product for complex symmetric matrix
    (single precision).
    """
    a_ref = ctypes.byref(cuda.cuFloatComplex(alpha.real, alpha.imag))
    b_ref = ctypes.byref(cuda.cuFloatComplex(beta.real, beta.imag))
    cublasCheckStatus(
        _libcublas.cublasCsymv_v2(handle, _CUBLAS_FILL_MODE[uplo], n,
                                  a_ref, int(A), lda, int(x), incx,
                                  b_ref, int(y), incy))
def cublasZsymv(handle, uplo, n, alpha, A, lda, x, incx, beta, y, incy):
    """
    Matrix-vector product for complex symmetric matrix
    (double precision).
    """
    a_ref = ctypes.byref(cuda.cuDoubleComplex(alpha.real, alpha.imag))
    b_ref = ctypes.byref(cuda.cuDoubleComplex(beta.real, beta.imag))
    cublasCheckStatus(
        _libcublas.cublasZsymv_v2(handle, _CUBLAS_FILL_MODE[uplo], n,
                                  a_ref, int(A), lda, int(x), incx,
                                  b_ref, int(y), incy))
def cublasSsyr(handle, uplo, n, alpha, x, incx, A, lda):
    """
    Rank-1 operation on real symmetric matrix (single precision).
    """
    cublasCheckStatus(
        _libcublas.cublasSsyr_v2(handle, _CUBLAS_FILL_MODE[uplo], n,
                                 ctypes.byref(ctypes.c_float(alpha)),
                                 int(x), incx, int(A), lda))
def cublasDsyr(handle, uplo, n, alpha, x, incx, A, lda):
    """
    Rank-1 operation on real symmetric matrix (double precision).
    """
    cublasCheckStatus(
        _libcublas.cublasDsyr_v2(handle, _CUBLAS_FILL_MODE[uplo], n,
                                 ctypes.byref(ctypes.c_double(alpha)),
                                 int(x), incx, int(A), lda))
def cublasCsyr(handle, uplo, n, alpha, x, incx, A, lda):
    """
    Rank-1 operation on complex symmetric matrix (single precision).
    """
    a_ref = ctypes.byref(cuda.cuFloatComplex(alpha.real, alpha.imag))
    cublasCheckStatus(
        _libcublas.cublasCsyr_v2(handle, _CUBLAS_FILL_MODE[uplo], n,
                                 a_ref, int(x), incx, int(A), lda))