code
stringlengths
1
1.72M
language
stringclasses
1 value
# Copyright 2012 Tom SF Haines

# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at

#   http://www.apache.org/licenses/LICENSE-2.0

# Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.



import numpy



class ExemplarSet:
  """An interface for a set of feature vectors, referred to as exemplars - whilst a data matrix will typically be used this allows the possibility of exemplars for which that is impractical, i.e. calculating them on the fly if there is an unreasonable number of features within each exemplar. Also supports the concept of channels, which ties in with the test generation so you can have different generators for each channel. For training the 'answer' is stored in its own channel (Note that, because that channel will not exist for novel features it should always be the last channel, so that indexing is consistent, unless it is replaced with a dummy channel.), the type of which will depend on the problem being solved. Also allows the mixing of both continuous and discrete values."""

  def exemplars(self):
    """Returns how many exemplars are provided."""
    raise NotImplementedError

  def channels(self):
    """Returns how many channels of features are provided. They are indexed {0, ..., return-1}."""
    raise NotImplementedError

  def name(self, channel):
    """Returns a string as a name for a channel - this is optional and provided more for human usage. Will return None if the channel in question has no name."""
    return None

  def dtype(self, channel):
    """Returns the numpy dtype used for the given channel. numpy.float32 and numpy.int32 are considered to be the standard choices."""
    raise NotImplementedError

  def features(self, channel):
    """Returns how many features exist for each exemplar for the given channel - they can then be indexed {0, .., return-1} for the given channel."""
    raise NotImplementedError

  def __getitem__(self, index):
    """Actual data access is via the [] operator, with 3 entities - [channel, exemplar(s), feature(s)]. channel must index the channel, and indicates which channel to get the data from - must always be an integer. exemplar(s) indicates which exemplars to return and feature(s) which features to return for each of the exemplars. For both of these 3 indexing types must be supported - a single integer, a slice, or a numpy array of indexing integers. For indexing integers the system is designed to work such that repetitions are never used, though that is in fact supported most of the time by actual implementations. The return value must always have the type indicated by the dtype method for the channel in question. If both are indexed with an integer then it will return a single number (But still of the numpy dtype.); if only 1 is an integer a 1D numpy array; and if neither are integers a 2D numpy array, indexed [relative exemplar, relative feature]. Note that this last requirement is not the numpy default, which would actually continue to give a 1D array rather than the 2D subset defined by two sets of indices."""
    raise NotImplementedError

  def codeC(self, channel, name):
    """Returns a dictionary containing all the entities needed to access the given channel of the exemplar from within C, using name to provide a unique string to avoid namespace clashes. Will raise a NotImplementedError if not available.
    ['type'] = The C type of the channel, often 'float'.
    ['input'] = The input object to be passed into the C code, must be protected from any messing around that scipy.weave might do.
    ['itype'] = The input type in C, as a string, usually 'PyObject *' or 'PyArrayObject *'.
    ['get'] = Returns code for a function to get values from the channel of the exemplar; has calling convention <type> <name>_get(<itype> input, int exemplar, int feature).
    ['exemplars'] = Code for a function to get the number of exemplars; has calling convention int <name>_exemplars(<itype> input). Will obviously return the same value for all channels, so can be a bit redundant.
    ['features'] = Code for a function that returns how many features the channel has; calling convention is int <name>_features(<itype> input).
    ['name'] is also provided, which contains the base name handed to this method, for convenience."""
    raise NotImplementedError

  def listCodeC(self, name):
    """Helper method - returns a tuple indexed by channel that gives the dictionary returned by codeC for each channel in this exemplar. It generates the names using the provided name by adding the number indexing the channel to the end. Happens to be the required input elsewhere."""
    return tuple(self.codeC(c, name + str(c)) for c in range(self.channels()))

  def tupleInputC(self):
    """Helper method, that can be overridden - returns a tuple containing the inputs needed for the exemplar."""
    # Bug fix: the original referenced an undefined variable 'name' here (guaranteed
    # NameError). Only the 'input' entry of the codeC dictionary is used, so any unique
    # per-channel name string works.
    return tuple(self.codeC(c, 'channel' + str(c))['input'] for c in range(self.channels()))

  def key(self):
    """Provides a unique string that can be used to hash the results of codeC, to avoid repeated generation. Must be implemented if codeC is implemented."""
    raise NotImplementedError



class MatrixES(ExemplarSet):
  """The most common exemplar set - basically what you use when all the feature vectors can be computed and then stored in memory without issue. Contains a data matrix for each channel, where these are provided by the user."""

  # Maps a numpy dtype name to the C type emitted by codeC. The int32->'long' etc.
  # mappings are kept exactly as the original chain of comparisons had them (they
  # assume the ILP32/weave-era sizes) -- do not "fix" without checking the C side.
  _ctype = {'float32':'float', 'float64':'double', 'int32':'long', 'int64':'long long', 'uint32':'unsigned long', 'uint64':'unsigned long long', 'int16':'short', 'uint16':'unsigned short', 'int8':'char', 'uint8':'unsigned char'}

  def __init__(self, *args):
    """Optionally allows you to provide a list of numpy data matrices to be the channels data matrices. Alternatively you can use the add method to add them, one after another, post construction, or some combination of both. All data matrices must be 2D numpy arrays, with the first dimension, indexing the exemplar, being the same size in all cases. (If there is only 1 exemplar then it will accept 1D arrays.)"""
    self.dm = [a.reshape((1,-1)) if len(a.shape)==1 else a for a in args]
    for dm in self.dm:
      assert(len(dm.shape)==2)
      assert(dm.shape[0]==self.dm[0].shape[0])

  def add(self, dm):
    """Adds a new data matrix of information as another channel. Returns its channel index. If given a 1D matrix assumes that there is only one exemplar and adjusts it accordingly."""
    if len(dm.shape)==1: dm = dm.reshape((1,-1))
    assert(len(dm.shape)==2)
    self.dm.append(dm)
    assert(dm.shape[0]==self.dm[0].shape[0])
    return len(self.dm)-1

  def append(self, *args):
    """Allows you to add exemplars to the structure, by providing a set of data matrices that align with those contained, which contain the new exemplars. Note that this is slow (full reallocation per call) and generally ill advised. If adding a single new exemplar the arrays can be 1D."""
    assert(len(args)==len(self.dm))
    for i, (prev, extra) in enumerate(zip(self.dm, args)):
      if len(extra.shape)==1: extra = extra.reshape((1,-1))
      self.dm[i] = numpy.append(prev, extra, 0)

  def exemplars(self):
    return self.dm[0].shape[0]

  def channels(self):
    return len(self.dm)

  def dtype(self, channel):
    return self.dm[channel].dtype

  def features(self, channel):
    return self.dm[channel].shape[1]

  def __getitem__(self, index):
    # Flatten both indexers to decide the return rank: if either selects a single
    # entry numpy's default indexing already gives the contracted shape; otherwise
    # ix_ is needed to get the 2D [exemplar, feature] subset the interface promises.
    a = numpy.asarray(index[1]).reshape(-1)
    b = numpy.asarray(index[2]).reshape(-1)
    if a.shape[0]==1 or b.shape[0]==1:
      return self.dm[index[0]][index[1], index[2]]
    else:
      return self.dm[index[0]][numpy.ix_(a,b)]

  def codeC(self, channel, name):
    inp = self.dm[channel]
    dtype = self._ctype.get(inp.dtype.name)
    if dtype is None: raise NotImplementedError

    ret = dict()
    ret['name'] = name
    ret['type'] = dtype
    ret['input'] = inp
    ret['itype'] = 'PyArrayObject *'
    ret['get'] = 'inline %s %s_get(PyArrayObject * input, int exemplar, int feature) {return *(%s *)(input->data + exemplar*input->strides[0] + feature*input->strides[1]);}' % (ret['type'], name, ret['type'])
    ret['exemplars'] = 'inline int %s_exemplars(PyArrayObject * input) {return input->dimensions[0];}'%name
    ret['features'] = 'inline int %s_features(PyArrayObject * input) {return input->dimensions[1];}'%name

    return ret

  def tupleInputC(self):
    return tuple(self.dm)

  def key(self):
    # ':'.join replaces the Py2-only reduce(lambda a,b: a+':'+b, ...) - same output.
    return 'MatrixES|' + ':'.join(str(dm.dtype) for dm in self.dm)



MatrixFS = MatrixES # For backward compatability.
class MatrixGrow(ExemplarSet):
  """A slightly more advanced version of the basic exemplar set that has better support for incremental learning, as it allows appends to be more efficient. It still assumes that all of the data can be fitted in memory, and makes use of numpy arrays for internal storage."""

  # Maps a numpy dtype name to the C type emitted by codeC - kept identical to the
  # original chain of dtype comparisons (int32->'long' etc. match the C side).
  _ctype = {'float32':'float', 'float64':'double', 'int32':'long', 'int64':'long long', 'uint32':'unsigned long', 'uint64':'unsigned long long', 'int16':'short', 'uint16':'unsigned short', 'int8':'char', 'uint8':'unsigned char'}

  def __init__(self, *args):
    """Optionally allows you to provide a list of numpy data matrices to be the channels data matrices. Alternatively you can use the add method to add them, one after another, post construction, or use append to start things going. All data matrices must be 2D numpy arrays, with the first dimension, indexing the exemplar, being the same size in all cases. (If there is only 1 exemplar then it will accept 1D arrays.)"""
    # Internal storage is as a list, where each entry in the list is a set of exemplars.
    # The exemplars are represented as a further list, indexed by channel, of 2D data matrices.
    if len(args)!=0:
      self.dmcList = [[a.reshape((1,-1)) if len(a.shape)==1 else a for a in args]]
      for dm in self.dmcList[0]:
        assert(len(dm.shape)==2)
        # Bug fix: the original asserted against self.dm[0], an attribute this class
        # never defines (copy/paste from MatrixES) - it meant the first channel here.
        assert(dm.shape[0]==self.dmcList[0][0].shape[0])
    else:
      self.dmcList = []

  def add(self, dm):
    """Adds a new data matrix of information as another channel. Returns its channel index. If given a 1D matrix assumes that there is only one exemplar and adjusts it accordingly."""
    self.make_compact()
    if len(dm.shape)==1: dm = dm.reshape((1,-1))
    assert(len(dm.shape)==2)
    # Bug fix: the original used a bare 'dmcList' here (NameError) - self. was missing.
    if len(self.dmcList)==0: self.dmcList.append([])
    self.dmcList[0].append(dm)
    assert(dm.shape[0]==self.dmcList[0][0].shape[0])
    return len(self.dmcList[0])-1

  def append(self, *args):
    """Allows you to add exemplars to the structure, by providing a set of data matrices that align with those contained, which contain the new exemplars. If adding a single new exemplar the arrays can be 1D."""
    # List comprehension rather than map() so args is indexable under Python 3 as well.
    args = [dm if len(dm.shape)!=1 else dm.reshape((1,-1)) for dm in args]
    for dm in args:
      assert(len(dm.shape)==2)
      assert(dm.shape[0]==args[0].shape[0])

    if len(self.dmcList)!=0:
      assert(len(args)==len(self.dmcList[0]))
      for i, dm in enumerate(args):
        assert(dm.dtype==self.dmcList[0][i].dtype)
        assert(dm.shape[1]==self.dmcList[0][i].shape[1])

    self.dmcList.append(args)

  def exemplars(self):
    return sum(dmc[0].shape[0] for dmc in self.dmcList)

  def channels(self):
    return len(self.dmcList[0]) if len(self.dmcList)!=0 else 0

  def dtype(self, channel):
    return self.dmcList[0][channel].dtype

  def features(self, channel):
    return self.dmcList[0][channel].shape[1]

  def make_compact(self):
    """Internal method really - converts the data structure so that len(dmcList)==1, by concatenating arrays as needed."""
    if len(self.dmcList)>1:
      rep = []
      for i in range(len(self.dmcList[0])):
        rep.append(numpy.concatenate([dmc[i] for dmc in self.dmcList], axis=0))
      self.dmcList = [rep]

  def __getitem__(self, index):
    self.make_compact()
    a = numpy.asarray(index[1]).reshape(-1)
    b = numpy.asarray(index[2]).reshape(-1)
    # NOTE(review): unlike MatrixES this branches on isinstance(..., ndarray) rather
    # than on length - two length-1 arrays take the ix_ path here. Kept as-is.
    if not isinstance(index[1], numpy.ndarray) or not isinstance(index[2], numpy.ndarray):
      return self.dmcList[0][index[0]][index[1], index[2]]
    else:
      return self.dmcList[0][index[0]][numpy.ix_(a,b)]

  def codeC(self, channel, name):
    self.make_compact()

    inp = self.dmcList[0][channel]
    dtype = self._ctype.get(inp.dtype.name)
    if dtype is None: raise NotImplementedError

    ret = dict()
    ret['name'] = name
    ret['type'] = dtype
    ret['input'] = inp
    ret['itype'] = 'PyArrayObject *'
    ret['get'] = 'inline %s %s_get(PyArrayObject * input, int exemplar, int feature) {return *(%s *)(input->data + exemplar*input->strides[0] + feature*input->strides[1]);}' % (ret['type'], name, ret['type'])
    ret['exemplars'] = 'inline int %s_exemplars(PyArrayObject * input) {return input->dimensions[0];}'%name
    ret['features'] = 'inline int %s_features(PyArrayObject * input) {return input->dimensions[1];}'%name

    return ret

  def tupleInputC(self):
    self.make_compact()
    return tuple(self.dmcList[0])

  def key(self):
    self.make_compact()
    # ':'.join replaces the Py2-only reduce(lambda a,b: a+':'+b, ...) - same output.
    return 'MatrixGrow|' + ':'.join(str(dm.dtype) for dm in self.dmcList[0])
Python
# Copyright 2012 Tom SF Haines

# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at

#   http://www.apache.org/licenses/LICENSE-2.0

# Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.



import random

import numpy
import numpy.random

from generators import Generator
from tests import *

from utils.start_cpp import start_cpp



class AxisClassifyGen(Generator, AxisSplit):
  """Provides a generator that creates axis-aligned split planes that have their position selected to maximise the information gain with respect to the task of classification."""

  def __init__(self, channel, catChannel, count):
    """channel is which channel to select the values from; catChannel contains the true classes of the features so the split can be optimised; and count is how many tests it will return, where each has been constructed around a randomly selected feature from the channel."""
    AxisSplit.__init__(self, channel)
    self.catChannel = catChannel
    self.count = count

  def clone(self):
    return AxisClassifyGen(self.channel, self.catChannel, self.count)

  def itertests(self, es, index, weights = None):
    # Unnormalised entropy of a histogram, up to the constant factors that cancel
    # when only relative comparison matters.
    def entropy(histo):
      histo = histo[histo>1e-6]
      return -(histo*(numpy.log(histo) - numpy.log(histo.sum()))).sum()

    for _ in range(self.count):
      # Select a random feature and fetch its values plus the class labels...
      ind = numpy.random.randint(es.features(self.channel))
      values = es[self.channel, index, ind]
      cats = es[self.catChannel, index, 0]

      if cats.shape[0]<2:
        split = 0.0
      else:
        # Sort by value so every boundary between adjacent values is a candidate split...
        indices = numpy.argsort(values)
        values = values[indices]
        cats = cats[indices]

        # Bug fix: weights must be permuted by the same sort as cats - the original
        # passed weights[index], pairing each class with the wrong weight. The
        # sibling LinearClassifyGen already did this correctly.
        high = numpy.bincount(cats, weights=weights[index[indices]] if weights is not None else None)
        low = numpy.zeros(high.shape[0], dtype=numpy.float32)

        improvement = -1e100
        for i in range(values.shape[0]-1):
          # Move the selected item from high to low...
          w = weights[index[indices[i]]] if weights is not None else 1
          high[cats[i]] -= w
          low[cats[i]] += w

          # Calculate the improvement (Within a scalar factor constant for the entire field - only care about the relative value.)...
          imp = -(entropy(low) + entropy(high))

          # Keep if best...
          if imp>improvement:
            split = 0.5*(values[i] + values[i+1])
            improvement = imp

      # tobytes() is the modern name of tostring() (removed in numpy 2.0); identical output.
      yield numpy.asarray([ind], dtype=numpy.int32).tobytes() + numpy.asarray([split], dtype=numpy.float32).tobytes()

  def genCodeC(self, name, exemplar_list):
    code = start_cpp() + """
    struct Store%(name)s
    {
     int cat;
     float value;
     float weight;
    };

    struct State%(name)s
    {
     void * test; // Will be the length of a 32 bit int followed by a float.
     size_t length;
     int countRemain;
     Store%(name)s * temp; // Temporary used for storing the sorted values.
     int catCount;
     float * low;
     float * high;
    };

    int %(name)s_store_comp(const void * lhs, const void * rhs)
    {
     const Store%(name)s & l = (*(Store%(name)s*)lhs);
     const Store%(name)s & r = (*(Store%(name)s*)rhs);
     if (l.value<r.value) return -1;
     if (l.value>r.value) return 1;
     return 0;
    }

    void %(name)s_init(State%(name)s & state, PyObject * data, Exemplar * test_set)
    {
     assert(sizeof(int)==4);

     int count = 0;
     state.catCount = 0;
     %(catChannelType)s ccd = (%(catChannelType)s)PyTuple_GetItem(data, %(catChannel)i);
     while (test_set)
     {
      count++;
      int cat = %(catChannelName)s_get(ccd, test_set->index, 0);
      if (cat>=state.catCount) state.catCount = cat+1;
      test_set = test_set->next;
     }

     state.length = sizeof(int) + sizeof(float);
     state.test = malloc(state.length);
     state.countRemain = %(count)i;
     state.temp = (Store%(name)s*)malloc(sizeof(Store%(name)s) * count);
     state.low = (float*)malloc(state.catCount*sizeof(float));
     state.high = (float*)malloc(state.catCount*sizeof(float));
    }

    bool %(name)s_next(State%(name)s & state, PyObject * data, Exemplar * test_set)
    {
     // Check if we are done...
     if (state.countRemain==0)
     {
      free(state.test);
      free(state.temp);
      free(state.low);
      free(state.high);
      return false;
     }
     state.countRemain--;

     // Select a random feature...
     %(channelType)s cd = (%(channelType)s)PyTuple_GetItem(data, %(channel)i);
     int feat = lrand48() %% %(channelName)s_features(cd);
     %(catChannelType)s ccd = (%(catChannelType)s)PyTuple_GetItem(data, %(catChannel)i);

     // Extract the values...
     int count = 0;
     while (test_set)
     {
      state.temp[count].cat = %(catChannelName)s_get(ccd, test_set->index, 0);
      state.temp[count].value = %(channelName)s_get(cd, test_set->index, feat);
      state.temp[count].weight = test_set->weight;
      count++;
      test_set = test_set->next;
     }

     // Sort them...
     qsort(state.temp, count, sizeof(Store%(name)s), %(name)s_store_comp);

     // Find the optimal split point...
     float bestSplit = 0.0;
     float bestImp = -1e100;

     for (int c=0; c<state.catCount; c++)
     {
      state.low[c] = 0.0;
      state.high[c] = 0.0;
     }
     for (int i=0; i<count; i++)
     {
      state.high[state.temp[i].cat] += state.temp[i].weight;
     }

     for (int i=0; i<(count-1); i++)
     {
      // Move the indexed element across...
      int c = state.temp[i].cat;
      float w = state.temp[i].weight;
      state.low[c] += w;
      state.high[c] -= w;

      // Calculate the improvement...
      float lowSum = 0.0;
      float highSum = 0.0;
      for (int c=0; c<state.catCount; c++)
      {
       lowSum += state.low[c];
       highSum += state.high[c];
      }
      float logLowSum = log(lowSum);
      float logHighSum = log(highSum);

      float imp = 0.0;
      for (int c=0; c<state.catCount; c++)
      {
       if (state.low[c]>1e-6) imp += state.low[c] * (log(state.low[c]) - logLowSum);
       if (state.high[c]>1e-6) imp += state.high[c] * (log(state.high[c]) - logHighSum);
      }

      // If its the best calculate and store the split point...
      if (imp>bestImp)
      {
       bestSplit = 0.5 * (state.temp[i].value + state.temp[i+1].value);
       bestImp = imp;
      }
     }

     // Store the test and return...
     ((int*)state.test)[0] = feat;
     ((float*)state.test)[1] = bestSplit;
     return true;
    }
    """%{'name':name, 'channel':self.channel, 'channelName':exemplar_list[self.channel]['name'], 'channelType':exemplar_list[self.channel]['itype'], 'catChannel':self.catChannel, 'catChannelName':exemplar_list[self.catChannel]['name'], 'catChannelType':exemplar_list[self.catChannel]['itype'], 'count':self.count}

    return (code, 'State'+name)



class LinearClassifyGen(Generator, LinearSplit):
  """Provides a generator for split planes that projects the features perpendicular to a random plane direction but then optimises where to put the split plane to maximise classification performance. Randomly selects which dimensions to work with and the orientation of the split plane."""

  def __init__(self, channel, catChannel, dims, dimCount, dirCount):
    """channel is which channel to select for and catChannel the channel to get the classification answers from. dims is how many features (dimensions) to test on for any given test. dimCount is how many sets of dimensions to randomly select to generate tests for, whilst dirCount is how many random directions (From a uniform distribution over a hyper-sphere.) to try. It actually generates the two independently and tries every combination, as generating uniform random directions is somewhat expensive."""
    LinearSplit.__init__(self, channel, dims)
    self.catChannel = catChannel
    self.dimCount = dimCount
    self.dirCount = dirCount

  def clone(self):
    return LinearClassifyGen(self.channel, self.catChannel, self.dims, self.dimCount, self.dirCount)

  def itertests(self, es, index, weights = None):
    # Unnormalised entropy, used for relative comparison only.
    def entropy(histo):
      histo = histo[histo>1e-6]
      return -(histo*(numpy.log(histo) - numpy.log(histo.sum()))).sum()

    # Generate random points on the hyper-sphere...
    dirs = numpy.random.normal(size=(self.dirCount, self.dims))
    dirs /= numpy.sqrt(numpy.square(dirs).sum(axis=1)).reshape((-1,1))

    # Iterate and select a set of dimensions before trying each direction on them...
    for _ in range(self.dimCount):
      #dims = numpy.random.choice(es.features(self.channel), size=self.dims, replace=False) For when numpy 1.7.0 is common
      dims = numpy.zeros(self.dims, dtype=numpy.int32)
      feats = es.features(self.channel)
      for i in range(self.dims):
        # Sampling without replacement: draw from the reduced range then shift past
        # already-taken indices.
        dims[i] = numpy.random.randint(feats-i)
        dims[i] += (dims[:i]<=dims[i]).sum()

      for di in dirs:
        # Project the selected features onto the direction...
        dists = (es[self.channel, index, dims] * di.reshape((1,-1))).sum(axis=1)
        cats = es[self.catChannel, index, 0]

        split = 0.0
        if cats.shape[0]>1:
          indices = numpy.argsort(dists)
          dists = dists[indices]
          cats = cats[indices]

          high = numpy.bincount(cats, weights=weights[index[indices]] if weights is not None else None)
          low = numpy.zeros(high.shape[0], dtype=numpy.float32)

          improvement = -1e100
          for i in range(dists.shape[0]-1):
            # Move the selected item from high to low...
            w = weights[index[indices[i]]] if weights is not None else 1
            high[cats[i]] -= w
            low[cats[i]] += w

            # Calculate the improvement (Within a scalar factor constant for the entire field - only care about the relative value.)...
            imp = -(entropy(low) + entropy(high))

            # Keep if best...
            if imp>improvement:
              ratio = numpy.random.random()
              split = ratio*dists[i] + (1.0-ratio)*dists[i+1]
              improvement = imp

        # tobytes() is the modern name of tostring() (removed in numpy 2.0); identical output.
        yield numpy.asarray(dims, dtype=numpy.int32).tobytes() + numpy.asarray(di, dtype=numpy.float32).tobytes() + numpy.asarray([split], dtype=numpy.float32).tobytes()

  def genCodeC(self, name, exemplar_list):
    code = start_cpp() + """
    struct Store%(name)s
    {
     int cat;
     float value;
     float weight;
    };

    struct State%(name)s
    {
     void * test; // Will be the length of a 32 bit int followed by a float.
     size_t length;
     float * dirs; // Vectors giving points uniformly distributed on the hyper-sphere.
     int * feat; // The features to index at this moment.
     int dimRemain;
     int dirRemain;
     Store%(name)s * temp; // Temporary used for storing the sorted values.
     int catCount;
     float * low;
     float * high;
    };

    int %(name)s_store_comp(const void * lhs, const void * rhs)
    {
     const Store%(name)s & l = (*(Store%(name)s*)lhs);
     const Store%(name)s & r = (*(Store%(name)s*)rhs);
     if (l.value<r.value) return -1;
     if (l.value>r.value) return 1;
     return 0;
    }

    void %(name)s_init(State%(name)s & state, PyObject * data, Exemplar * test_set)
    {
     assert(sizeof(int)==4);

     // Count how many exemplars are in the input, and how many classes are represented...
     int count = 0;
     state.catCount = 0;
     %(catChannelType)s ccd = (%(catChannelType)s)PyTuple_GetItem(data, %(catChannel)i);
     while (test_set)
     {
      count++;
      int cat = %(catChannelName)s_get(ccd, test_set->index, 0);
      if (cat>=state.catCount) state.catCount = cat+1;
      test_set = test_set->next;
     }

     // Setup the output...
     state.length = sizeof(int) * %(dims)i + sizeof(float) * (%(dims)i+1);
     state.test = malloc(state.length);

     // Counters so we know when we are done...
     state.dimRemain = %(dimCount)i;
     state.dirRemain = 0;

     // Generate a bunch of random directions...
     state.dirs = (float*)malloc(sizeof(float)*%(dims)i*%(dirCount)i);
     for (int d=0;d<%(dirCount)i;d++)
     {
      float length = 0.0;
      int base = %(dims)i * d;
      for (int f=0; f<%(dims)i; f++)
      {
       double u = 1.0-drand48();
       double v = 1.0-drand48();
       float bg = sqrt(-2.0*log(u)) * cos(2.0*M_PI*v);
       length += bg*bg;
       state.dirs[base+f] = bg;
      }

      length = sqrt(length);
      for (int f=0; f<%(dims)i; f++)
      {
       state.dirs[base+f] /= length;
      }
     }

     // Which features are currently being used...
     state.feat = (int*)malloc(sizeof(int)*%(dims)i);

     // Temporary for sorting the exemplars by value...
     state.temp = (Store%(name)s*)malloc(sizeof(Store%(name)s) * count);

     // Class count arrays for optimal split selection...
     state.low = (float*)malloc(state.catCount*sizeof(float));
     state.high = (float*)malloc(state.catCount*sizeof(float));

     // Safety...
     %(channelType)s cd = (%(channelType)s)PyTuple_GetItem(data, %(channel)i);
     int featCount = %(channelName)s_features(cd);
     if (%(dims)i>featCount)
     {
      state.dimRemain = 0; // Effectivly cancels work.
     }
    }

    bool %(name)s_next(State%(name)s & state, PyObject * data, Exemplar * test_set)
    {
     // Need access to the data...
     %(channelType)s cd = (%(channelType)s)PyTuple_GetItem(data, %(channel)i);
     %(catChannelType)s ccd = (%(catChannelType)s)PyTuple_GetItem(data, %(catChannel)i);

     // If we are done for this set of features select a new set...
     if (state.dirRemain==0)
     {
      if (state.dimRemain==0)
      {
       free(state.test);
       free(state.dirs);
       free(state.feat);
       free(state.temp);
       return false;
      }
      state.dimRemain--;

      // Select a new set of features...
      int featCount = %(channelName)s_features(cd);
      for (int f=0; f<%(dims)i; f++)
      {
       state.feat[f] = lrand48() %% (featCount-f);
       for (int j=0; j<f; j++)
       {
        if (state.feat[j]<=state.feat[f]) state.feat[f]++;
       }
      }

      // Reset the counter...
      state.dirRemain = %(dirCount)i;
     }
     state.dirRemain--;

     // Extract the values, projecting them using the current direction...
     int count = 0;
     while (test_set)
     {
      float val = 0.0;
      int base = %(dims)i * state.dirRemain;
      for (int f=0; f<%(dims)i; f++)
      {
       val += state.dirs[base+f] * %(channelName)s_get(cd, test_set->index, state.feat[f]);
      }

      state.temp[count].cat = %(catChannelName)s_get(ccd, test_set->index, 0);
      state.temp[count].value = val;
      state.temp[count].weight = test_set->weight;
      count++;
      test_set = test_set->next;
     }

     // Sort them...
     qsort(state.temp, count, sizeof(Store%(name)s), %(name)s_store_comp);

     // Find the optimal split point...
     float bestSplit = 0.0;
     float bestImp = -1e100;

     for (int c=0; c<state.catCount; c++)
     {
      state.low[c] = 0.0;
      state.high[c] = 0.0;
     }
     for (int i=0; i<count; i++)
     {
      state.high[state.temp[i].cat] += state.temp[i].weight;
     }

     for (int i=0; i<(count-1); i++)
     {
      // Move the indexed element across...
      int c = state.temp[i].cat;
      float w = state.temp[i].weight;
      state.low[c] += w;
      state.high[c] -= w;

      // Calculate the improvement...
      float lowSum = 0.0;
      float highSum = 0.0;
      for (int c=0; c<state.catCount; c++)
      {
       lowSum += state.low[c];
       highSum += state.high[c];
      }
      float logLowSum = log(lowSum);
      float logHighSum = log(highSum);

      float imp = 0.0;
      for (int c=0; c<state.catCount; c++)
      {
       if (state.low[c]>1e-6) imp += state.low[c] * (log(state.low[c]) - logLowSum);
       if (state.high[c]>1e-6) imp += state.high[c] * (log(state.high[c]) - logHighSum);
      }

      // If its the best calculate and store the split point...
      if (imp>bestImp)
      {
       bestSplit = 0.5 * (state.temp[i].value + state.temp[i+1].value);
       bestImp = imp;
      }
     }

     // Store it all in the output... (ints then floats share the buffer; sizeof(int)==4
     // is asserted in init so indexing the float view at offset dims is valid.)
     for (int i=0; i<%(dims)i;i++)
     {
      ((int*)state.test)[i] = state.feat[i];
     }
     int base = %(dims)i * state.dirRemain;
     for (int i=0; i<%(dims)i;i++)
     {
      ((float*)state.test)[%(dims)i+i] = state.dirs[base+i];
     }
     ((float*)state.test)[2*%(dims)i] = bestSplit;

     return true;
    }
    """%{'name':name, 'channel':self.channel, 'channelName':exemplar_list[self.channel]['name'], 'channelType':exemplar_list[self.channel]['itype'], 'catChannel':self.catChannel, 'catChannelName':exemplar_list[self.catChannel]['name'], 'catChannelType':exemplar_list[self.catChannel]['itype'], 'dims':self.dims, 'dimCount':self.dimCount, 'dirCount':self.dirCount}

    return (code, 'State'+name)



class DiscreteClassifyGen(Generator, DiscreteBucket):
  """Defines a generator for discrete data. It basically takes a single discrete feature and then greedily optimises to get the best classification performance. As it won't necessarily converge to the global optimum multiple restarts are provided. The discrete values must form a contiguous set, starting at 0 and going upwards."""
  # NOTE(review): the remainder of this class (rest of docstring, __init__, clone,
  # itertests, ...) continues beyond the range of this edit and is left untouched.
When splitting it only uses values it can see - unseen values will fail the test, though it always arranges for the most informative half to be the one that passes the test.""" def __init__(self, channel, catChannel, featCount, initCount): """channel is the channel to build discrete tests for; featCount is how many random features to randomly select and initCount how many random initialisations to try for each feature.""" DiscreteBucket.__init__(self, channel) self.catChannel = catChannel self.featCount = featCount self.initCount = initCount def clone(self): return DiscreteClassifyGen(self.channel, self.catChannel, self.featCount, self.initCount) def itertests(self, es, index, weights = None): # Helper function used below... def entropy(histo): histo = histo[histo>1e-6] return -(histo*(numpy.log(histo) - numpy.log(histo.sum()))).sum() # Iterate and yield the right number of tests... for _ in xrange(self.featCount): # Randomly select a feature, get the values and categories... feat = numpy.random.randint(es.features(self.channel)) values = es[self.channel, index, feat] cats = es[self.catChannel, index, 0] # Create histograms of category counts for each value... histos = dict() maxHistoSize = 0 for value in numpy.unique(values): use = values==value histos[value] = numpy.bincount(cats[use], weights = weights[index[use]] if weights!=None else None) maxHistoSize = max(maxHistoSize, histos[value].shape[0]) if len(histos)<2: # Can't optimise - give up. yield numpy.asarray(feat, dtype=numpy.int32).tostring() continue # Optimise from multiple starting points... for _ in xrange(self.initCount): # Generate a random greedy order... order = numpy.random.permutation(histos.keys()) # Initialise by putting the first two entrys in different halfs... 
low = numpy.zeros(maxHistoSize, dtype=numpy.float32) high = numpy.zeros(maxHistoSize, dtype=numpy.float32) low[:histos[order[0]].shape[0]] += histos[order[0]] lowEnt = entropy(low) keepLow = [order[0]] high[:histos[order[1]].shape[0]] += histos[order[1]] highEnt = entropy(high) keepHigh = [order[1]] # Loop the rest and put each of them in the best half... for i in xrange(2, order.shape[0]): # Get the histogram... histo = histos[order[i]] # Calculate the options... lowOp = low.copy() lowOp[:histo.shape[0]] += histo lowOpEnt = entropy(lowOp) highOp = high.copy() highOp[:histo.shape[0]] += histo highOpEnt = entropy(highOp) # Choose the best... if (lowEnt+highOpEnt)<(lowOpEnt+highEnt): high = highOp highEnt = highOpEnt keepHigh.append(order[i]) else: low = lowOp lowEnt = lowOpEnt keepLow.append(order[i]) # Swap the halfs so the half that passes has the lowest entropy - this is because unseen values will fail, so might as well send them to the least certain side... if lowEnt<highEnt: keepHigh = keepLow # Yield a discrete decision object... yield numpy.asarray(feat, dtype=numpy.int32).tostring() + numpy.asarray(keepHigh, dtype=numpy.int32).tostring() try: from svm import svm class SVMClassifyGen(Generator, Test): """Allows you to use the SVM library as a classifier for a node. Note that it detects if the SVM library is avaliable - if not then this class will not exist. Be warned that its quite memory intensive, as it just wraps the SVM objects without any clever packing. Works by randomly selecting a class to seperate and training a one vs all classifier, with random parameters on random features. Parameters are quite complicated, due to all the svm options and randomness control being extensive.""" def __init__(self, params, paramDraw, catChannel, catDraw, featChannel, featCount, featDraw): """There are three parts - the svm parameters to use, the class to seperate and the features to train on, all of which allow for the introduction of randomness. 
The svm parameters are controlled by params - it must be either a single svm.Params or a list of them, which includes things like parameter sets provided by the svm library. For each test generation paramDraw parameter options are selected randomly from params and tried combinatorically with the other two parts. The class of each feature must be provided, as an integer in channel catChannel. For each test generation it selects one class randomly from the classes exhibited by the features, which it does catDraw times, combinatorically with the other two parts. The features to train on are found in channel featChannel, and it randomly selects featCount of them to be used for each trainning run, which it does featDraw times combinatorically with the other two parameters. Each time classifiers are generated it will produce the product of the three *Draw parameters generators, where it draws each set once and then tries all combinations between the three.""" # svm parameters... if isinstance(params, svm.Params): self.params = [params] else: self.params = [x for x in params] self.paramDraw = paramDraw # class parameters... self.catChannel = catChannel self.catDraw = catDraw # feature parameters... self.featChannel = featChannel self.featCount = featCount self.featDraw = featDraw def clone(self): return SVMClassifyGen(self.params, self.paramDraw, self.catChannel, self.catDraw, self.featChannel, self.featCount, self.featDraw) def do(self, test, es, index = slice(None)): # Test is (feat index, svm.Model) - feat index grabs the features to run the model on, which tells you which side they belong on... dataMatrix = numpy.asarray(es[self.featChannel, index, test[0]], dtype=numpy.float_) if len(dataMatrix.shape)!=2: dataMatrix = dataMatrix.reshape((1,-1)) values = test[1].multiClassify(dataMatrix) return values>0 def itertests(self, es, index, weights = None): # Generate the set of svm parameters to use... 
param_set = random.sample(self.params, self.paramDraw) # Generate the set of classes to train for... cats = es[self.catChannel, index, 0] if numpy.unique(cats).shape[0]<2: return cat_set = random.sample(cats, min(self.catDraw, cats.shape[0])) y = numpy.empty(cats.shape[0], dtype=numpy.float_) # Iterate and yield each decision boundary by learning a model - base iteration is over the features to use for trainning... smo = svm.smo.SMO() for _ in xrange(self.featDraw): # Draw the feature set to use... feat_index = random.sample(xrange(es.features(self.featChannel)), self.featCount) feat_index = numpy.array(feat_index, dtype=numpy.int32) dataMatrix = numpy.asarray(es[self.featChannel, index, feat_index], dtype=numpy.float_) # Try it combinatorically with the other two... for cat in cat_set: y[:] = -1.0 y[cats==cat] = 1.0 smo.setData(dataMatrix, y) for param in param_set: smo.setParams(param) smo.solve() yield (feat_index, smo.getModel()) except ImportError: pass # Allow it to still work when the svm module is not avaliable.
Python
# Copyright 2012 Tom SF Haines

# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at

# http://www.apache.org/licenses/LICENSE-2.0

# Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.



import numpy
import numpy.random

from generators import Generator
from tests import *

from utils.start_cpp import start_cpp



class AxisRandomGen(Generator, AxisSplit):
    """Provides a generator for axis-aligned split planes that split the data set at random - uses a normal distribution constructed from the data. Has random selection of the dimension to split the axis on."""

    def __init__(self, channel, dimCount, splitCount, ignoreWeights=False):
        """channel is which channel to select the values from; dimCount is how many dimensions to try splits on; splitCount how many random split points to try for each selected dimension. Setting ignoreWeights to True means it will not consider the weights when calculating the normal distribution to draw random split points from."""
        AxisSplit.__init__(self, channel)
        self.dimCount = dimCount
        self.splitCount = splitCount
        self.ignoreWeights = ignoreWeights

    def clone(self):
        """Returns an independent copy with the same configuration."""
        return AxisRandomGen(self.channel, self.dimCount, self.splitCount, self.ignoreWeights)

    def itertests(self, es, index, weights = None):
        """Yields dimCount*splitCount serialised (feature, split) tests, with split points drawn from a normal distribution fitted to the indexed exemplars."""
        for _ in xrange(self.dimCount):
            # Pick a random feature and fetch its values for the indexed exemplars...
            ind = numpy.random.randint(es.features(self.channel))
            values = es[self.channel, index, ind]

            # Fit the normal distribution to draw splits from - weighted if weights are provided and not ignored.
            # Note: 'is None' rather than '==None' - weights can be an ndarray, for which == is elementwise.
            if weights is None or self.ignoreWeights:
                mean = numpy.mean(values)
                std = max(numpy.std(values), 1e-6)  # Floor avoids a degenerate (zero width) draw distribution.
            else:
                w = weights[index]
                mean = numpy.average(values, weights=w)
                std = max(numpy.average(numpy.fabs(values-mean), weights=w), 1e-6)

            # Draw the requested number of split points and emit them as packed int32 + float32...
            for _ in xrange(self.splitCount):
                split = numpy.random.normal(mean, std)
                yield numpy.asarray([ind], dtype=numpy.int32).tostring() + numpy.asarray([split], dtype=numpy.float32).tostring()

    def genCodeC(self, name, exemplar_list):
        """Returns (code, state type name) for the C implementation of this generator, for use with scipy.weave."""
        code = start_cpp() + """
struct State%(name)s
{
 void * test; // Will be the length of a 32 bit int followed by a float.
 size_t length;

 int dimRemain;
 int splitRemain;

 int feat;
 float mean;
 float sd;
};

int %(name)s_store_comp_unused; // Placeholder removed - nothing extra needed here.

void %(name)s_init(State%(name)s & state, PyObject * data, Exemplar * test_set)
{
 assert(sizeof(int)==4);
 state.length = sizeof(int) + sizeof(float);
 state.test = malloc(state.length);

 state.dimRemain = %(dimCount)i;
 state.splitRemain = 0;
}

bool %(name)s_next(State%(name)s & state, PyObject * data, Exemplar * test_set)
{
 // If we have used up all splits for this feature select a new one...
 if (state.splitRemain==0)
 {
  // If we have run out of features to select we are done - return as such...
  if (state.dimRemain==0)
  {
   free(state.test);
   return false;
  }
  state.dimRemain--;

  // Get the relevent channels object...
  %(channelType)s cd = (%(channelType)s)PyTuple_GetItem(data, %(channel)i);

  // Select a new feature...
  state.feat = lrand48() %% %(channelName)s_features(cd);

  // Calculate the mean and standard deviation of the data set, for the selected feature...
  float sum = 0.0;
  float mean = 0.0;
  float mean2 = 0.0;

  while (test_set)
  {
   float x = %(channelName)s_get(cd, test_set->index, state.feat);

   if (%(ignoreWeights)s)
   {
    sum += 1.0;
    float delta = x - mean;
    mean += delta/sum;
    mean2 += delta * (x - mean);
   }
   else
   {
    float newSum = sum + test_set->weight;
    float delta = x - mean;
    float mean_delta = delta * test_set->weight / newSum;
    mean += mean_delta;
    mean2 += sum * delta * mean_delta;
    sum = newSum;
   }

   test_set = test_set->next;
  }

  state.mean = mean;
  state.sd = sqrt(mean2/sum);
  if (state.sd<1e-6) state.sd = 1e-6;

  state.splitRemain = %(splitCount)i;
 }

 // Output a split point drawn from the Gaussian...
 state.splitRemain--;

 double u = 1.0-drand48();
 double v = 1.0-drand48();
 float bg = sqrt(-2.0*log(u)) * cos(2.0*M_PI*v);
 float split = state.mean + state.sd * bg;

 ((int*)state.test)[0] = state.feat;
 ((float*)state.test)[1] = split;

 return true;
}
"""%{'name':name, 'channel':self.channel, 'channelName':exemplar_list[self.channel]['name'], 'channelType':exemplar_list[self.channel]['itype'], 'dimCount':self.dimCount, 'splitCount':self.splitCount, 'ignoreWeights':('true' if self.ignoreWeights else 'false')}

        return (code, 'State'+name)



class LinearRandomGen(Generator, LinearSplit):
    """Provides a generator for split planes that is entirely random. Randomly selects which dimensions to work with, the orientation of the split plane and then where to put the split plane, with this last bit done with a normal distribution."""

    def __init__(self, channel, dims, dimCount, dirCount, splitCount, ignoreWeights = False):
        """channel is which channel to select for and dims how many features (dimensions) to test on for any given test. dimCount is how many sets of dimensions to randomly select to generate tests from, whilst dirCount is how many random directions (from a uniform distribution over a hyper-sphere) to use for selection. It actually generates the two independently and tries every combination, as generating uniform random directions is somewhat expensive. For each of these splitCount split points are then tried, as drawn from a normal distribution. Setting ignoreWeights to True means it will not consider the weights when calculating the normal distribution to draw random split points from."""
        LinearSplit.__init__(self, channel, dims)
        self.dimCount = dimCount
        self.dirCount = dirCount
        self.splitCount = splitCount
        self.ignoreWeights = ignoreWeights

    def clone(self):
        """Returns an independent copy with the same configuration."""
        return LinearRandomGen(self.channel, self.dims, self.dimCount, self.dirCount, self.splitCount, self.ignoreWeights)

    def itertests(self, es, index, weights = None):
        """Yields serialised (features, direction, split) tests - every combination of dimCount feature sets, dirCount directions and splitCount split draws."""
        # Generate random points on the hyper-sphere (normalised Gaussian draws)...
        dirs = numpy.random.normal(size=(self.dirCount, self.dims))
        dirs /= numpy.sqrt(numpy.square(dirs).sum(axis=1)).reshape((-1,1))

        # Iterate and select a set of dimensions before trying each direction on them...
        for _ in xrange(self.dimCount):
            #dims = numpy.random.choice(es.features(self.channel), size=self.dims, replace=False) For when numpy 1.7.0 is common
            dims = numpy.zeros(self.dims, dtype=numpy.int32)
            feats = es.features(self.channel)
            for i in xrange(self.dims):
                # This loop is not quite right - could result in the same feature twice. Odds are low enough that its not really worth caring about however.
                dims[i] = numpy.random.randint(feats-i)
                dims[i] += (dims[:i]<=dims[i]).sum()

            for di in dirs:
                # Project the selected features onto the direction...
                dists = (es[self.channel, index, dims] * di.reshape((1,-1))).sum(axis=1)

                # Fit the split-point distribution ('is None' - weights can be an ndarray)...
                if weights is None or self.ignoreWeights:
                    mean = numpy.mean(dists)
                    std = max(numpy.std(dists), 1e-6)
                else:
                    w = weights[index]
                    mean = numpy.average(dists, weights=w)
                    std = max(numpy.average(numpy.fabs(dists-mean), weights=w), 1e-6)

                for _ in xrange(self.splitCount):
                    split = numpy.random.normal(mean, std)
                    yield numpy.asarray(dims, dtype=numpy.int32).tostring() + numpy.asarray(di, dtype=numpy.float32).tostring() + numpy.asarray([split], dtype=numpy.float32).tostring()

    def genCodeC(self, name, exemplar_list):
        """Returns (code, state type name) for the C implementation of this generator, for use with scipy.weave."""
        code = start_cpp() + """
struct State%(name)s
{
 void * test;
 size_t length;

 float * dirs; // Vectors giving points uniformly distributed on the hyper-sphere.
 int * feat; // The features to index at this moment.

 float mean;
 float sd;

 // Control counters - all count down...
 int featRemain;
 int dirRemain;
 int splitRemain;
};

void %(name)s_init(State%(name)s & state, PyObject * data, Exemplar * test_set)
{
 assert(sizeof(int)==4);

 // Output state...
 state.length = sizeof(int) * %(dims)i + sizeof(float) * (%(dims)i+1);
 state.test = malloc(state.length);

 // Generate a bunch of random directions...
 state.dirs = (float*)malloc(sizeof(float)*%(dims)i*%(dirCount)i);
 for (int d=0;d<%(dirCount)i;d++)
 {
  float length = 0.0;
  int base = %(dims)i * d;
  for (int f=0; f<%(dims)i; f++)
  {
   double u = 1.0-drand48();
   double v = 1.0-drand48();
   float bg = sqrt(-2.0*log(u)) * cos(2.0*M_PI*v);
   length += bg*bg;
   state.dirs[base+f] = bg;
  }

  length = sqrt(length);
  for (int f=0; f<%(dims)i; f++)
  {
   state.dirs[base+f] /= length;
  }
 }

 // Which features are currently being used...
 state.feat = (int*)malloc(sizeof(int)*%(dims)i);

 // Setup the counters so we do the required work when next is called...
 state.featRemain = %(dimCount)i;
 state.dirRemain = 0;
 state.splitRemain = 0;

 // Safety...
 %(channelType)s cd = (%(channelType)s)PyTuple_GetItem(data, %(channel)i);
 int featCount = %(channelName)s_features(cd);
 if (%(dims)i>featCount)
 {
  state.featRemain = 0; // Effectivly cancels work.
 }
}

bool %(name)s_next(State%(name)s & state, PyObject * data, Exemplar * test_set)
{
 if (state.splitRemain==0)
 {
  %(channelType)s cd = (%(channelType)s)PyTuple_GetItem(data, %(channel)i);

  if (state.dirRemain==0)
  {
   if (state.featRemain==0)
   {
    free(state.feat);
    free(state.dirs);
    free(state.test);
    return false;
   }
   state.featRemain--;

   // Select a new set of features...
   int featCount = %(channelName)s_features(cd);
   for (int f=0; f<%(dims)i; f++)
   {
    state.feat[f] = lrand48() %% (featCount-f);
    for (int j=0; j<f; j++)
    {
     if (state.feat[j]<=state.feat[f]) state.feat[f]++;
    }
   }

   // Reset the counter...
   state.dirRemain = %(dirCount)i;
  }
  state.dirRemain--;

  // For the new direction calculate the mean and standard deviation with the current features...
  float sum = 0.0;
  float mean = 0.0;
  float mean2 = 0.0;

  while (test_set)
  {
   float x = 0.0;
   int base = %(dims)i * state.dirRemain;
   for (int f=0; f<%(dims)i; f++)
   {
    x += state.dirs[base+f] * %(channelName)s_get(cd, test_set->index, state.feat[f]);
   }

   if (%(ignoreWeights)s)
   {
    sum += 1.0;
    float delta = x - mean;
    mean += delta/sum;
    mean2 += delta * (x - mean);
   }
   else
   {
    float newSum = sum + test_set->weight;
    float delta = x - mean;
    float mean_delta = delta * test_set->weight / newSum;
    mean += mean_delta;
    mean2 += sum * delta * mean_delta;
    sum = newSum;
   }

   test_set = test_set->next;
  }

  state.mean = mean;
  state.sd = sqrt(mean2/sum);
  if (state.sd<1e-6) state.sd = 1e-6;

  // Reset the counter...
  state.splitRemain = %(splitCount)i;
 }
 state.splitRemain--;

 // Use the mean and standard deviation to select a split point...
 double u = 1.0-drand48();
 double v = 1.0-drand48();
 float bg = sqrt(-2.0*log(u)) * cos(2.0*M_PI*v);
 float split = state.mean + state.sd * bg;

 // Store it all in the output...
 for (int i=0; i<%(dims)i;i++) { ((int*)state.test)[i] = state.feat[i]; }
 int base = %(dims)i * state.dirRemain;
 for (int i=0; i<%(dims)i;i++) { ((float*)state.test)[%(dims)i+i] = state.dirs[base+i]; }
 ((float*)state.test)[2*%(dims)i] = split;

 return true;
}
"""%{'name':name, 'channel':self.channel, 'channelName':exemplar_list[self.channel]['name'], 'channelType':exemplar_list[self.channel]['itype'], 'dims':self.dims, 'dimCount':self.dimCount, 'dirCount':self.dirCount, 'splitCount':self.splitCount, 'ignoreWeights':('true' if self.ignoreWeights else 'false')}

        return (code, 'State'+name)



class DiscreteRandomGen(Generator, DiscreteBucket):
    """Defines a generator for discrete data. It basically takes a single discrete feature and randomly assigns just one value to pass and all others to fail the test. The selection is from the values provided by the data passed in, weighted by how many of them there are."""

    def __init__(self, channel, featCount, valueCount):
        """channel is the channel to build discrete tests for. featCount is how many different features to select to generate tests for whilst valueCount is how many values to draw and offer as tests for each feature selected."""
        DiscreteBucket.__init__(self, channel)
        self.featCount = featCount
        self.valueCount = valueCount

    def clone(self):
        """Returns an independent copy with the same configuration."""
        return DiscreteRandomGen(self.channel, self.featCount, self.valueCount)

    def itertests(self, es, index, weights = None):
        """Yields serialised (feature, value) tests - for each of featCount random features it draws up to valueCount values, weighted by how common each value is, and yields one test per distinct drawn value."""
        # Iterate and yield the right number of tests...
        for _ in xrange(self.featCount):
            # Randomly select a feature...
            feat = numpy.random.randint(es.features(self.channel))
            values = es[self.channel, index, feat]

            # Histogram of how often each value occurs, normalised to a distribution.
            # bincount returns an integer array when weights is None, so convert before the in-place divide.
            histo = numpy.bincount(values, weights = weights[index] if weights is not None else None)
            histo = histo.astype(numpy.float64)
            histo /= histo.sum()

            # Draw the values - a multinomial draw means duplicates collapse into a single yield.
            # numpy.where returns a tuple of arrays - index [0] to iterate the actual values, so each
            # yielded test is exactly one int32 feature plus one int32 value, as the C side expects.
            draw = numpy.random.multinomial(self.valueCount, histo)
            for value in numpy.where(draw!=0)[0]:
                # Yield a discrete decision object...
                yield numpy.asarray(feat, dtype=numpy.int32).tostring() + numpy.asarray(value, dtype=numpy.int32).tostring()

    def genCodeC(self, name, exemplar_list):
        """Returns (code, state type name) for the C implementation of this generator, for use with scipy.weave."""
        code = start_cpp() + """
struct State%(name)s
{
 void * test; // Will be the length of two 32 bit ints.
 size_t length;

 int feat; // Current feature.
 int featRemain; // How many more times we need to select a feature to play with.

 int * value; // List of values drawn from the feature - we return each in turn.
 int valueLength; // Reduced as each feature is drawn.
 float * weight; // Aligns with value; temporary used for the sampling.
};

int %(name)s_int_comp(const void * lhs, const void * rhs)
{
 return (*(int*)lhs) - (*(int*)rhs);
}

void %(name)s_init(State%(name)s & state, PyObject * data, Exemplar * test_set)
{
 assert(sizeof(int)==4);
 state.test = malloc(2*sizeof(int));
 state.length = 2*sizeof(int);

 state.feat = -1;
 state.featRemain = %(featCount)i;

 state.value = (int*)malloc(%(valueCount)i*sizeof(int));
 state.valueLength = 0;
 state.weight = (float*)malloc(%(valueCount)i*sizeof(float));
}

bool %(name)s_next(State%(name)s & state, PyObject * data, Exemplar * test_set)
{
 // Check if we need to create a new set of values...
 if (state.valueLength==0)
 {
  // Check if we are done - if so clean up and return...
  if (state.featRemain==0)
  {
   free(state.weight);
   free(state.value);
   free(state.test);
   return false;
  }
  state.featRemain--;

  // Get the relevent channels object...
  %(channelType)s cd = (%(channelType)s)PyTuple_GetItem(data, %(channel)i);

  // Select a new feature...
  state.feat = lrand48() %% %(channelName)s_features(cd);

  // Generate a set of values - use a method based on a single pass through the linked list...
  float minWeight = 1.0;
  while (test_set)
  {
   if (test_set->weight>1e-6)
   {
    float w = pow(drand48(), 1.0/test_set->weight);
    if (state.valueLength<%(valueCount)i)
    {
     state.value[state.valueLength] = test_set->index;
     state.weight[state.valueLength] = w;
     if (minWeight>w) minWeight = w;
     state.valueLength++;
    }
    else
    {
     if (w>minWeight)
     {
      // Below is not very efficient, but don't really care - valueCount tends to be low enough that optimisation is not worthwhile..
      int lowest = 0;
      for (int i=1; i<%(valueCount)i; i++)
      {
       if (state.weight[lowest]>state.weight[i]) lowest = i;
      }

      state.value[lowest] = test_set->index;
      state.weight[lowest] = w;

      minWeight = 1.0;
      for (int i=0; i<%(valueCount)i; i++)
      {
       if (minWeight>state.weight[i]) minWeight = state.weight[i];
      }
     }
    }
   }

   test_set = test_set->next;
  }

  // Convert exemplar numbers to actual values...
  for (int i=0; i<state.valueLength; i++)
  {
   state.value[i] = %(channelName)s_get(cd, state.value[i], state.feat);
  }

  // Remove duplicates...
  qsort(state.value, state.valueLength, sizeof(int), %(name)s_int_comp);

  int out = 1;
  for (int i=1; i<state.valueLength; i++)
  {
   if (state.value[i]!=state.value[i-1])
   {
    state.value[out] = state.value[i];
    out++;
   }
  }
  state.valueLength = out;
 }

 // Get and arrange as the output the next value...
 state.valueLength -= 1;
 ((int*)state.test)[0] = state.feat;
 ((int*)state.test)[1] = state.value[state.valueLength];

 return true;
}
"""%{'name':name, 'channel':self.channel, 'channelName':exemplar_list[self.channel]['name'], 'channelType':exemplar_list[self.channel]['itype'], 'featCount':self.featCount, 'valueCount':self.valueCount}

        return (code, 'State'+name)
Python
# -*- coding: utf-8 -*-

# Copyright 2011 Tom SF Haines

# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at

# http://www.apache.org/licenses/LICENSE-2.0

# Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.



import math
import numpy
import numpy.linalg
import numpy.random

from wishart import Wishart
from gaussian import Gaussian
from student_t import StudentT



class GaussianPrior:
    """The conjugate prior for the multivariate Gaussian distribution. Maintains the 4 values (mu, k, n and lambda, with the inverse of lambda considered primary) and supports various operations of interest - initialisation of prior, Bayesian update, drawing a Gaussian and calculating the probability of a data point coming from a Gaussian drawn from the distribution. Not a particularly efficient implementation, and it has no numerical protection against extremely large data sets. Interface is not entirely orthogonal, due to the demands of real world usage."""

    def __init__(self, dims):
        """Initialises with everything zeroed out, such that a prior must be added before anything interesting is done. Supports cloning - pass in another GaussianPrior instead of a dimension count to copy it."""
        if isinstance(dims, GaussianPrior):
            # Copy construction...
            self.invShape = dims.invShape.copy()
            self.shape = dims.shape.copy() if dims.shape is not None else None  # 'is not None' - shape is an ndarray when cached, for which != is elementwise.
            self.mu = dims.mu.copy()
            self.n = dims.n
            self.k = dims.k
        else:
            self.invShape = numpy.zeros((dims,dims), dtype=numpy.float32) # The inverse of lambda in the equations.
            self.shape = None # Cached value - inverse is considered primary.
            self.mu = numpy.zeros(dims, dtype=numpy.float32)
            self.n = 0.0
            self.k = 0.0

    def reset(self):
        """Resets as though there is no data, other than the dimensions of course."""
        self.invShape[:] = 0.0
        self.shape = None
        self.mu[:] = 0.0
        self.n = 0.0
        self.k = 0.0

    def addPrior(self, mean, covariance, weight = None):
        """Adds a prior to the structure, as an estimate of the mean and covariance matrix, with a weight which can be interpreted as how many samples that estimate is worth. Note the use of 'add' - you can call this after adding actual samples, or repeatedly. If weight is omitted it defaults to the number of dimensions, as the total weight in the system must match or exceed this value before draws etc can be done."""
        if weight is None:
            weight = float(self.mu.shape[0])

        delta = mean - self.mu

        self.invShape += weight * covariance # *weight converts to a scatter matrix.
        self.invShape += ((self.k*weight)/(self.k+weight)) * numpy.outer(delta,delta)
        self.shape = None

        self.mu += (weight/(self.k+weight)) * delta
        self.n += weight
        self.k += weight

    def addSample(self, sample, weight=1.0):
        """Updates the prior given a single sample drawn from the Gaussian being estimated. Can have a weight provided, in which case it will be equivalent to repetition of that data point, where the repetition count can be fractional."""
        sample = numpy.asarray(sample, dtype=numpy.float32)
        if len(sample.shape)==0: sample.shape = (1,)  # Allow a scalar for the 1D case.

        delta = sample - self.mu

        self.invShape += (weight*self.k/(self.k+weight)) * numpy.outer(delta,delta)
        self.shape = None

        self.mu += delta * (weight / (self.k+weight))
        self.n += weight
        self.k += weight

    def remSample(self, sample):
        """Does the inverse of addSample, to in effect remove a previously added sample. Note that the issues of floating point (in-)accuracy mean its not perfect, and removing all samples is bad if there is no prior. Does not support weighting - effectively removes a sample of weight 1."""
        sample = numpy.asarray(sample, dtype=numpy.float32)
        if len(sample.shape)==0: sample.shape = (1,)

        delta = sample - self.mu  # Note: delta relative to the *current* mu, which includes the sample.

        self.k -= 1.0
        self.n -= 1.0
        self.mu -= delta / self.k

        self.invShape -= ((self.k+1.0)/self.k) * numpy.outer(delta,delta)
        self.shape = None

    def addSamples(self, samples, weight = None):
        """Updates the prior given multiple samples drawn from the Gaussian being estimated. Expects a data matrix ([sample, position in sample]), or an object that numpy.asarray will interpret as such. Note that if you have only a few samples it might be faster to repeatedly call addSample, as this is designed to be efficient for hundreds+ of samples. You can optionally weight the samples, by providing an array to the weight parameter."""
        samples = numpy.asarray(samples, dtype=numpy.float32)

        # Calculate the mean and scatter matrix ('is None' - weight can be an ndarray)...
        if weight is None:
            # Unweighted samples...
            num = samples.shape[0]
            mean = numpy.average(samples, axis=0)
            delta = samples - mean.reshape((1,-1))  # Was missing - delta was used below without ever being assigned.
            scatter = numpy.tensordot(delta, delta, ([0],[0]))
        else:
            # Weighted samples...
            num = weight.sum()
            mean = numpy.average(samples, axis=0, weights=weight)
            delta = samples - mean.reshape((1,-1))
            scatter = numpy.tensordot(weight.reshape((-1,1))*delta, delta, ([0],[0]))

        # Update parameters...
        delta = mean-self.mu

        self.invShape += scatter
        self.invShape += ((self.k*num)/(self.k+num)) * numpy.outer(delta,delta)
        self.shape = None

        self.mu += (num/(self.k+num)) * delta
        self.n += num
        self.k += num

    def addGP(self, gp):
        """Adds another Gaussian prior, combining the two."""
        delta = gp.mu - self.mu

        self.invShape += gp.invShape
        self.invShape += ((gp.k*self.k)/(gp.k+self.k)) * numpy.outer(delta,delta)
        self.shape = None

        self.mu += (gp.k/(self.k+gp.k)) * delta
        self.n += gp.n
        self.k += gp.k

    def make_safe(self):
        """Checks for a singular inverse shape matrix - if singular replaces it with the identity. Also makes sure n and k are not less than the number of dimensions, clamping them if need be. Obviously the result of this is quite arbitrary, but its better than getting a crash from bad data."""
        dims = self.mu.shape[0]

        det = math.fabs(numpy.linalg.det(self.invShape))
        if det<1e-3:
            self.invShape = numpy.identity(dims, dtype=numpy.float32)
            self.shape = None  # Must invalidate the cache - was missing, leaving a stale cached inverse.

        if self.n<dims: self.n = dims
        if self.k<1e-3: self.k = 1e-3

    def reweight(self, newN = None, newK = None):
        """A slightly cheeky method that reweights the gp such that it has the new values of n and k, effectively adjusting the relevant weightings of the samples - can be useful for generating a prior for some GPs using the data stored in those GPs. If a new k is not provided it is set to n; if a new n is not provided it is set to the number of dimensions."""
        if newN is None: newN = float(self.mu.shape[0])
        if newK is None: newK = newN

        self.invShape *= newN / self.n
        self.shape = None

        self.n = newN
        self.k = newK

    def getN(self):
        """Returns n."""
        return self.n

    def getK(self):
        """Returns k."""
        return self.k

    def getMu(self):
        """Returns mu."""
        return self.mu

    def getLambda(self):
        """Returns lambda, inverting the stored inverse and caching the result on first use."""
        if self.shape is None:  # 'is None' - once cached this is an ndarray, for which == is elementwise.
            self.shape = numpy.linalg.inv(self.invShape)
        return self.shape

    def getInverseLambda(self):
        """Returns the inverse of lambda."""
        return self.invShape

    def safe(self):
        """Returns true if it is possible to sample the prior, work out the probability of samples or work out the probability of samples being drawn from a collapsed sample - basically a test that there is enough information."""
        return self.n>=self.mu.shape[0] and self.k>0.0

    def prob(self, gauss):
        """Returns the probability of drawing the provided Gaussian from this prior."""
        d = self.mu.shape[0]
        wishart = Wishart(d)
        gaussian = Gaussian(d)

        wishart.setDof(self.n)
        wishart.setScale(self.getLambda())
        gaussian.setMean(self.mu)
        gaussian.setPrecision(self.k*gauss.getPrecision())

        return wishart.prob(gauss.getPrecision()) * gaussian.prob(gauss.getMean())

    def intProb(self):
        """Returns a multivariate student-t distribution object that gives the probability of drawing a sample from a Gaussian drawn from this prior, with the Gaussian integrated out. You may then call the prob method of this object on each sample obtained."""
        d = self.mu.shape[0]
        st = StudentT(d)

        dof = self.n-d+1.0
        st.setDOF(dof)
        st.setLoc(self.mu)

        mult = self.k*dof / (self.k+1.0)
        st.setInvScale(mult * self.getLambda())

        return st

    def sample(self):
        """Returns a Gaussian, drawn from this prior."""
        d = self.mu.shape[0]
        wishart = Wishart(d)
        gaussian = Gaussian(d)
        ret = Gaussian(d)

        wishart.setDof(self.n)
        wishart.setScale(self.getLambda())
        ret.setPrecision(wishart.sample())

        gaussian.setPrecision(self.k*ret.getPrecision())
        gaussian.setMean(self.mu)
        ret.setMean(gaussian.sample())

        return ret

    def __str__(self):
        return '{n:%f, k:%f, mu:%s, lambda:%s}'%(self.n, self.k, str(self.mu), str(self.getLambda()))
Python
# -*- coding: utf-8 -*-

# Copyright 2011 Tom SF Haines

# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at

# http://www.apache.org/licenses/LICENSE-2.0

# Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.



import math

import numpy
import numpy.linalg

import scipy.special



class StudentT:
    """A feature incomplete multivariate student-t distribution object - at this time it only supports calculating the probability of a sample, and not the ability to make a draw. Caches the inverse scale and the log normalising constant, invalidating them whenever a parameter changes."""

    def __init__(self, dims):
        """dims is the number of dimensions - initialises it to default values with the degrees of freedom set to 1, the location as the zero vector and the identity matrix for the scale. Supports copy construction when given another StudentT."""
        if isinstance(dims, StudentT):
            # Copy construction. Note the 'is not None' tests - scale/invScale are numpy
            # arrays, for which ==/!= broadcast elementwise instead of testing identity.
            self.dof = dims.dof
            self.loc = dims.loc.copy()
            self.scale = dims.scale.copy() if dims.scale is not None else None
            self.invScale = dims.invScale.copy() if dims.invScale is not None else None
            self.norm = dims.norm # A scalar (or None), so plain assignment suffices - no .copy().
        else:
            self.dof = 1.0
            self.loc = numpy.zeros(dims, dtype=numpy.float32)
            self.scale = numpy.identity(dims, dtype=numpy.float32)
            self.invScale = None
            self.norm = None # Actually the log of the normalising constant.

    def setDOF(self, dof):
        """Sets the degrees of freedom."""
        self.dof = dof
        self.norm = None # Normalising constant depends on the dof - invalidate the cache.

    def setLoc(self, loc):
        """Sets the location vector."""
        l = numpy.array(loc, dtype=numpy.float32)
        assert(l.shape==self.loc.shape)
        self.loc = l

    def setScale(self, scale):
        """Sets the scale matrix."""
        s = numpy.array(scale, dtype=numpy.float32)
        assert(s.shape==(self.loc.shape[0],self.loc.shape[0]))
        self.scale = s
        self.invScale = None
        self.norm = None

    def setInvScale(self, invScale):
        """Sets the scale matrix by providing its inverse."""
        i = numpy.array(invScale, dtype=numpy.float32)
        assert(i.shape==(self.loc.shape[0],self.loc.shape[0]))
        self.scale = None
        self.invScale = i
        self.norm = None

    def getDOF(self):
        """Returns the degrees of freedom."""
        return self.dof

    def getLoc(self):
        """Returns the location vector."""
        return self.loc

    def getScale(self):
        """Returns the scale matrix, inverting the cached inverse if needed."""
        if self.scale is None: # Was ==None, which is an elementwise comparison once invScale is an array.
            self.scale = numpy.linalg.inv(self.invScale)
        return self.scale

    def getInvScale(self):
        """Returns the inverse of the scale matrix, computing and caching it if needed."""
        if self.invScale is None:
            self.invScale = numpy.linalg.inv(self.scale)
        return self.invScale

    def getLogNorm(self):
        """Returns the logarithm of the normalising constant of the distribution. Typically for internal use only."""
        if self.norm is None:
            d = self.loc.shape[0]
            # log Gamma((dof+d)/2) - log Gamma(dof/2) - (d/2) log(dof*pi) + (1/2) log det(invScale)...
            self.norm = scipy.special.gammaln(0.5*(self.dof+d))
            self.norm -= scipy.special.gammaln(0.5*self.dof)
            self.norm -= math.log(self.dof*math.pi)*(0.5*d)
            self.norm += 0.5*math.log(numpy.linalg.det(self.getInvScale()))
        return self.norm

    def prob(self, x):
        """Given a vector x evaluates the density function at that point."""
        x = numpy.asarray(x)
        d = self.loc.shape[0]
        delta = x - self.loc
        val = numpy.dot(delta, numpy.dot(self.getInvScale(), delta)) # Mahalanobis distance, squared.
        val = 1.0 + val/self.dof
        return math.exp(self.getLogNorm() + math.log(val)*(-0.5*(self.dof+d)))

    def logProb(self, x):
        """Returns the logarithm of prob - faster than a straight call to prob."""
        x = numpy.asarray(x)
        d = self.loc.shape[0]
        delta = x - self.loc
        val = numpy.dot(delta, numpy.dot(self.getInvScale(), delta))
        val = 1.0 + val/self.dof
        return self.getLogNorm() + math.log(val)*(-0.5*(self.dof+d))

    def batchProb(self, dm):
        """Given a data matrix evaluates the density function for each entry and returns the resulting array of probabilities."""
        d = self.loc.shape[0]
        delta = dm - self.loc.reshape((1,d))
        if hasattr(numpy, 'einsum'): # Can go away when scipy older than 1.6 is no longer in use.
            val = numpy.einsum('kj,ij,ik->i', self.getInvScale(), delta, delta)
        else:
            val = ((self.getInvScale().reshape(1,d,d) * delta.reshape(dm.shape[0],1,d)).sum(axis=2) * delta).sum(axis=1)
        val = 1.0 + val/self.dof
        return numpy.exp(self.getLogNorm() + numpy.log(val)*(-0.5*(self.dof+d)))

    def batchLogProb(self, dm):
        """Same as batchProb, but returns the logarithm of the probability instead."""
        d = self.loc.shape[0]
        delta = dm - self.loc.reshape((1,d))
        if hasattr(numpy, 'einsum'): # Can go away when scipy older than 1.6 is no longer in use.
            val = numpy.einsum('kj,ij,ik->i', self.getInvScale(), delta, delta)
        else:
            val = ((self.getInvScale().reshape(1,d,d) * delta.reshape(dm.shape[0],1,d)).sum(axis=2) * delta).sum(axis=1)
        val = 1.0 + val/self.dof
        return self.getLogNorm() + numpy.log(val)*(-0.5*(self.dof+d))

    def __str__(self):
        return '{dof:%f, location:%s, scale:%s}'%(self.getDOF(), str(self.getLoc()), str(self.getScale()))
Python
# -*- coding: utf-8 -*-

# Copyright 2011 Tom SF Haines

# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at

# http://www.apache.org/licenses/LICENSE-2.0

# Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.



import math
import random

import numpy
import numpy.linalg
import numpy.random

import scipy.special



class Wishart:
    """Simple Wishart distribution class, quite basic really, but has caching to avoid duplicate computation - the inverse scale, the normalising constant and the Cholesky decomposition of the scale are all computed lazily and invalidated when a parameter changes."""

    def __init__(self, dims):
        """dims is the number of dimensions - it initialises with the dof set to 1 and the scale set to the identity matrix. Passing in another Wishart gives copy construction."""
        if isinstance(dims, Wishart):
            self.dof = dims.dof
            self.scale = dims.scale.copy()
            # 'is not None' rather than '!=None': these caches are numpy arrays, for
            # which ==/!= broadcast elementwise instead of testing against None.
            self.invScale = dims.invScale.copy() if dims.invScale is not None else None
            self.norm = dims.norm
            self.cholesky = dims.cholesky.copy() if dims.cholesky is not None else None
        else:
            self.dof = 1.0
            self.scale = numpy.identity(dims, dtype=numpy.float32)
            self.invScale = None
            self.norm = None
            self.cholesky = None

    def setDof(self, dof):
        """Sets the degrees of freedom of the distribution."""
        self.dof = dof
        self.norm = None # Normalising constant depends on the dof - invalidate.

    def setScale(self, scale):
        """Sets the scale matrix, must be symmetric positive definite."""
        ns = numpy.array(scale, dtype=numpy.float32)
        assert(ns.shape==self.scale.shape)
        self.scale = ns
        # Everything cached depends on the scale...
        self.invScale = None
        self.norm = None
        self.cholesky = None

    def getDof(self):
        """Returns the degrees of freedom."""
        return self.dof

    def getScale(self):
        """Returns the scale matrix."""
        return self.scale

    def getInvScale(self):
        """Returns the inverse of the scale matrix, caching it."""
        if self.invScale is None:
            self.invScale = numpy.linalg.inv(self.scale)
        return self.invScale

    def getNorm(self):
        """Returns the normalising constant of the distribution, typically not used by users."""
        if self.norm is None:
            d = self.scale.shape[0]
            # 1 / (2^{dof*d/2} |scale|^{dof/2} Gamma_d(dof/2)), where Gamma_d is the
            # multivariate gamma function, expanded as pi^{d(d-1)/4} prod_i Gamma((dof-i)/2).
            # Note: uses gamma rather than gammaln, so this will overflow for large dof.
            self.norm = math.pow(2.0, -0.5*self.dof*d)
            self.norm *= math.pow(numpy.linalg.det(self.scale), -0.5*self.dof)
            self.norm *= math.pow(math.pi, -0.25*d*(d-1))
            for i in range(d):
                # Bug fix: this previously used an undefined name 'n' - the maths calls
                # the degrees of freedom n, which is self.dof here.
                self.norm /= scipy.special.gamma(0.5*(self.dof-i))
        return self.norm

    def prob(self, mat):
        """Returns the probability density of the provided matrix, which must be the same shape as the scale matrix and also symmetric and positive definite."""
        d = self.scale.shape[0]
        # |mat|^{(dof-d-1)/2} exp(-trace(invScale * mat)/2), times the normalising constant.
        # Bug fixes: 'n' was undefined (should be self.dof) and numpy.linalg has no trace
        # function - trace lives on the numpy module itself.
        val = math.pow(numpy.linalg.det(mat), 0.5*(self.dof-1-d))
        val *= math.exp(-0.5 * numpy.trace(numpy.dot(mat, self.getInvScale())))
        return self.getNorm() * val

    def sample(self):
        """Returns a draw from the distribution - will be a symmetric positive definite matrix. Uses the Bartlett decomposition: scale = L L^T, A lower triangular with standard normal off-diagonal entries, giving L A A^T L^T as the draw."""
        if self.cholesky is None:
            self.cholesky = numpy.linalg.cholesky(self.scale)

        d = self.scale.shape[0]
        a = numpy.zeros((d,d), dtype=numpy.float32)
        for r in range(d):
            if r!=0:
                a[r,:r] = numpy.random.normal(size=(r,))
            # Bartlett decomposition: the r'th diagonal entry is the square root of a
            # chi-squared draw with dof-r degrees of freedom (chi2(k) == gamma(k/2, 2)).
            # Bug fix: previously every row used dof-d+1, which is only correct for the
            # last row and biases the expectation of the draw below dof*scale when d>1.
            a[r,r] = math.sqrt(random.gammavariate(0.5*(self.dof-r), 2.0))

        return numpy.dot(numpy.dot(numpy.dot(self.cholesky, a), a.T), self.cholesky.T)

    def __str__(self):
        return '{dof:%f, scale:%s}'%(self.dof, str(self.scale))
Python
# -*- coding: utf-8 -*- # Copyright 2011 Tom SF Haines # Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at # http://www.apache.org/licenses/LICENSE-2.0 # Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. from gaussian import Gaussian from gaussian_inc import GaussianInc from wishart import Wishart from student_t import StudentT from gaussian_prior import GaussianPrior
Python
# -*- coding: utf-8 -*-

# Copyright 2011 Tom SF Haines

# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at

# http://www.apache.org/licenses/LICENSE-2.0

# Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.



import math # Bug fix: safe() and makeSafe() use math.fabs/math.copysign, but math was never imported (NameError).

import numpy
import numpy.linalg

from gaussian import Gaussian



class GaussianInc:
    """Allows you to incrementally calculate a Gaussian distribution by providing lots of samples, using a weighted Welford-style online update of the mean and scatter matrix."""

    def __init__(self, dims):
        """You provide the number of dimensions - you must add at least dims samples before there is the possibility of extracting a gaussian from this. Can also act as a copy constructor."""
        if isinstance(dims, GaussianInc):
            self.n = dims.n
            self.mean = dims.mean.copy()
            self.scatter = dims.scatter.copy()
        else:
            self.n = 0 # Total weight of the samples seen so far.
            self.mean = numpy.zeros(dims, dtype=numpy.float32)
            self.scatter = numpy.zeros((dims,dims), dtype=numpy.float32)

    def add(self, sample, weight=1.0):
        """Updates the state given a new sample - sample can have a weight, which obviously defaults to 1, but can be set to other values to indicate repetition of a single point, including fractional."""
        sample = numpy.asarray(sample)

        # Sample count goes up...
        self.n += weight

        # Update mean vector...
        delta = sample - self.mean
        self.mean += delta*(weight/float(self.n))

        # Update scatter matrix (Yes, there is duplicated calculation here as it is symmetric, but who cares?)...
        self.scatter += weight * numpy.outer(delta, sample - self.mean)

    def safe(self):
        """Returns True if it has enough data to provide an actual Gaussian, False if it does not - tested via the scatter matrix being (numerically) non-singular."""
        return math.fabs(numpy.linalg.det(self.scatter)) > 1e-6

    def makeSafe(self):
        """Bodges the internal representation so it can provide a non-singular covariance matrix - obviously a total hack, but potentially useful when insufficient information exists. Works by taking the svd, nudging zero entries away from 0 in the diagonal matrix, then multiplying the terms back together again. End result is arbitrary, but won't be inconsistent with the data provided."""
        u, s, v = numpy.linalg.svd(self.scatter)
        epsilon = 1e-5
        for i in range(s.shape[0]): # range rather than the Python-2-only xrange.
            if math.fabs(s[i])<epsilon:
                s[i] = math.copysign(epsilon, s[i])
        self.scatter[:,:] = numpy.dot(u, numpy.dot(numpy.diag(s), v))

    def fetch(self):
        """Returns the Gaussian distribution calculated so far - the covariance is the scatter divided by the total weight (the maximum likelihood estimate)."""
        ret = Gaussian(self.mean.shape[0])
        ret.setMean(self.mean)
        ret.setCovariance(self.scatter/float(self.n))
        return ret
Python
# Copyright (c) 2012, Tom SF Haines
# All rights reserved.

# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:

#  * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
#  * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.

# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.


from start_cpp import start_cpp


# Some basic matrix operations that come in use...
# C++ support code for inclusion into scipy.weave blocks - provides MemSwap,
# a recursive Determinant via cofactor expansion, and a Gauss-Jordan Inverse.
# The #ifndef guard means it can be safely concatenated into multiple snippets.
matrix_code = start_cpp() + """
#ifndef MATRIX_CODE
#define MATRIX_CODE

template <typename T>
inline void MemSwap(T * lhs, T * rhs, int count = 1)
{
 while(count!=0)
 {
  T t = *lhs;
  *lhs = *rhs;
  *rhs = t;

  ++lhs;
  ++rhs;
  --count;
 }
}

// Calculates the determinant - you give it a pointer to the first elment of the array, and its size (It must be square), plus its stride, which would typically be identical to size, which is the default.
template <typename T>
inline T Determinant(T * pos, int size, int stride = -1)
{
 if (stride==-1) stride = size;
 if (size==1) return pos[0];
 else
 {
  if (size==2) return pos[0]*pos[stride+1] - pos[1]*pos[stride];
  else
  {
   T ret = 0.0;
   for (int i=0; i<size; i++)
   {
    if (i!=0) MemSwap(&pos[0], &pos[stride*i], size-1);
    T sub = Determinant(&pos[stride], size-1, stride) * pos[stride*i + size-1];
    if ((i+size)%2) ret += sub;
    else ret -= sub;
   }
   for (int i=1; i<size; i++)
   {
    MemSwap(&pos[(i-1)*stride], &pos[i*stride], size-1);
   }
   return ret;
  }
 }
}

// Inverts a square matrix, will fail on singular and very occasionally on
// non-singular matrices, returns true on success. Uses Gauss-Jordan elimination
// with partial pivoting.
// in is the input matrix, out the output matrix, just be aware that the input matrix is trashed.
// You have to provide its size (Its square, obviously.), and optionally a stride if different from size.
template <typename T>
inline bool Inverse(T * in, T * out, int size, int stride = -1)
{
 if (stride==-1) stride = size;

 for (int r=0; r<size; r++)
 {
  for (int c=0; c<size; c++)
  {
   out[r*stride + c] = (c==r)?1.0:0.0;
  }
 }

 for (int r=0; r<size; r++)
 {
  // Find largest pivot and swap in, fail if best we can get is 0...
  T max = in[r*stride + r];
  int index = r;
  for (int i=r+1; i<size; i++)
  {
   if (fabs(in[i*stride + r])>fabs(max))
   {
    max = in[i*stride + r];
    index = i;
   }
  }

  if (index!=r)
  {
   MemSwap(&in[index*stride], &in[r*stride], size);
   MemSwap(&out[index*stride], &out[r*stride], size);
  }

  if (fabs(max-0.0)<1e-6) return false;

  // Divide through the entire row...
  max = 1.0/max;
  in[r*stride + r] = 1.0;
  for (int i=r+1; i<size; i++) in[r*stride + i] *= max;
  for (int i=0; i<size; i++) out[r*stride + i] *= max;

  // Row subtract to generate 0's in the current column, so it matches an identity matrix...
  for (int i=0; i<size; i++)
  {
   if (i==r) continue;

   T factor = in[i*stride + r];
   in[i*stride + r] = 0.0;
   for (int j=r+1; j<size; j++) in[i*stride + j] -= factor * in[r*stride + j];
   for (int j=0; j<size; j++) out[i*stride + j] -= factor * out[r*stride + j];
  }
 }

 return true;
}

#endif
"""
Python
# -*- coding: utf-8 -*-

# Copyright (c) 2010, Tom SF Haines
# All rights reserved.

# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:

#  * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
#  * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.

# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.



import sys
import time



class ProgBar:
    """Simple console progress bar class. Note that object creation and destruction matter, as they indicate when processing starts and when it stops - construction draws the bar's upper edge and starts the clock, destruction completes the bar and prints the elapsed time."""

    def __init__(self, width = 60, onCallback = None):
        """width is how many characters wide the bar is; onCallback is an optional callable invoked (with no arguments) on every call to callback."""
        self.start = time.time()
        self.fill = 0 # How many '|' segments have been emitted so far.
        self.width = width
        self.onCallback = onCallback

        header = '_' * self.width
        sys.stdout.write(header + '\n')
        sys.stdout.flush()

    def __del__(self):
        """Fills the bar to completion and reports the total processing time."""
        self.end = time.time()
        self.__advance(self.width)
        elapsed = self.end - self.start
        sys.stdout.write('\nDone - ' + str(elapsed) + ' seconds\n\n')
        sys.stdout.flush()

    def callback(self, nDone, nToDo):
        """Hand this into the callback of methods to get a progress bar - it works by users repeatedly calling it to indicate how many units of work they have done (nDone) out of the total number of units required (nToDo)."""
        if self.onCallback:
            self.onCallback()

        target = int(float(self.width) * float(nDone) / float(nToDo))
        target = min((target, self.width)) # Clamp in case nDone overshoots nToDo.
        if target > self.fill:
            self.__advance(target)

    def __advance(self, n):
        # Emit just enough bar segments to raise the fill level to n...
        sys.stdout.write('|' * (n - self.fill))
        sys.stdout.flush()
        self.fill = n
Python
# Copyright (c) 2012, Tom SF Haines
# All rights reserved.

# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:

#  * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
#  * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.

# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.



# NOTE(review): Python 2 era module - relies on the reduce/filter/map builtins
# returning lists and on inspect.getargspec, which was removed in Python 3.11.

import pydoc
import inspect



class DocGen:
    """A helper class that is used to generate documentation for the system. Outputs multiple formats simultaneously, specifically html for local reading with a webbrowser and the markup used by the wiki system on Google code. Opens '<name>.html' and '<name>.wiki' on construction and finalises/closes them in __del__, so the object must be garbage collected for the output to be complete."""

    def __init__(self, name, title = None, summary = None):
        """name is the module name - primarily used for the file names. title is the title used as applicable - if not provided it just uses the name. summary is an optional line to go below the title."""
        if title==None: title = name
        if summary==None: summary = title

        self.doc = pydoc.HTMLDoc() # Does the heavy lifting for the html output.

        # html output - header written now, sections accumulated and flushed in __del__...
        self.html = open('%s.html'%name,'w')
        self.html.write('<html>\n')
        self.html.write('<head>\n')
        self.html.write('<title>%s</title>\n'%title)
        self.html.write('</head>\n')
        self.html.write('<body>\n')

        self.html_variables = ''
        self.html_functions = ''
        self.html_classes = ''

        # wiki output, same accumulate-then-flush structure...
        self.wiki = open('%s.wiki'%name,'w')
        self.wiki.write('#summary %s\n\n'%summary)
        self.wiki.write('= %s= \n\n'%title)

        self.wiki_variables = ''
        self.wiki_functions = ''
        self.wiki_classes = ''

    def __del__(self):
        # Flush the accumulated sections in a fixed order and close both files...
        if self.html_variables!='':
            self.html.write(self.doc.bigsection('Synonyms', '#ffffff', '#8d50ff', self.html_variables))
        if self.html_functions!='':
            self.html.write(self.doc.bigsection('Functions', '#ffffff', '#eeaa77', self.html_functions))
        if self.html_classes!='':
            self.html.write(self.doc.bigsection('Classes', '#ffffff', '#ee77aa', self.html_classes))

        self.html.write('</body>\n')
        self.html.write('</html>\n')
        self.html.close()

        if self.wiki_variables!='':
            self.wiki.write('= Variables =\n\n')
            self.wiki.write(self.wiki_variables)
            self.wiki.write('\n')
        if self.wiki_functions!='':
            self.wiki.write('= Functions =\n\n')
            self.wiki.write(self.wiki_functions)
            self.wiki.write('\n')
        if self.wiki_classes!='':
            self.wiki.write('= Classes =\n\n')
            self.wiki.write(self.wiki_classes)
            self.wiki.write('\n')
        self.wiki.close()

    def addFile(self, fn, title, fls = True):
        """Given a filename and section title adds the contents of said file to the output. Various flags influence how this works - fls makes the first line bold (html) / starred (wiki). Lines containing '.py - ' or '.txt - ' get the filename part specially marked up."""
        html = []
        wiki = []

        for i, line in enumerate(open(fn,'r').readlines()):
            # html version of the line...
            hl = line.replace('\n', '')
            if i==0 and fls: hl = '<strong>' + hl + '</strong>'
            for ext in ['py','txt']:
                if '.%s - '%ext in hl:
                    s = hl.split('.%s - '%ext, 1)
                    hl = '<i>' + s[0] + '.%s</i> - '%ext + s[1]
            html.append(hl)

            # wiki version of the same line...
            wl = line.strip()
            if i==0 and fls: wl = '*%s*'%wl
            for ext in ['py','txt']:
                if '.%s - '%ext in wl:
                    s = wl.split('.%s - '%ext, 1)
                    wl = '`' + s[0] + '.%s` - '%ext + s[1] + '\n'
            wiki.append(wl)

        # File sections are written immediately, unlike variables/functions/classes...
        self.html.write(self.doc.bigsection(title, '#ffffff', '#7799ee', '<br/>'.join(html)))

        self.wiki.write('== %s ==\n'%title)
        self.wiki.write('\n'.join(wiki))
        self.wiki.write('----\n\n')

    def addVariable(self, var, desc):
        """Adds a variable to the documentation. Given the nature of this you provide it as a pair of strings - one referencing the variable, the other some kind of description of its use etc.."""
        self.html_variables += '<strong>%s</strong><br/>'%var
        self.html_variables += '%s<br/><br/>\n'%desc

        self.wiki_variables += '*`%s`*\n'%var
        self.wiki_variables += ' %s\n\n'%desc

    def addFunction(self, func):
        """Adds a function to the documentation. You provide the actual function instance."""
        self.html_functions += self.doc.docroutine(func).replace('&nbsp;',' ')
        self.html_functions += '\n'

        # For the wiki the signature is rebuilt by hand from introspection...
        name = func.__name__
        args, varargs, keywords, defaults = inspect.getargspec(func)
        doc = inspect.getdoc(func)

        # Left-pad the defaults with None so they align 1:1 with args...
        if defaults==None: defaults = list()
        defaults = (len(args)-len(defaults)) * [None] + list(defaults)

        arg_str = ''
        if len(args)!=0:
            arg_str += reduce(lambda a, b: '%s, %s'%(a,b), map(lambda arg, d: arg if d==None else '%s = %s'%(arg,d), args, defaults))
        if varargs!=None: arg_str += ', *%s'%varargs if arg_str!='' else '*%s'%varargs
        if keywords!=None: arg_str += ', **%s'%keywords if arg_str!='' else '**%s'%keywords

        self.wiki_functions += '*`%s(%s)`*\n'%(name, arg_str)
        self.wiki_functions += ' %s\n\n'%doc

    def addClass(self, cls):
        """Adds a class to the documentation. You provide the actual class object."""
        self.html_classes += self.doc.docclass(cls).replace('&nbsp;',' ')
        self.html_classes += '\n'

        name = cls.__name__
        parents = filter(lambda a: a!=cls, inspect.getmro(cls)) # All ancestors except the class itself.
        doc = inspect.getdoc(cls)

        par_str = ''
        if len(parents)!=0:
            par_str += reduce(lambda a, b: '%s, %s'%(a,b), map(lambda p: p.__name__, parents))

        self.wiki_classes += '== %s(%s) ==\n'%(name, par_str)
        self.wiki_classes += ' %s\n\n'%doc

        methods = inspect.getmembers(cls, lambda x: inspect.ismethod(x) or inspect.isbuiltin(x) or inspect.isroutine(x))

        # Sort so __init__ comes first ('___' sorts before any normal name), rest alphabetical...
        def method_key(pair):
            if pair[0]=='__init__': return '___'
            else: return pair[0]
        methods.sort(key=method_key)

        for name, method in methods:
            # NOTE(review): the guard requires 'not inspect.ismethod(method)', which makes
            # the 'if inspect.ismethod(method)' branch below unreachable - verify the
            # intended filtering against actual Python 2 getmembers behaviour.
            if not name.startswith('_%s'%cls.__name__) and (not inspect.ismethod(method) and name[:2]!='__'):
                if inspect.ismethod(method):
                    args, varargs, keywords, defaults = inspect.getargspec(method)
                else:
                    # Builtins and other non-introspectable routines get a placeholder signature...
                    args = ['?']
                    varargs = None
                    keywords = None
                    defaults = None

                if defaults==None: defaults = list()
                defaults = (len(args)-len(defaults)) * [None] + list(defaults)

                arg_str = ''
                if len(args)!=0:
                    arg_str += reduce(lambda a, b: '%s, %s'%(a,b), map(lambda arg, d: arg if d==None else '%s = %s'%(arg,d), args, defaults))
                if varargs!=None: arg_str += ', *%s'%varargs if arg_str!='' else '*%s'%varargs
                if keywords!=None: arg_str += ', **%s'%keywords if arg_str!='' else '**%s'%keywords

                # Walk up the mro to find a docstring if the method itself lacks one...
                def fetch_doc(cls, name):
                    try:
                        method = getattr(cls, name)
                        if method.__doc__!=None: return inspect.getdoc(method)
                    except: pass

                    for parent in filter(lambda a: a!=cls, inspect.getmro(cls)):
                        ret = fetch_doc(parent, name)
                        if ret!=None: return ret

                    return None

                doc = fetch_doc(cls, name)

                self.wiki_classes += '*`%s(%s)`*\n'%(name, arg_str)
                self.wiki_classes += ' %s\n\n'%doc

        # Simple data attributes (descriptors plus int/str/float class constants)...
        variables = inspect.getmembers(cls, lambda x: inspect.ismemberdescriptor(x) or isinstance(x, int) or isinstance(x, str) or isinstance(x, float))
        for name, var in variables:
            if not name.startswith('__'):
                if hasattr(var, '__doc__'): d = var.__doc__
                else: d = str(var)
                self.wiki_classes += '*`%s`* = %s\n\n'%(name, d)
Python
# Copyright (c) 2011, Tom SF Haines
# All rights reserved.

# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:

#  * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
#  * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.

# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.



import unittest
import random
import math

from scipy.special import gammaln, psi, polygamma
# NOTE(review): scipy.weave is Python-2 only and was removed from scipy (0.19+);
# this module cannot run on a modern stack without porting to e.g. cffi/cython.
from scipy import weave

from utils.start_cpp import start_cpp



# Provides various gamma-related functions...
# C++ support code: lnGamma (Lanczos approximation), digamma and trigamma
# (asymptotic series with recurrence shifts). Guarded so it can be included
# into multiple weave snippets safely.
gamma_code = start_cpp() + """
#ifndef GAMMA_CODE
#define GAMMA_CODE

#include <cmath>

// Returns the natural logarithm of the Gamma function...
// (Uses Lanczos's approximation.)
double lnGamma(double z)
{
 static const double coeff[9] = {0.99999999999980993, 676.5203681218851, -1259.1392167224028, 771.32342877765313, -176.61502916214059, 12.507343278686905, -0.13857109526572012, 9.9843695780195716e-6, 1.5056327351493116e-7};

 if (z<0.5)
 {
  // Use reflection formula, as approximation doesn't work down here...
  return log(M_PI) - log(sin(M_PI*z)) - lnGamma(1.0-z);
 }
 else
 {
  double x = coeff[0];
  for (int i=1;i<9;i++) x += coeff[i]/(z+i-1);
  double t = z + 6.5;
  return log(sqrt(2.0*M_PI)) + (z-0.5)*log(t) - t + log(x);
 }
}

// Calculates the Digamma function, i.e. the derivative of the log of the Gamma function - uses a partial expansion of an infinite series to 4 terms that is good for high values, and an identity to express lower values in terms of higher values...
double digamma(double z)
{
 static const double highVal = 13.0; // A bit of fiddling shows that the last term with this is of the order 1e-10, so we can expect at least 9 digits of accuracy past the decimal point.

 double ret = 0.0;
 while (z<highVal)
 {
  ret -= 1.0/z;
  z += 1.0;
 }

 double iz1 = 1.0/z;
 double iz2 = iz1*iz1;
 double iz4 = iz2*iz2;
 double iz6 = iz4*iz2;

 ret += log(z) - iz1/2.0 - iz2/12.0 + iz4/120.0 - iz6/252.0;

 return ret;
}

// Calculates the trigamma function - uses a partial expansion of an infinite series that is accurate for large values, and then uses an identity to express lower values in terms of higher values - same approach as for the digamma function basically...
double trigamma(double z)
{
 static const double highVal = 8.0;

 double ret = 0.0;
 while (z<highVal)
 {
  ret += 1.0/(z*z);
  z += 1.0;
 }
 z -= 1.0;

 double iz1 = 1.0/z;
 double iz2 = iz1*iz1;
 double iz3 = iz1*iz2;
 double iz5 = iz3*iz2;
 double iz7 = iz5*iz2;
 double iz9 = iz7*iz2;

 ret += iz1 - 0.5*iz2 + iz3/6.0 - iz5/30.0 + iz7/42.0 - iz9/30.0;

 return ret;
}

#endif
"""



def lnGamma(z):
    """Pointless as scipy, a library this is dependent on, defines this, but useful for testing. Returns the logarithm of the gamma function."""
    code = start_cpp(gamma_code) + """
    return_val = lnGamma(z);
    """
    return weave.inline(code, ['z'], support_code=gamma_code)

def digamma(z):
    """Pointless as scipy, a library this is dependent on, defines this, but useful for testing. Returns an evaluation of the digamma function."""
    code = start_cpp(gamma_code) + """
    return_val = digamma(z);
    """
    return weave.inline(code, ['z'], support_code=gamma_code)

def trigamma(z):
    """Pointless as scipy, a library this is dependent on, defines this, but useful for testing. Returns an evaluation of the trigamma function."""
    code = start_cpp(gamma_code) + """
    return_val = trigamma(z);
    """
    return weave.inline(code, ['z'], support_code=gamma_code)



class TestFuncs(unittest.TestCase):
    """Test code for the assorted gamma-related functions - each compares the C++ implementation against the scipy reference over random inputs."""

    def test_compile(self):
        # Just verifies that the support code compiles at all...
        code = start_cpp(gamma_code) + """
        """
        weave.inline(code, support_code=gamma_code)

    def test_error_lngamma(self):
        for _ in xrange(1000): # xrange: Python 2 era module.
            z = random.uniform(0.01, 100.0)
            own = lnGamma(z)
            good = gammaln(z)
            assert(math.fabs(own-good)<1e-12)

    def test_error_digamma(self):
        for _ in xrange(1000):
            z = random.uniform(0.01, 100.0)
            own = digamma(z)
            good = psi(z)
            assert(math.fabs(own-good)<1e-9)

    def test_error_trigamma(self):
        for _ in xrange(1000):
            z = random.uniform(0.01, 100.0)
            own = trigamma(z)
            good = polygamma(1,z)
            assert(math.fabs(own-good)<1e-9)



# If this file is run do the unit tests...
if __name__ == '__main__':
    unittest.main()
Python
# -*- coding: utf-8 -*-

# Copyright (c) 2010, Tom SF Haines
# All rights reserved.

# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:

#  * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
#  * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.

# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.



import inspect
import hashlib



def start_cpp(hash_str = None):
    """This method does two things - firstly it adds the correct line numbers to scipy.weave code (Good for debugging) and secondly it can optionally insert a hash code of some other code into the code. This latter feature is useful for working around the fact that scipy.weave only recompiles if the hash of the code changes, but ignores the support_code - passing the support_code into start_cpp avoids this problem by putting its hash into the code and forcing a recompile when that code changes. Usage is <code variable> = start_cpp([support_code variable]) + <3 quotations to start big comment with code in, typically going over many lines.>"""
    # Inspect the caller's frame so the emitted #line directive points at the
    # user's source file and line, not at this helper...
    frame = inspect.currentframe().f_back
    info = inspect.getframeinfo(frame)

    if hash_str is None: # Was ==None; identity test is the idiomatic (and safe) form.
        return '#line %i "%s"\n'%(info[1],info[0])
    else:
        # md5 requires bytes on Python 3; on Python 2 str already is bytes, so
        # this encode keeps behaviour identical there while fixing py3.
        data = hash_str if isinstance(hash_str, bytes) else hash_str.encode('utf8')
        h = hashlib.md5()
        h.update(data)
        hash_val = h.hexdigest()
        # The hash rides along in a trailing comment, changing the code's own
        # hash whenever the support code changes and so forcing a recompile...
        return '#line %i "%s" // %s\n'%(info[1],info[0],hash_val)
Python
# -*- coding: utf-8 -*- # Code copied from http://opencv.willowgarage.com/wiki/PythonInterface - license unknown, but presumed to be at least as liberal as bsd (The license for opencv.). import cv import numpy as np def cv2array(im): """Converts a cv array to a numpy array.""" depth2dtype = { cv.IPL_DEPTH_8U: 'uint8', cv.IPL_DEPTH_8S: 'int8', cv.IPL_DEPTH_16U: 'uint16', cv.IPL_DEPTH_16S: 'int16', cv.IPL_DEPTH_32S: 'int32', cv.IPL_DEPTH_32F: 'float32', cv.IPL_DEPTH_64F: 'float64', } arrdtype=im.depth a = np.fromstring( im.tostring(), dtype=depth2dtype[im.depth], count=im.width*im.height*im.nChannels) a.shape = (im.height,im.width,im.nChannels) return a def array2cv(a): """Converts a numpy array to a cv array, if possible.""" dtype2depth = { 'uint8': cv.IPL_DEPTH_8U, 'int8': cv.IPL_DEPTH_8S, 'uint16': cv.IPL_DEPTH_16U, 'int16': cv.IPL_DEPTH_16S, 'int32': cv.IPL_DEPTH_32S, 'float32': cv.IPL_DEPTH_32F, 'float64': cv.IPL_DEPTH_64F, } try: nChannels = a.shape[2] except: nChannels = 1 cv_im = cv.CreateImageHeader((a.shape[1],a.shape[0]), dtype2depth[str(a.dtype)], nChannels) cv.SetData(cv_im, a.tostring(), a.dtype.itemsize*nChannels*a.shape[1]) return cv_im
Python
# -*- coding: utf-8 -*-

# Copyright (c) 2011, Tom SF Haines
# All rights reserved.

# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:

# * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.

# * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.

# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.


# NOTE(review): This module is Python 2 only - it uses func_code, xrange and relies on map() returning a list; a port would need __code__ and list(map(...)).

import multiprocessing as mp
import multiprocessing.synchronize # To make sure we have all the functionality.
import types
import marshal

import unittest


def repeat(x):
    """A generator that repeats the input forever - can be used with the mp_map function to give data to a function that is constant."""
    while True:
        yield x


def run_code(code,args):
    """Internal use function that does the work in each process."""
    # Rebuild a callable from the marshalled code object. It is given *this* module's globals() and a dummy name, so anything the original function accessed from its own module or closure is unavailable - exactly the restriction documented on mp_map...
    code = marshal.loads(code)
    func = types.FunctionType(code, globals(), '_')
    return func(*args)


def mp_map(func, *iters, **keywords):
    """A multiprocess version of the map function.

    Note that func must limit itself to the data provided - if it accesses anything else (globals, locals to its definition.) it will fail. There is a repeat generator provided in this module to work around such issues. Note that, unlike map, this iterates the length of the shortest of inputs, rather than the longest - whilst this makes it not a perfect substitute it makes passing constant argumenmts easier as they can just repeat for infinity."""
    # Use a caller-supplied pool if given (keyword 'pool'), otherwise spin up a default one...
    if 'pool' in keywords: pool = keywords['pool']
    else: pool = mp.Pool()

    # Functions do not pickle, but their code objects marshal - ship the code and rebuild per call in run_code...
    code = marshal.dumps(func.func_code)

    # Dispatch every argument tuple asynchronously; zip stops at the shortest input, which is what lets repeat() work...
    jobs = []
    for args in zip(*iters):
        jobs.append(pool.apply_async(run_code,(code,args)))

    # Collect the results in submission order, replacing each async handle with its value (blocking as needed)...
    for i in xrange(len(jobs)):
        jobs[i] = jobs[i].get()

    return jobs


class TestMpMap(unittest.TestCase):
    """Unit tests for mp_map - run this module directly to execute them."""

    def test_simple1(self):
        # Identity function over a small list...
        data = ['a','b','c','d']
        def noop(data): return data
        data_noop = mp_map(noop, data)
        self.assertEqual(data, data_noop)

    def test_simple2(self):
        # A lambda over a larger list, compared against the serial map...
        data = [x for x in xrange(1000)]
        data_double = mp_map(lambda a: a*2, data)
        self.assertEqual(map(lambda a: a*2,data), data_double)

    def test_gen(self):
        # Generator input rather than a list...
        def gen():
            for i in xrange(100):
                yield i
        data_double = mp_map(lambda a: a*2, gen())
        self.assertEqual(map(lambda a: a*2,gen()), data_double)

    def test_repeat(self):
        # A two argument function where the second argument is held constant via repeat()...
        def mult(a,b): return a*b
        data = [x for x in xrange(50,5000,5)]
        data_triple = mp_map(mult, data, repeat(3))
        self.assertEqual(map(lambda a: a*3,data),data_triple)

    def test_none(self):
        # Empty input gives an empty output...
        data = []
        data_sqr = mp_map(lambda x: x*x, data)
        self.assertEqual([],data_sqr)


if __name__ == '__main__': unittest.main()
Python
# Copyright (c) 2012, Tom SF Haines
# All rights reserved.

# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:

# * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.

# * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.

# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.


import sys
import os.path
import tempfile
import shutil

from distutils.core import setup, Extension
import distutils.ccompiler
import distutils.dep_util


# A compiler object, used only to query the platform's shared object naming convention; None if one cannot be created...
try:
    __default_compiler = distutils.ccompiler.new_compiler()
except Exception: # Narrowed from a bare except - a best effort probe, but do not hide KeyboardInterrupt/SystemExit.
    __default_compiler = None


def make_mod(name, base, source, openCL = False):
    """Uses distutils to compile a python module - really just a set of hacks to allow this to be done 'on demand', so it only compiles if the module does not exist or is older than the current source, and after compilation the program can continue on its merry way, and immediatly import the just compiled module. Note that on failure erros can be thrown - its your choice to catch them or not.

    name is the modules name, i.e. what you want to use with the import statement. base is the base directory for the module, which contains the source file - often you would want to set this to 'os.path.dirname(__file__)', assuming the .py file that imports the module is in the same directory as the code. It is this directory that the module is output to. source is the filename of the source code to compile, or alternativly a list of filenames. openCL indicates if OpenCL is used by the module, in which case it does all the necesary setup - done like this so these setting can be kept centralised, so when they need to be different for a new platform they only have to be changed in one place."""
    if __default_compiler is None: raise Exception('No compiler!')

    # Work out the various file names - check if we actually need to do anything...
    if not isinstance(source, list): source = [source]
    source_path = map(lambda s: os.path.join(base, s), source)
    library_path = os.path.join(base, __default_compiler.shared_object_filename(name))

    # Rebuild only if some source file is newer than the compiled module. (any() replaces the previous reduce(or) - identical result, except an empty source list is now a no-op rather than a TypeError.)
    if any(map(lambda s: distutils.dep_util.newer(s, library_path), source_path)):
        # Backup the argv variable and create a temporary directory to do all work in. Assigned *before* the try so the finally block can always clean up - previously a failure in mkdtemp would have caused a NameError in the finally...
        old_argv = sys.argv[:]
        temp_dir = tempfile.mkdtemp()
        try:
            # Prepare the extension - distutils is driven through argv, so fake a 'build_ext' command line...
            sys.argv = ['','build_ext','--build-lib', base, '--build-temp', temp_dir]

            # Headers go in depends (trigger recompiles), everything else is compiled...
            comp_path = filter(lambda s: not s.endswith('.h'), source_path)
            depends = filter(lambda s: s.endswith('.h'), source_path)

            if openCL:
                # The include/library paths cover both the nvidia (cuda) and AMD (APP) OpenCL installs...
                ext = Extension(name, comp_path, include_dirs=['/usr/local/cuda/include', '/opt/AMDAPP/include'], libraries = ['OpenCL'], library_dirs = ['/usr/lib64/nvidia', '/opt/AMDAPP/lib/x86_64'], depends=depends)
            else:
                ext = Extension(name, comp_path, depends=depends)

            # Compile... (stray debug print removed here)
            setup(name=name, version='1.0.0', ext_modules=[ext])
        finally:
            # Cleanup the argv variable and the temporary directory...
            sys.argv = old_argv
            shutil.rmtree(temp_dir, True)
Python
# -*- coding: utf-8 -*-

# Copyright (c) 2011, Tom SF Haines
# All rights reserved.

# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:

# * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.

# * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.

# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.


from start_cpp import start_cpp
from numpy_help_cpp import numpy_util_code


# Provides various functions to assist with manipulating python objects from c++ code.
# The string below is C++ support code for scipy.weave. It is prefixed with the numpy helpers it depends on (Byte1D/Float1D) and a #line directive from start_cpp, so compiler errors reference this file. The array getters allocate with new[] - the caller owns the returned buffer and must delete[] it. NOTE(review): PyInt_AsLong is Python 2 C API.
python_obj_code = numpy_util_code + start_cpp() + """
#ifndef PYTHON_OBJ_CODE
#define PYTHON_OBJ_CODE

// Extracts a boolean from an object...
bool GetObjectBoolean(PyObject * obj, const char * name)
{
 PyObject * b = PyObject_GetAttrString(obj, name);
 bool ret = b!=Py_False;
 Py_DECREF(b);
 return ret;
}

// Extracts an int from an object...
int GetObjectInt(PyObject * obj, const char * name)
{
 PyObject * i = PyObject_GetAttrString(obj, name);
 int ret = PyInt_AsLong(i);
 Py_DECREF(i);
 return ret;
}

// Extracts a float from an object...
float GetObjectFloat(PyObject * obj, const char * name)
{
 PyObject * f = PyObject_GetAttrString(obj, name);
 float ret = PyFloat_AsDouble(f);
 Py_DECREF(f);
 return ret;
}

// Extracts an array from an object, returning it as a new[] unsigned char array. You can also pass in a pointer to an int to have the size of the array stored...
unsigned char * GetObjectByte1D(PyObject * obj, const char * name, int * size = 0)
{
 PyArrayObject * nao = (PyArrayObject*)PyObject_GetAttrString(obj, name);

 unsigned char * ret = new unsigned char[nao->dimensions[0]];
 if (size) *size = nao->dimensions[0];

 for (int i=0;i<nao->dimensions[0];i++) ret[i] = Byte1D(nao,i);

 Py_DECREF(nao);
 return ret;
}

// Extracts an array from an object, returning it as a new[] float array. You can also pass in a pointer to an int to have the size of the array stored...
float * GetObjectFloat1D(PyObject * obj, const char * name, int * size = 0)
{
 PyArrayObject * nao = (PyArrayObject*)PyObject_GetAttrString(obj, name);

 float * ret = new float[nao->dimensions[0]];
 if (size) *size = nao->dimensions[0];

 for (int i=0;i<nao->dimensions[0];i++) ret[i] = Float1D(nao,i);

 Py_DECREF(nao);
 return ret;
}

#endif
"""
Python
# -*- coding: utf-8 -*- # Copyright (c) 2011, Tom SF Haines # All rights reserved. # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. # * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. from start_cpp import start_cpp # Defines helper functions for accessing numpy arrays... 
numpy_util_code = start_cpp() + """ #ifndef NUMPY_UTIL_CODE #define NUMPY_UTIL_CODE float & Float1D(PyArrayObject * arr, int index = 0) { return *(float*)(arr->data + index*arr->strides[0]); } float & Float2D(PyArrayObject * arr, int index1 = 0, int index2 = 0) { return *(float*)(arr->data + index1*arr->strides[0] + index2*arr->strides[1]); } float & Float3D(PyArrayObject * arr, int index1 = 0, int index2 = 0, int index3 = 0) { return *(float*)(arr->data + index1*arr->strides[0] + index2*arr->strides[1] + index3*arr->strides[2]); } unsigned char & Byte1D(PyArrayObject * arr, int index = 0) { //assert(arr->strides[0]==sizeof(unsigned char)); return *(unsigned char*)(arr->data + index*arr->strides[0]); } unsigned char & Byte2D(PyArrayObject * arr, int index1 = 0, int index2 = 0) { //assert(arr->strides[0]==sizeof(unsigned char)); return *(unsigned char*)(arr->data + index1*arr->strides[0] + index2*arr->strides[1]); } unsigned char & Byte3D(PyArrayObject * arr, int index1 = 0, int index2 = 0, int index3 = 0) { //assert(arr->strides[0]==sizeof(unsigned char)); return *(unsigned char*)(arr->data + index1*arr->strides[0] + index2*arr->strides[1] + index3*arr->strides[2]); } int & Int1D(PyArrayObject * arr, int index = 0) { //assert(arr->strides[0]==sizeof(int)); return *(int*)(arr->data + index*arr->strides[0]); } int & Int2D(PyArrayObject * arr, int index1 = 0, int index2 = 0) { //assert(arr->strides[0]==sizeof(int)); return *(int*)(arr->data + index1*arr->strides[0] + index2*arr->strides[1]); } int & Int3D(PyArrayObject * arr, int index1 = 0, int index2 = 0, int index3 = 0) { //assert(arr->strides[0]==sizeof(int)); return *(int*)(arr->data + index1*arr->strides[0] + index2*arr->strides[1] + index3*arr->strides[2]); } #endif """
Python
#! /usr/bin/env python
# -*- coding: utf-8 -*-

# Copyright (c) 2011, Tom SF Haines
# All rights reserved.

# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:

# * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.

# * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.

# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.


# Documentation generation script for the utils package - registers every public variable, function and class with DocGen.

import cvarray
import mp_map
import prog_bar
import numpy_help_cpp
import python_obj_cpp
import matrix_cpp
import gamma_cpp
import setProcName
import start_cpp
import make

import doc_gen


# Setup...
doc = doc_gen.DocGen('utils', 'Utilities/Miscellaneous', 'Library of miscellaneous stuff - most modules depend on this.')
doc.addFile('readme.txt', 'Overview')

# Variables...
doc.addVariable('numpy_help_cpp.numpy_util_code', 'Assorted utility functions for accessing numpy arrays within scipy.weave C++ code.')
doc.addVariable('python_obj_cpp.python_obj_code', 'Assorted utility functions for interfacing with python objects from scipy.weave C++ code.')
doc.addVariable('matrix_cpp.matrix_code', 'Matrix manipulation routines for use in scipy.weave C++')
doc.addVariable('gamma_cpp.gamma_code', 'Gamma and related functions for use in scipy.weave C++')

# Functions... (make.make_mod was previously registered twice - the duplicate has been removed)
doc.addFunction(make.make_mod)
doc.addFunction(cvarray.cv2array)
doc.addFunction(cvarray.array2cv)
doc.addFunction(mp_map.repeat)
doc.addFunction(mp_map.mp_map)
doc.addFunction(setProcName.setProcName)
doc.addFunction(start_cpp.start_cpp)

# Classes...
doc.addClass(prog_bar.ProgBar)
doc.addClass(doc_gen.DocGen)
Python
#! /usr/bin/env python
# -*- coding: utf-8 -*-

# Copyright (c) 2010, Tom SF Haines
# All rights reserved.

# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:

# * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.

# * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.

# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.


from ctypes import *


def setProcName(name):
    """Sets the process name, linux only - useful for those programs where you might want to do a killall, but don't want to slaughter all the other python processes. Note that there are multiple mechanisms, and that the given new name can be shortened by differing amounts in differing cases.

    NOTE(review): name is passed to c_char_p, so this expects a byte string - Python 2 str; under Python 3 a bytes object would be needed."""
    # Call the process control function... (option 15 is PR_SET_NAME - sets the name reported by e.g. killall, truncated by the kernel to its comm-name limit.)
    libc = cdll.LoadLibrary('libc.so.6')
    libc.prctl(15, c_char_p(name), 0, 0, 0)

    # Update argv... (overwrite argv[0] in place - reached via glibc's *internal* _dl_argv symbol - so tools that read the command line, e.g. 'ps -f', also see the new name. strncpy caps the copy at the old argv[0] length, so the name can be truncated here by a different amount than above.)
    charPP = POINTER(POINTER(c_char))
    argv = charPP.in_dll(libc,'_dl_argv')
    size = libc.strlen(argv[0])
    libc.strncpy(argv[0],c_char_p(name),size)


if __name__=='__main__':
    # Quick test that it works - show the process list before and after renaming, via both mechanisms that ps exposes...
    import os
    ps1 = 'ps'
    ps2 = 'ps -f'

    os.system(ps1)
    os.system(ps2)

    setProcName('wibble_wobble')

    os.system(ps1)
    os.system(ps2)
Python
#! /usr/bin/env python # -*- coding: utf-8 -*- # Copyright 2011 Tom SF Haines # Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at # http://www.apache.org/licenses/LICENSE-2.0 # Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. import gcp from utils import doc_gen # Setup... doc = doc_gen.DocGen('gcp', 'Gaussian Conjugate Prior', 'Library of distributions focused on the Gaussian and its conjugate prior') doc.addFile('readme.txt', 'Overview') # Classes... doc.addClass(gcp.Gaussian) doc.addClass(gcp.GaussianInc) doc.addClass(gcp.Wishart) doc.addClass(gcp.StudentT) doc.addClass(gcp.GaussianPrior)
Python
# -*- coding: utf-8 -*-

# Copyright 2011 Tom SF Haines

# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at

# http://www.apache.org/licenses/LICENSE-2.0

# Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.


import math

import numpy
import numpy.linalg
import numpy.random


class Gaussian:
    """A basic multivariate Gaussian class. Has caching to avoid duplicate calculation - the precision, covariance, normalising constant and cholesky decomposition are computed lazily and invalidated when the relevant parameter is replaced."""

    def __init__(self, dims):
        """dims is the number of dimensions. Initialises with mu at the origin and the identity matrix for the precision/covariance. dims can also be another Gaussian object, in which case it acts as a copy constructor."""
        if isinstance(dims, Gaussian):
            # Copy constructor. Note the 'is (not) None' tests throughout this class - the previous '!=None'/'==None' comparisons are elementwise for numpy arrays, making the conditional ambiguous (an error under modern numpy)...
            self.mean = dims.mean.copy()
            self.precision = dims.precision.copy() if dims.precision is not None else None
            self.covariance = dims.covariance.copy() if dims.covariance is not None else None
            self.norm = dims.norm
            self.cholesky = dims.cholesky.copy() if dims.cholesky is not None else None
        else:
            self.mean = numpy.zeros(dims, dtype=numpy.float32)
            self.precision = numpy.identity(dims, dtype=numpy.float32)
            self.covariance = None # Lazily calculated by inverting the precision.
            self.norm = None # Lazily calculated normalising constant.
            self.cholesky = None # Lazily calculated, used for drawing samples.

    def setMean(self, mean):
        """Sets the mean - you can use anything numpy will interprete as a 1D array of the correct length."""
        nm = numpy.array(mean, dtype=numpy.float32)
        assert(nm.shape==self.mean.shape)
        self.mean = nm
        # The cached quantities depend only on the precision/covariance, so nothing needs invalidating here.

    def setPrecision(self, precision):
        """Sets the precision matrix. Alternativly you can use the setCovariance method."""
        np = numpy.array(precision, dtype=numpy.float32)
        assert(np.shape==(self.mean.shape[0],self.mean.shape[0]))
        self.precision = np
        # Invalidate everything derived from the old matrix...
        self.covariance = None
        self.norm = None
        self.cholesky = None

    def setCovariance(self, covariance):
        """Sets the covariance matrix. Alternativly you can use the setPrecision method."""
        nc = numpy.array(covariance, dtype=numpy.float32)
        assert(nc.shape==(self.mean.shape[0],self.mean.shape[0]))
        self.covariance = nc
        # Invalidate everything derived from the old matrix...
        self.precision = None
        self.norm = None
        self.cholesky = None

    def getMean(self):
        """Returns the mean."""
        return self.mean

    def getPrecision(self):
        """Returns the precision matrix, inverting the covariance if it has not been calculated yet."""
        if self.precision is None:
            self.precision = numpy.linalg.inv(self.covariance)
        return self.precision

    def getCovariance(self):
        """Returns the covariance matrix, inverting the precision if it has not been calculated yet."""
        if self.covariance is None:
            self.covariance = numpy.linalg.inv(self.precision)
        return self.covariance

    def getNorm(self):
        """Returns the normalising constant of the distribution - (2*pi)^(-d/2) * sqrt(det(precision)). Typically for internal use only."""
        if self.norm is None:
            self.norm = math.pow(2.0*math.pi,-0.5*self.mean.shape[0]) * math.sqrt(numpy.linalg.det(self.getPrecision()))
        return self.norm

    def prob(self, x):
        """Given a vector x evaluates the probability density function at that point."""
        x = numpy.asarray(x)
        offset = x - self.mean
        # Mahalanobis-style quadratic form offset^T * precision * offset...
        val = numpy.dot(offset,numpy.dot(self.getPrecision(),offset))
        return self.getNorm() * math.exp(-0.5 * val)

    def sample(self):
        """Draws and returns a sample from the distribution."""
        if self.cholesky is None:
            self.cholesky = numpy.linalg.cholesky(self.getCovariance())
        # Transform a standard normal draw by the cholesky factor and shift by the mean...
        z = numpy.random.normal(size=self.mean.shape)
        return self.mean + numpy.dot(self.cholesky,z)

    def __str__(self):
        return '{mean:%s, covar:%s}'%(str(self.mean), str(self.getCovariance()))
Python
# Copyright 2011 Tom SF Haines

# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at

# http://www.apache.org/licenses/LICENSE-2.0

# Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.


import solvers

from solve_shared import Params, State
from dp_conc import PriorConcDP


class Corpus:
    """Contains a set of Document-s, plus parameters for the graphical models priors - everything required as input to build a model, except a Params object."""

    def __init__(self):
        """Basic setup - sets a whole bunch of stuff to sensible defaults."""
        # Create the array of documents and support variables...
        self.docs = []
        self.sampleCount = 0 # How many samples exist in all the documents.
        self.wordCount = 0 # How many types of words exist.

        # Behavoural flags...
        self.dnrDocInsts = False # dnr == 'do not resample'.
        self.dnrCluInsts = False
        self.seperateClusterConc = False
        self.seperateDocumentConc = False
        self.oneCluster = False
        self.calcBeta = False

        # Parameters for the priors in the graphical model...
        self.alpha = PriorConcDP() # Document instance DP
        self.beta = 1.0 # Topic multinomial symmetric Dirichlet distribution prior.
        self.gamma = PriorConcDP() # Topic DP
        self.rho = PriorConcDP() # Cluster instance DP
        self.mu = PriorConcDP() # Cluster creating DP

    def setDocInstsDNR(self, val):
        """False to resample the document instances, True to not. Defaults to False, but can be set True to save a bit of computation. Not recomended to be changed as convergance is poor without it."""
        self.dnrDocInsts = val

    def getDocInstsDNR(self):
        """Returns False if the document instances are going to be resampled, True if they are not."""
        return self.dnrDocInsts

    def setCluInstsDNR(self, val):
        """False to resample the cluster instances, True to not. Defaults to False, but can be set True to save quite a bit of computation. Its debatable if switching this to True causes the results to degrade in any way, but left on by default as indicated in the paper."""
        self.dnrCluInsts = val

    def getCluInstsDNR(self):
        """Returns False if the cluster instances are going to be resampled, True if they are not."""
        return self.dnrCluInsts

    def setSeperateClusterConc(self, val):
        """True if you want clusters to each have their own concentration parameter, False, the default, if you want a single concentration parameter shared between all clusters. Note that setting this True doesn't really work in my experiance."""
        self.seperateClusterConc = val

    def getSeperateClusterConc(self):
        """True if each cluster has its own seperate concentration parameter, false if they are shared."""
        return self.seperateClusterConc

    def setSeperateDocumentConc(self, val):
        """True if you want each document to have its own concentration value, False if you want a single value shared between all documents. Experiance shows that the default, False, is the only sensible option most of the time, though when single cluster is on True can give advantages."""
        self.seperateDocumentConc = val

    def getSeperateDocumentConc(self):
        """True if each document has its own concentration parameter, False if they all share a single concentration parameter."""
        return self.seperateDocumentConc

    def setOneCluster(self, val):
        """Leave as False to keep the default cluster behaviour, but set to True to only have a single cluster - this results in a HDP implimentation that has an extra pointless layer, making a it a bit inefficient, but not really affecting the results relative to a normal HDP implimentation."""
        self.oneCluster = val

    def getOneCluster(self):
        """Returns False for normal behaviour, True if only one cluster will be used - this forces the algorithm to be normal HDP, with an excess level, rather than dual HDP."""
        return self.oneCluster

    def setCalcBeta(self, val):
        """Leave as False to have beta constant as the algorithm runs, True if you want it recalculated based on the topic multinomials drawn from it. Defaults to False."""
        self.calcBeta = val

    def getCalcBeta(self):
        """Returns False to leave the beta prior on topic word multinomials as is, True to indicate that it should be optimised"""
        return self.calcBeta

    def setAlpha(self, alpha, beta, conc):
        """Sets the concentration details for the per-document DP from which the topics for words are drawn."""
        self.alpha.alpha = alpha
        self.alpha.beta = beta
        self.alpha.conc = conc

    def getAlpha(self):
        """Returns the PriorConcDP for the alpha parameter."""
        return self.alpha

    def setBeta(self, beta):
        """Parameter for the symmetric Dirichlet prior on the multinomial distribution from which words are drawn, one for each topic. (Symmetric therefore a single float as input.)"""
        assert(beta>0.0)
        self.beta = beta

    def getBeta(self):
        """Returns the current beta value. Defaults to 1.0."""
        return self.beta

    def setGamma(self, alpha, beta, conc):
        """Sets the concentration details for the topic DP, from which topics are drawn"""
        self.gamma.alpha = alpha
        self.gamma.beta = beta
        self.gamma.conc = conc

    def getGamma(self):
        """Returns the PriorConcDP for the gamma parameter."""
        return self.gamma

    def setRho(self, alpha, beta, conc):
        """Sets the concentration details used for each cluster instance."""
        self.rho.alpha = alpha
        self.rho.beta = beta
        self.rho.conc = conc

    def getRho(self):
        """Returns the PriorConcDP for the rho parameter."""
        return self.rho

    def setMu(self, alpha, beta, conc):
        """Sets the concentration details used for the DP from which clusters are drawn for documents."""
        self.mu.alpha = alpha
        self.mu.beta = beta
        self.mu.conc = conc

    def getMu(self):
        """Returns the PriorConcDP for the mu parameter."""
        return self.mu

    def add(self, doc):
        """Adds a document to the corpus."""
        # The document's ident is simply its position in the docs list...
        doc.ident = len(self.docs)
        self.docs.append(doc)
        self.sampleCount += doc.getSampleCount()
        # Word identifiers are assumed packed 0..max, so the highest identifier seen so far fixes the vocabulary size. NOTE(review): doc.words appears to be sorted by word identifier, as the last row is taken as the maximum - confirm against Document...
        self.wordCount = max((self.wordCount, doc.words[-1,0]+1))

    def getDocumentCount(self):
        """Number of documents."""
        return len(self.docs)

    def getDocument(self, ident):
        """Returns the Document associated with the given ident."""
        return self.docs[ident]

    def documentList(self):
        """Returns a list of all documents."""
        return self.docs

    def setWordCount(self, wordCount):
        """Because the system autodetects words as being the identifiers 0..max where max is the largest identifier seen it is possible for you to tightly pack words but to want to reserve some past the end. Its also possible for a data set to never contain the last word, creating problems. This allows you to set the number of words, forcing the issue. Note that setting the number less than actually exists will be ignored."""
        self.wordCount = max((self.wordCount, wordCount))

    def getWordCount(self):
        """Number of words as far as a fitter will be concerned; doesn't mean that they have all actually been sampled within documents however."""
        return self.wordCount

    def getSampleCount(self):
        """Returns the number of samples stored in all the documents contained within."""
        return self.sampleCount

    def sampleModel(self, params=None, callback=None, mp=True):
        """Given parameters to run the Gibbs sampling with this does the sampling, and returns the resulting Model object. If params is not provided it uses the default. callback can be a function to report progress, and mp can be set to False if you don't want to make use of multiprocessing."""
        if params==None: params = Params()

        state = State(self, params)

        # Use the multiprocess solver only when requested, useful (more than one run) and actually compiled/available...
        if mp and params.runs>1 and hasattr(solvers,'gibbs_all_mp'): solvers.gibbs_all_mp(state, callback)
        else: solvers.gibbs_all(state, callback)

        return state.getModel()
Python
# Copyright 2011 Tom SF Haines

# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at

# http://www.apache.org/licenses/LICENSE-2.0

# Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.


class Params:
    """Parameters for running the fitter that are universal to all fitters - basically the parameters you would typically associate with Gibbs sampling."""

    def __init__(self, toClone = None):
        """Sets the parameters to reasonable defaults. Will act as a copy constructor if given an instance of this object."""
        if toClone is None:
            # Sensible defaults...
            self._runs = 8
            self._samples = 10
            self._burnIn = 1000
            self._lag = 100
        else:
            # Copy construction - read via the public properties...
            self._runs = toClone.runs
            self._samples = toClone.samples
            self._burnIn = toClone.burnIn
            self._lag = toClone.lag

    def setRuns(self, runs):
        """Sets the number of runs, i.e. how many seperate chains are run."""
        assert isinstance(runs, int) and runs > 0
        self._runs = runs

    def setSamples(self, samples):
        """Number of samples to extract from each chain - total number of samples extracted will hence be samples*runs."""
        assert isinstance(samples, int) and samples > 0
        self._samples = samples

    def setBurnIn(self, burnIn):
        """Number of Gibbs iterations to do for burn in before sampling starts."""
        assert isinstance(burnIn, int) and burnIn >= 0 # Zero burn in is allowed, unlike the other parameters.
        self._burnIn = burnIn

    def setLag(self, lag):
        """Number of Gibbs iterations to do between samples."""
        assert isinstance(lag, int) and lag > 0
        self._lag = lag

    def getRuns(self):
        """Returns the number of runs."""
        return self._runs

    def getSamples(self):
        """Returns the number of samples."""
        return self._samples

    def getBurnIn(self):
        """Returns the burn in length."""
        return self._burnIn

    def getLag(self):
        """Returns the lag length."""
        return self._lag

    runs = property(getRuns, setRuns, None, "Number of seperate chains to run.")
    samples = property(getSamples, setSamples, None, "Number of samples to extract from each chain")
    burnIn = property(getBurnIn, setBurnIn, None, "Number of iterations to do before taking the first sample of a chain.")
    lag = property(getLag, setLag, None, "Number of iterations to do between samples.")

    def fromArgs(self, args, prefix = ''):
        """Extracts from an arg string, typically sys.argv[1:], the parameters, leaving them untouched if not given. Uses --runs, --samples, --burnIn and --lag. Can optionally provide a prefix which is inserted after the '--'"""
        # Each flag takes a value in the following argument slot, hence searching args[:-1] - a flag in the final slot has no value and is ignored. A flag that is absent, or whose value fails int(), leaves the parameter untouched...
        for key in ('runs', 'samples', 'burnIn', 'lag'):
            try:
                pos = args[:-1].index('--' + prefix + key)
                setattr(self, key, int(args[pos+1]))
            except ValueError:
                continue
Python
# Copyright 2011 Tom SF Haines # Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at # http://www.apache.org/licenses/LICENSE-2.0 # Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. # This loads in the entire library and provides the interface - only import needed by a user... # Load in the solvers (Done fist to avoid include loop issues.)... from solvers import * # Load in all the data structure types... from params import Params from solve_shared import State from model import Model, Sample, DocSample, DocModel from dp_conc import PriorConcDP from corpus import Corpus from document import Document
Python
# Copyright 2011 Tom SF Haines # Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at # http://www.apache.org/licenses/LICENSE-2.0 # Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. import math import numpy from dp_conc import PriorConcDP from params import Params class DocState: """Helper class to contain the State of Gibbs sampling a specific document.""" def __init__(self, doc, alphaConc = None): if isinstance(doc, DocState): self.cluster = doc.cluster self.use = doc.use.copy() self.conc = doc.conc self.samples = doc.samples.copy() else: # Index of the cluster its assigned to, initialised to -1 to indicate it is not currently assigned... self.cluster = -1 # Definition of the documents DP, initialised to be empty, which contains instances of cluster instances. The use array is, as typical, indexed by instance in the first dimension and 0 or 1 in the second, where 0 gives the index of what it is instancing and 1 gives the number of users, which at this level will be the number of words. conc provides a sample of the concentration value for the DP... self.use = numpy.empty((0,2), dtype=numpy.int32) self.conc = alphaConc.conc # Contains the documents samples - a 2D array where the first dimension indexes each sample. There are then two columns - the first contains the instance index, which indexes the use array, and the second the word index, which indexes the multinomial assigned to each topic. We default to -1 in the instance index column to indicate that it is unassigned... 
self.samples = numpy.empty((doc.getSampleCount(),2), dtype=numpy.int32) self.samples[:,0] = -1 si = 0 for word, count in map(lambda i: doc.getWord(i), xrange(doc.getWordCount())): for _ in xrange(count): self.samples[si,1] = word si += 1 assert(si==doc.getSampleCount()) class State: """State object, as manipulated by a Gibbs sampler to get samples of the unknown parameters of the model.""" def __init__(self, obj, params = None): """Constructs a state object given either another State object (clone), or a Corpus and a Params object. If the Params object is omitted it uses the default. Also supports construction from a single Document, where it uses lots of defaults but is basically identical to a Corpus with a single Document in - used as a shortcut when fitting a Document to an already learnt model.""" if isinstance(obj, State): # Cloning time... self.dnrDocInsts = obj.dnrDocInsts self.dnrCluInsts = obj.dnrCluInsts self.seperateClusterConc = obj.seperateClusterConc self.seperateDocumentConc = obj.seperateDocumentConc self.oneCluster = obj.oneCluster self.calcBeta = obj.calcBeta self.alpha = PriorConcDP(obj.alpha) self.beta = obj.beta.copy() self.gamma = PriorConcDP(obj.gamma) self.rho = PriorConcDP(obj.rho) self.mu = PriorConcDP(obj.mu) self.topicWord = obj.topicWord.copy() self.topicUse = obj.topicUse.copy() self.topicConc = obj.topicConc self.cluster = map(lambda t: (t[0].copy(),t[1]),obj.cluster) self.clusterUse = obj.clusterUse.copy() self.clusterConc = obj.clusterConc self.doc = map(lambda d: DocState(d), obj.doc) self.params = Params(obj.params) self.model = Model(obj.model) elif isinstance(obj, Document): # Construct from a single document... 
self.dnrDocInsts = False self.dnrCluInsts = False self.seperateClusterConc = False self.seperateDocumentConc = False self.oneCluster = False self.calcBeta = False wordCount = obj.getWord(obj.getWordCount()-1)[0] self.alpha = PriorConcDP() self.beta = numpy.ones(wordCount,dtype=numpy.float32) self.gamma = PriorConcDP() self.rho = PriorConcDP() self.mu = PriorConcDP() self.topicWord = numpy.zeros((0,wordCount), dtype=numpy.int32) self.topicUse = numpy.zeros(0,dtype=numpy.int32) self.topicConc = self.gamma.conc self.cluster = [] self.clusterUse = numpy.zeros(0,dtype=numpy.int32) self.clusterConc = self.mu.conc self.doc = [DocState(obj,self.alpha)] if params!=None: self.params = params else: self.params = Params() self.model = Model() else: # Construct from a corpus, as that is the only left out option... # Behaviour flags... self.dnrDocInsts = obj.getDocInstsDNR() self.dnrCluInsts = obj.getCluInstsDNR() self.seperateClusterConc = obj.getSeperateClusterConc() self.seperateDocumentConc = obj.getSeperateDocumentConc() self.oneCluster = obj.getOneCluster() self.calcBeta = obj.getCalcBeta() # Concentration parameters - these are all constant... self.alpha = obj.getAlpha() self.beta = numpy.ones(obj.getWordCount(),dtype=numpy.float32) self.beta *= obj.getBeta() self.gamma = obj.getGamma() self.rho = obj.getRho() self.mu = obj.getMu() # The topics in the model - consists of three parts - first an array indexed by [topic,word] which gives how many times each word has been drawn from the given topic - this alongside beta allows the relevant Dirichlet posterior to be determined. Additionally we have topicUse, which counts how manny times each topic has been instanced in a cluster - this alongside topicConc, which is the sampled concentration, defines the DP from which topics are drawn for inclusion in clusters. 
self.topicWord = numpy.zeros((0,obj.getWordCount()),dtype=numpy.int32) self.topicUse = numpy.zeros(0,dtype=numpy.int32) self.topicConc = self.gamma.conc # Defines the clusters, as a list of (inst, conc). inst is a 2D array, containing all the topic instances that make up the cluster - whilst the first dimension of the array indexes each instance the second has two entrys only, the first the index number for the topic, the second the number of using document instances. conc is the sampled concentration that completes the definition of the DP defined for each cluster. Additionally we have the DDP from which the specific clusters are drawn - this is defined by clusterUse and clusterConc, just as for the topics. self.cluster = [] self.clusterUse = numpy.zeros(0,dtype=numpy.int32) self.clusterConc = self.mu.conc # List of document objects, to contain the documents - whilst declared immediatly below as an empty list we then proceed to fill it in with the information from the given Corpus... self.doc = [] for doc in obj.documentList(): self.doc.append(DocState(doc,self.alpha)) # Store the parameters... if params!=None: self.params = params else: self.params = Params() # Create a model object, for storing samples into... self.model = Model() def setGlobalParams(self, sample): """Sets a number of parameters for the State after initialisation, taking them from the given Sample object. Designed for use with the addPrior method this allows you to extract all relevant parameters from a Sample. 
Must be called before any Gibbs sampling takes place.""" self.alpha = sample.alpha self.beta = sample.beta.copy() self.gamma = sample.gamma self.rho = sample.rho self.mu = sample.mu self.topicConc = sample.topicConc self.clusterConc = sample.clusterConc for doc in self.doc: doc.conc = self.alpha.conc def addPrior(self, sample): """Given a Sample object this uses it as a prior - this is primarilly used to sample a single or small number of documents using a model already trainned on another set of documents. It basically works by adding the topics and clusters from the sample into this corpus, with the counts all intact so they have the relevant weight and can't be deleted. Note that you could in principle add multiple priors, though that would be quite a strange scenario. If only called once then the topic indices will line up. Note that all the prior parameters are not transfered, though often you would want to - setGlobalParams is provided to do this. Must be called before any Gibbs sampling takes place.""" offset = self.topicWord.shape[0] if self.topicWord.shape[0]!=0: self.topicWord = numpy.vstack((self.topicWord,sample.topicWord)) else: self.topicWord = sample.topicWord.copy() self.topicUse = numpy.hstack((self.topicUse,sample.topicUse)) def mapCluster(c): c0 = c[0].copy() c0[:,0] += offset return (c0,c[1]) self.cluster += map(mapCluster,sample.cluster) self.clusterUse = numpy.hstack((self.clusterUse,sample.clusterUse)) def sample(self): """Samples the current state, storing the current estimate of the model parameters.""" self.model.sampleState(self) def absorbClone(self,clone): """Given a clone absorb all its samples - used for multiprocessing.""" self.model.absorbModel(clone.model) def getParams(self): """Returns the parameters object.""" return self.params def getModel(self): """Returns the model constructed from all the calls to sample().""" return self.model # Includes at tail of file to resolve circular dependencies... 
from document import Document from model import Model
Python
# Copyright 2011 Tom SF Haines # Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at # http://www.apache.org/licenses/LICENSE-2.0 # Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. from scipy import weave import unittest from utils.start_cpp import start_cpp from ds_cpp import ds_code # Provides code for converting from python to the c++ data structure and back again - this is so the data can be stored in a suitable form in both situations, though it comes at the expense of a complex conversion... ds_link_code = ds_code + start_cpp() + """ // Helper for extracting a boolean from a Python object... bool GetObjectBool(PyObject * obj, const char * name) { PyObject * boolObj = PyObject_GetAttrString(obj, name); bool ret = boolObj==Py_True; Py_DECREF(boolObj); return ret; } // Helper converter for the Conc class and its python equivalent, PriorConcDP - just need to go one way. Given an object and the name of the variable in the object that is the PriorConcDP object... void ConcPyToCpp(PyObject * obj, const char * name, Conc & out) { PyObject * pyConc = PyObject_GetAttrString(obj,name); PyObject * alpha = PyObject_GetAttrString(pyConc,"_PriorConcDP__alpha"); out.alpha = PyFloat_AsDouble(alpha); Py_DECREF(alpha); PyObject * beta = PyObject_GetAttrString(pyConc,"_PriorConcDP__beta"); out.beta = PyFloat_AsDouble(beta); Py_DECREF(beta); PyObject * conc = PyObject_GetAttrString(pyConc,"_PriorConcDP__conc"); out.conc = PyFloat_AsDouble(conc); Py_DECREF(conc); Py_DECREF(pyConc); } // Python -> C++ - given pointers to the State class and a State object... // (The State object should be empty when passed.) 
void StatePyToCpp(PyObject * from, State * to) { // Extract the flags... to->dnrDocInsts = GetObjectBool(from,"dnrDocInsts"); to->dnrCluInsts = GetObjectBool(from,"dnrCluInsts"); to->seperateClusterConc = GetObjectBool(from,"seperateClusterConc"); to->seperateDocumentConc = GetObjectBool(from,"seperateDocumentConc"); to->oneCluster = GetObjectBool(from,"oneCluster"); to->calcBeta = GetObjectBool(from,"calcBeta"); // Extract all the parameters, though only rho and beta get stored in the state - others get used later when filling out other structures... Conc alpha; ConcPyToCpp(from,"alpha",alpha); PyArrayObject * beta = (PyArrayObject*)PyObject_GetAttrString(from,"beta"); to->beta = new float[beta->dimensions[0]]; to->betaSum = 0.0; for (int i=0;i<beta->dimensions[0];i++) { to->beta[i] = Float1D(beta,i); to->betaSum += to->beta[i]; } Py_DECREF(beta); Conc gamma; ConcPyToCpp(from,"gamma",gamma); ConcPyToCpp(from,"rho",to->rho); Conc mu; ConcPyToCpp(from,"mu",mu); // Create the topic objects... PyArrayObject * topicWord = (PyArrayObject*)PyObject_GetAttrString(from,"topicWord"); PyArrayObject * topicUse = (PyArrayObject*)PyObject_GetAttrString(from,"topicUse"); int topicCount = topicWord->dimensions[0]; int wordCount = topicWord->dimensions[1]; to->wordCount = wordCount; ItemRef<Topic,Conc> ** topicArray = new ItemRef<Topic,Conc>*[topicCount]; for (int t=0;t<topicCount;t++) { ItemRef<Topic,Conc> * topic = to->topics.Append(); topicArray[t] = topic; topic->id = t; topic->wc = new int[wordCount]; topic->wcTotal = 0; for (int w=0;w<wordCount;w++) { int val = Int2D(topicWord,t,w); topic->wc[w] = val; topic->wcTotal += val; } topic->IncRef(Int1D(topicUse,t)); } Py_DECREF(topicUse); Py_DECREF(topicWord); PyObject * topicConc = PyObject_GetAttrString(from,"topicConc"); to->topics.Body().alpha = gamma.alpha; to->topics.Body().beta = gamma.beta; to->topics.Body().conc = PyFloat_AsDouble(topicConc); Py_DECREF(topicConc); // Now create the clusters... 
PyObject * cluster = PyObject_GetAttrString(from,"cluster"); PyArrayObject * clusterUse = (PyArrayObject*)PyObject_GetAttrString(from,"clusterUse"); int clusterCount = PyList_Size(cluster); ItemRef<Cluster,Conc> ** clusterArray = new ItemRef<Cluster,Conc>*[clusterCount]; ItemRef<ClusterInst,Conc> *** clusterInstArray = new ItemRef<ClusterInst,Conc>**[clusterCount]; for (int c=0;c<clusterCount;c++) { PyObject * cluEntry = PyList_GetItem(cluster,c); PyArrayObject * cluInst = (PyArrayObject*)PyTuple_GetItem(cluEntry,0); PyObject * cluConc = PyTuple_GetItem(cluEntry,1); // Create the cluster instance... ItemRef<Cluster,Conc> * clu = to->clusters.Append(); clu->id = c; clusterArray[c] = clu; clu->IncRef(Int1D(clusterUse,c)); // Create the clusters topic instances, including filling in the counts... clusterInstArray[c] = new ItemRef<ClusterInst,Conc>*[cluInst->dimensions[0]]; for (int ci=0;ci<cluInst->dimensions[0];ci++) { ItemRef<ClusterInst,Conc> * nci = clu->Append(); nci->id = ci; clusterInstArray[c][ci] = nci; int topic = Int2D(cluInst,ci,0); int users = Int2D(cluInst,ci,1); if (topic!=-1) nci->SetTopic(topicArray[topic],false); nci->IncRef(users); } // Fill in the clusters concentration stuff... clu->Body().alpha = to->rho.alpha; clu->Body().beta = to->rho.beta; clu->Body().conc = PyFloat_AsDouble(cluConc); } Py_DECREF(clusterUse); Py_DECREF(cluster); PyObject * clusterConc = PyObject_GetAttrString(from,"clusterConc"); to->clusters.Body().alpha = mu.alpha; to->clusters.Body().beta = mu.beta; to->clusters.Body().conc = PyFloat_AsDouble(clusterConc); Py_DECREF(clusterConc); // Finally, create the documents... PyObject * docList = PyObject_GetAttrString(from,"doc"); to->docCount = PyList_Size(docList); delete[] to->doc; to->doc = new Document[to->docCount]; for (int d=0;d<to->docCount;d++) { // Get the relevant entities... PyObject * fromDoc = PyList_GetItem(docList,d); Document & toDoc = to->doc[d]; // Setup the link to the cluster... 
PyObject * clusterIndex = PyObject_GetAttrString(fromDoc,"cluster"); int cluIndex = PyInt_AsLong(clusterIndex); Py_DECREF(clusterIndex); if (cluIndex!=-1) toDoc.SetCluster(clusterArray[cluIndex],false); // Prep the documents DP... PyArrayObject * use = (PyArrayObject*)PyObject_GetAttrString(fromDoc,"use"); ItemRef<DocInst,Conc> ** docInstArray = new ItemRef<DocInst,Conc>*[use->dimensions[0]]; for (int di=0;di<use->dimensions[0];di++) { ItemRef<DocInst,Conc> * docInst = toDoc.Append(); docInst->id = di; docInstArray[di] = docInst; int ciIndex = Int2D(use,di,0); int ciUse = Int2D(use,di,1); if (ciIndex!=-1) docInst->SetClusterInst(clusterInstArray[cluIndex][ciIndex],false); docInst->IncRef(ciUse); } Py_DECREF(use); PyObject * docConc = PyObject_GetAttrString(fromDoc,"conc"); toDoc.Body().alpha = alpha.alpha; toDoc.Body().beta = alpha.beta; toDoc.Body().conc = PyFloat_AsDouble(docConc); Py_DECREF(docConc); // Store the samples... PyArrayObject * samples = (PyArrayObject*)PyObject_GetAttrString(fromDoc,"samples"); Sample * sArray = new Sample[samples->dimensions[0]]; for (int s=0;s<samples->dimensions[0];s++) { int di = Int2D(samples,s,0); if (di!=-1) sArray[s].SetDocInst(docInstArray[di],false); sArray[s].SetWord(Int2D(samples,s,1)); } toDoc.SetSamples(samples->dimensions[0],sArray); Py_DECREF(samples); // Clean up... delete[] docInstArray; } Py_DECREF(docList); // Clean up... for (int c=0;c<clusterCount;c++) delete[] clusterInstArray[c]; delete[] clusterInstArray; delete[] clusterArray; delete[] topicArray; } // C++ -> Python - given pointers to the State class and a State object... // Note that this assumes that the State object was created from the PyObject in the first place - if not it will almost certainly break. void StateCppToPy(State * from, PyObject * to) { // Extract beta - it could of been updated... 
npy_intp size[2]; size[0] = from->wordCount; PyArrayObject * beta = (PyArrayObject*)PyArray_SimpleNew(1,size,NPY_FLOAT); for (int i=0;i<from->wordCount;i++) { Float1D(beta,i) = from->beta[i]; } PyObject_SetAttrString(to,"beta",(PyObject*)beta); Py_DECREF(beta); // Update the topics information - replace current... size[0] = from->topics.Size(); size[1] = from->wordCount; PyArrayObject * topicWord = (PyArrayObject*)PyArray_SimpleNew(2,size,NPY_INT); PyArrayObject * topicUse = (PyArrayObject*)PyArray_SimpleNew(1,size,NPY_INT); { ItemRef<Topic,Conc> * targ = from->topics.First(); for (int t=0;t<topicWord->dimensions[0];t++) { targ->id = t; for (int w=0;w<topicWord->dimensions[1];w++) { Int2D(topicWord,t,w) = targ->wc[w]; } Int1D(topicUse,t) = targ->RefCount(); targ = targ->Next(); } } PyObject_SetAttrString(to,"topicUse",(PyObject*)topicUse); PyObject_SetAttrString(to,"topicWord",(PyObject*)topicWord); Py_DECREF(topicUse); Py_DECREF(topicWord); PyObject * topicConc = PyFloat_FromDouble(from->topics.Body().conc); PyObject_SetAttrString(to,"topicConc",topicConc); Py_DECREF(topicConc); // Update the clusters information - replace current... 
size[0] = from->clusters.Size(); PyObject * cluster = PyList_New(size[0]); PyArrayObject * clusterUse = (PyArrayObject*)PyArray_SimpleNew(1,size,NPY_INT); { ItemRef<Cluster,Conc> * clu = from->clusters.First(); for (int c=0;c<from->clusters.Size();c++) { clu->id = c; PyObject * pair = PyTuple_New(2); PyList_SetItem(cluster,c,pair); size[0] = clu->Size(); size[1] = 2; PyArrayObject * clusterInstance = (PyArrayObject*)PyArray_SimpleNew(2,size,NPY_INT); PyTuple_SetItem(pair, 0, (PyObject*)clusterInstance); PyTuple_SetItem(pair, 1, PyFloat_FromDouble(clu->Body().conc)); ItemRef<ClusterInst,Conc> * cluInst = clu->First(); for (int ci=0;ci<clu->Size();ci++) { cluInst->id = ci; if (cluInst->GetTopic()) Int2D(clusterInstance,ci,0) = cluInst->GetTopic()->id; else Int2D(clusterInstance,ci,0) = -1; Int2D(clusterInstance,ci,1) = cluInst->RefCount(); cluInst = cluInst->Next(); } Int1D(clusterUse,c) = clu->RefCount(); clu = clu->Next(); } } PyObject_SetAttrString(to,"clusterUse",(PyObject*)clusterUse); PyObject_SetAttrString(to,"cluster",cluster); Py_DECREF(clusterUse); Py_DECREF(cluster); PyObject * clusterConc = PyFloat_FromDouble(from->clusters.Body().conc); PyObject_SetAttrString(to,"clusterConc",clusterConc); Py_DECREF(clusterConc); // Update the documents information - keep it simple by just overwriting cluster and sample assignments whilst replacing the per-document DP... PyObject * docList = PyObject_GetAttrString(to,"doc"); for (int d=0;d<from->docCount;d++) { Document & fromDoc = from->doc[d]; PyObject * toDoc = PyList_GetItem(docList,d); // Set cluster... int clusterID = -1; if (fromDoc.GetCluster()) clusterID = fromDoc.GetCluster()->id; PyObject * cluID = PyInt_FromLong(clusterID); PyObject_SetAttrString(toDoc,"cluster",cluID); Py_DECREF(cluID); // Replace DP... 
size[0] = fromDoc.Size(); size[1] = 2; PyArrayObject * use = (PyArrayObject*)PyArray_SimpleNew(2,size,NPY_INT); ItemRef<DocInst,Conc> * docInst = fromDoc.First(); for (int di=0;di<size[0];di++) { docInst->id = di; if (docInst->GetClusterInst()) Int2D(use,di,0) = docInst->GetClusterInst()->id; else Int2D(use,di,0) = -1; Int2D(use,di,1) = docInst->RefCount(); docInst = docInst->Next(); } PyObject_SetAttrString(toDoc,"use",(PyObject*)use); Py_DECREF(use); PyObject * conc = PyFloat_FromDouble(fromDoc.Body().conc); PyObject_SetAttrString(toDoc,"conc",conc); Py_DECREF(conc); // Update samples DP assignments... PyArrayObject * samples = (PyArrayObject*)PyObject_GetAttrString(toDoc,"samples"); for (int s=0;s<fromDoc.SampleCount();s++) { Sample & sam = fromDoc.GetSample(s); if (sam.GetDocInst()) Int2D(samples,s,0) = sam.GetDocInst()->id; else Int2D(samples,s,0) = -1; } Py_DECREF(samples); } Py_DECREF(docList); } """ class TestDSLink(unittest.TestCase): """Test code for the data structure.""" def test_compile(self): code = start_cpp(dual_hdp_ds_link) + """ """ weave.inline(code, support_code=dual_hdp_ds_link) # If this file is run do the unit tests... if __name__ == '__main__': unittest.main()
Python
# Copyright 2011 Tom SF Haines # Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at # http://www.apache.org/licenses/LICENSE-2.0 # Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. import time import multiprocessing as mp import multiprocessing.synchronize # To make sure we have all the functionality. from solve_shared import Params, State from solve_weave import gibbs_run from model import DocModel def gibbs_run_wrap(state, doneIters): """Wrapper around gibbs_run to make it suitable for multiprocessing.""" def next(amount = 1): doneIters.value += amount gibbs_run(state, next) return state def gibbs_all_mp(state, callback = None): """Identical to gibbs_all, except it does each run in a different process to fully stress the computer.""" # Need the parameters object so we do the correct amount of work... params = state.getParams() # Create a pool of worker processes... pool = mp.Pool() # Create a value for sub-processes to report back their progress with... manager = mp.Manager() doneIters = manager.Value('i',0) totalIters = params.runs * (max((params.burnIn,params.lag)) + params.samples + (params.samples-1)*params.lag) # Create a callback for when a job completes... def onComplete(s): state.absorbClone(s) # Create all the jobs, wait for their completion, report progress... try: jobs = [] for r in xrange(params.runs): jobs.append(pool.apply_async(gibbs_run_wrap,(State(state),doneIters), callback = onComplete)) finally: # Close the pool and wait for all the jobs to complete... 
pool.close() while len(jobs)!=0: if jobs[0].ready(): del jobs[0] continue time.sleep(0.01) if callback!=None: callback(doneIters.value,totalIters) pool.join() def gibbs_doc_mp(model, doc, params = None, callback = None): """Runs Gibbs iterations on a single document, by sampling with a prior constructed from each sample in the given Model. params applies to each sample, so should probably be much more limited than usual - the default if its undefined is to use 1 run and 1 sample and a burn in of only 500. Returns a DocModel with all the relevant samples in.""" # Initialisation stuff - handle params, create the state and the DocModel object, plus a reporter... if params==None: params = Params() params.runs = 1 params.samples = 1 params.burnIn = 500 state = State(doc, params) dm = DocModel() # Create a pool of worker processes... pool = mp.Pool() # Create a value for sub-processes to report back their progress with... manager = mp.Manager() doneIters = manager.Value('i',0) totalIters = model.sampleCount() * params.runs * (params.burnIn + params.samples + (params.samples-1)*params.lag) # Create a callback for when a job completes... def onComplete(s): dm.addFrom(s.getModel()) # Create all the jobs, wait for their completion, report progress... try: jobs = [] for sample in model.sampleList(): tempState = State(state) tempState.setGlobalParams(sample) tempState.addPrior(sample) jobs.append(pool.apply_async(gibbs_run_wrap,(tempState,doneIters), callback = onComplete)) finally: # Close the pool and wait for all the jobs to complete... pool.close() while len(jobs)!=0: if jobs[0].ready(): del jobs[0] continue time.sleep(0.01) if callback!=None: callback(doneIters.value,totalIters) pool.join() # Return... return dm
Python
# Copyright (c) 2011, Tom SF Haines # All rights reserved. # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. # * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. from utils.start_cpp import start_cpp # Code for sampling from various distributions, including some very specific situations involving Dirichlet processes... sampling_code = start_cpp() + """ #ifndef SAMPLING_CODE #define SAMPLING_CODE #include <stdlib.h> #include <math.h> const double gamma_approx = 32.0; // Threshold between the two methods of doing a gamma draw. // Returns a sample from the natural numbers [0,n)... int sample_nat(int n) { return lrand48()%n; } // Returns a sample from [0.0,1.0)... double sample_uniform() { return drand48(); //return double(random())/(double(RAND_MAX)+1.0); } // Samples from a normal distribution with a mean of 0 and a standard deviation of 1... 
double sample_standard_normal() { double u = 1.0-sample_uniform(); double v = 1.0-sample_uniform(); return sqrt(-2.0*log(u)) * cos(2.0*M_PI*v); } // Samples from a normal distribution with the given mean and standard deviation... double sample_normal(double mean, double sd) { return mean + sd*sample_standard_normal(); } // Samples from the Gamma distribution, base version that has no scaling parameter... /*double sample_gamma(double alpha) { // Check if the alpha value is high enough to approximate via a normal distribution... if (alpha>gamma_approx) { while (true) { double ret = sample_normal(alpha, sqrt(alpha)); if (ret<0.0) continue; return ret; } } // First do the integer part of gamma(alpha)... double ret = 0.0; // 1.0 while (alpha>=1.0) { alpha -= 1.0; //ret /= 1.0 - sample_uniform(); ret -= log(1.0-sample_uniform()); } //ret = log(ret); // Now do the remaining fractional part and sum it in - uses rejection sampling... if (alpha>1e-4) { while (true) { double u1 = 1.0 - sample_uniform(); double u2 = 1.0 - sample_uniform(); double u3 = 1.0 - sample_uniform(); double frac, point; if (u1<=(M_E/(M_E+alpha))) { frac = pow(u2,1.0/alpha); point = u3*pow(frac,alpha-1.0); } else { frac = 1.0 - log(u2); point = u3*exp(-frac); } if (point<=(pow(frac,alpha-1.0)*exp(-frac))) { ret += frac; break; } } } // Finally return... return ret; }*/ // As above, but faster... double sample_gamma(double alpha) { // Check if the alpha value is high enough to approximate via a normal distribution... if (alpha>gamma_approx) { while (true) { double ret = sample_normal(alpha, sqrt(alpha)); if (ret<0.0) continue; return ret; } } // If alpha is one, within tolerance, just use an exponential distribution... if (fabs(alpha-1.0)<1e-4) { return -log(1.0-sample_uniform()); } if (alpha>1.0) { // If alpha is 1 or greater use the Cheng/Feast method... 
while (true) { double u1 = sample_uniform(); double u2 = sample_uniform(); double v = ((alpha - 1.0/(6.0*alpha))*u1) / ((alpha-1.0)*u2); double lt2 = 2.0*(u2-1.0)/(alpha-1) + v + 1.0/v; if (lt2<=2.0) { return (alpha-1.0)*v; } double lt1 = 2.0*log(u2)/(alpha-1.0) - log(v) + v; if (lt1<=1.0) { return (alpha-1.0)*v; } } } else { // If alpha is less than 1 use a rejection sampling method... while (true) { double u1 = 1.0 - sample_uniform(); double u2 = 1.0 - sample_uniform(); double u3 = 1.0 - sample_uniform(); double frac, point; if (u1<=(M_E/(M_E+alpha))) { frac = pow(u2,1.0/alpha); point = u3*pow(frac,alpha-1.0); } else { frac = 1.0 - log(u2); point = u3*exp(-frac); } if (point<=(pow(frac,alpha-1.0)*exp(-frac))) { return frac; break; } } } } // Samples from the Gamma distribution, version that has a scaling parameter... double sample_gamma(double alpha, double beta) { return sample_gamma(alpha)/beta; } // Samples from the Beta distribution... double sample_beta(double alpha, double beta) { double g1 = sample_gamma(alpha); double g2 = sample_gamma(beta); return g1 / (g1 + g2); } #endif """
Python
# Copyright (c) 2011, Tom SF Haines # All rights reserved. # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. # * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. from utils.python_obj_cpp import python_obj_code from linked_list_cpp import linked_list_gc_code from utils.gamma_cpp import gamma_code from sampling_cpp import sampling_code from conc_cpp import conc_code from dir_est_cpp import dir_est_code # Put all the suplied code together into one easy to use include... dp_utils_code = python_obj_code + linked_list_gc_code + gamma_code + sampling_code + conc_code + dir_est_code
Python
# Copyright (c) 2011, Tom SF Haines
# All rights reserved.

# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:

# * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.

# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.


from utils.start_cpp import start_cpp


# C++ support code (for scipy.weave) that Gibbs-samples the concentration
# parameter of a Dirichlet process given a Gamma prior, following the
# auxiliary-variable scheme of Escobar & West.
# NOTE: this string calls sample_beta, sample_uniform and sample_gamma, which
# are not defined here - sampling_code (sampling_cpp.py) must be included in
# the same support_code block before this fragment.
conc_code = start_cpp() + """
// This funky little function is used to resample the concentration parameter of a Dirichlet process, using the previous parameter - allows this parameter to be Gibbs sampled. Also works for any level of a HDP, due to the limited interactions.
// Parameters are:
// pcp - previous concentration parameter.
// n - number of samples taken from the Dirichlet process
// k - number of discretly different samples, i.e. table count in the Chinese restaurant process.
// prior_alpha - alpha value of the Gamma prior on the concentration parameter.
// prior_beta - beta value of the Gamma prior on the concentration parameter.
double sample_dirichlet_proc_conc(double pcp, double n, double k, double prior_alpha = 1.01, double prior_beta = 0.01)
{
 if ((n<(1.0-1e-6))||(k<(2.0-1e-6)))
 {
  return pcp; // Doesn't work in this case, so just repeat.
 }

 double nn = sample_beta(pcp+1.0, n);
 double log_nn = log(nn);

 double f_alpha = prior_alpha + k;
 double f_beta = prior_beta - log_nn;

 double pi_n_mod = (f_alpha - 1.0) / (n * f_beta);
 double r = sample_uniform();
 double r_mod = r / (1.0 - r);
 if (r_mod>=pi_n_mod) f_alpha -= 1.0;

 double ret = sample_gamma(f_alpha, f_beta);
 if (ret<1e-3) ret = 1e-3;
 return ret;
}


// Class to represent the concentration parameter associated with a DP - consists of the prior and the previous/current value...
struct Conc
{
 float alpha; // Parameter for Gamma prior.
 float beta; // "
 float conc; // Previously sampled concentration value - needed for next sample, and for output/use.

 // Resamples the concentration value, assuming only a single DP is using it. n = number of samples from DP, k = number of unique samples, i.e. respectivly RefTotal() and Size() for a ListRef.
 void ResampleConc(int n, int k)
 {
  conc = sample_dirichlet_proc_conc(conc, n, k, alpha, beta);
  if (conc<1e-3) conc = 1e-3;
 }
};


// This class is the generalisation of the above for when multiple Dirichlet processes share a single concentration parameter - again allows a new concentration parameter to be drawn given the previous one and a Gamma prior, but takes multiple pairs of sample count/discrete sample counts, hence the class interface to allow it to accumilate the relevant information.
class SampleConcDP
{
 public:
  SampleConcDP():f_alpha(1.0),f_beta(1.0),prev_conc(1.0) {}
  ~SampleConcDP() {}

  // Sets the prior and resets the entire class....
  void SetPrior(double alpha, double beta)
  {
   f_alpha = alpha;
   f_beta = beta;
  }

  // Set the previous concetration parameter - must be called before any DP stats are added...
  void SetPrevConc(double prev)
  {
   prev_conc = prev;
  }

  // Call once for each DP that is using the concentration parameter...
  // (n is the number of samples drawn, k the number of discretly different samples.)
  void AddDP(double n, double k)
  {
   if (k>1.0)
   {
    double s = 0.0;
    if (sample_uniform()>(1.0/(1.0+n/prev_conc))) s = 1.0;
    double w = sample_beta(prev_conc+1.0,n);

    f_alpha += k - s;
    f_beta -= log(w);
   }
  }

  // Once all DP have been added call this to draw a new concentration value...
  double Sample()
  {
   double ret = sample_gamma(f_alpha, f_beta);
   if (ret<1e-3) ret = 1e-3;
   return ret;
  }

 private:
  double f_alpha;
  double f_beta;
  double prev_conc;
};
"""
# NOTE(review): SetPrior's comment claims it "resets the entire class" but it
# does not touch prev_conc - callers must also call SetPrevConc; confirm this
# is intentional before relying on SetPrior alone between sweeps.
Python
# Copyright (c) 2011, Tom SF Haines
# All rights reserved.

# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:

# * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.

# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.


from scipy import weave
import unittest
from utils.start_cpp import start_cpp


# Defines code for a doubly linked list - simple but works as expected... (Includes its data via templated inheritance - a little strange, but neat and saves on memory thrashing.)
linked_list_code = start_cpp() + """
// Predefinitions...
template <typename ITEM, typename BODY> class Item;
template <typename ITEM, typename BODY> class List;

// Useful default...
struct Empty {};

// Item for the linked list data structure - simply inherits extra data stuff...
template <typename ITEM = Empty, typename BODY = Empty>
class Item : public ITEM
{
 public:
  Item(List<ITEM,BODY> * head):head(head),next(this),prev(this) {}
  ~Item() {}

  Item<ITEM,BODY> * Next() {return next;}
  Item<ITEM,BODY> * Prev() {return prev;}
  List<ITEM,BODY> * GetList() {return head;}

  bool Valid() {return static_cast< Item<ITEM,BODY>* >(head)!=this;}
  bool IsDummy() {return static_cast< Item<ITEM,BODY>* >(head)==this;}

  Item<ITEM,BODY> * PreNew() // Adds a new item before this one.
  {
   Item<ITEM,BODY> * ret = new Item<ITEM,BODY>(head);
   head->size += 1;
   ret->prev = this->prev;
   ret->next = this;
   ret->prev->next = ret;
   ret->next->prev = ret;
   return ret;
  }

  Item<ITEM,BODY> * PostNew() // Adds a new item after this one.
  {
   Item<ITEM,BODY> * ret = new Item<ITEM,BODY>(head);
   head->size += 1;
   ret->prev = this;
   ret->next = this->next;
   ret->prev->next = ret;
   ret->next->prev = ret;
   return ret;
  }

  void Suicide() // Removes this node from its list and makes it delete itself.
  {
   head->size -= 1;
   next->prev = prev;
   prev->next = next;
   delete this;
  }

 protected:
  List<ITEM,BODY> * head;
  Item<ITEM,BODY> * next;
  Item<ITEM,BODY> * prev;
};

// Simple totally inline doubly linked list structure, where
template <typename ITEM = Empty, typename BODY = Empty>
class List : protected Item<ITEM,BODY>
{
 public:
  List():Item<ITEM,BODY>(this),size(0) {}
  ~List()
  {
   while(this->size!=0)
   {
    this->next->Suicide();
   }
  }

  Item<ITEM,BODY> * Append() {return this->PreNew();}
  Item<ITEM,BODY> * Prepend() {return this->PostNew();}

  Item<ITEM,BODY> * First() {return this->next;}
  Item<ITEM,BODY> * Last() {return this->prev;}

  int Size() {return this->size;}
  BODY & Body() {return body;}

  Item<ITEM,BODY> * Index(int i)
  {
   Item<ITEM,BODY> * ret = this->next;
   while(i>0)
   {
    ret = ret->next;
    i -= 1;
   }
   return ret;
  }

 protected:
  friend class Item<ITEM,BODY>;
  int size;
  BODY body;
};
"""



class TestLinkedList(unittest.TestCase):
  """Test code for the linked list."""

  def test_compile(self):
    # Just verify the support code compiles on its own.
    # FIX: was start_cpp(linked_list)/support_code=linked_list - 'linked_list'
    # is not defined anywhere; the string is named linked_list_code, so these
    # tests previously died with a NameError.
    code = start_cpp(linked_list_code) + """
    """
    weave.inline(code, support_code=linked_list_code)

  def test_size(self):
    # Size() must track Append() and Suicide().
    code = start_cpp(linked_list_code) + """
    int errors = 0;

    List<> wibble;
    if (wibble.Size()!=0) errors += 1;

    Item<> * it = wibble.Append();
    if (wibble.Size()!=1) errors += 1;

    it->Suicide();
    if (wibble.Size()!=0) errors += 1;

    return_val = errors;
    """
    errors = weave.inline(code, support_code=linked_list_code)
    self.assertEqual(errors, 0)

  def test_loop(self):
    # Items carry their payload via templated inheritance - verify ordering.
    extra = """
    struct Number
    {
     int num;
    };
    """
    code = start_cpp(linked_list_code+extra) + """
    int errors = 0;

    List<Number> wibble;
    for (int i=0;i<10;i++)
    {
     Item<Number> * it = wibble.Append();
     it->num = i;
    }
    if (wibble.Size()!=10) errors += 1;

    int i = 0;
    for (Item<Number> * targ = wibble.First(); targ->Valid(); targ = targ->Next())
    {
     if (i!=targ->num) errors += 1;
     i += 1;
    }

    return_val = errors;
    """
    errors = weave.inline(code, support_code=linked_list_code+extra)
    self.assertEqual(errors, 0)



# Code for a linked list with garbage collection - each entry has a reference count, and it also allows access of the reference counts and the total number of reference counts for all entrys. This structure is very useful for modelling a Dirichlet process as a direct consequence, as it has all its properties...
linked_list_gc_code = linked_list_code + start_cpp() + """
// Predefinitions...
template <typename ITEM, typename BODY> class ItemRef;
template <typename ITEM, typename BODY> class ListRef;

// Item for the linked list data structure - simply inherits extra data stuff...
template <typename ITEM = Empty, typename BODY = Empty>
class ItemRef : public ITEM
{
 public:
  ItemRef(ListRef<ITEM,BODY> * head):head(head),next(this),prev(this),refCount(0) {}
  ~ItemRef() {}

  ItemRef<ITEM,BODY> * Next() {return next;}
  ItemRef<ITEM,BODY> * Prev() {return prev;}
  ListRef<ITEM,BODY> * GetList() {return head;}

  bool Valid() {return static_cast< ItemRef<ITEM,BODY>* >(head)!=this;}
  bool IsDummy() {return static_cast< ItemRef<ITEM,BODY>* >(head)==this;}

  ItemRef<ITEM,BODY> * PreNew() // Adds a new item before this one.
  {
   ItemRef<ITEM,BODY> * ret = new ItemRef<ITEM,BODY>(head);
   head->size += 1;
   ret->prev = this->prev;
   ret->next = this;
   ret->prev->next = ret;
   ret->next->prev = ret;
   return ret;
  }

  ItemRef<ITEM,BODY> * PostNew() // Adds a new item after this one.
  {
   ItemRef<ITEM,BODY> * ret = new ItemRef<ITEM,BODY>(head);
   head->size += 1;
   ret->prev = this;
   ret->next = this->next;
   ret->prev->next = ret;
   ret->next->prev = ret;
   return ret;
  }

  void Suicide() // Removes this node from its list and makes it delete itself.
  {
   head->size -= 1;
   head->refTotal -= refCount;
   next->prev = prev;
   prev->next = next;
   delete this;
  }

  void IncRef(int amount = 1)
  {
   this->refCount += amount;
   head->refTotal += amount;
  }

  void DecRef(int amount = 1) // If the ref count reaches zero the object will delete itself.
  {
   this->refCount -= amount;
   head->refTotal -= amount;
   if (refCount<=0) this->Suicide();
  }

  int RefCount() {return refCount;}

 protected:
  ListRef<ITEM,BODY> * head;
  ItemRef<ITEM,BODY> * next;
  ItemRef<ITEM,BODY> * prev;
  int refCount;
};

// Simple totally inline doubly linked list structure...
template <typename ITEM = Empty, typename BODY = Empty>
class ListRef : protected ItemRef<ITEM,BODY>
{
 public:
  ListRef():ItemRef<ITEM,BODY>(this),size(0),refTotal(0) {}
  ~ListRef()
  {
   while(this->size!=0)
   {
    this->next->Suicide();
   }
  }

  ItemRef<ITEM,BODY> * Append() {return this->PreNew();}
  ItemRef<ITEM,BODY> * Prepend() {return this->PostNew();}

  ItemRef<ITEM,BODY> * First() {return this->next;}
  ItemRef<ITEM,BODY> * Last() {return this->prev;}

  int Size() {return this->size;}
  int RefTotal() {return this->refTotal;}
  BODY & Body() {return body;}

  ItemRef<ITEM,BODY> * Index(int i)
  {
   ItemRef<ITEM,BODY> * ret = this->next;
   while(i>0)
   {
    ret = ret->Next();
    i -= 1;
   }
   return ret;
  }

 protected:
  friend class ItemRef<ITEM,BODY>;
  int size;
  int refTotal;
  BODY body;
};
"""



class TestLinkedListGC(unittest.TestCase):
  """Test code for the linked list with garbage collection."""

  def test_compile(self):
    # FIX: was start_cpp(linked_list_gc)/support_code=linked_list_gc - that
    # name does not exist; the string is linked_list_gc_code (NameError).
    code = start_cpp(linked_list_gc_code) + """
    """
    weave.inline(code, support_code=linked_list_gc_code)

  def test_size_gc(self):
    # RefTotal() must track IncRef/DecRef, and hitting zero must delete the node.
    code = start_cpp(linked_list_gc_code) + """
    int errors = 0;

    ListRef<> wibble;
    if (wibble.Size()!=0) errors += 1;

    ItemRef<> * it = wibble.Append();
    if (wibble.Size()!=1) errors += 1;
    if (wibble.RefTotal()!=0) errors += 1;

    it->IncRef();
    it->IncRef();
    if (it->RefCount()!=2) errors += 1;
    if (wibble.RefTotal()!=2) errors += 1;

    it->DecRef();
    it->DecRef();
    if (wibble.RefTotal()!=0) errors += 1;
    if (wibble.Size()!=0) errors += 1;

    return_val = errors;
    """
    errors = weave.inline(code, support_code=linked_list_gc_code)
    self.assertEqual(errors, 0)



# If this file is run do the unit tests...
if __name__ == '__main__':
  unittest.main()
Python
# Copyright (c) 2011, Tom SF Haines
# All rights reserved.

# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:

# * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.

# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.


from utils.start_cpp import start_cpp


# Provides code for estimating the Dirichlet distribution from which a number of multinomial distributions were drawn from, given those multinomials...
# Requires digamma/trigamma from gamma_code to be in the same support_code block.
#
# BUG FIX: the NaN/inf guard in both Update overloads previously read
# (dirSum==dirSum), which is true for every *non*-NaN value - so the starting
# vector was reset to all-ones on every single Newton iteration, discarding
# both the caller's warm start and all progress. The NaN test is x!=x
# (IEEE 754: NaN compares unequal to itself), as the adjacent comment intends.
dir_est_code = start_cpp() + """
// Defined as a class - you then add each multinomial before requesting a maximum likelihood update of the Dirichlet distribution. It uses Newton-Raphson iterations, and so needs a starting point - you provide a vector to be updated, which can of course save time if it is already close...
class EstimateDir
{
 public:
  EstimateDir(int vecSize):size(vecSize), samples(0), meanLog(new double[vecSize]), grad(new double[vecSize]), qq(new double[vecSize])
  {
   for (int i=0;i<vecSize;i++) meanLog[i] = 0.0;
  }
  ~EstimateDir() {delete[] meanLog; delete[] grad; delete[] qq;}

  void Add(float * mn)
  {
   samples += 1;
   for (int i=0;i<size;i++)
   {
    meanLog[i] += (log(mn[i]) - meanLog[i]) / double(samples);
   }
  }

  void Add(double * mn)
  {
   samples += 1;
   for (int i=0;i<size;i++)
   {
    meanLog[i] += (log(mn[i]) - meanLog[i]) / double(samples);
   }
  }

  void Update(float * dir, int maxIter = 64, float epsilon = 1e-3, float cap = 1e6)
  {
   for (int iter=0;iter<maxIter;iter++)
   {
    // We will need the sum of the dir vector...
    double dirSum = 0.0;
    for (int i=0;i<size;i++) {dirSum += dir[i];}

    // Check for Nan/inf - if so reset to basic value...
    if ((dirSum!=dirSum) || (dirSum>1e100))
    {
     for (int i=0;i<size;i++) dir[i] = 1.0;
     dirSum = size;
    }

    // Safety - don't let it get too precise, that probably means its being crazy (Can happen with too few samples.)...
    if (dirSum>cap)
    {
     float mult = cap / dirSum;
     for (int i=0;i<size;i++) {dir[i] *= mult;}
     dirSum = cap;
    }

    // Calculate the gradiant and the Hessian 'matrix', except its actually diagonal...
    double digDirSum = digamma(dirSum);
    for (int i=0;i<size;i++)
    {
     grad[i] = samples * (digDirSum - digamma(dir[i]) + meanLog[i]);
     qq[i] = -samples * trigamma(dir[i]);
    }

    // Calculate b...
    double b = 0.0;
    double bDiv = 1.0 / (samples*trigamma(dirSum));
    for (int i=0;i<size;i++)
    {
     b += grad[i]/qq[i];
     bDiv += 1.0/qq[i];
    }
    b /= bDiv;

    // Do the update, sum the change...
    double change = 0.0;
    for (int i=0;i<size;i++)
    {
     double delta = (grad[i] - b) / qq[i];
     dir[i] -= delta;
     if (dir[i]<1e-3) dir[i] = 1e-3;
     change += fabs(delta);
    }

    // Break if no change...
    if (change<epsilon) break;
   }
  }

  void Update(double * dir, int maxIter = 64, double epsilon = 1e-6, double cap = 1e6)
  {
   for (int iter=0;iter<maxIter;iter++)
   {
    // We will need the sum of the dir vector...
    double dirSum = 0.0;
    for (int i=0;i<size;i++) {dirSum += dir[i];}

    // Check for Nan/inf - if so reset to basic value...
    if ((dirSum!=dirSum) || (dirSum>1e100))
    {
     for (int i=0;i<size;i++) dir[i] = 1.0;
     dirSum = size;
    }

    // Safety - don't let it get too precise, that probably means its being crazy (Can happen with too few samples.)...
    if (dirSum>cap)
    {
     float mult = cap / dirSum;
     for (int i=0;i<size;i++) {dir[i] *= mult;}
     dirSum = cap;
    }

    // Calculate the gradiant and the Hessian 'matrix', except its actually diagonal...
    double digDirSum = digamma(dirSum);
    for (int i=0;i<size;i++)
    {
     grad[i] = samples * (digDirSum - digamma(dir[i]) + meanLog[i]);
     qq[i] = -samples * trigamma(dir[i]);
    }

    // Calculate b...
    double b = 0.0;
    double bDiv = 1.0 / (samples*trigamma(dirSum));
    for (int i=0;i<size;i++)
    {
     b += grad[i]/qq[i];
     bDiv += 1.0/qq[i];
    }
    b /= bDiv;

    // Do the update, sum the change...
    double change = 0.0;
    for (int i=0;i<size;i++)
    {
     double delta = (grad[i] - b) / qq[i];
     dir[i] -= delta;
     change += fabs(delta);
    }

    // Break if no change...
    if (change<epsilon) break;
   }
  }

 private:
  int size;
  int samples;
  double * meanLog; // Vector of length size, contains the component-wise mean of the log of each of the samples - consititutes the sufficient statistics required to do the update.
  double * grad; // Temporary during update.
  double * qq; // Temporary during update.
};
"""
Python
# Copyright (c) 2012, Tom SF Haines
# All rights reserved.

# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:

# * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.

# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.


# NOTE(review): this module imports start_cpp without the utils. prefix used
# by the sibling modules - presumably it lives in the same directory as
# start_cpp.py; confirm before moving it.
from start_cpp import start_cpp


# C++ support code (for scipy.weave) providing small dense-matrix helpers:
# an element swap, a recursive cofactor-expansion determinant, and a
# Gauss-Jordan matrix inverse with partial pivoting. All operate on raw
# pointers with an optional row stride.
matrix_code = start_cpp() + """
#ifndef MATRIX_CODE
#define MATRIX_CODE

template <typename T>
inline void MemSwap(T * lhs, T * rhs, int count = 1)
{
 while(count!=0)
 {
  T t = *lhs;
  *lhs = *rhs;
  *rhs = t;
  ++lhs;
  ++rhs;
  --count;
 }
}

// Calculates the determinant - you give it a pointer to the first elment of the array, and its size (It must be square), plus its stride, which would typically be identical to size, which is the default.
template <typename T>
inline T Determinant(T * pos, int size, int stride = -1)
{
 if (stride==-1) stride = size;
 if (size==1) return pos[0];
 else
 {
  if (size==2) return pos[0]*pos[stride+1] - pos[1]*pos[stride];
  else
  {
   T ret = 0.0;
   for (int i=0; i<size; i++)
   {
    if (i!=0) MemSwap(&pos[0], &pos[stride*i], size-1);
    T sub = Determinant(&pos[stride], size-1, stride) * pos[stride*i + size-1];
    if ((i+size)%2) ret += sub;
    else ret -= sub;
   }
   for (int i=1; i<size; i++)
   {
    MemSwap(&pos[(i-1)*stride], &pos[i*stride], size-1);
   }
   return ret;
  }
 }
}

// Inverts a square matrix, will fail on singular and very occasionally on
// non-singular matrices, returns true on success. Uses Gauss-Jordan elimination
// with partial pivoting.
// in is the input matrix, out the output matrix, just be aware that the input matrix is trashed.
// You have to provide its size (Its square, obviously.), and optionally a stride if different from size.
template <typename T>
inline bool Inverse(T * in, T * out, int size, int stride = -1)
{
 if (stride==-1) stride = size;

 for (int r=0; r<size; r++)
 {
  for (int c=0; c<size; c++)
  {
   out[r*stride + c] = (c==r)?1.0:0.0;
  }
 }

 for (int r=0; r<size; r++)
 {
  // Find largest pivot and swap in, fail if best we can get is 0...
  T max = in[r*stride + r];
  int index = r;
  for (int i=r+1; i<size; i++)
  {
   if (fabs(in[i*stride + r])>fabs(max))
   {
    max = in[i*stride + r];
    index = i;
   }
  }
  if (index!=r)
  {
   MemSwap(&in[index*stride], &in[r*stride], size);
   MemSwap(&out[index*stride], &out[r*stride], size);
  }
  if (fabs(max-0.0)<1e-6) return false;

  // Divide through the entire row...
  max = 1.0/max;
  in[r*stride + r] = 1.0;
  for (int i=r+1; i<size; i++) in[r*stride + i] *= max;
  for (int i=0; i<size; i++) out[r*stride + i] *= max;

  // Row subtract to generate 0's in the current column, so it matches an identity matrix...
  for (int i=0; i<size; i++)
  {
   if (i==r) continue;
   T factor = in[i*stride + r];
   in[i*stride + r] = 0.0;
   for (int j=r+1; j<size; j++) in[i*stride + j] -= factor * in[r*stride + j];
   for (int j=0; j<size; j++) out[i*stride + j] -= factor * out[r*stride + j];
  }
 }

 return true;
}

#endif
"""
Python
# -*- coding: utf-8 -*-

# Copyright (c) 2010, Tom SF Haines
# All rights reserved.

# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:

# * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.

# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.


import sys
import time



class ProgBar:
  """Simple console progress bar class.

  Note that object creation and destruction matter, as they indicate when processing starts and when it stops: construction prints the scale line and starts the clock; destruction completes the bar and prints the elapsed time."""

  def __init__(self, width = 60, onCallback = None):
    """width is the length of the bar in characters; onCallback, if provided, is a zero-argument callable invoked on every callback() call."""
    self.start = time.time()
    self.fill = 0                  # How many bar characters have been printed so far.
    self.width = width
    self.onCallback = onCallback

    # Print the scale line so the user can see how long a full bar is.
    sys.stdout.write(('_'*self.width)+'\n')
    sys.stdout.flush()

  def __del__(self):
    # Finish the bar (draw any remaining characters) and report wall-clock time.
    self.end = time.time()
    self.__show(self.width)
    sys.stdout.write('\nDone - '+str(self.end-self.start)+' seconds\n\n')
    sys.stdout.flush()

  def callback(self, nDone, nToDo):
    """Hand this into the callback of methods to get a progress bar - it works by users repeatedly calling it to indicate how many units of work they have done (nDone) out of the total number of units required (nToDo)."""
    if self.onCallback:
      self.onCallback()

    # Convert progress into a target bar length, clamped to the bar width;
    # only ever draw forwards - a regression in nDone is silently ignored.
    target = int(float(self.width) * float(nDone) / float(nToDo))
    if target > self.width:
      target = self.width
    if target > self.fill:
      self.__show(target)

  def __show(self, target):
    # Emit just the characters that have not been drawn yet.
    delta = target - self.fill
    sys.stdout.write('|' * delta)
    sys.stdout.flush()
    self.fill = target
Python
# Copyright (c) 2012, Tom SF Haines
# All rights reserved.

# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:

# * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.

# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.


# NOTE(review): this module is Python 2 code - it relies on the builtin
# reduce(), on two-iterable map() returning a list, and on
# inspect.getargspec(); it will not run unmodified under Python 3.
import pydoc
import inspect



class DocGen:
  """A helper class that is used to generate documentation for the system. Outputs multiple formats simultaneously, specifically html for local reading with a webbrowser and the markup used by the wiki system on Google code."""

  def __init__(self, name, title = None, summary = None):
    """name is the module name - primarilly used for the file names. title is the title used as applicable - if not provide it just uses the name. summary is an optional line to go below the title.

    Opens <name>.html and <name>.wiki for writing in the current directory; the files are only completed and closed when this object is destroyed."""
    if title==None: title = name
    if summary==None: summary = title

    self.doc = pydoc.HTMLDoc()

    # The html output - header now, sections accumulated and flushed in __del__...
    self.html = open('%s.html'%name,'w')
    self.html.write('<html>\n')
    self.html.write('<head>\n')
    self.html.write('<title>%s</title>\n'%title)
    self.html.write('</head>\n')
    self.html.write('<body>\n')

    self.html_variables = ''
    self.html_functions = ''
    self.html_classes = ''

    # The Google-code wiki output, built the same way...
    self.wiki = open('%s.wiki'%name,'w')
    self.wiki.write('#summary %s\n\n'%summary)
    self.wiki.write('= %s= \n\n'%title)

    self.wiki_variables = ''
    self.wiki_functions = ''
    self.wiki_classes = ''

  def __del__(self):
    # Flush the accumulated sections (in a fixed order) and close both files.
    if self.html_variables!='':
      self.html.write(self.doc.bigsection('Synonyms', '#ffffff', '#8d50ff', self.html_variables))
    if self.html_functions!='':
      self.html.write(self.doc.bigsection('Functions', '#ffffff', '#eeaa77', self.html_functions))
    if self.html_classes!='':
      self.html.write(self.doc.bigsection('Classes', '#ffffff', '#ee77aa', self.html_classes))

    self.html.write('</body>\n')
    self.html.write('</html>\n')
    self.html.close()

    if self.wiki_variables!='':
      self.wiki.write('= Variables =\n\n')
      self.wiki.write(self.wiki_variables)
      self.wiki.write('\n')

    if self.wiki_functions!='':
      self.wiki.write('= Functions =\n\n')
      self.wiki.write(self.wiki_functions)
      self.wiki.write('\n')

    if self.wiki_classes!='':
      self.wiki.write('= Classes =\n\n')
      self.wiki.write(self.wiki_classes)
      self.wiki.write('\n')

    self.wiki.close()

  def addFile(self, fn, title, fls = True):
    """Given a filename and section title adds the contents of said file to the output. Various flags influence how this works.

    fls - when True the first line of the file is emphasised (bold in html, *bold* in wiki). Occurrences of '<stem>.py - ' or '<stem>.txt - ' in a line get the filename part italicised (html) or monospaced (wiki)."""
    html = []
    wiki = []
    for i, line in enumerate(open(fn,'r').readlines()):
      hl = line.replace('\n', '')
      if i==0 and fls: hl = '<strong>' + hl + '</strong>'
      for ext in ['py','txt']:
        if '.%s - '%ext in hl:
          s = hl.split('.%s - '%ext, 1)
          hl = '<i>' + s[0] + '.%s</i> - '%ext + s[1]
      html.append(hl)

      wl = line.strip()
      if i==0 and fls: wl = '*%s*'%wl
      for ext in ['py','txt']:
        if '.%s - '%ext in wl:
          s = wl.split('.%s - '%ext, 1)
          wl = '`' + s[0] + '.%s` - '%ext + s[1] + '\n'
      wiki.append(wl)

    self.html.write(self.doc.bigsection(title, '#ffffff', '#7799ee', '<br/>'.join(html)))

    self.wiki.write('== %s ==\n'%title)
    self.wiki.write('\n'.join(wiki))
    self.wiki.write('----\n\n')

  def addVariable(self, var, desc):
    """Adds a variable to the documentation. Given the nature of this you provide it as a pair of strings - one referencing the variable, the other some kind of description of its use etc.."""
    self.html_variables += '<strong>%s</strong><br/>'%var
    self.html_variables += '%s<br/><br/>\n'%desc

    self.wiki_variables += '*`%s`*\n'%var
    self.wiki_variables += ' %s\n\n'%desc

  def addFunction(self, func):
    """Adds a function to the documentation. You provide the actual function instance."""
    self.html_functions += self.doc.docroutine(func).replace('&nbsp;',' ')
    self.html_functions += '\n'

    name = func.__name__
    args, varargs, keywords, defaults = inspect.getargspec(func)
    doc = inspect.getdoc(func)

    # Left-pad defaults with None so it lines up positionally with args.
    if defaults==None: defaults = list()
    defaults = (len(args)-len(defaults)) * [None] + list(defaults)

    # Build 'a, b = 1, *args, **kwargs' style signature text for the wiki.
    arg_str = ''
    if len(args)!=0:
      arg_str += reduce(lambda a, b: '%s, %s'%(a,b), map(lambda arg, d: arg if d==None else '%s = %s'%(arg,d), args, defaults))
    if varargs!=None: arg_str += ', *%s'%varargs if arg_str!='' else '*%s'%varargs
    if keywords!=None: arg_str += ', **%s'%keywords if arg_str!='' else '**%s'%keywords

    self.wiki_functions += '*`%s(%s)`*\n'%(name, arg_str)
    self.wiki_functions += ' %s\n\n'%doc

  def addClass(self, cls):
    """Adds a class to the documentation. You provide the actual class object."""
    self.html_classes += self.doc.docclass(cls).replace('&nbsp;',' ')
    self.html_classes += '\n'

    name = cls.__name__
    parents = filter(lambda a: a!=cls, inspect.getmro(cls))
    doc = inspect.getdoc(cls)

    par_str = ''
    if len(parents)!=0:
      par_str += reduce(lambda a, b: '%s, %s'%(a,b), map(lambda p: p.__name__, parents))

    self.wiki_classes += '== %s(%s) ==\n'%(name, par_str)
    self.wiki_classes += ' %s\n\n'%doc

    # Document the methods, with __init__ sorted first via a sentinel key.
    methods = inspect.getmembers(cls, lambda x: inspect.ismethod(x) or inspect.isbuiltin(x) or inspect.isroutine(x))

    def method_key(pair):
      if pair[0]=='__init__': return '___'
      else: return pair[0]
    methods.sort(key=method_key)

    for name, method in methods:
      # NOTE(review): the outer condition requires `not inspect.ismethod(method)`,
      # yet the branch below re-tests `inspect.ismethod(method)` - which can then
      # never be true, so args is always ['?']. Presumably this combination was
      # tuned for builtin/extension methods; confirm intent before changing.
      if not name.startswith('_%s'%cls.__name__) and (not inspect.ismethod(method) and name[:2]!='__'):
        if inspect.ismethod(method):
          args, varargs, keywords, defaults = inspect.getargspec(method)
        else:
          args = ['?']
          varargs = None
          keywords = None
          defaults = None

        if defaults==None: defaults = list()
        defaults = (len(args)-len(defaults)) * [None] + list(defaults)

        arg_str = ''
        if len(args)!=0:
          arg_str += reduce(lambda a, b: '%s, %s'%(a,b), map(lambda arg, d: arg if d==None else '%s = %s'%(arg,d), args, defaults))
        if varargs!=None: arg_str += ', *%s'%varargs if arg_str!='' else '*%s'%varargs
        if keywords!=None: arg_str += ', **%s'%keywords if arg_str!='' else '**%s'%keywords

        # Walk the mro to find a docstring, so inherited documentation is used.
        def fetch_doc(cls, name):
          try:
            method = getattr(cls, name)
            if method.__doc__!=None:
              return inspect.getdoc(method)
          except:
            pass
          for parent in filter(lambda a: a!=cls, inspect.getmro(cls)):
            ret = fetch_doc(parent, name)
            if ret!=None: return ret
          return None
        doc = fetch_doc(cls, name)

        self.wiki_classes += '*`%s(%s)`*\n'%(name, arg_str)
        self.wiki_classes += ' %s\n\n'%doc

    # Document simple data members (descriptors and int/str/float constants)...
    variables = inspect.getmembers(cls, lambda x: inspect.ismemberdescriptor(x) or isinstance(x, int) or isinstance(x, str) or isinstance(x, float))
    for name, var in variables:
      if not name.startswith('__'):
        if hasattr(var, '__doc__'): d = var.__doc__
        else: d = str(var)
        self.wiki_classes += '*`%s`* = %s\n\n'%(name, d)
Python
# Copyright (c) 2011, Tom SF Haines
# All rights reserved.

# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:

# * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.

# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.


import unittest
import random
import math

from scipy.special import gammaln, psi, polygamma
from scipy import weave
from utils.start_cpp import start_cpp



# Provides various gamma-related functions...
# C++ support code (for scipy.weave): lnGamma via Lanczos approximation,
# digamma and trigamma via asymptotic series plus recurrence shifts.
# The unit tests below compare each against the scipy.special equivalent.
gamma_code = start_cpp() + """
#ifndef GAMMA_CODE
#define GAMMA_CODE

#include <cmath>

// Returns the natural logarithm of the Gamma function...
// (Uses Lanczos's approximation.)
double lnGamma(double z)
{
 static const double coeff[9] = {0.99999999999980993, 676.5203681218851, -1259.1392167224028, 771.32342877765313, -176.61502916214059, 12.507343278686905, -0.13857109526572012, 9.9843695780195716e-6, 1.5056327351493116e-7};
 if (z<0.5)
 {
  // Use reflection formula, as approximation doesn't work down here...
  return log(M_PI) - log(sin(M_PI*z)) - lnGamma(1.0-z);
 }
 else
 {
  double x = coeff[0];
  for (int i=1;i<9;i++) x += coeff[i]/(z+i-1);
  double t = z + 6.5;
  return log(sqrt(2.0*M_PI)) + (z-0.5)*log(t) - t + log(x);
 }
}

// Calculates the Digamma function, i.e. the derivative of the log of the Gamma function - uses a partial expansion of an infinite series to 4 terms that is good for high values, and an identity to express lower values in terms of higher values...
double digamma(double z)
{
 static const double highVal = 13.0; // A bit of fiddling shows that the last term with this is of the order 1e-10, so we can expect at least 9 digits of accuracy past the decimal point.

 double ret = 0.0;
 while (z<highVal)
 {
  ret -= 1.0/z;
  z += 1.0;
 }

 double iz1 = 1.0/z;
 double iz2 = iz1*iz1;
 double iz4 = iz2*iz2;
 double iz6 = iz4*iz2;

 ret += log(z) - iz1/2.0 - iz2/12.0 + iz4/120.0 - iz6/252.0;

 return ret;
}

// Calculates the trigamma function - uses a partial expansion of an infinite series that is accurate for large values, and then uses an identity to express lower values in terms of higher values - same approach as for the digamma function basically...
double trigamma(double z)
{
 static const double highVal = 8.0;

 double ret = 0.0;
 while (z<highVal)
 {
  ret += 1.0/(z*z);
  z += 1.0;
 }
 z -= 1.0;

 double iz1 = 1.0/z;
 double iz2 = iz1*iz1;
 double iz3 = iz1*iz2;
 double iz5 = iz3*iz2;
 double iz7 = iz5*iz2;
 double iz9 = iz7*iz2;

 ret += iz1 - 0.5*iz2 + iz3/6.0 - iz5/30.0 + iz7/42.0 - iz9/30.0;

 return ret;
}

#endif
"""



def lnGamma(z):
  """Pointless as scipy, a library this is dependent on, defines this, but useful for testing. Returns the logorithm of the gamma function"""
  code = start_cpp(gamma_code) + """
  return_val = lnGamma(z);
  """
  return weave.inline(code, ['z'], support_code=gamma_code)


def digamma(z):
  """Pointless as scipy, a library this is dependent on, defines this, but useful for testing. Returns an evaluation of the digamma function"""
  code = start_cpp(gamma_code) + """
  return_val = digamma(z);
  """
  return weave.inline(code, ['z'], support_code=gamma_code)


def trigamma(z):
  """Pointless as scipy, a library this is dependent on, defines this, but useful for testing. Returns an evaluation of the trigamma function"""
  code = start_cpp(gamma_code) + """
  return_val = trigamma(z);
  """
  return weave.inline(code, ['z'], support_code=gamma_code)



class TestFuncs(unittest.TestCase):
  """Test code for the assorted gamma-related functions - each is compared against its scipy.special reference over random inputs."""

  def test_compile(self):
    code = start_cpp(gamma_code) + """
    """
    weave.inline(code, support_code=gamma_code)

  def test_error_lngamma(self):
    for _ in xrange(1000):
      z = random.uniform(0.01, 100.0)
      own = lnGamma(z)
      good = gammaln(z)
      assert(math.fabs(own-good)<1e-12)

  def test_error_digamma(self):
    for _ in xrange(1000):
      z = random.uniform(0.01, 100.0)
      own = digamma(z)
      good = psi(z)
      assert(math.fabs(own-good)<1e-9)

  def test_error_trigamma(self):
    for _ in xrange(1000):
      z = random.uniform(0.01, 100.0)
      own = trigamma(z)
      good = polygamma(1,z)
      assert(math.fabs(own-good)<1e-9)



# If this file is run do the unit tests...
if __name__ == '__main__':
  unittest.main()
Python
# -*- coding: utf-8 -*- # Copyright (c) 2010, Tom SF Haines # All rights reserved. # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. # * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. import inspect import hashlib def start_cpp(hash_str = None): """This method does two things - firstly it adds the correct line numbers to scipy.weave code (Good for debugging) and secondly it can optionaly inserts a hash code of some other code into the code. This latter feature is useful for working around the fact the scipy.weave only recompiles if the hash of the code changes, but ignores the support_code - passing the support_code into start_cpp avoids this problem by putting its hash into the code and forcing a recompile when that code changes. 
Usage is <code variable> = start_cpp([support_code variable]) + <3 quotations to start big comment with code in, typically going over many lines.>""" frame = inspect.currentframe().f_back info = inspect.getframeinfo(frame) if hash_str==None: return '#line %i "%s"\n'%(info[1],info[0]) else: h = hashlib.md5() h.update(hash_str) hash_val = h.hexdigest() return '#line %i "%s" // %s\n'%(info[1],info[0],hash_val)
Python
# -*- coding: utf-8 -*- # Code copied from http://opencv.willowgarage.com/wiki/PythonInterface - license unknown, but presumed to be at least as liberal as bsd (The license for opencv.). import cv import numpy as np def cv2array(im): """Converts a cv array to a numpy array.""" depth2dtype = { cv.IPL_DEPTH_8U: 'uint8', cv.IPL_DEPTH_8S: 'int8', cv.IPL_DEPTH_16U: 'uint16', cv.IPL_DEPTH_16S: 'int16', cv.IPL_DEPTH_32S: 'int32', cv.IPL_DEPTH_32F: 'float32', cv.IPL_DEPTH_64F: 'float64', } arrdtype=im.depth a = np.fromstring( im.tostring(), dtype=depth2dtype[im.depth], count=im.width*im.height*im.nChannels) a.shape = (im.height,im.width,im.nChannels) return a def array2cv(a): """Converts a numpy array to a cv array, if possible.""" dtype2depth = { 'uint8': cv.IPL_DEPTH_8U, 'int8': cv.IPL_DEPTH_8S, 'uint16': cv.IPL_DEPTH_16U, 'int16': cv.IPL_DEPTH_16S, 'int32': cv.IPL_DEPTH_32S, 'float32': cv.IPL_DEPTH_32F, 'float64': cv.IPL_DEPTH_64F, } try: nChannels = a.shape[2] except: nChannels = 1 cv_im = cv.CreateImageHeader((a.shape[1],a.shape[0]), dtype2depth[str(a.dtype)], nChannels) cv.SetData(cv_im, a.tostring(), a.dtype.itemsize*nChannels*a.shape[1]) return cv_im
Python
# -*- coding: utf-8 -*- # Copyright (c) 2011, Tom SF Haines # All rights reserved. # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. # * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. import multiprocessing as mp import multiprocessing.synchronize # To make sure we have all the functionality. import types import marshal import unittest def repeat(x): """A generator that repeats the input forever - can be used with the mp_map function to give data to a function that is constant.""" while True: yield x def run_code(code,args): """Internal use function that does the work in each process.""" code = marshal.loads(code) func = types.FunctionType(code, globals(), '_') return func(*args) def mp_map(func, *iters, **keywords): """A multiprocess version of the map function. 
Note that func must limit itself to the data provided - if it accesses anything else (globals, locals to its definition.) it will fail. There is a repeat generator provided in this module to work around such issues. Note that, unlike map, this iterates the length of the shortest of inputs, rather than the longest - whilst this makes it not a perfect substitute it makes passing constant argumenmts easier as they can just repeat for infinity.""" if 'pool' in keywords: pool = keywords['pool'] else: pool = mp.Pool() code = marshal.dumps(func.func_code) jobs = [] for args in zip(*iters): jobs.append(pool.apply_async(run_code,(code,args))) for i in xrange(len(jobs)): jobs[i] = jobs[i].get() return jobs class TestMpMap(unittest.TestCase): def test_simple1(self): data = ['a','b','c','d'] def noop(data): return data data_noop = mp_map(noop, data) self.assertEqual(data, data_noop) def test_simple2(self): data = [x for x in xrange(1000)] data_double = mp_map(lambda a: a*2, data) self.assertEqual(map(lambda a: a*2,data), data_double) def test_gen(self): def gen(): for i in xrange(100): yield i data_double = mp_map(lambda a: a*2, gen()) self.assertEqual(map(lambda a: a*2,gen()), data_double) def test_repeat(self): def mult(a,b): return a*b data = [x for x in xrange(50,5000,5)] data_triple = mp_map(mult, data, repeat(3)) self.assertEqual(map(lambda a: a*3,data),data_triple) def test_none(self): data = [] data_sqr = mp_map(lambda x: x*x, data) self.assertEqual([],data_sqr) if __name__ == '__main__': unittest.main()
Python
# Copyright (c) 2012, Tom SF Haines # All rights reserved. # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. # * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. import sys import os.path import tempfile import shutil from distutils.core import setup, Extension import distutils.ccompiler import distutils.dep_util try: __default_compiler = distutils.ccompiler.new_compiler() except: __default_compiler = None def make_mod(name, base, source, openCL = False): """Uses distutils to compile a python module - really just a set of hacks to allow this to be done 'on demand', so it only compiles if the module does not exist or is older than the current source, and after compilation the program can continue on its merry way, and immediatly import the just compiled module. Note that on failure erros can be thrown - its your choice to catch them or not. 
name is the modules name, i.e. what you want to use with the import statement. base is the base directory for the module, which contains the source file - often you would want to set this to 'os.path.dirname(__file__)', assuming the .py file that imports the module is in the same directory as the code. It is this directory that the module is output to. source is the filename of the source code to compile, or alternativly a list of filenames. openCL indicates if OpenCL is used by the module, in which case it does all the necesary setup - done like this so these setting can be kept centralised, so when they need to be different for a new platform they only have to be changed in one place.""" if __default_compiler==None: raise Exception('No compiler!') # Work out the various file names - check if we actually need to do anything... if not isinstance(source, list): source = [source] source_path = map(lambda s: os.path.join(base, s), source) library_path = os.path.join(base, __default_compiler.shared_object_filename(name)) if reduce(lambda a,b: a or b, map(lambda s: distutils.dep_util.newer(s, library_path), source_path)): try: print 'b' # Backup the argv variable and create a temporary directory to do all work in... old_argv = sys.argv[:] temp_dir = tempfile.mkdtemp() # Prepare the extension... sys.argv = ['','build_ext','--build-lib', base, '--build-temp', temp_dir] comp_path = filter(lambda s: not s.endswith('.h'), source_path) depends = filter(lambda s: s.endswith('.h'), source_path) if openCL: ext = Extension(name, comp_path, include_dirs=['/usr/local/cuda/include', '/opt/AMDAPP/include'], libraries = ['OpenCL'], library_dirs = ['/usr/lib64/nvidia', '/opt/AMDAPP/lib/x86_64'], depends=depends) else: ext = Extension(name, comp_path, depends=depends) # Compile... setup(name=name, version='1.0.0', ext_modules=[ext]) finally: # Cleanup the argv variable and the temporary directory... sys.argv = old_argv shutil.rmtree(temp_dir, True)
Python
# -*- coding: utf-8 -*- # Copyright (c) 2011, Tom SF Haines # All rights reserved. # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. # * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. from start_cpp import start_cpp from numpy_help_cpp import numpy_util_code # Provides various functions to assist with manipulating python objects from c++ code. python_obj_code = numpy_util_code + start_cpp() + """ #ifndef PYTHON_OBJ_CODE #define PYTHON_OBJ_CODE // Extracts a boolean from an object... bool GetObjectBoolean(PyObject * obj, const char * name) { PyObject * b = PyObject_GetAttrString(obj, name); bool ret = b!=Py_False; Py_DECREF(b); return ret; } // Extracts an int from an object... 
int GetObjectInt(PyObject * obj, const char * name) { PyObject * i = PyObject_GetAttrString(obj, name); int ret = PyInt_AsLong(i); Py_DECREF(i); return ret; } // Extracts a float from an object... float GetObjectFloat(PyObject * obj, const char * name) { PyObject * f = PyObject_GetAttrString(obj, name); float ret = PyFloat_AsDouble(f); Py_DECREF(f); return ret; } // Extracts an array from an object, returning it as a new[] unsigned char array. You can also pass in a pointer to an int to have the size of the array stored... unsigned char * GetObjectByte1D(PyObject * obj, const char * name, int * size = 0) { PyArrayObject * nao = (PyArrayObject*)PyObject_GetAttrString(obj, name); unsigned char * ret = new unsigned char[nao->dimensions[0]]; if (size) *size = nao->dimensions[0]; for (int i=0;i<nao->dimensions[0];i++) ret[i] = Byte1D(nao,i); Py_DECREF(nao); return ret; } // Extracts an array from an object, returning it as a new[] float array. You can also pass in a pointer to an int to have the size of the array stored... float * GetObjectFloat1D(PyObject * obj, const char * name, int * size = 0) { PyArrayObject * nao = (PyArrayObject*)PyObject_GetAttrString(obj, name); float * ret = new float[nao->dimensions[0]]; if (size) *size = nao->dimensions[0]; for (int i=0;i<nao->dimensions[0];i++) ret[i] = Float1D(nao,i); Py_DECREF(nao); return ret; } #endif """
Python
# -*- coding: utf-8 -*- # Copyright (c) 2011, Tom SF Haines # All rights reserved. # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. # * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. from start_cpp import start_cpp # Defines helper functions for accessing numpy arrays... 
numpy_util_code = start_cpp() + """ #ifndef NUMPY_UTIL_CODE #define NUMPY_UTIL_CODE float & Float1D(PyArrayObject * arr, int index = 0) { return *(float*)(arr->data + index*arr->strides[0]); } float & Float2D(PyArrayObject * arr, int index1 = 0, int index2 = 0) { return *(float*)(arr->data + index1*arr->strides[0] + index2*arr->strides[1]); } float & Float3D(PyArrayObject * arr, int index1 = 0, int index2 = 0, int index3 = 0) { return *(float*)(arr->data + index1*arr->strides[0] + index2*arr->strides[1] + index3*arr->strides[2]); } unsigned char & Byte1D(PyArrayObject * arr, int index = 0) { //assert(arr->strides[0]==sizeof(unsigned char)); return *(unsigned char*)(arr->data + index*arr->strides[0]); } unsigned char & Byte2D(PyArrayObject * arr, int index1 = 0, int index2 = 0) { //assert(arr->strides[0]==sizeof(unsigned char)); return *(unsigned char*)(arr->data + index1*arr->strides[0] + index2*arr->strides[1]); } unsigned char & Byte3D(PyArrayObject * arr, int index1 = 0, int index2 = 0, int index3 = 0) { //assert(arr->strides[0]==sizeof(unsigned char)); return *(unsigned char*)(arr->data + index1*arr->strides[0] + index2*arr->strides[1] + index3*arr->strides[2]); } int & Int1D(PyArrayObject * arr, int index = 0) { //assert(arr->strides[0]==sizeof(int)); return *(int*)(arr->data + index*arr->strides[0]); } int & Int2D(PyArrayObject * arr, int index1 = 0, int index2 = 0) { //assert(arr->strides[0]==sizeof(int)); return *(int*)(arr->data + index1*arr->strides[0] + index2*arr->strides[1]); } int & Int3D(PyArrayObject * arr, int index1 = 0, int index2 = 0, int index3 = 0) { //assert(arr->strides[0]==sizeof(int)); return *(int*)(arr->data + index1*arr->strides[0] + index2*arr->strides[1] + index3*arr->strides[2]); } #endif """
Python
#! /usr/bin/env python # -*- coding: utf-8 -*- # Copyright (c) 2011, Tom SF Haines # All rights reserved. # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. # * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. import cvarray import mp_map import prog_bar import numpy_help_cpp import python_obj_cpp import matrix_cpp import gamma_cpp import setProcName import start_cpp import make import doc_gen # Setup... doc = doc_gen.DocGen('utils', 'Utilities/Miscellaneous', 'Library of miscellaneous stuff - most modules depend on this.') doc.addFile('readme.txt', 'Overview') # Variables... 
doc.addVariable('numpy_help_cpp.numpy_util_code', 'Assorted utility functions for accessing numpy arrays within scipy.weave C++ code.') doc.addVariable('python_obj_cpp.python_obj_code', 'Assorted utility functions for interfacing with python objects from scipy.weave C++ code.') doc.addVariable('matrix_cpp.matrix_code', 'Matrix manipulation routines for use in scipy.weave C++') doc.addVariable('gamma_cpp.gamma_code', 'Gamma and related functions for use in scipy.weave C++') # Functions... doc.addFunction(make.make_mod) doc.addFunction(cvarray.cv2array) doc.addFunction(cvarray.array2cv) doc.addFunction(mp_map.repeat) doc.addFunction(mp_map.mp_map) doc.addFunction(setProcName.setProcName) doc.addFunction(start_cpp.start_cpp) doc.addFunction(make.make_mod) # Classes... doc.addClass(prog_bar.ProgBar) doc.addClass(doc_gen.DocGen)
Python
#! /usr/bin/env python # -*- coding: utf-8 -*- # Copyright (c) 2010, Tom SF Haines # All rights reserved. # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. # * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. from ctypes import * def setProcName(name): """Sets the process name, linux only - useful for those programs where you might want to do a killall, but don't want to slaughter all the other python processes. Note that there are multiple mechanisms, and that the given new name can be shortened by differing amounts in differing cases.""" # Call the process control function... libc = cdll.LoadLibrary('libc.so.6') libc.prctl(15, c_char_p(name), 0, 0, 0) # Update argv... charPP = POINTER(POINTER(c_char)) argv = charPP.in_dll(libc,'_dl_argv') size = libc.strlen(argv[0]) libc.strncpy(argv[0],c_char_p(name),size) if __name__=='__main__': # Quick test that it works... 
import os ps1 = 'ps' ps2 = 'ps -f' os.system(ps1) os.system(ps2) setProcName('wibble_wobble') os.system(ps1) os.system(ps2)
Python
#! /usr/bin/env python # Copyright (c) 2011, Tom SF Haines # All rights reserved. # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. # * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. import dp_utils from utils import doc_gen # Setup... doc = doc_gen.DocGen('dp_utils', 'Dirichlet Process Utilities', 'Utility library for handling Dirichlet processes') doc.addFile('readme.txt', 'Overview') # Variables... 
doc.addVariable('sampling_code', 'Code for sampling from various distributions - uniform, Gaussian, gamma and beta.') doc.addVariable('conc_code', 'Contains code to sample a concentration parameter and two classes - one to represent the status of a concentration parameter - its prior and its estimated value, and another to do the same thing for when a concentration parameter is shared between multiple Dirichlet processes.') doc.addVariable('dir_est_code', 'Contains a class for doing maximum likelihood estimation of a Dirichlet distrbution given multinomials that have been drawn from it.') doc.addVariable('linked_list_code', 'A linked list implimentation - doubly linked, adds data via templated inheritance.') doc.addVariable('linked_list_gc_code', 'A linked list with reference counting and garabge collection for its entries. Happens to be very good at representing a Dirichlet process.') doc.addVariable('dp_utils_code', 'Combines all of the code provided in this module into a single variable.')
Python
# Copyright 2011 Tom SF Haines # Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at # http://www.apache.org/licenses/LICENSE-2.0 # Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. import unittest from params import Params from solve_shared import State from model import DocModel from utils.start_cpp import start_cpp from ds_link_cpp import ds_link_code from scipy import weave # Shared code used to Gibbs sample the model - provides operations used repeatedly by the sampling code. Note that this contains all the heavy code used by the system - the rest is basically just loops. Additionally the data structure code is prepended to this, so this is the only shared code... shared_code = ds_link_code + start_cpp() + """ // Code for resampling a documents cluster assignment... void ResampleDocumentCluster(State & state, Document & doc) { // If the document does not currently have a cluster then create one for it - let 'em cluster in non-initialisation iterations... if (doc.GetCluster()==0) { ItemRef<Cluster,Conc> * newC = state.clusters.Append(); newC->Body().alpha = state.rho.alpha; newC->Body().beta = state.rho.beta; newC->Body().conc = state.rho.conc; doc.SetCluster(newC); return; } // Fill probAux of the topics with the counts of how many of each topic exist in the document whilst at the same time detaching the cluster instances from the document instances... 
{ ItemRef<Topic,Conc> * topic = state.topics.First(); while (topic->Valid()) { topic->probAux = 0.0; topic = topic->Next(); } } { ItemRef<DocInst,Conc> * docInst = doc.First(); while (docInst->Valid()) { docInst->topic = docInst->GetClusterInst()->GetTopic(); docInst->topic->IncRef(); // Could be that this is the last (indirect) reference to the topic, and the next line could delete it - would be bad. docInst->SetClusterInst(0); docInst->topic->probAux += 1.0; docInst = docInst->Next(); } } // Detach the document from its current cluster... doc.SetCluster(0); // Work out the log probabilities of assigning one of the known clusters to the document - store them in the cluster prob values. Uses the topic prob values as intermediates, for the probability of drawing each topic from the cluster... float maxLogProb = -1e100; { ItemRef<Cluster,Conc> * cluster = state.clusters.First(); while (cluster->Valid()) { // We need the probability of drawing each topic from the cluster, which we write into the prob variable of the topics... // Zero out the prob values of the topics... { ItemRef<Topic,Conc> * topic = state.topics.First(); while (topic->Valid()) { topic->prob = 0.0; topic = topic->Next(); } } // Count how many times each topic has been drawn from the cluster, storing in the topic prob values... { ItemRef<ClusterInst,Conc> * cluInst = cluster->First(); while (cluInst->Valid()) { cluInst->GetTopic()->prob += cluInst->RefCount(); cluInst = cluInst->Next(); } } // Normalise whilst adding in the probability of drawing the given topic... // (There is some cleverness here to account for the extra references to the topics obtained from the document being resampled.) 
{ ItemRef<Topic,Conc> * topic = state.topics.First(); while (topic->Valid()) { topic->prob += cluster->Body().conc * float(topic->RefCount()-topic->probAux) / (state.topics.RefTotal() - doc.Size() + state.topics.Body().conc); topic->prob /= cluster->RefTotal() + cluster->Body().conc; topic = topic->Next(); } } // Now calculate the log probability of the cluster - involves a loop over the topics plus the inclusion of the probability of drawing this cluster... cluster->prob = log(cluster->RefCount()); //cluster->prob -= log(state.clusters.RefTotal() + state.clusters.Body().conc); { ItemRef<Topic,Conc> * topic = state.topics.First(); while (topic->Valid()) { cluster->prob += topic->probAux * log(topic->prob); topic = topic->Next(); } } if (cluster->prob>maxLogProb) maxLogProb = cluster->prob; cluster = cluster->Next(); } } // Calculate the log probability of assigning a new cluster - involves quite a few terms, including a loop over the topics to get many of them... float probNew = log(state.clusters.Body().conc); //probNew -= log(state.clusters.RefTotal() + state.clusters.Body().conc); probNew += lnGamma(doc.Body().conc); probNew -= lnGamma(doc.Body().conc + doc.Size()); { ItemRef<Topic,Conc> * topic = state.topics.First(); while (topic->Valid()) { float tProb = float(topic->RefCount()-topic->probAux) / (state.topics.RefTotal() - doc.Size() + state.topics.Body().conc); float tWeight = doc.Body().conc * tProb; probNew += lnGamma(tWeight + topic->probAux); probNew -= lnGamma(tWeight); topic = topic->Next(); } } if (probNew>maxLogProb) maxLogProb = probNew; // Convert from logs to actual probabilities, with partial normalisation and summing for implicit precise normalisation later... 
float sumProb = 0.0; probNew = exp(probNew - maxLogProb); sumProb += probNew; { ItemRef<Cluster,Conc> * cluster = state.clusters.First(); while (cluster->Valid()) { cluster->prob = exp(cluster->prob - maxLogProb); sumProb += cluster->prob; cluster = cluster->Next(); } } // Draw which cluster we are to assign; in the event of a new cluster create it... ItemRef<Cluster,Conc> * selected = 0; { float rand = sample_uniform() * sumProb; ItemRef<Cluster,Conc> * cluster = state.clusters.First(); while (cluster->Valid()) { rand -= cluster->prob; if (rand<0.0) { selected = cluster; break; } cluster = cluster->Next(); } } if (selected==0) { selected = state.clusters.Append(); selected->Body().alpha = state.rho.alpha; selected->Body().beta = state.rho.beta; selected->Body().conc = state.rho.conc; } // Update the document with its new cluster - consists of setting the documents cluster and updating the document instances to use the new cluster, which requires more sampling... doc.SetCluster(selected); ItemRef<DocInst,Conc> * docInst = doc.First(); while(docInst->Valid()) { // Update the cluster instance for this document instance - treat as a draw from the cluster DP with a hard requiremement that we draw an instance with the same topic as currently (What to do here is not given by the dual-hdp paper - this is just one option amung many, choosen for being good for convergance and relativly easy to impliment.)... // Sum weights from the cluster instances, but only when they are the correct topic; also add in the probability of creating a new cluster instance with the relevant topic... float probSum = selected->Body().conc * float(docInst->topic->RefCount()) / (state.topics.RefTotal() + state.topics.Body().conc); { ItemRef<ClusterInst,Conc> * targ2 = selected->First(); while (targ2->Valid()) { if (targ2->GetTopic()==docInst->topic) probSum += targ2->RefCount(); targ2 = targ2->Next(); } } // Select the relevant one... 
ItemRef<ClusterInst,Conc> * relevant = 0; { float rand = sample_uniform() * probSum; ItemRef<ClusterInst,Conc> * cluInst = selected->First(); while (cluInst->Valid()) { if (cluInst->GetTopic()==docInst->topic) { rand -= cluInst->RefCount(); if (rand<0.0) { relevant = cluInst; break; } } cluInst = cluInst->Next(); } } if (relevant==0) { relevant = selected->Append(); relevant->SetTopic(docInst->topic); } // Assign it... docInst->SetClusterInst(relevant); // Temporary with topic in is no longer needed - decriment the reference... docInst->topic->DecRef(); docInst = docInst->Next(); } } // Code for resampling the topics associated with cluster instances - single function that does them all - designed this way for efficiency reasons... void ResampleClusterInstances(State & state) { // First construct a linked list in each ClusterInst of all samples currently assigned to that ClusterInst, ready for the next bit - quite an involved process due to the multiple levels... { ItemRef<Cluster,Conc> * cluster = state.clusters.First(); while (cluster->Valid()) { ItemRef<ClusterInst,Conc> * cluInst = cluster->First(); while (cluInst->Valid()) { cluInst->first = 0; cluInst = cluInst->Next(); } cluster = cluster->Next(); } } for (int d=0;d<state.docCount;d++) { Document & doc = state.doc[d]; for (int s=0;s<doc.SampleCount();s++) { Sample & sam = doc.GetSample(s); ItemRef<ClusterInst,Conc> * ci = sam.GetDocInst()->GetClusterInst(); sam.next = ci->first; ci->first = &sam; } } // Now iterate all the cluster instances and resample each in turn... { ItemRef<Cluster,Conc> * cluster = state.clusters.First(); while (cluster->Valid()) { ItemRef<ClusterInst,Conc> * cluInst = cluster->First(); while (cluInst->Valid()) { // First decriment the topic word counts for all the using samples and remove its topic... 
int sampleCount = 0; { Sample * sam = cluInst->first; while (sam) { ItemRef<Topic,Conc> * topic = sam->GetDocInst()->GetClusterInst()->GetTopic(); topic->wc[sam->GetWord()] -= 1; topic->wcTotal -= 1; sampleCount += 1; sam = sam->next; } } cluInst->SetTopic(0); // Iterate the topics and calculate the log probability of each, find maximum log probability... float maxLogProb = -1e100; { ItemRef<Topic,Conc> * topic = state.topics.First(); while (topic->Valid()) { topic->prob = log(topic->RefCount()); float samDiv = log(topic->wcTotal + state.betaSum); Sample * sam = cluInst->first; while (sam) { topic->prob += log(topic->wc[sam->GetWord()] + state.beta[sam->GetWord()]) - samDiv; sam = sam->next; } if (topic->prob>maxLogProb) maxLogProb = topic->prob; topic = topic->Next(); } } // Calculate the log probability of a new topic; maintain maximum... float probNew = log(state.topics.Body().conc); { Sample * sam = cluInst->first; while (sam) { probNew += log(state.beta[sam->GetWord()]/state.betaSum); sam = sam->next; } } if (probNew>maxLogProb) maxLogProb = probNew; // Convert log probabilities to actual probabilities in a numerically safe way, and sum them up for selection... float probSum = 0.0; probNew = exp(probNew-maxLogProb); probSum += probNew; { ItemRef<Topic,Conc> * topic = state.topics.First(); while (topic->Valid()) { topic->prob = exp(topic->prob-maxLogProb); probSum += topic->prob; topic = topic->Next(); } } // Select the resampled topic, creating a new one if required... ItemRef<Topic,Conc> * nt = 0; float rand = probSum * sample_uniform(); { ItemRef<Topic,Conc> * topic = state.topics.First(); while (topic->Valid()) { rand -= topic->prob; if (rand<0.0) { nt = topic; break; } topic = topic->Next(); } } if (nt==0) { nt = state.topics.Append(); nt->wc = new int[state.wordCount]; for (int w=0;w<state.wordCount;w++) nt->wc[w] = 0; nt->wcTotal = 0; } // Finally set its topic and sum back in the topic usage by its using samples... 
cluInst->SetTopic(nt); { Sample * sam = cluInst->first; while (sam) { ItemRef<Topic,Conc> * topic = sam->GetDocInst()->GetClusterInst()->GetTopic(); topic->wc[sam->GetWord()] += 1; topic->wcTotal += 1; sam = sam->next; } } cluInst = cluInst->Next(); } cluster = cluster->Next(); } } } // Code for resampling a document instance's cluster instance - actually does all document instances for a single document with each call, for efficiency reasons... void ResampleDocumentInstances(State & state, Document & doc) { // First construct a linked list in each DocInst of the samples contained within - needed to do the next task efficiently... { ItemRef<DocInst,Conc> * docInst = doc.First(); while (docInst->Valid()) { docInst->first = 0; docInst = docInst->Next(); } } for (int s=0;s<doc.SampleCount();s++) { Sample & sam = doc.GetSample(s); sam.next = sam.GetDocInst()->first; sam.GetDocInst()->first = &sam; } // Now iterate all DocInst in the document, resampling each in turn... { ItemRef<DocInst,Conc> * docInst = doc.First(); while (docInst->Valid()) { // Detach from its cluster instance, removing all topic references at the same time... int sampleCount = 0; { Sample * sample = docInst->first; while (sample) { ItemRef<Topic,Conc> * topic = sample->GetDocInst()->GetClusterInst()->GetTopic(); topic->wc[sample->GetWord()] -= 1; topic->wcTotal -= 1; sampleCount += 1; sample = sample->next; } } docInst->SetClusterInst(0); // Iterate the topics and determine the log probability of each topic for the sample in probAux and the log probability of drawing a new cluster with the given topic in prob. The latter has its max recorded for numerically stable normalisation later... 
float maxLogProb = -1e100; { ItemRef<Topic,Conc> * topic = state.topics.First(); float baseTopicLogProb = log(doc.GetCluster()->Body().conc) - log(state.topics.RefTotal() + state.topics.Body().conc); while (topic->Valid()) { topic->probAux = 0.0; Sample * sample = docInst->first; float samDiv = log(topic->wcTotal + state.betaSum); while (sample) { topic->probAux += log(topic->wc[sample->GetWord()] + state.beta[sample->GetWord()]) - samDiv; sample = sample->next; } topic->prob = baseTopicLogProb + log(topic->RefCount()) + topic->probAux; if (topic->prob>maxLogProb) maxLogProb = topic->prob; topic = topic->Next(); } } // Iterate the cluster instances and calculate their log probabilities, maintaining knowledge of the maximum... { ItemRef<ClusterInst,Conc> * cluInst = doc.GetCluster()->First(); while (cluInst->Valid()) { cluInst->prob = log(cluInst->RefCount()) + cluInst->GetTopic()->probAux; if (cluInst->prob>maxLogProb) maxLogProb = cluInst->prob; cluInst = cluInst->Next(); } } // Calculate the log probability of a new topic and new cluster instance, factor into the maximum... float probAllNew = log(doc.GetCluster()->Body().conc) + log(state.topics.Body().conc) - log(state.topics.RefTotal() + state.topics.Body().conc); { Sample * sample = docInst->first; while (sample) { probAllNew += log(state.beta[sample->GetWord()]/state.betaSum); sample = sample->next; } } if (probAllNew>maxLogProb) maxLogProb = probAllNew; // Use the maximum log probability to convert all values to normal probabilities in a numerically safe way, storing a sum ready for drawing from the various options... 
float probSum = 0.0; probAllNew = exp(probAllNew-maxLogProb); probSum += probAllNew; { ItemRef<Topic,Conc> * topic = state.topics.First(); while (topic->Valid()) { topic->prob = exp(topic->prob-maxLogProb); probSum += topic->prob; topic = topic->Next(); } } { ItemRef<ClusterInst,Conc> * cluInst = doc.GetCluster()->First(); while (cluInst->Valid()) { cluInst->prob = exp(cluInst->prob-maxLogProb); probSum += cluInst->prob; cluInst = cluInst->Next(); } } // Draw the new cluster instance - can involve creating a new one and even creating a new topic... ItemRef<ClusterInst,Conc> * nci = 0; float rand = sample_uniform() * probSum; { ItemRef<ClusterInst,Conc> * cluInst = doc.GetCluster()->First(); while (cluInst->Valid()) { rand -= cluInst->prob; if (rand<0.0) { nci = cluInst; break; } cluInst = cluInst->Next(); } } if (nci==0) { nci = doc.GetCluster()->Append(); ItemRef<Topic,Conc> * topic = state.topics.First(); while (topic->Valid()) { rand -= topic->prob; if (rand<0.0) { nci->SetTopic(topic); break; } topic = topic->Next(); } } if (nci->GetTopic()==0) { ItemRef<Topic,Conc> * nt = state.topics.Append(); nt->wc = new int[state.wordCount]; for (int w=0;w<state.wordCount;w++) nt->wc[w] = 0; nt->wcTotal = 0; nci->SetTopic(nt); } // Reattach its new cluster instance, and incriment the topic word counts... docInst->SetClusterInst(nci); { Sample * sample = docInst->first; while (sample) { ItemRef<Topic,Conc> * topic = sample->GetDocInst()->GetClusterInst()->GetTopic(); topic->wc[sample->GetWord()] += 1; topic->wcTotal += 1; sample = sample->next; } } docInst = docInst->Next(); } } } // Code for resampling a samples topic instance assignment... // (Everything must be assigned - no null pointers on the chain from sample to topic.) void ResampleSample(State & state, Document & doc, Sample & sam) { // Remove the samples current assignment... 
sam.SetDocInst(0); // Assign probabilities to the various possibilities - there are temporary variables in the data structure to make this elegant. Sum up the total probability ready for the sampling phase. In all cases an entity is assigned the probability of using that entity with everything below it being created from scratch... float pSum = 0.0; // Calculate the probabilities of various 'new' events... float probNewDocInst = doc.Body().conc / (doc.RefTotal() + doc.Body().conc); float probNewCluInst = probNewDocInst * doc.GetCluster()->Body().conc / (doc.GetCluster()->RefTotal() + doc.GetCluster()->Body().conc); float probNewTopic = probNewCluInst * state.topics.Body().conc / (state.topics.RefTotal() + state.topics.Body().conc); // The probability of a new topic... pSum += probNewTopic * state.beta[sam.GetWord()] / state.betaSum; // The topics - keep the probabilities of drawing the word in question from the topic in the aux variables, to save computation in the following steps... { ItemRef<Topic,Conc> * topic = state.topics.First(); float divisor = state.topics.RefTotal() + state.topics.Body().conc; while (topic->Valid()) { topic->probAux = (topic->wc[sam.GetWord()] + state.beta[sam.GetWord()]) / (topic->wcTotal + state.betaSum); topic->prob = topic->probAux * probNewCluInst * topic->RefCount() / divisor; pSum += topic->prob; topic = topic->Next(); } } // The cluster instances... { ItemRef<ClusterInst,Conc> * cluInst = doc.GetCluster()->First(); float divisor = doc.GetCluster()->RefTotal() + doc.GetCluster()->Body().conc; while (cluInst->Valid()) { cluInst->prob = cluInst->GetTopic()->probAux * probNewDocInst * cluInst->RefCount() / divisor; pSum += cluInst->prob; cluInst = cluInst->Next(); } } // The document instances... 
{ ItemRef<DocInst,Conc> * docInst = doc.First(); float divisor = doc.RefTotal() + doc.Body().conc; while (docInst->Valid()) { docInst->prob = docInst->GetClusterInst()->GetTopic()->probAux * docInst->RefCount() / divisor; pSum += docInst->prob; docInst = docInst->Next(); } } // Now draw from the distribution and assign the result, creating new entities as required. The checking is done in order of (typically) largest to smallest, to maximise the chance of an early bail out... // Draw the random uniform, scaled by the pSum - we will repeatedly subtract from this random variable for each item - when it becomes negative we have found the item to draw... float rand = sample_uniform() * pSum; // Check the document instances... { ItemRef<DocInst,Conc> * docInst = doc.First(); while (docInst->Valid()) { rand -= docInst->prob; if (rand<0.0) { // A document instance has been selected - simplest reassignment case... sam.SetDocInst(docInst); return; } docInst = docInst->Next(); } } // Check the cluster instances - would involve a new document instance... { ItemRef<ClusterInst,Conc> * cluInst = doc.GetCluster()->First(); while (cluInst->Valid()) { rand -= cluInst->prob; if (rand<0.0) { // A cluster instance has been selected - need to create a new document instance... ItemRef<DocInst,Conc> * ndi = doc.Append(); ndi->SetClusterInst(cluInst); sam.SetDocInst(ndi); return; } cluInst = cluInst->Next(); } } // Check the topics - would involve both a new cluster and document instance... { ItemRef<Topic,Conc> * topic = state.topics.First(); while (topic->Valid()) { rand -= topic->prob; if (rand<0.0) { // A topic has been selected - need a new cluster and a new document instance... 
ItemRef<ClusterInst,Conc> * nci = doc.GetCluster()->Append(); nci->SetTopic(topic); ItemRef<DocInst,Conc> * ndi = doc.Append(); ndi->SetClusterInst(nci); sam.SetDocInst(ndi); return; } topic = topic->Next(); } } // If we have got this far then its a new topic, with a new cluster and document instance as well... ItemRef<Topic,Conc> * nt = state.topics.Append(); nt->wc = new int[state.wordCount]; for (int w=0;w<state.wordCount;w++) nt->wc[w] = 0; nt->wcTotal = 0; ItemRef<ClusterInst,Conc> * nci = doc.GetCluster()->Append(); nci->SetTopic(nt); ItemRef<DocInst,Conc> * ndi = doc.Append(); ndi->SetClusterInst(nci); sam.SetDocInst(ndi); } // Code for resampling all the concentration parameters - just have to iterate through and call all the resampling methods... void ResampleConcs(State & state) { // Concentrations for DPs from which topics and clusters are drawn... state.topics.Body().ResampleConc(state.topics.RefTotal(), state.topics.Size()); state.clusters.Body().ResampleConc(state.clusters.RefTotal(), state.clusters.Size()); // Concentrations for clusters... if (state.seperateClusterConc) { ItemRef<Cluster,Conc> * cluster = state.clusters.First(); while (cluster->Valid()) { cluster->Body().ResampleConc(cluster->RefTotal(), cluster->Size()); cluster = cluster->Next(); } } else { if (state.clusters.Size()>0) { SampleConcDP scdp; scdp.SetPrior(state.rho.alpha,state.rho.beta); scdp.SetPrevConc(state.clusters.First()->Body().conc); ItemRef<Cluster,Conc> * cluster = state.clusters.First(); while (cluster->Valid()) { scdp.AddDP(cluster->RefTotal(), cluster->Size()); cluster = cluster->Next(); } double newConc = scdp.Sample(); cluster = state.clusters.First(); while (cluster->Valid()) { cluster->Body().conc = newConc; cluster = cluster->Next(); } } } // Concentrations for documents... 
if (state.seperateDocumentConc) { for (int d=0;d<state.docCount;d++) { state.doc[d].Body().ResampleConc(state.doc[d].RefTotal(), state.doc[d].Size()); } } else { SampleConcDP scdp; scdp.SetPrior(state.doc[0].Body().alpha,state.doc[0].Body().beta); scdp.SetPrevConc(state.doc[0].Body().conc); for (int d=0;d<state.docCount;d++) { scdp.AddDP(state.doc[d].RefTotal(), state.doc[d].Size()); } double newConc = scdp.Sample(); for (int d=0;d<state.docCount;d++) { state.doc[d].Body().conc = newConc; } } } """ # The actual function for Gibbs iterating the data structure - takes as input the State object as 'state' and the number of iterations to do as 'iters'... gibbs_code = start_cpp(shared_code) + """ State s; StatePyToCpp(state, &s); float * mn = new float[s.wordCount]; for (int iter=0;iter<iters;iter++) { // Iterate the documents... for (int d=0;d<s.docCount;d++) { // Resample the documents cluster... if (s.oneCluster) { if (s.doc[d].GetCluster()==0) { if (s.clusters.Size()==0) { ItemRef<Cluster,Conc> * newC = s.clusters.Append(); newC->Body().alpha = s.rho.alpha; newC->Body().beta = s.rho.beta; newC->Body().conc = s.rho.conc; s.doc[d].SetCluster(newC); } else { s.doc[d].SetCluster(s.clusters.First()); } } } else { ResampleDocumentCluster(s, s.doc[d]); } // Resample the documents samples (words)... for (int w=0;w<s.doc[d].SampleCount();w++) { ResampleSample(s, s.doc[d], s.doc[d].GetSample(w)); } // Resample the cluster instance that each document instance is assigned to... if (!s.dnrDocInsts) { ResampleDocumentInstances(s,s.doc[d]); } } // Resample the cluster instances assigned topics... if (!s.dnrCluInsts) { ResampleClusterInstances(s); } // Resample the many concentration parameters... ResampleConcs(s); // If requested recalculate beta... 
if (s.calcBeta)
{
 // Re-estimate the word prior beta from the current per-topic word counts...
 EstimateDir ed(s.wordCount);
 ItemRef<Topic,Conc> * topic = s.topics.First();
 while (topic->Valid())
 {
  float div = 0.0;
  for (int i=0;i<s.wordCount;i++) {mn[i] = topic->wc[i] + s.beta[i]; div += mn[i];}
  for (int i=0;i<s.wordCount;i++) mn[i] /= div;
  ed.Add(mn); // Not actually correct - we are using the mean of the distribution from which we should draw the multinomial, rather than actually drawing. This is easier however, and not that unreasonable.
  topic = topic->Next();
 }
 ed.Update(s.beta);

 s.betaSum = 0.0;
 for (int i=0;i<s.wordCount;i++) s.betaSum += s.beta[i];
}

// Verify the state is consistant - for debugging (Only works when there is no prior)...
//VerifyState(s);
}

delete[] mn;

StateCppToPy(&s, state);
"""



class ProgReporter:
  """Class to allow progress to be reported."""
  def __init__(self,params,callback,mult = 1):
    """params supplies runs/burnIn/lag/samples, from which the total iteration count is derived; callback, if not None, is called as callback(doneIters, totalIters) immediately and after every update; mult scales the total, for when the same parameters will be run several times (e.g. once per model sample)."""
    self.doneIters = 0
    self.totalIters = mult * params.runs * (max((params.burnIn,params.lag)) + params.samples + (params.samples-1)*params.lag)
    self.callback = callback
    if self.callback: self.callback(self.doneIters,self.totalIters)

  def next(self, amount = 1):
    """Advances the completed-iteration count by amount and reports via the callback, if one was provided."""
    self.doneIters += amount
    if self.callback: self.callback(self.doneIters,self.totalIters)



def gibbs(state, total_iters, next, step = 64):
  """Does the requested number of Gibbs iterations to the passed in state. If state has not been initialised the first iteration will be an incrimental construction. next is a callable (typically ProgReporter.next) told how many iterations have completed; step caps how many iterations go into a single weave.inline call, so progress can be reported between batches."""
  while total_iters>0:
    # Hand at most step iterations to the C++ code at a time...
    iters = total_iters
    if iters>step: iters = step
    total_iters -= iters
    weave.inline(gibbs_code, ['state', 'iters'], support_code=shared_code)
    next(iters)



def gibbs_run(state, next):
  """Does a single run on the given state object, adding the relevant samples. Burn in is followed by the requested number of samples, with lag iterations between consecutive samples; next is the progress callable."""
  params = state.getParams()
  # Only the part of the burn in beyond one lag is done here - the first pass of the sample loop below supplies the remaining lag iterations...
  if params.burnIn>params.lag: gibbs(state, params.burnIn-params.lag,next)

  for s in xrange(params.samples):
    gibbs(state, params.lag,next)
    state.sample()
    next()



def gibbs_all(state, callback = None):
  """Does all the runs requested by a states params object, collating all the samples into the State. Each run operates on a clone of the given state, which is absorbed back in once done; callback, if given, receives (done, total) progress updates."""
  params = state.getParams()
  reporter = ProgReporter(params,callback)
  for r in xrange(params.runs):
    tempState = State(state)
    gibbs_run(tempState,reporter.next)
    state.absorbClone(tempState)



def gibbs_doc(model, doc, params = None, callback = None):
  """Runs Gibbs iterations on a single document, by sampling with a prior constructed from each sample in the given Model. params applies to each sample, so should probably be much more limited than usual - the default if its undefined is to use 1 run and 1 sample and a burn in of only 500. Returns a DocModel with all the relevant samples in."""
  # Initialisation stuff - handle params, create the state and the DocModel object, plus a reporter...
  if params==None:
    params = Params()
    params.runs = 1
    params.samples = 1
    params.burnIn = 500

  state = State(doc, params)
  dm = DocModel()
  reporter = ProgReporter(params,callback,model.sampleCount())

  # Iterate and run for each sample in the model...
  for sample in model.sampleList():
    tempState = State(state)
    tempState.setGlobalParams(sample)
    tempState.addPrior(sample)
    gibbs_run(tempState,reporter.next)
    dm.addFrom(tempState.getModel())

  # Return...
  return dm



class TestShared(unittest.TestCase):
  """Test code for the data structure."""
  def test_compile(self):
    # Compiles an empty program against the shared support code, verifying that the C++ data structure code at least builds...
    code = start_cpp(shared_code) + """
    """
    weave.inline(code, support_code=shared_code)



# If this file is run do the unit tests...
if __name__ == '__main__':
  unittest.main()
Python
# Copyright 2011 Tom SF Haines

# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at

# http://www.apache.org/licenses/LICENSE-2.0

# Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.



class PriorConcDP:
  """Contains the parameters required for the concentration parameter of a DP - specifically its Gamma prior (alpha, beta) and the initial concentration value, which is expected to be updated by resampling."""
  def __init__(self, other = None):
    """Default constructs with a Gamma(1,1) prior and an initial concentration of 16; alternatively acts as a copy constructor when another PriorConcDP is provided."""
    if other is not None: # Fixed: was 'other!=None' - identity comparison is the correct test against None.
      self.__alpha = other.alpha
      self.__beta = other.beta
      self.__conc = other.conc
    else:
      self.__alpha = 1.0
      self.__beta = 1.0
      self.__conc = 16.0

  def getAlpha(self):
    """Getter for alpha."""
    return self.__alpha

  def getBeta(self):
    """Getter for beta."""
    return self.__beta

  def getConc(self):
    """Getter for the initial concentration."""
    return self.__conc

  def setAlpha(self, alpha):
    """Setter for alpha; must be strictly positive."""
    assert(alpha>0.0)
    self.__alpha = alpha

  def setBeta(self, beta):
    """Setter for beta; must be strictly positive."""
    assert(beta>0.0)
    self.__beta = beta

  def setConc(self, conc):
    """Setter for the initial concentration; must be non-negative."""
    assert(conc>=0.0)
    self.__conc = conc

  alpha = property(getAlpha, setAlpha, None, "The alpha parameter of the Gamma prior over the concentration parameter.")
  beta = property(getBeta, setBeta, None, "The beta parameter of the Gamma prior over the concentration parameter.")
  conc = property(getConc, setConc, None, "The starting value of the concentration parameter, to be updated.")
Python
# Copyright 2011 Tom SF Haines # Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at # http://www.apache.org/licenses/LICENSE-2.0 # Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. from scipy import weave import unittest from utils.start_cpp import start_cpp from dp_utils.dp_utils import dp_utils_code # Data structure for storing the state of the model, for use with the c++ Gibbs sampling code. A custom structure is used for speed and to keep the code clean... ds_code = dp_utils_code + start_cpp() + """ // Details specific for a topic - basically its multinomial and some helper stuff... class Topic { public: Topic():wc(0) {} ~Topic() {delete[] wc;} int id; // Used when moving this data structure to and from python. int * wc; // Indexed by word id this contains the count of how many words with that id are assigned to this topic - from the prior the multinomial can hence be worked out. int wcTotal; // Sum of above. float prob; // Helper for resampling. float probAux; // " }; class Sample; // Predeclaration required for below. // Stuff for the clustering - basically everything that goes into a cluster, including its DP... class ClusterInst { public: ClusterInst():topic(0) {} ~ClusterInst() { if (topic) topic->DecRef(); } ItemRef<Topic,Conc> * GetTopic() {return topic;} void SetTopic(ItemRef<Topic,Conc> * nt, bool safe=true) { if (safe&&nt) nt->IncRef(); if (safe&&topic) topic->DecRef(); topic = nt; } int id; // Used when moving this data structure to and from python. float prob; // Helper for resampling. Sample * first; // For a temporary linked list when resampling the topic. 
protected: ItemRef<Topic,Conc> * topic; }; class Cluster : public ListRef<ClusterInst,Conc> { public: Cluster() {} ~Cluster() {} int id; // Used when moving this data structure to and from python. float prob; // Helper for resampling. }; class DocInst { public: DocInst():clusterInst(0) {} ~DocInst() { if (clusterInst) clusterInst->DecRef(); } ItemRef<ClusterInst,Conc> * GetClusterInst() {return clusterInst;} void SetClusterInst(ItemRef<ClusterInst,Conc> * nci, bool safe=true) { if (safe&&nci) nci->IncRef(); if (safe&&clusterInst) clusterInst->DecRef(); clusterInst = nci; } int id; // Used when moving this data structure to and from python. float prob; // Helper for resampling. ItemRef<Topic,Conc> * topic; // Temporary value, used to store the topic whilst disconnected from a cluster inst during the cluster resampling process. Sample * first; // For a temporary linked list when resampling the cluster instance. protected: ItemRef<ClusterInst,Conc> * clusterInst; }; class Sample { public: Sample():word(-1),docInst(0) {} ~Sample() { if (docInst) docInst->DecRef(); } int GetWord() {return word;} void SetWord(int w) {word = w;} ItemRef<DocInst,Conc> * GetDocInst() {return docInst;} void SetDocInst(ItemRef<DocInst,Conc> * ndi, bool safe=true) { if (safe&&ndi) { ndi->IncRef(); ItemRef<Topic,Conc> * topic = ndi->GetClusterInst()->GetTopic(); topic->wcTotal += 1; topic->wc[word] += 1; } if (safe&&docInst) { ItemRef<Topic,Conc> * topic = docInst->GetClusterInst()->GetTopic(); topic->wcTotal -= 1; topic->wc[word] -= 1; docInst->DecRef(); } docInst = ndi; } Sample * next; // Used for a temporary linked list whilst resampling higher up the hierachy. 
protected: int word; ItemRef<DocInst,Conc> * docInst; }; class Document : public ListRef<DocInst,Conc> { public: Document():cluster(0),sampleCount(0),sample(0) {} ~Document() { if (cluster) cluster->DecRef(); delete[] sample; } ItemRef<Cluster,Conc> * GetCluster() {return cluster;} void SetCluster(ItemRef<Cluster,Conc> * nc, bool safe=true) { if (safe&&nc) nc->IncRef(); if (safe&&cluster) cluster->DecRef(); cluster = nc; } int SampleCount() {return sampleCount;} Sample & GetSample(int i) {return sample[i];} void SetSamples(int count,Sample * array) // Takes owenership of the given array, must be declared with new[] { sampleCount = count; delete[] sample; sample = array; } protected: ItemRef<Cluster,Conc> * cluster; int sampleCount; Sample * sample; // Declared with new[] }; // Final State object - represents an entire model... class State { public: State():seperateClusterConc(false), seperateDocumentConc(false), oneCluster(false), calcBeta(false), beta(0), betaSum(0.0), docCount(0), doc(0) {} ~State() { for (int d=0;d<docCount;d++) { doc[d].SetSamples(0,0); while (doc[d].Size()!=0) doc[d].First()->Suicide(); doc[d].SetCluster(0); } delete[] doc; while (clusters.Size()!=0) { ItemRef<Cluster,Conc> * victim = clusters.First(); while (victim->Size()!=0) victim->First()->Suicide(); victim->Suicide(); } while (topics.Size()!=0) topics.First()->Suicide(); delete[] beta; } // Algorithm behavioural flags, indicate if concentration parameters for clusters and documents are shared or calculated on a per entity basis, and if we should fix it to a single cluster to acheive HDP-like behaviour... bool dnrDocInsts; bool dnrCluInsts; bool seperateClusterConc; bool seperateDocumentConc; bool oneCluster; bool calcBeta; // Parameters - only need these once as most can be stored where they are needed... float * beta; float betaSum; Conc rho; // Needed for new clusters. int wordCount; // Number of unique word types. 
// Basic DP that provides topics, contains multinomial distributions etc... ListRef<Topic,Conc> topics; // DDP that provides clusters - you draw DP's from this... ListRef<Cluster,Conc> clusters; // All the documents... int docCount; Document * doc; // Declared with new[] }; // Goes through the given State object and verifies that the ref counts match the number of references - for debugging. (Obviously no good if there is a prior.) printf's out any errors... void VerifyState(State & state) { // Verify topic counts... int * counts = new int[state.topics.Size()]; { ItemRef<Topic,Conc> * targ = state.topics.First(); int id = 0; while (targ->Valid()) { targ->id = id; counts[id] = 0; id += 1; targ = targ->Next(); } if (id!=state.topics.Size()) printf("Size of topics is incorrect\\n"); } { ItemRef<Cluster,Conc> * targ = state.clusters.First(); while (targ->Valid()) { ItemRef<ClusterInst,Conc> * targ2 = targ->First(); while (targ2->Valid()) { if (targ2->GetTopic()) counts[targ2->GetTopic()->id] += 1; targ2 = targ2->Next(); } targ = targ->Next(); } } { ItemRef<Topic,Conc> * targ = state.topics.First(); int total = 0; while (targ->Valid()) { total += targ->RefCount(); if (counts[targ->id]!=targ->RefCount()) { printf("Topic %i has the wrong refcount\\n",targ->id); } targ = targ->Next(); } if (total!=state.topics.RefTotal()) printf("Topics ref-total is incorrect\\n"); } delete[] counts; // Verify cluster counts... 
counts = new int[state.clusters.Size()]; { ItemRef<Cluster,Conc> * targ = state.clusters.First(); int id = 0; while (targ->Valid()) { targ->id = id; counts[id] = 0; id += 1; targ = targ->Next(); } if (id!=state.clusters.Size()) printf("Size of clusters is incorrect\\n"); } for (int d=0;d<state.docCount;d++) { if (state.doc[d].GetCluster()) { counts[state.doc[d].GetCluster()->id] += 1; } } { ItemRef<Cluster,Conc> * targ = state.clusters.First(); int total = 0; while (targ->Valid()) { total += targ->RefCount(); if (counts[targ->id]!=targ->RefCount()) { printf("Cluster %i has the wrong refcount\\n",targ->id); } targ = targ->Next(); } if (total!=state.clusters.RefTotal()) printf("Clusters ref-total is incorrect\\n"); } delete[] counts; // Verify cluster instance counts... int cluInstSum = 0; { ItemRef<Cluster,Conc> * targ = state.clusters.First(); while (targ->Valid()) { cluInstSum += targ->Size(); targ = targ->Next(); } } counts = new int[cluInstSum]; { ItemRef<Cluster,Conc> * targ = state.clusters.First(); int id = 0; while (targ->Valid()) { ItemRef<ClusterInst,Conc> * targ2 = targ->First(); int startId = id; while (targ2->Valid()) { targ2->id = id; counts[id] = 0; id += 1; targ2 = targ2->Next(); } if ((id-startId)!=targ->Size()) printf("Size of cluster instance %i is incorrect\\n",targ->id); targ = targ->Next(); } } for (int d=0;d<state.docCount;d++) { ItemRef<DocInst,Conc> * targ = state.doc[d].First(); while (targ->Valid()) { if (targ->GetClusterInst()) { counts[targ->GetClusterInst()->id] += 1; } targ = targ->Next(); } } { ItemRef<Cluster,Conc> * targ = state.clusters.First(); while (targ->Valid()) { int total = 0; ItemRef<ClusterInst,Conc> * targ2 = targ->First(); while (targ2->Valid()) { total += targ2->RefCount(); if (targ2->RefCount()!=counts[targ2->id]) { printf("Cluster instance %i of cluster %i has a bad refcount\\n",targ2->id,targ->id); } targ2 = targ2->Next(); } if (total!=targ->RefTotal()) printf("Cluster instance %i has a bad ref total\\n",targ->id); 
targ = targ->Next(); } } delete[] counts; // Verify document instance counts... for (int d=0;d<state.docCount;d++) { counts = new int[state.doc[d].Size()]; { ItemRef<DocInst,Conc> * targ = state.doc[d].First(); int id = 0; while (targ->Valid()) { targ->id = id; counts[id] = 0; id += 1; targ = targ->Next(); } if (id!=state.doc[d].Size()) printf("Doc %i has an invalid size\\n",d); } for (int s=0;s<state.doc[d].SampleCount();s++) { Sample & sam = state.doc[d].GetSample(s); if (sam.GetDocInst()) { counts[sam.GetDocInst()->id] += 1; } } { ItemRef<DocInst,Conc> * targ = state.doc[d].First(); int total = 0; while (targ->Valid()) { total += targ->RefCount(); if (targ->RefCount()!=counts[targ->id]) { printf("Document %i, instance %i has a bad ref count\\n",d,targ->id); } targ = targ->Next(); } if (total!=state.doc[d].RefTotal()) printf("Doc %i has an invalid ref total\\n",d); } delete[] counts; } } """ class TestDS(unittest.TestCase): """Test code for the data structure.""" def test_compile(self): code = start_cpp(dual_hdp_ds) + """ State state; """ weave.inline(code, support_code=dual_hdp_ds) # If this file is run do the unit tests... if __name__ == '__main__': unittest.main()
Python
# Copyright 2011 Tom SF Haines

# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at

# http://www.apache.org/licenses/LICENSE-2.0

# Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.



# Loads solvers....

# Load the most basic solver, but also load a mp one if possible...
try:
    from solve_weave import gibbs_all, gibbs_doc
    __fitter = 'weave'
except ImportError:
    # The plain weave solver is mandatory - without it this module is useless, so
    # propagate the failure to whoever is importing us.  Only ImportError is
    # caught so that genuine bugs inside solve_weave are not masked as a
    # missing-module error.
    raise

try:
    from solve_weave_mp import gibbs_all_mp, gibbs_doc_mp
    __fitter = 'multiprocess weave'
except ImportError:
    # The multiprocess solver is optional - silently fall back to the single
    # process implementation loaded above.  (The previous bare except would
    # also have swallowed NameErrors/SyntaxErrors from inside the module.)
    pass


def getAlgorithm():
    """Returns a text string indicating which implimentation of the fitting algorithm is being used by default, which will be the best avaliable."""
    # Read-only access to a module-level name needs no 'global' declaration.
    return __fitter
Python
# Copyright 2011 Tom SF Haines

# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at

# http://www.apache.org/licenses/LICENSE-2.0

# Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.



import math
import numpy
import scipy.special
import collections

# NOTE: 'import solvers' (project-local) is deferred into Model.fitDoc - it is
# the only user, and deferring it allows these data structures to be used
# without the compiled solver extensions being present.



class DocSample:
    """Stores the sample information for a given document - the DP from which topics are drawn and which cluster it is a member of. Also calculates and stores the negative log liklihood of the document."""

    def __init__(self, doc):
        """Given the specific DocState object this copies the relevant information. Note that it doesn't calculate the nll - a method does that. Also supports cloning."""
        if isinstance(doc, DocSample):
            # Code for clonning - deep copy the dp array, everything else is immutable...
            self.cluster = doc.cluster
            self.dp = doc.dp.copy()
            self.conc = doc.conc
            self.nll = doc.nll
        else:
            # Extract the model information from a DocState...
            # (assumes doc provides .cluster, .use (2D array), .conc - TODO confirm against DocState)
            self.cluster = doc.cluster
            self.dp = doc.use.copy()
            self.conc = doc.conc
            self.nll = 0.0  # Filled in by calcNLL, not the constructor.

    def calcNLL(self, doc, state):
        """Calculates the negative log likelihood of the document, given the relevant information. This is the DocState object again (It needs the samples, which are not copied into this object by the constructor.), but this time with the entire state object as well.

        Probability (Expressed as negative log likelihood.) is specificly calculated using all terms that contain a variable in the document, but none that would be identical for all documents. That is, it contains the probability of the cluster, the probability of the DP given the cluster, and the probability of the samples, which factor in both the drawing of the topic and the drawing of the word. The ordering of the samples is considered irrelevant, with both the topic and word defining uniqueness. Some subtle approximation is made - see if you can spot it in the code!"""
        self.nll = 0.0

        # Probability of drawing the cluster...
        self.nll -= math.log(state.clusterUse[doc.cluster])
        self.nll += math.log(state.clusterUse.sum()+state.clusterConc)

        # Probability of drawing the documents dp from its cluster...
        cl = state.cluster[doc.cluster]
        instCounts = numpy.zeros(cl[0].shape[0], dtype=numpy.int32)
        for ii in range(doc.use.shape[0]):
            instCounts[doc.use[ii,0]] += 1

        norm = cl[0][:,1].sum() + cl[1]
        self.nll -= (numpy.log(numpy.asfarray(cl[0][:,1])/norm)*instCounts).sum()
        # Multinomial coefficient - ordering of the instances is irrelevant...
        self.nll -= scipy.special.gammaln(instCounts.sum() + 1.0)
        self.nll += scipy.special.gammaln(instCounts + 1.0).sum()

        # Count the numbers of word/topic instance pairs in the data structure - sum using a dictionary...
        samp_count = collections.defaultdict(int) # [instance,word]
        for s in range(doc.samples.shape[0]):
            samp_count[doc.samples[s,0],doc.samples[s,1]] += 1

        # Calculate the probability distribution of drawing each topic instance and the probability of drawing each word/topic assignment...
        inst = numpy.asfarray(doc.use[:,1])
        inst /= inst.sum() + doc.conc

        topicWord = numpy.asfarray(state.topicWord) + state.beta
        topicWord = (topicWord.T/topicWord.sum(axis=1)).T

        instLog = numpy.log(inst)
        wordLog = numpy.log(topicWord)

        # Now sum into nll the probability of drawing the samples that have been drawn - this merges the probability of drawing the topic from the documents dp with the probability of drawing the word from the topic...
        self.nll -= scipy.special.gammaln(doc.samples.shape[0]+1.0)
        for pair, count in samp_count.items():
            inst, word = pair  # Deliberately shadows the inst array - only instLog is used below.
            topic = cl[0][doc.use[inst,0],0]
            self.nll -= count * (wordLog[topic,word] + instLog[inst])
            self.nll += scipy.special.gammaln(count+1.0)

    def getCluster(self):
        """Returns the sampled cluster assignment."""
        return self.cluster

    def getInstCount(self):
        """Returns the number of cluster instances in the documents model."""
        return self.dp.shape[0]

    def getInstTopic(self, i):
        """Returns the topic index for the given instance."""
        return self.dp[i,0]

    def getInstWeight(self, i):
        """Returns the number of samples that have been assigned to the given topic instance."""
        return self.dp[i,1]

    def getInstDual(self):
        """Returns a 2D numpy array of integers where the first dimension indexes the topic instances for the document and the the second dimension has two entrys, the first (0) the topic index, the second (1) the number of samples assigned to the given topic instance. Do not edit the return value for this method - copy it first."""
        return self.dp

    def getInstConc(self):
        """Returns the sampled concentration parameter, as used by the document DP."""
        return self.conc

    def getNLL(self):
        """Returns the negative log liklihood of the document given the model, if it has been calculated."""
        return self.nll



class Sample:
    """Stores a single sample drawn from the model - the topics, clusters and each document being sampled over. Stores counts and parameters required to make them into distributions, rather than final distributions.

    Has clonning capability."""

    def __init__(self, state, calcNLL = True):
        """Given a state this draws a sample from it, as a specific parametrisation of the model."""
        if isinstance(state, Sample):
            # Code for clonning...
            self.alpha = state.alpha
            self.beta = state.beta.copy()
            self.gamma = state.gamma
            self.rho = state.rho
            self.mu = state.mu

            # Topic stuff...
            self.topicWord = state.topicWord.copy()
            self.topicUse = state.topicUse.copy()
            self.topicConc = state.topicConc

            # Cluster stuff...
            self.cluster = [(t[0].copy(),t[1]) for t in state.cluster]
            self.clusterUse = state.clusterUse.copy()
            self.clusterConc = state.clusterConc

            # Documents clone their nll, so no second pass is needed...
            self.doc = [DocSample(ds) for ds in state.doc]
        else:
            self.alpha = state.alpha
            self.beta = state.beta.copy()
            self.gamma = state.gamma
            self.rho = state.rho
            self.mu = state.mu

            # Topic stuff...
            self.topicWord = state.topicWord.copy()
            self.topicUse = state.topicUse.copy()
            self.topicConc = state.topicConc

            # Cluster stuff...
            self.cluster = [(t[0].copy(),t[1]) for t in state.cluster]
            self.clusterUse = state.clusterUse.copy()
            self.clusterConc = state.clusterConc

            # The details for each document...
            self.doc = []
            for d in range(len(state.doc)):
                self.doc.append(DocSample(state.doc[d]))

            # Second pass through documents to fill in the negative log liklihoods.
            # (Only meaningful when sampling a State - clones copy the nll above.)
            if calcNLL:
                for d in range(len(state.doc)):
                    self.doc[d].calcNLL(state.doc[d],state)

    def getAlphaPrior(self):
        """Returns the PriorConcDP that was used for the alpha parameter, which is the concentration parameter for the DP in each document."""
        return self.alpha

    def getBeta(self):
        """Returns the beta prior, which is a vector representing a Dirichlet distribution from which the multinomials for each topic are drawn, from which words are drawn."""
        return self.beta

    def getGammaPrior(self):
        """Returns the PriorConcDP that was used for the gamma parameter, which is the concentration parameter for the global DP from which topics are drawn."""
        return self.gamma

    def getRhoPrior(self):
        """Returns the PriorConcDP that was used for the rho parameter, which is the concentration parameter for each specific clusters DP."""
        return self.rho

    def getMuPrior(self):
        """Returns the PriorConcDP that was used for the mu parameter, which is the concentration parameter for the Dp from which clusters are drawn."""
        return self.mu

    def getTopicCount(self):
        """Returns the number of topics in the sample."""
        return self.topicWord.shape[0]

    def getWordCount(self):
        """Returns the number of words in the topic multinomial."""
        return self.topicWord.shape[1]

    def getTopicUseWeight(self, t):
        """Returns how many times the given topic has been instanced in a cluster."""
        return self.topicUse[t]

    def getTopicUseWeights(self):
        """Returns an array, indexed by topic id, that contains how many times each topic has been instanciated in a cluster. Do not edit the return value - copy it first."""
        return self.topicUse

    def getTopicConc(self):
        """Returns the sampled concentration parameter for drawing topic instances from the global DP."""
        return self.topicConc

    def getTopicWordCount(self, t):
        """Returns the number of samples assigned to each word for the given topic, as an integer numpy array. Do not edit the return value - make a copy first."""
        return self.topicWord[t,:]

    def getTopicWordCounts(self, t):
        """Returns the number of samples assigned to each word for all topics, indexed [topic,word], as an integer numpy array. Do not edit the return value - make a copy first."""
        return self.topicWord

    def getTopicMultinomial(self, t):
        """Returns the calculated multinomial for a given topic ident."""
        ret = self.beta.copy()
        ret += self.topicWord[t,:]
        ret /= ret.sum()
        return ret

    def getTopicMultinomials(self):
        """Returns the multinomials for all topics, in a single array - indexed by [topic,word] to give P(word|topic)."""
        ret = numpy.vstack([self.beta]*self.topicWord.shape[0])
        ret += self.topicWord
        ret = (ret.T / ret.sum(axis=1)).T
        return ret

    def getClusterCount(self):
        """Returns how many clusters there are."""
        return len(self.cluster)

    def getClusterDrawWeight(self, c):
        """Returns how many times the given cluster has been instanced by a document."""
        return self.clusterUse[c]

    def getClusterDrawWeights(self):
        """Returns an array, indexed by cluster id, that contains how many times each cluster has been instanciated by a document. Do not edit the return value - copy it first."""
        return self.clusterUse

    def getClusterDrawConc(self):
        """Returns the sampled concentration parameter for drawing cluster instances for documents."""
        return self.clusterConc

    def getClusterInstCount(self, c):
        """Returns how many instances of topics exist in the given cluster."""
        return self.cluster[c][0].shape[0]

    def getClusterInstWeight(self, c, ti):
        """Returns how many times the given cluster topic instance has been instanced by a documents DP."""
        return self.cluster[c][0][ti,1]

    def getClusterInstTopic(self, c, ti):
        """Returns which topic the given cluster topic instance is an instance of."""
        return self.cluster[c][0][ti,0]

    def getClusterInstDual(self, c):
        """Returns a 2D array, where the first dimension is indexed by the topic instance, and the second contains two columns - the first the topic index, the second the weight. Do not edit the return value - copy before use."""
        return self.cluster[c][0]

    def getClusterInstConc(self, c):
        """Returns the sampled concentration that goes with the DP from which members of each documents DP are drawn."""
        return self.cluster[c][1]

    def docCount(self):
        """Returns the number of documents stored within. Should be the same as the corpus from which the sample was drawn."""
        return len(self.doc)

    def getDoc(self,d):
        """Given a document index this returns the appropriate DocSample object. These indices should align up with the document indices in the Corpus from which this Sample was drawn."""
        return self.doc[d]

    def nllAllDocs(self):
        """Returns the negative log likelihood of all the documents in the sample - a reasonable value to compare various samples with."""
        return sum(d.getNLL() for d in self.doc)



class Model:
    """Simply contains a list of samples taken from the state during Gibbs iterations.

    Has clonning capability."""

    def __init__(self, obj=None):
        self.sample = []
        if isinstance(obj, Model):
            for sample in obj.sample:
                self.sample.append(Sample(sample))

    def sampleState(self, state):
        """Samples the state, storing the sampled model within."""
        self.sample.append(Sample(state))

    def absorbModel(self, model):
        """Given another model this absorbs all its samples, leaving the given model baren."""
        self.sample += model.sample
        model.sample = []

    def sampleCount(self):
        """Returns the number of samples."""
        return len(self.sample)

    def getSample(self, s):
        """Returns the sample associated with the given index."""
        return self.sample[s]

    def sampleList(self):
        """Returns a list of samples, for iterating."""
        return self.sample

    def bestSampleOnly(self):
        """Calculates the document nll for each sample and prunes all but the best one - very simple way of 'merging' multiple samples together."""
        # The scores are negative log likelihoods, so the most probable (best)
        # sample has the *smallest* value.  The previous comparison used '>',
        # which kept the least probable sample instead.
        score = [s.nllAllDocs() for s in self.sample]
        best = 0
        for i in range(1,len(self.sample)):
            if score[i]<score[best]:
                best = i
        self.sample = [self.sample[best]]

    def fitDoc(self, doc, params = None, callback=None, mp = True):
        """Given a document this returns a DocModel calculated by Gibbs sampling the document with the samples in the model as priors. Returns a DocModel. Note that it samples using params for *each* sample in the Model, so you typically want to use less than the defaults in Params, typically only a single run and sample, which is the default. mp can be set to False to force it to avoid multi-processing behaviour."""
        import solvers  # Deferred - only this method needs the compiled solver extensions.
        if mp and len(self.sample)>1 and hasattr(solvers,'gibbs_doc_mp'):
            return solvers.gibbs_doc_mp(self, doc, params, callback)
        else:
            return solvers.gibbs_doc(self, doc, params, callback)



class DocModel:
    """A Model that just contains DocSample-s for a single document.

    Obviously incomplete without a full Model, this is typically used when sampling a document relative to an already trained Model, such that the topic/cluster indices will match up with the original Model. Note that if the document has enough data to justify the creation of an extra topic/cluster then that could exist with an index above the indices of the topics/clusters in the source Model."""

    def __init__(self, obj=None):
        """Supports cloning."""
        self.sample = []
        if isinstance(obj, DocModel):
            for sample in obj.sample:
                self.sample.append(DocSample(sample))

    def addFrom(self, model, index=0):
        """Given a model and a document index number extracts all the relevant DocSample-s, adding them to this DocModel. It does not edit the Model but the DocSample-s transfered over are the same instances."""
        for s in range(model.sampleCount()):
            self.sample.append(model.getSample(s).getDoc(index))

    def absorbModel(self, dModel):
        """Absorbs samples from the given DocModel, leaving it baren."""
        self.sample += dModel.sample
        dModel.sample = []

    def sampleCount(self):
        """Returns the number of samples contained within."""
        return len(self.sample)

    def getSample(self, s):
        """Returns the sample with the given index, in the range 0..sampleCount()-1"""
        return self.sample[s]

    def sampleList(self):
        """Returns a list of samples, for iterating."""
        return self.sample

    def getNLL(self):
        """Returns the average nll of all the contained samples - does a proper mean of the probability of the samples."""
        # Work relative to the minimum nll so the exponentials do not underflow.
        minSam = min(s.getNLL() for s in self.sample)
        probMean = sum(math.exp(minSam-s.getNLL()) for s in self.sample)
        probMean /= float(len(self.sample))
        return minSam - math.log(probMean)
Python
# Copyright (c) 2012, Tom SF Haines
# All rights reserved.

# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:

# * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.

# * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.

# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.



from start_cpp import start_cpp

# Some basic matrix operations that come in use...
# This is C++ support code intended to be injected into scipy.weave inline
# blocks - it is a string constant, never executed by Python itself.  The
# include guard (MATRIX_CODE) allows it to be concatenated with other support
# code without multiple-definition errors.
# NOTE(review): Determinant uses recursive cofactor expansion, which is
# presumably only intended for small matrices - verify callers before using
# it on anything large.  Inverse uses Gauss-Jordan elimination with partial
# pivoting and trashes its input; a pivot smaller than 1e-6 is treated as
# singular and makes it return false.
matrix_code = start_cpp() + """
#ifndef MATRIX_CODE
#define MATRIX_CODE

template <typename T>
inline void MemSwap(T * lhs, T * rhs, int count = 1)
{
 while(count!=0)
 {
  T t = *lhs;
  *lhs = *rhs;
  *rhs = t;

  ++lhs;
  ++rhs;
  --count;
 }
}

// Calculates the determinant - you give it a pointer to the first elment of the array, and its size (It must be square), plus its stride, which would typically be identical to size, which is the default.
template <typename T>
inline T Determinant(T * pos, int size, int stride = -1)
{
 if (stride==-1) stride = size;

 if (size==1) return pos[0];
 else
 {
  if (size==2) return pos[0]*pos[stride+1] - pos[1]*pos[stride];
  else
  {
   T ret = 0.0;

   for (int i=0; i<size; i++)
   {
    if (i!=0) MemSwap(&pos[0], &pos[stride*i], size-1);

    T sub = Determinant(&pos[stride], size-1, stride) * pos[stride*i + size-1];
    if ((i+size)%2) ret += sub;
    else ret -= sub;
   }

   for (int i=1; i<size; i++)
   {
    MemSwap(&pos[(i-1)*stride], &pos[i*stride], size-1);
   }

   return ret;
  }
 }
}

// Inverts a square matrix, will fail on singular and very occasionally on
// non-singular matrices, returns true on success. Uses Gauss-Jordan elimination
// with partial pivoting.
// in is the input matrix, out the output matrix, just be aware that the input matrix is trashed.
// You have to provide its size (Its square, obviously.), and optionally a stride if different from size.
template <typename T>
inline bool Inverse(T * in, T * out, int size, int stride = -1)
{
 if (stride==-1) stride = size;

 for (int r=0; r<size; r++)
 {
  for (int c=0; c<size; c++)
  {
   out[r*stride + c] = (c==r)?1.0:0.0;
  }
 }

 for (int r=0; r<size; r++)
 {
  // Find largest pivot and swap in, fail if best we can get is 0...
  T max = in[r*stride + r];
  int index = r;
  for (int i=r+1; i<size; i++)
  {
   if (fabs(in[i*stride + r])>fabs(max))
   {
    max = in[i*stride + r];
    index = i;
   }
  }

  if (index!=r)
  {
   MemSwap(&in[index*stride], &in[r*stride], size);
   MemSwap(&out[index*stride], &out[r*stride], size);
  }

  if (fabs(max-0.0)<1e-6) return false;

  // Divide through the entire row...
  max = 1.0/max;
  in[r*stride + r] = 1.0;
  for (int i=r+1; i<size; i++) in[r*stride + i] *= max;
  for (int i=0; i<size; i++) out[r*stride + i] *= max;

  // Row subtract to generate 0's in the current column, so it matches an identity matrix...
  for (int i=0; i<size; i++)
  {
   if (i==r) continue;

   T factor = in[i*stride + r];
   in[i*stride + r] = 0.0;
   for (int j=r+1; j<size; j++) in[i*stride + j] -= factor * in[r*stride + j];
   for (int j=0; j<size; j++) out[i*stride + j] -= factor * out[r*stride + j];
  }
 }

 return true;
}

#endif
"""
Python
# -*- coding: utf-8 -*-

# Copyright (c) 2010, Tom SF Haines
# All rights reserved.

# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:

# * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.

# * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.

# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.



import sys
import time



class ProgBar:
    """Simple console progress bar class.

    Note that object creation and destruction matter, as they indicate when processing starts and when it stops."""

    def __init__(self, width = 60, onCallback = None):
        """width is how many characters wide the bar is; onCallback, if given, is invoked every time callback is."""
        # Record the start time so the total can be reported on destruction.
        self.start = time.time()
        self.fill = 0  # How many bar characters have been emitted so far.
        self.width = width
        self.onCallback = onCallback

        # Draw the 'ceiling' the bar will grow under.
        sys.stdout.write(('_'*self.width)+'\n')
        sys.stdout.flush()

    def __del__(self):
        # Completing the bar and reporting the elapsed time happens at destruction.
        self.end = time.time()
        self.__show(self.width)
        sys.stdout.write('\nDone - '+str(self.end-self.start)+' seconds\n\n')
        sys.stdout.flush()

    def callback(self, nDone, nToDo):
        """Hand this into the callback of methods to get a progress bar - it works by users repeatedly calling it to indicate how many units of work they have done (nDone) out of the total number of units required (nToDo)."""
        if self.onCallback:
            self.onCallback()

        done = int(float(self.width)*float(nDone)/float(nToDo))
        done = min(done, self.width)  # Guard against nDone overshooting nToDo.
        if done > self.fill:
            self.__show(done)

    def __show(self, target):
        # Emit only the bar characters not yet printed, then remember the new fill level.
        sys.stdout.write('|'*(target-self.fill))
        sys.stdout.flush()
        self.fill = target
Python
# Copyright (c) 2012, Tom SF Haines
# All rights reserved.

# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:

# * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.

# * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.

# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.



import pydoc
import inspect



class DocGen:
    """A helper class that is used to generate documentation for the system. Outputs multiple formats simultaneously, specifically html for local reading with a webbrowser and the markup used by the wiki system on Google code."""
    # Sections are accumulated into the *_variables/*_functions/*_classes
    # string buffers and only flushed to the two output files in __del__, so
    # the object must be destroyed for the output to be complete.
    # NOTE(review): this module is Python 2 only - it relies on the builtin
    # reduce() and on filter()/getmembers() list behaviour.

    def __init__(self, name, title = None, summary = None):
        """name is the module name - primarilly used for the file names. title is the title used as applicable - if not provide it just uses the name. summary is an optional line to go below the title."""
        if title==None: title = name
        if summary==None: summary = title

        # pydoc does the heavy lifting for the html output.
        self.doc = pydoc.HTMLDoc()

        # Open the html output and write its header immediately...
        self.html = open('%s.html'%name,'w')
        self.html.write('<html>\n')
        self.html.write('<head>\n')
        self.html.write('<title>%s</title>\n'%title)
        self.html.write('</head>\n')
        self.html.write('<body>\n')
        self.html_variables = ''
        self.html_functions = ''
        self.html_classes = ''

        # Same for the Google-code wiki output...
        self.wiki = open('%s.wiki'%name,'w')
        self.wiki.write('#summary %s\n\n'%summary)
        self.wiki.write('= %s= \n\n'%title)
        self.wiki_variables = ''
        self.wiki_functions = ''
        self.wiki_classes = ''

    def __del__(self):
        # Flush the accumulated sections (skipping empty ones) and close both files.
        if self.html_variables!='':
            self.html.write(self.doc.bigsection('Synonyms', '#ffffff', '#8d50ff', self.html_variables))
        if self.html_functions!='':
            self.html.write(self.doc.bigsection('Functions', '#ffffff', '#eeaa77', self.html_functions))
        if self.html_classes!='':
            self.html.write(self.doc.bigsection('Classes', '#ffffff', '#ee77aa', self.html_classes))

        self.html.write('</body>\n')
        self.html.write('</html>\n')
        self.html.close()

        if self.wiki_variables!='':
            self.wiki.write('= Variables =\n\n')
            self.wiki.write(self.wiki_variables)
            self.wiki.write('\n')
        if self.wiki_functions!='':
            self.wiki.write('= Functions =\n\n')
            self.wiki.write(self.wiki_functions)
            self.wiki.write('\n')
        if self.wiki_classes!='':
            self.wiki.write('= Classes =\n\n')
            self.wiki.write(self.wiki_classes)
            self.wiki.write('\n')
        self.wiki.close()

    def addFile(self, fn, title, fls = True):
        """Given a filename and section title adds the contents of said file to the output. Various flags influence how this works.

        fls - when True the first line of the file is emphasised (bold/star markup)."""
        html = []
        wiki = []

        for i, line in enumerate(open(fn,'r').readlines()):
            # html version of the line - bold the first line, italicise any
            # 'name.py - ' / 'name.txt - ' prefix...
            hl = line.replace('\n', '')
            if i==0 and fls: hl = '<strong>' + hl + '</strong>'
            for ext in ['py','txt']:
                if '.%s - '%ext in hl:
                    s = hl.split('.%s - '%ext, 1)
                    hl = '<i>' + s[0] + '.%s</i> - '%ext + s[1]
            html.append(hl)

            # wiki version of the same line, using wiki markup instead...
            wl = line.strip()
            if i==0 and fls: wl = '*%s*'%wl
            for ext in ['py','txt']:
                if '.%s - '%ext in wl:
                    s = wl.split('.%s - '%ext, 1)
                    wl = '`' + s[0] + '.%s` - '%ext + s[1] + '\n'
            wiki.append(wl)

        # File sections are written immediately, unlike variables/functions/classes.
        self.html.write(self.doc.bigsection(title, '#ffffff', '#7799ee', '<br/>'.join(html)))

        self.wiki.write('== %s ==\n'%title)
        self.wiki.write('\n'.join(wiki))
        self.wiki.write('----\n\n')

    def addVariable(self, var, desc):
        """Adds a variable to the documentation. Given the nature of this you provide it as a pair of strings - one referencing the variable, the other some kind of description of its use etc.."""
        self.html_variables += '<strong>%s</strong><br/>'%var
        self.html_variables += '%s<br/><br/>\n'%desc

        self.wiki_variables += '*`%s`*\n'%var
        self.wiki_variables += ' %s\n\n'%desc

    def addFunction(self, func):
        """Adds a function to the documentation. You provide the actual function instance."""
        self.html_functions += self.doc.docroutine(func).replace('&nbsp;',' ')
        self.html_functions += '\n'

        name = func.__name__
        args, varargs, keywords, defaults = inspect.getargspec(func)
        doc = inspect.getdoc(func)

        # Left-pad defaults with None so it lines up 1:1 with args.
        if defaults==None: defaults = list()
        defaults = (len(args)-len(defaults)) * [None] + list(defaults)

        # Build a 'a, b = 1, *args, **kw' style signature string for the wiki...
        arg_str = ''
        if len(args)!=0:
            arg_str += reduce(lambda a, b: '%s, %s'%(a,b), map(lambda arg, d: arg if d==None else '%s = %s'%(arg,d), args, defaults))
        if varargs!=None: arg_str += ', *%s'%varargs if arg_str!='' else '*%s'%varargs
        if keywords!=None: arg_str += ', **%s'%keywords if arg_str!='' else '**%s'%keywords

        self.wiki_functions += '*`%s(%s)`*\n'%(name, arg_str)
        self.wiki_functions += ' %s\n\n'%doc

    def addClass(self, cls):
        """Adds a class to the documentation. You provide the actual class object."""
        self.html_classes += self.doc.docclass(cls).replace('&nbsp;',' ')
        self.html_classes += '\n'

        name = cls.__name__
        parents = filter(lambda a: a!=cls, inspect.getmro(cls))
        doc = inspect.getdoc(cls)

        # Comma-separated list of base classes for the wiki heading...
        par_str = ''
        if len(parents)!=0:
            par_str += reduce(lambda a, b: '%s, %s'%(a,b), map(lambda p: p.__name__, parents))

        self.wiki_classes += '== %s(%s) ==\n'%(name, par_str)
        self.wiki_classes += ' %s\n\n'%doc

        methods = inspect.getmembers(cls, lambda x: inspect.ismethod(x) or inspect.isbuiltin(x) or inspect.isroutine(x))

        # Sort key that forces __init__ ahead of ordinary (lower case) method names.
        def method_key(pair):
            if pair[0]=='__init__': return '___'
            else: return pair[0]
        methods.sort(key=method_key)

        for name, method in methods:
            # Skip name-mangled privates and dunders.
            # NOTE(review): the 'not inspect.ismethod(method)' term looks
            # suspicious - it admits only non-method routines, which makes the
            # ismethod branch below unreachable; confirm against the Python
            # version this was written for before changing it.
            if not name.startswith('_%s'%cls.__name__) and (not inspect.ismethod(method) and name[:2]!='__'):
                if inspect.ismethod(method):
                    args, varargs, keywords, defaults = inspect.getargspec(method)
                else:
                    # Builtins/C routines have no introspectable signature.
                    args = ['?']
                    varargs = None
                    keywords = None
                    defaults = None
                if defaults==None: defaults = list()
                defaults = (len(args)-len(defaults)) * [None] + list(defaults)

                # Same signature-string construction as addFunction...
                arg_str = ''
                if len(args)!=0:
                    arg_str += reduce(lambda a, b: '%s, %s'%(a,b), map(lambda arg, d: arg if d==None else '%s = %s'%(arg,d), args, defaults))
                if varargs!=None: arg_str += ', *%s'%varargs if arg_str!='' else '*%s'%varargs
                if keywords!=None: arg_str += ', **%s'%keywords if arg_str!='' else '**%s'%keywords

                # Recursively walk the mro for the first docstring attached to
                # this method name, so inherited docs are found...
                def fetch_doc(cls, name):
                    try:
                        method = getattr(cls, name)
                        if method.__doc__!=None: return inspect.getdoc(method)
                    except: pass

                    for parent in filter(lambda a: a!=cls, inspect.getmro(cls)):
                        ret = fetch_doc(parent, name)
                        if ret!=None: return ret
                    return None
                doc = fetch_doc(cls, name)

                self.wiki_classes += '*`%s(%s)`*\n'%(name, arg_str)
                self.wiki_classes += ' %s\n\n'%doc

        # Document simple class-level attributes (descriptors and plain constants)...
        variables = inspect.getmembers(cls, lambda x: inspect.ismemberdescriptor(x) or isinstance(x, int) or isinstance(x, str) or isinstance(x, float))
        for name, var in variables:
            if not name.startswith('__'):
                if hasattr(var, '__doc__'): d = var.__doc__
                else: d = str(var)
                self.wiki_classes += '*`%s`* = %s\n\n'%(name, d)
Python
# Copyright (c) 2011, Tom SF Haines
# All rights reserved.

# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.

# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

# NOTE(review): this module is Python 2 / scipy.weave era code (xrange, weave) -
# it will not run on Python 3 or modern scipy without porting.

import unittest
import random
import math

from scipy.special import gammaln, psi, polygamma
from scipy import weave

from utils.start_cpp import start_cpp



# Provides various gamma-related functions, as C support code for scipy.weave -
# the Python functions below are thin wrappers used only for testing...
gamma_code = start_cpp() + """
#ifndef GAMMA_CODE
#define GAMMA_CODE

#include <cmath>

// Returns the natural logarithm of the Gamma function...
// (Uses Lanczos's approximation.)
double lnGamma(double z)
{
 static const double coeff[9] = {0.99999999999980993, 676.5203681218851, -1259.1392167224028, 771.32342877765313, -176.61502916214059, 12.507343278686905, -0.13857109526572012, 9.9843695780195716e-6, 1.5056327351493116e-7};

 if (z<0.5)
 {
  // Use reflection formula, as approximation doesn't work down here...
  return log(M_PI) - log(sin(M_PI*z)) - lnGamma(1.0-z);
 }
 else
 {
  double x = coeff[0];
  for (int i=1;i<9;i++) x += coeff[i]/(z+i-1);

  double t = z + 6.5;
  return log(sqrt(2.0*M_PI)) + (z-0.5)*log(t) - t + log(x);
 }
}

// Calculates the Digamma function, i.e. the derivative of the log of the Gamma function - uses a partial expansion of an infinite series to 4 terms that is good for high values, and an identity to express lower values in terms of higher values...
double digamma(double z)
{
 static const double highVal = 13.0; // A bit of fiddling shows that the last term with this is of the order 1e-10, so we can expect at least 9 digits of accuracy past the decimal point.

 double ret = 0.0;
 while (z<highVal)
 {
  ret -= 1.0/z;
  z += 1.0;
 }

 double iz1 = 1.0/z;
 double iz2 = iz1*iz1;
 double iz4 = iz2*iz2;
 double iz6 = iz4*iz2;

 ret += log(z) - iz1/2.0 - iz2/12.0 + iz4/120.0 - iz6/252.0;

 return ret;
}

// Calculates the trigamma function - uses a partial expansion of an infinite series that is accurate for large values, and then uses an identity to express lower values in terms of higher values - same approach as for the digamma function basically...
double trigamma(double z)
{
 static const double highVal = 8.0;

 double ret = 0.0;
 while (z<highVal)
 {
  ret += 1.0/(z*z);
  z += 1.0;
 }

 z -= 1.0;
 double iz1 = 1.0/z;
 double iz2 = iz1*iz1;
 double iz3 = iz1*iz2;
 double iz5 = iz3*iz2;
 double iz7 = iz5*iz2;
 double iz9 = iz7*iz2;

 ret += iz1 - 0.5*iz2 + iz3/6.0 - iz5/30.0 + iz7/42.0 - iz9/30.0;

 return ret;
}

#endif
"""



def lnGamma(z):
  """Pointless as scipy, a library this is dependent on, defines this, but useful for testing. Returns the logarithm of the gamma function."""
  code = start_cpp(gamma_code) + """
  return_val = lnGamma(z);
  """
  return weave.inline(code, ['z'], support_code=gamma_code)


def digamma(z):
  """Pointless as scipy, a library this is dependent on, defines this, but useful for testing. Returns an evaluation of the digamma function."""
  code = start_cpp(gamma_code) + """
  return_val = digamma(z);
  """
  return weave.inline(code, ['z'], support_code=gamma_code)


def trigamma(z):
  """Pointless as scipy, a library this is dependent on, defines this, but useful for testing. Returns an evaluation of the trigamma function."""
  code = start_cpp(gamma_code) + """
  return_val = trigamma(z);
  """
  return weave.inline(code, ['z'], support_code=gamma_code)



class TestFuncs(unittest.TestCase):
  """Test code for the assorted gamma-related functions - compares the C implementations against scipy's reference versions over random inputs."""

  def test_compile(self):
    # Just checks the support code compiles at all...
    code = start_cpp(gamma_code) + """
    """
    weave.inline(code, support_code=gamma_code)

  def test_error_lngamma(self):
    for _ in xrange(1000):
      z = random.uniform(0.01, 100.0)
      own = lnGamma(z)
      good = gammaln(z)
      assert(math.fabs(own-good)<1e-12)

  def test_error_digamma(self):
    for _ in xrange(1000):
      z = random.uniform(0.01, 100.0)
      own = digamma(z)
      good = psi(z)
      assert(math.fabs(own-good)<1e-9)

  def test_error_trigamma(self):
    for _ in xrange(1000):
      z = random.uniform(0.01, 100.0)
      own = trigamma(z)
      good = polygamma(1,z)
      assert(math.fabs(own-good)<1e-9)



# If this file is run do the unit tests...
if __name__ == '__main__':
  unittest.main()
Python
# -*- coding: utf-8 -*-

# Copyright (c) 2010, Tom SF Haines
# All rights reserved.

# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.

# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.



import inspect
import hashlib



def start_cpp(hash_str = None):
  """This method does two things - firstly it adds the correct line numbers to scipy.weave code (Good for debugging) and secondly it can optionally insert a hash code of some other code into the code. This latter feature is useful for working around the fact that scipy.weave only recompiles if the hash of the code changes, but ignores the support_code - passing the support_code into start_cpp avoids this problem by putting its hash into the code and forcing a recompile when that code changes.

  hash_str - optional string (or bytes) to hash into the emitted line; typically the support_code.
  Returns a '#line <n> "<file>"' directive (plus '// <md5>' when hash_str is given) naming the caller's file and line, ready to be prefixed to a C++ code string.

  Usage is <code variable> = start_cpp([support_code variable]) + <3 quotations to start big comment with code in, typically going over many lines.>"""
  # Identify the caller, so the #line directive points at the Python source...
  frame = inspect.currentframe().f_back
  info = inspect.getframeinfo(frame)

  if hash_str==None:
    return '#line %i "%s"\n'%(info[1],info[0])
  else:
    h = hashlib.md5()
    # hashlib digests bytes - encode text input so this works on Python 3 as
    # well as Python 2 (where bytes is str and the encode is a no-op path)...
    if not isinstance(hash_str, bytes):
      hash_str = hash_str.encode('utf-8')
    h.update(hash_str)
    hash_val = h.hexdigest()
    return '#line %i "%s" // %s\n'%(info[1],info[0],hash_val)
Python
# -*- coding: utf-8 -*- # Code copied from http://opencv.willowgarage.com/wiki/PythonInterface - license unknown, but presumed to be at least as liberal as bsd (The license for opencv.). import cv import numpy as np def cv2array(im): """Converts a cv array to a numpy array.""" depth2dtype = { cv.IPL_DEPTH_8U: 'uint8', cv.IPL_DEPTH_8S: 'int8', cv.IPL_DEPTH_16U: 'uint16', cv.IPL_DEPTH_16S: 'int16', cv.IPL_DEPTH_32S: 'int32', cv.IPL_DEPTH_32F: 'float32', cv.IPL_DEPTH_64F: 'float64', } arrdtype=im.depth a = np.fromstring( im.tostring(), dtype=depth2dtype[im.depth], count=im.width*im.height*im.nChannels) a.shape = (im.height,im.width,im.nChannels) return a def array2cv(a): """Converts a numpy array to a cv array, if possible.""" dtype2depth = { 'uint8': cv.IPL_DEPTH_8U, 'int8': cv.IPL_DEPTH_8S, 'uint16': cv.IPL_DEPTH_16U, 'int16': cv.IPL_DEPTH_16S, 'int32': cv.IPL_DEPTH_32S, 'float32': cv.IPL_DEPTH_32F, 'float64': cv.IPL_DEPTH_64F, } try: nChannels = a.shape[2] except: nChannels = 1 cv_im = cv.CreateImageHeader((a.shape[1],a.shape[0]), dtype2depth[str(a.dtype)], nChannels) cv.SetData(cv_im, a.tostring(), a.dtype.itemsize*nChannels*a.shape[1]) return cv_im
Python
# -*- coding: utf-8 -*-

# Copyright (c) 2011, Tom SF Haines
# All rights reserved.

# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.

# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

# NOTE(review): Python 2 code - func.func_code and xrange below need porting
# (func.__code__, range) before this runs on Python 3.

import multiprocessing as mp
import multiprocessing.synchronize # To make sure we have all the functionality.
import types
import marshal

import unittest



def repeat(x):
  """A generator that repeats the input forever - can be used with the mp_map function to give data to a function that is constant."""
  while True: yield x


def run_code(code,args):
  """Internal use function that does the work in each process - rebuilds a function from its marshalled bytecode (against this module's globals, so the original's closure/globals are NOT available) and applies it to args."""
  code = marshal.loads(code)
  func = types.FunctionType(code, globals(), '_')
  return func(*args)


def mp_map(func, *iters, **keywords):
  """A multiprocess version of the map function. Note that func must limit itself to the data provided - if it accesses anything else (globals, locals to its definition.) it will fail. There is a repeat generator provided in this module to work around such issues. Note that, unlike map, this iterates the length of the shortest of inputs, rather than the longest - whilst this makes it not a perfect substitute it makes passing constant argumenmts easier as they can just repeat for infinity.

  Keyword 'pool' - an existing multiprocessing.Pool to use; otherwise a fresh default Pool is created."""
  if 'pool' in keywords: pool = keywords['pool']
  else: pool = mp.Pool()

  # Ship the raw bytecode - the function object itself may not pickle...
  code = marshal.dumps(func.func_code)

  # Fan out all jobs asynchronously, then collect results in submission order,
  # overwriting each AsyncResult with its value...
  jobs = []
  for args in zip(*iters):
    jobs.append(pool.apply_async(run_code,(code,args)))

  for i in xrange(len(jobs)):
    jobs[i] = jobs[i].get()

  return jobs



class TestMpMap(unittest.TestCase):
  """Unit tests for mp_map - identity, lambdas, generators, repeat and the empty input."""

  def test_simple1(self):
    data = ['a','b','c','d']
    def noop(data): return data
    data_noop = mp_map(noop, data)
    self.assertEqual(data, data_noop)

  def test_simple2(self):
    data = [x for x in xrange(1000)]
    data_double = mp_map(lambda a: a*2, data)
    self.assertEqual(map(lambda a: a*2,data), data_double)

  def test_gen(self):
    def gen():
      for i in xrange(100): yield i
    data_double = mp_map(lambda a: a*2, gen())
    self.assertEqual(map(lambda a: a*2,gen()), data_double)

  def test_repeat(self):
    def mult(a,b): return a*b
    data = [x for x in xrange(50,5000,5)]
    data_triple = mp_map(mult, data, repeat(3))
    self.assertEqual(map(lambda a: a*3,data),data_triple)

  def test_none(self):
    data = []
    data_sqr = mp_map(lambda x: x*x, data)
    self.assertEqual([],data_sqr)



if __name__ == '__main__':
  unittest.main()
Python
# Copyright (c) 2012, Tom SF Haines
# All rights reserved.

# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.

# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.



import sys
import os.path
import tempfile
import shutil

from distutils.core import setup, Extension
import distutils.ccompiler
import distutils.dep_util



try:
  __default_compiler = distutils.ccompiler.new_compiler()
except:
  __default_compiler = None



def make_mod(name, base, source, openCL = False):
  """Uses distutils to compile a python module - really just a set of hacks to allow this to be done 'on demand', so it only compiles if the module does not exist or is older than the current source, and after compilation the program can continue on its merry way, and immediatly import the just compiled module. Note that on failure erros can be thrown - its your choice to catch them or not.

  name is the modules name, i.e. what you want to use with the import statement.
  base is the base directory for the module, which contains the source file - often you would want to set this to 'os.path.dirname(__file__)', assuming the .py file that imports the module is in the same directory as the code. It is this directory that the module is output to.
  source is the filename of the source code to compile, or alternativly a list of filenames.
  openCL indicates if OpenCL is used by the module, in which case it does all the necesary setup - done like this so these setting can be kept centralised, so when they need to be different for a new platform they only have to be changed in one place."""
  if __default_compiler==None: raise Exception('No compiler!')

  # Work out the various file names - check if we actually need to do anything...
  # (List comprehensions rather than map/filter so the sequences can be safely
  # iterated more than once.)
  if not isinstance(source, list): source = [source]
  source_path = [os.path.join(base, s) for s in source]
  library_path = os.path.join(base, __default_compiler.shared_object_filename(name))

  if any(distutils.dep_util.newer(s, library_path) for s in source_path):
    # Backup the argv variable and create the temporary directory *before* the
    # try, so the finally block can never touch an unbound name if mkdtemp
    # throws...
    old_argv = sys.argv[:]
    temp_dir = tempfile.mkdtemp()
    try:
      # Prepare the extension - distutils is driven via argv...
      sys.argv = ['','build_ext','--build-lib', base, '--build-temp', temp_dir]

      comp_path = [s for s in source_path if not s.endswith('.h')]
      depends = [s for s in source_path if s.endswith('.h')]

      if openCL:
        ext = Extension(name, comp_path, include_dirs=['/usr/local/cuda/include', '/opt/AMDAPP/include'], libraries = ['OpenCL'], library_dirs = ['/usr/lib64/nvidia', '/opt/AMDAPP/lib/x86_64'], depends=depends)
      else:
        ext = Extension(name, comp_path, depends=depends)

      # Compile...
      setup(name=name, version='1.0.0', ext_modules=[ext])
    finally:
      # Cleanup the argv variable and the temporary directory...
      sys.argv = old_argv
      shutil.rmtree(temp_dir, True)
Python
# -*- coding: utf-8 -*-

# Copyright (c) 2011, Tom SF Haines
# All rights reserved.

# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.

# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.



from start_cpp import start_cpp
from numpy_help_cpp import numpy_util_code



# Provides various functions to assist with manipulating python objects from
# c++ code. Prefixed with numpy_util_code as the Get*1D helpers use Byte1D /
# Float1D from there. The Get* helpers assume the attribute exists and has the
# requested type; the 1D variants return new[]'d buffers the caller must
# delete[]...
python_obj_code = numpy_util_code + start_cpp() + """
#ifndef PYTHON_OBJ_CODE
#define PYTHON_OBJ_CODE

// Extracts a boolean from an object...
bool GetObjectBoolean(PyObject * obj, const char * name)
{
 PyObject * b = PyObject_GetAttrString(obj, name);
 bool ret = b!=Py_False;
 Py_DECREF(b);
 return ret;
}

// Extracts an int from an object...
int GetObjectInt(PyObject * obj, const char * name)
{
 PyObject * i = PyObject_GetAttrString(obj, name);
 int ret = PyInt_AsLong(i);
 Py_DECREF(i);
 return ret;
}

// Extracts a float from an object...
float GetObjectFloat(PyObject * obj, const char * name)
{
 PyObject * f = PyObject_GetAttrString(obj, name);
 float ret = PyFloat_AsDouble(f);
 Py_DECREF(f);
 return ret;
}

// Extracts an array from an object, returning it as a new[] unsigned char array. You can also pass in a pointer to an int to have the size of the array stored...
unsigned char * GetObjectByte1D(PyObject * obj, const char * name, int * size = 0)
{
 PyArrayObject * nao = (PyArrayObject*)PyObject_GetAttrString(obj, name);

 unsigned char * ret = new unsigned char[nao->dimensions[0]];
 if (size) *size = nao->dimensions[0];

 for (int i=0;i<nao->dimensions[0];i++) ret[i] = Byte1D(nao,i);

 Py_DECREF(nao);
 return ret;
}

// Extracts an array from an object, returning it as a new[] float array. You can also pass in a pointer to an int to have the size of the array stored...
float * GetObjectFloat1D(PyObject * obj, const char * name, int * size = 0)
{
 PyArrayObject * nao = (PyArrayObject*)PyObject_GetAttrString(obj, name);

 float * ret = new float[nao->dimensions[0]];
 if (size) *size = nao->dimensions[0];

 for (int i=0;i<nao->dimensions[0];i++) ret[i] = Float1D(nao,i);

 Py_DECREF(nao);
 return ret;
}

#endif
"""
Python
# -*- coding: utf-8 -*- # Copyright (c) 2011, Tom SF Haines # All rights reserved. # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. # * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. from start_cpp import start_cpp # Defines helper functions for accessing numpy arrays... 
numpy_util_code = start_cpp() + """ #ifndef NUMPY_UTIL_CODE #define NUMPY_UTIL_CODE float & Float1D(PyArrayObject * arr, int index = 0) { return *(float*)(arr->data + index*arr->strides[0]); } float & Float2D(PyArrayObject * arr, int index1 = 0, int index2 = 0) { return *(float*)(arr->data + index1*arr->strides[0] + index2*arr->strides[1]); } float & Float3D(PyArrayObject * arr, int index1 = 0, int index2 = 0, int index3 = 0) { return *(float*)(arr->data + index1*arr->strides[0] + index2*arr->strides[1] + index3*arr->strides[2]); } unsigned char & Byte1D(PyArrayObject * arr, int index = 0) { //assert(arr->strides[0]==sizeof(unsigned char)); return *(unsigned char*)(arr->data + index*arr->strides[0]); } unsigned char & Byte2D(PyArrayObject * arr, int index1 = 0, int index2 = 0) { //assert(arr->strides[0]==sizeof(unsigned char)); return *(unsigned char*)(arr->data + index1*arr->strides[0] + index2*arr->strides[1]); } unsigned char & Byte3D(PyArrayObject * arr, int index1 = 0, int index2 = 0, int index3 = 0) { //assert(arr->strides[0]==sizeof(unsigned char)); return *(unsigned char*)(arr->data + index1*arr->strides[0] + index2*arr->strides[1] + index3*arr->strides[2]); } int & Int1D(PyArrayObject * arr, int index = 0) { //assert(arr->strides[0]==sizeof(int)); return *(int*)(arr->data + index*arr->strides[0]); } int & Int2D(PyArrayObject * arr, int index1 = 0, int index2 = 0) { //assert(arr->strides[0]==sizeof(int)); return *(int*)(arr->data + index1*arr->strides[0] + index2*arr->strides[1]); } int & Int3D(PyArrayObject * arr, int index1 = 0, int index2 = 0, int index3 = 0) { //assert(arr->strides[0]==sizeof(int)); return *(int*)(arr->data + index1*arr->strides[0] + index2*arr->strides[1] + index3*arr->strides[2]); } #endif """
Python
#! /usr/bin/env python
# -*- coding: utf-8 -*-

# Copyright (c) 2011, Tom SF Haines
# All rights reserved.

# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.

# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

# Generates the documentation (html + wiki) for the utils package by feeding
# its public API into DocGen.

import cvarray
import mp_map
import prog_bar
import numpy_help_cpp
import python_obj_cpp
import matrix_cpp
import gamma_cpp
import setProcName
import start_cpp
import make

import doc_gen



# Setup...
doc = doc_gen.DocGen('utils', 'Utilities/Miscellaneous', 'Library of miscellaneous stuff - most modules depend on this.')
doc.addFile('readme.txt', 'Overview')

# Variables...
doc.addVariable('numpy_help_cpp.numpy_util_code', 'Assorted utility functions for accessing numpy arrays within scipy.weave C++ code.')
doc.addVariable('python_obj_cpp.python_obj_code', 'Assorted utility functions for interfacing with python objects from scipy.weave C++ code.')
doc.addVariable('matrix_cpp.matrix_code', 'Matrix manipulation routines for use in scipy.weave C++')
doc.addVariable('gamma_cpp.gamma_code', 'Gamma and related functions for use in scipy.weave C++')

# Functions... (make.make_mod was previously registered twice - once is enough.)
doc.addFunction(make.make_mod)
doc.addFunction(cvarray.cv2array)
doc.addFunction(cvarray.array2cv)
doc.addFunction(mp_map.repeat)
doc.addFunction(mp_map.mp_map)
doc.addFunction(setProcName.setProcName)
doc.addFunction(start_cpp.start_cpp)

# Classes...
doc.addClass(prog_bar.ProgBar)
doc.addClass(doc_gen.DocGen)
Python
#! /usr/bin/env python
# -*- coding: utf-8 -*-

# Copyright (c) 2010, Tom SF Haines
# All rights reserved.

# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.

# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.



from ctypes import *



def setProcName(name):
  """Sets the process name, linux only - useful for those programs where you might want to do a killall, but don't want to slaughter all the other python processes. Note that there are multiple mechanisms, and that the given new name can be shortened by differing amounts in differing cases.

  name - the new process name, as a byte string (this is Python 2 code; under Python 3 a str would need encoding first)."""
  # Call the process control function...
  # (15 is presumably PR_SET_NAME from <linux/prctl.h> - TODO confirm against the header.)
  libc = cdll.LoadLibrary('libc.so.6')
  libc.prctl(15, c_char_p(name), 0, 0, 0)

  # Update argv, so tools that read /proc/<pid>/cmdline see the new name too...
  # (NOTE(review): _dl_argv is a glibc/ld.so internal symbol, not a public API -
  # this can break on other libcs or future glibc versions.)
  charPP = POINTER(POINTER(c_char))
  argv = charPP.in_dll(libc,'_dl_argv')
  # Only overwrite up to the old name's length - the buffer cannot grow...
  size = libc.strlen(argv[0])
  libc.strncpy(argv[0],c_char_p(name),size)



if __name__=='__main__':
  # Quick test that it works - show the process list before and after renaming...
  import os
  ps1 = 'ps'
  ps2 = 'ps -f'

  os.system(ps1)
  os.system(ps2)

  setProcName('wibble_wobble')

  os.system(ps1)
  os.system(ps2)
Python
#! /usr/bin/env python

# Copyright 2011 Tom SF Haines

# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at

#   http://www.apache.org/licenses/LICENSE-2.0

# Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.



import dhdp

from utils import doc_gen



# Create the document generator and attach the overview text...
doc = doc_gen.DocGen('dhdp', 'Dual Hierarchical Dirichlet Processes', 'Clustering topic model')
doc.addFile('readme.txt', 'Overview')


# Register the module's functions...
doc.addFunction(dhdp.getAlgorithm)


# Register the module's classes, in presentation order...
classes = (dhdp.PriorConcDP, dhdp.Params, dhdp.Document, dhdp.Corpus, dhdp.DocSample, dhdp.Sample, dhdp.Model, dhdp.DocModel)
for cls in classes:
  doc.addClass(cls)
Python
# Copyright 2011 Tom SF Haines

# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at

#   http://www.apache.org/licenses/LICENSE-2.0

# Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.



import numpy



class Document:
  """Representation of a document as used by the system. Consists of a list of words - each is referenced by a natural number and is associated with a count of how many of that particular word exist in the document."""
  def __init__(self, dic):
    """Constructs a document given a dictionary (Or equivalent) dic[ident] = count, where ident is the natural number that indicates which word and count is how many times that word exists in the document. Excluded entries are effectivly assumed to have a count of zero. Note that the solver will construct an array 0..{max word ident} and assume all words in that range exist, going so far as smoothing in words that are never actually seen."""
    # Create data store - one row per unique word, columns are (ident, count)...
    self.words = numpy.empty((len(dic),2), dtype=numpy.uint)

    # Copy in the data. (items() rather than the python 2-only iteritems(), so the class works under both python 2 and 3.)...
    index = 0
    self.sampleCount = 0 # Total number of words is sometimes useful - stored to save computation.
    for key, value in dic.items():
      self.words[index,0] = key
      self.words[index,1] = value
      self.sampleCount += value
      index += 1
    assert(index==self.words.shape[0])

    # Sorts the data by word ident - dictionary iteration order is not guaranteed to be sorted (and another type that pretends to be a dictionary could be passed in), so this step is required...
    self.words = self.words[self.words[:,0].argsort(),:]

    # Ident for the document, stored in here for conveniance. Only assigned when the document is stuffed into a Corpus...
    self.ident = None


  def getDic(self):
    """Returns a dictionary object that represents the document, basically a recreated version of the dictionary handed in to the constructor."""
    ret = dict()
    for i in range(self.words.shape[0]):
      ret[self.words[i,0]] = self.words[i,1]
    return ret

  def getIdent(self):
    """Ident - just the offset into the array in the corpus where this document is stored, or None if its yet to be stored anywhere."""
    return self.ident

  def getSampleCount(self):
    """Returns the number of samples in the document, which is equivalent to the number of words, counting duplicates."""
    return self.sampleCount

  def getWordCount(self):
    """Returns the number of unique words in the document, i.e. not counting duplicates."""
    return self.words.shape[0]

  def getWord(self, index):
    """Given an index 0..getWordCount()-1 this returns the tuple (ident,count) for that word."""
    return (self.words[index,0],self.words[index,1])
Python
# Copyright 2011 Tom SF Haines

# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at

#   http://www.apache.org/licenses/LICENSE-2.0

# Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.



import math
import random

import numpy
import numpy.random
import scipy.special

from gcp import gcp



class DPGMM:
  """A Dirichlet process Gaussian mixture model, implimented using the mean-field variational method, with the stick tying rather than capping method such that incrimental usage works. For those unfamiliar with Dirichlet processes the key thing to realise is that each stick corresponds to a mixture component for density estimation or a cluster for clustering, so the stick cap is the maximum number of these entities (Though it can choose to use less sticks than supplied.). As each stick has a computational cost standard practise is to start with a low number of sticks and then increase the count, updating the model each time, until no improvement to the model is obtained with further sticks. Note that because the model is fully Bayesian the output is in fact a probability distribution over the probability distribution from which the data is drawn, i.e. instead of the point estimate of a Gaussian mixture model you get back a probability distribution from which you can draw a Gaussian mixture model, though shortcut methods are provided to get the probability/component membership probability of features. Regarding the model it actually has an infinite number of sticks, but as computers don't have an infinite amount of memory or computation the sticks count is capped. The infinite number of sticks past the cap are still modeled in a limited manor, such that you can get the probability in clustering of a sample/feature belonging to an unknown cluster (i.e. one of the infinite number past the stick cap.). It also means that there is no explicit cluster count, as it is modelling a probability distribution over the number of clusters - if you need a cluster count the best choice is to threshold on the component weights, as returned by sampleMixture(...) or intMixture(...), i.e. keep only thise that are higher than a percentage of the highest weight."""
  def __init__(self, dims, stickCap = 1):
    """You initialise with the number of dimensions and the cap on the number of sticks to have. Note that the stick cap should be high enough for it to represent enough components, but not so high that you run out of memory. The better option is to set the stick cap to 1 (The default) and use the solve grow methods, which in effect find the right number of sticks. Alternativly if only given one parameter of the same type it acts as a copy constructor."""
    if isinstance(dims, DPGMM):
      # Copy constructor - deep copy everything so the clone is independent...
      self.dims = dims.dims
      self.stickCap = dims.stickCap

      self.data = [x.copy() for x in dims.data]

      self.prior = gcp.GaussianPrior(dims.prior)
      self.priorT = gcp.StudentT(dims.priorT) if dims.priorT is not None else None
      self.n = [gcp.GaussianPrior(x) for x in dims.n]

      self.beta = dims.beta.copy()
      self.alpha = dims.alpha.copy()
      self.v = dims.v.copy()
      self.z = None if dims.z is None else dims.z.copy()
      self.skip = dims.skip
      self.epsilon = dims.epsilon

      self.nT = [None if x is None else gcp.StudentT(x) for x in dims.nT]
      self.vExpLog = dims.vExpLog.copy()
      self.vExpNegLog = dims.vExpNegLog.copy()
    else:
      self.dims = dims
      self.stickCap = stickCap

      self.data = [] # A list containing data matrices - used to collate all the samples ready for processing. Before processing they are all appended into a single data matrix, such that this list is of length 1.

      self.prior = gcp.GaussianPrior(self.dims) # The prior over the mixture components.
      self.priorT = None
      self.n = [gcp.GaussianPrior(self.dims) for _ in range(self.stickCap)] # The mixture components, one for each stick.

      self.beta = numpy.ones(2, dtype=numpy.float32) # The two parameters (Typically named alpha & beta) for the Gamma distribution prior over alpha.
      self.alpha = numpy.ones(2, dtype=numpy.float32) # The parameters for the Gamma distribution that represents the current distribution over alpha - basically beta updated with the current stick configuration.
      self.v = numpy.ones((self.stickCap,2), dtype=numpy.float32) # Each [i,:] of this array represents the two parameters of the beta distribution over the strick breaking weight for the relevant mixture component.
      self.z = None # The matrix of multinomials over stick-assignment for each sample, aligned with the data matrix. In the case of incrimental use will not necessarily be complete.
      self.skip = 0 # Number of samples at the start of the data matrix to not bother updating - useful to speed things up with incrimental learning.
      self.epsilon = 1e-4 # Amount of change below which it stops iterating.

      # The cache of stuff kept around for speed...
      self.nT = [None]*self.stickCap # The student T distribution associated with each Gaussian.
      self.vExpLog = numpy.empty(self.stickCap, dtype=numpy.float32) # The expected value of the logorithm of each v.
      self.vExpNegLog = numpy.empty(self.stickCap, dtype=numpy.float32) # The expected value of the logarithm of 1 minus each v.

      self.vExpLog[:] = -1.0 # Need these to always be correct - this matches initialisation of v.
      self.vExpNegLog[:] = -1.0 # As above.


  def incStickCap(self, inc = 1):
    """Increases the stick cap by the given number of entrys. Can be used in collaboration with nllData to increase the number of sticks until insufficient improvement, indicating the right number has been found."""
    self.stickCap += inc

    self.n += [gcp.GaussianPrior(self.dims) for _ in range(inc)]
    self.v = numpy.append(self.v, numpy.ones((inc,2), dtype=numpy.float32), 0)

    if self.z is not None:
      # Extend z with random assignments for the new sticks, then renormalise the split between old and new columns with a random weighting...
      self.z = numpy.append(self.z, numpy.random.mtrand.dirichlet(32.0*numpy.ones(inc), size=self.z.shape[0]), 1)
      weight = numpy.random.mtrand.dirichlet(numpy.ones(2), size=self.z.shape[0])
      self.z[:,:self.stickCap-inc] *= weight[:,0].reshape((self.z.shape[0],1))
      self.z[:,self.stickCap-inc:] *= weight[:,1].reshape((self.z.shape[0],1))

    self.nT += [None] * inc
    self.vExpLog = numpy.append(self.vExpLog, -1.0*numpy.ones(inc, dtype=numpy.float32))
    self.vExpNegLog = numpy.append(self.vExpNegLog, -1.0*numpy.ones(inc, dtype=numpy.float32))

  def getStickCap(self):
    """Returns the current stick cap."""
    return self.stickCap


  def setPrior(self, mean = None, covar = None, weight = None, scale = 1.0):
    """Sets a prior for the mixture components - basically a pass through for the addPrior method of the GaussianPrior class. If None (The default) is provided for the mean or the covar then it calculates these values for the currently contained sample set and uses them. Note that the prior defaults to nothing - this must be called before fitting the model, and if mean/covar are not provided then there must be enough data points to avoid problems. weight defaults to the number of dimensions if not specified. If covar is not given then scale is a multiplier for the covariance matrix - setting it high will soften the prior up and make it consider softer solutions when given less data. Returns True on success, False on failure - failure can happen if there is not enough data contained for automatic calculation (Think singular covariance matrix). This must be called before any solve methods are called."""
    # Handle mean/covar being None (is-None tests rather than ==, as mean/covar may be numpy arrays)...
    if mean is None or covar is None:
      inc = gcp.GaussianInc(self.dims)
      dm = self.getDM()
      for i in range(dm.shape[0]): inc.add(dm[i,:])
      ggd = inc.fetch()

      if mean is None: mean = ggd.getMean()
      if covar is None: covar = ggd.getCovariance() * scale
      if numpy.linalg.det(covar)<1e-12: return False

    # Update the prior...
    self.prior.reset()
    self.prior.addPrior(mean, covar, weight)
    self.priorT = self.prior.intProb()

    return True

  def setConcGamma(self, alpha, beta):
    """Sets the parameters for the Gamma prior over the concentration. Note that whilst alpha and beta are used for the parameter names, in accordance with standard terminology for Gamma distributions, they are not related to the model variable names. Default values are (1,1). The concentration parameter controls how much information the model requires to justify using a stick, such that lower numbers result in fewer sticks, higher numbers in larger numbers of sticks. The concentration parameter is learnt from the data, under the Gamma distribution prior set with this method, but this prior can still have a strong effect. If your data is not producing as many clusters as you expect then adjust this parameter accordingly, (e.g. increase alpha or decrease beta, or both.), but don't go too high or it will start hallucinating patterns where none exist!"""
    self.beta[0] = alpha
    self.beta[1] = beta

  def setThreshold(self, epsilon):
    """Sets the threshold for parameter change below which it considers it to have converged, and stops iterating."""
    self.epsilon = epsilon


  def add(self, sample):
    """Adds either a single sample or several samples - either give a single sample as a 1D array or a 2D array as a data matrix, where each sample is [i,:]. (Sample = feature. I refer to them as samples as that more effectivly matches the concept of this modeling the probability distribution from which the features are drawn.)"""
    sample = numpy.asarray(sample, dtype=numpy.float32)
    if len(sample.shape)==1:
      self.data.append(numpy.reshape(sample, (1,self.dims)))
    else:
      assert(len(sample.shape)==2)
      assert(sample.shape[1]==self.dims) # Fixed: previously referenced the undefined name 'sampler', a NameError on any 2D input.
      self.data.append(sample)

  def getDM(self):
    """Returns a data matrix containing all the samples that have been added."""
    if len(self.data)==1: return self.data[0]
    if len(self.data)==0: return None

    # Collapse the collected chunks into a single matrix, cached for next time...
    self.data = [numpy.vstack(self.data)]
    return self.data[0]

  def size(self):
    """Returns the number of samples that have been added."""
    dm = self.getDM()
    if dm is not None: return dm.shape[0]
    else: return 0


  def lock(self, num=0):
    """Prevents the algorithm updating the component weighting for the first num samples in the database - potentially useful for incrimental learning if in a rush. If set to 0, the default, everything is updated."""
    self.skip = num


  def solve(self, iterCap=None):
    """Iterates updating the parameters until the model has converged. Note that the system is designed such that you can converge, add more samples, and converge again, i.e. incrimental learning. Alternativly you can converge, add more sticks, and then convegre again without issue, which makes finding the correct number of sticks computationally reasonable. Returns the number of iterations required to acheive convergance. You can optionally provide a cap on the number of iterations it will perform."""
    # Deal with the z array being incomplete - enlarge/create as needed. Random initialisation is used...
    dm = self.getDM()
    if self.z is None or self.z.shape[0]<dm.shape[0]:
      newZ = numpy.empty((dm.shape[0],self.stickCap), dtype=numpy.float32)
      if self.z is None:
        offset = 0
      else:
        offset = self.z.shape[0]
        newZ[:offset,:] = self.z
      self.z = newZ
      self.z[offset:,:] = numpy.random.mtrand.dirichlet(32.0*numpy.ones(self.stickCap), size=self.z.shape[0]-offset) # 32 is to avoid extreme values, which can lock it in place, without the distribution being too flat as to cause problems converging.

    # Iterate until convergance...
    prev = self.z.copy()
    iters = 0
    while True:
      iters += 1

      # Update the concentration parameter...
      self.alpha[0] = self.beta[0] + self.stickCap
      self.alpha[1] = self.beta[1] - self.vExpNegLog.sum()

      # Record the expected values of a stick given the prior alone - needed to normalise the z values...
      expLogStick = -scipy.special.psi(1.0 + self.alpha[0]/self.alpha[1])
      expNegLogStick = expLogStick
      expLogStick += scipy.special.psi(1.0)
      expNegLogStick += scipy.special.psi(self.alpha[0]/self.alpha[1])

      # Update the stick breaking weights...
      self.v[:,0] = 1.0
      self.v[:,1] = self.alpha[0]/self.alpha[1]
      sums = self.z.sum(axis=0)
      self.v[:,0] += sums
      self.v[:,1] += self.z.shape[0]
      self.v[:,1] -= numpy.cumsum(sums)

      # Calculate the log expectations on the stick breaking weights...
      self.vExpLog[:] = -scipy.special.psi(self.v.sum(axis=1))
      self.vExpNegLog[:] = self.vExpLog
      self.vExpLog[:] += scipy.special.psi(self.v[:,0])
      self.vExpNegLog[:] += scipy.special.psi(self.v[:,1])

      # Update the Gaussian conjugate priors, extracting the student-t distributions as well...
      for k in range(self.stickCap):
        self.n[k].reset()
        self.n[k].addGP(self.prior)
        self.n[k].addSamples(dm, self.z[:,k])
        self.nT[k] = self.n[k].intProb()

      # Update the z values...
      prev[self.skip:,:] = self.z[self.skip:,:]

      vExpNegLogCum = self.vExpNegLog.cumsum()
      base = self.vExpLog.copy()
      base[1:] += vExpNegLogCum[:-1]

      self.z[self.skip:,:] = numpy.exp(base).reshape((1,self.stickCap))
      for k in range(self.stickCap):
        self.z[self.skip:,k] *= self.nT[k].batchProb(dm[self.skip:,:])

      # Normalisation includes a term for the infinite number of sticks past the cap...
      norm = self.priorT.batchProb(dm[self.skip:,:])
      norm *= math.exp(expLogStick + vExpNegLogCum[-1]) / (1.0 - math.exp(expNegLogStick))
      self.z[self.skip:,:] /= (self.z[self.skip:,:].sum(axis=1) + norm).reshape((self.z.shape[0]-self.skip,1))

      # Check for convergance...
      change = numpy.abs(prev[self.skip:,:]-self.z[self.skip:,:]).sum(axis=1).max()
      if change<self.epsilon: break
      if iters==iterCap: break

    # Return the number of iterations that were required to acheive convergance...
    return iters

  def solveGrow(self, iterCap=None):
    """This method works by solving for the current stick cap, and then it keeps increasing the stick cap until there is no longer an improvement in the model quality. If using this method you should probably initialise with a stick cap of 1. By using this method the stick cap parameter is lost, and you no longer have to guess what a good value is."""
    it = 0
    prev = None

    while True:
      it += self.solve(iterCap)
      value = self.nllData()
      if prev is None or value<prev:
        prev = value
        self.incStickCap()
      else:
        return it


  def sampleMixture(self):
    """Once solve has been called and a distribution over models determined this allows you to draw a specific model. Returns a 2-tuple, where the first entry is an array of weights and the second entry a list of Gaussian distributions - they line up, to give a specific Gaussian mixture model. For density estimation the probability of a specific point is then the sum of each weight multiplied by the probability of it comming from the associated Gaussian. For clustering the probability of a specific point belonging to a cluster is the weight multiplied by the probability of it comming from a specific Gaussian, normalised for all clusters. Note that this includes an additional term to cover the infinite number of terms that follow, which is really an approximation, but tends to be such a small amount as to not matter. Be warned that if doing clustering a point could be assigned to this 'null cluster', indicating that the model thinks the point belongs to an unknown cluster (i.e. one that it doesn't have enough information, or possibly sticks, to instanciate.)."""
    weight = numpy.empty(self.stickCap+1, dtype=numpy.float32)
    stick = 1.0
    for i in range(self.stickCap):
      val = random.betavariate(self.v[i,0], self.v[i,1])
      weight[i] = stick * val
      stick *= 1.0 - val
    weight[-1] = stick

    gauss = [x.sample() for x in self.n]
    gauss.append(self.prior.sample())

    return (weight,gauss)

  def intMixture(self):
    """Returns the details needed to calculate the probability of a point given the model (density estimation), or its probability of belonging to each stick (clustering), but with the actual draw of a mixture model from the model integrated out. It is an apprximation, though not a bad one. Basically you get a 2-tuple - the first entry is an array of weights, the second a list of student-t distributions. The weights and distributions align, such that for density estimation the probability for a point is the sum over all entrys of the weight multiplied by the probability of the sample comming from the student-t distribution. The prob method of this class calculates the use of this for a sample directly. For clustering the probability of belonging to each cluster is calculated as the weight multiplied by the probability of comming from the associated student-t, noting that you will need to normalise. stickProb allows you to get this assesment directly. Do not edit the returned value; also, it will not persist if solve is called again. This must only be called after solve is called at least once. Note that an extra element is included to cover the remainder of the infinite number of elements - be warned that a sample could have the highest probability of belonging to this dummy element, indicating that it probably belongs to something for which there is not enough data to infer a reasonable model."""
    weights = numpy.empty(self.stickCap+1, dtype=numpy.float32)
    stick = 1.0
    for i in range(self.stickCap):
      ev = self.v[i,0] / self.v[i,:].sum()
      weights[i] = stick * ev
      stick *= 1.0 - ev
    weights[-1] = stick

    return (weights, self.nT + [self.priorT])


  def prob(self, x):
    """Given a sample this returns its probability, with the actual draw from the model integrated out. Must not be called until after solve has been called. This is the density estimate if using this model for density estimation. Will also accept a data matrix, in which case it will return a 1D array of probabilities aligning with the input data matrix."""
    x = numpy.asarray(x)

    if len(x.shape)==1:
      ret = 0.0
      stick = 1.0
      for i in range(self.stickCap):
        bp = self.nT[i].prob(x)
        ev = self.v[i,0] / self.v[i,:].sum()
        ret += bp * stick * ev
        stick *= 1.0 - ev
      bp = self.priorT.prob(x)
      ret += bp * stick
      return ret
    else:
      ret = numpy.zeros(x.shape[0])
      stick = 1.0
      for i in range(self.stickCap):
        bp = self.nT[i].batchProb(x)
        ev = self.v[i,0] / self.v[i,:].sum()
        ret += bp * stick * ev
        stick *= 1.0 - ev
      bp = self.priorT.batchProb(x)
      ret += bp * stick
      return ret

  def stickProb(self, x):
    """Given a sample this returns its probability of belonging to each of the components, as a 1D array, including a dummy element at the end to cover the infinite number of sticks not being explicitly modeled. This is the probability of belonging to each cluster if using the model for clustering. Must not be called until after solve has been called. Will also accept a data matrix, in which case it will return a matrix with a row for each vector in the input data matrix."""
    x = numpy.asarray(x)

    if len(x.shape)==1:
      ret = numpy.empty(self.stickCap+1, dtype=numpy.float32)
      stick = 1.0
      for i in range(self.stickCap):
        bp = self.nT[i].prob(x)
        ev = self.v[i,0] / self.v[i,:].sum()
        ret[i] = bp * stick * ev
        stick *= 1.0 - ev
      bp = self.priorT.prob(x)
      ret[self.stickCap] = bp * stick
      ret /= ret.sum()
      return ret
    else:
      ret = numpy.empty((x.shape[0],self.stickCap+1), dtype=numpy.float32)
      stick = 1.0
      for i in range(self.stickCap):
        bp = self.nT[i].batchProb(x)
        ev = self.v[i,0] / self.v[i,:].sum()
        ret[:,i] = bp * stick * ev
        stick *= 1.0 - ev
      bp = self.priorT.batchProb(x)
      ret[:,self.stickCap] = bp * stick
      ret /= ret.sum(axis=1).reshape((-1,1))
      return ret


  def reset(self, alphaParam = True, vParam = True, zParam = True):
    """Allows you to reset the parameters associated with each variable - good for doing a restart if your doing multiple restarts, or if you suspect it has got stuck in a local minima whilst doing incrimental stuff."""
    if alphaParam:
      self.alpha[:] = 1.0

    if vParam:
      self.v[:,:] = 1.0
      self.vExpLog[:] = -1.0
      self.vExpNegLog[:] = -1.0

    if zParam:
      self.z = None


  def nllData(self):
    """Returns the negative log likelihood of the data given the current distribution over models, with the model integrated out - good for comparing multiple restarts/different numbers of sticks to find which is the best."""
    dm = self.getDM()
    model = self.intMixture()

    # Work in log space, with a per-sample offset for numerical stability...
    probs = numpy.empty((dm.shape[0],model[0].shape[0]), dtype=numpy.float32)
    for i, st in enumerate(model[1]):
      probs[:,i] = st.batchLogProb(dm) + math.log(model[0][i])

    offsets = probs.max(axis=1)
    probs -= offsets.reshape((-1,1))

    ret = offsets.sum()
    ret += numpy.log(numpy.exp(probs).sum(axis=1)).sum()
    return -ret


  def multiSolve(self, runs, testIter=256):
    """Clones this object a number of times, given by the runs parameter, and then runs each for the testIters parameter number of iterations, to give them time to converge a bit. It then selects the one with the best nllData() score and runs that to convergance, before returning that specific clone. This is basically a simple way of avoiding getting stuck in a really bad local minima, though chances are you will end up in another one, just not a terrible one. Obviously testIter limits the effectivness of this, but as it tends to converge faster if your closer to the correct answer hopefully not by much. (To be honest I have not found this method to be of much use - in testing when this techneque converges to the wrong answer it does so consistantly, indicating that there is insufficient data regardless of initialisation.)"""
    best = None
    bestNLL = None

    for _ in range(runs):
      clone = DPGMM(self)
      clone.solve(testIter)
      score = clone.nllData()
      if bestNLL is None or score<bestNLL:
        best = clone
        bestNLL = score

    best.solve()
    return best

  def multiGrowSolve(self, runs):
    """This effectivly does multiple calls of growSolve, as indicated by runs, and returns a clone of this object that has converged to the best solution found. This is without a doubt the best solving techneque provided by this method, just remember to use the default stickCap of 1 when setting up the object. Also be warned - this can take an aweful long time to produce its awesomness. Can return self, if the starting number of sticks is the best (Note that self will always be converged after this method returns.)."""
    self.solve()
    best = self
    bestNLL = self.nllData()

    for _ in range(runs):
      current = self
      lastScore = None

      while True:
        current = DPGMM(current)
        current.incStickCap()
        current.solve()
        score = current.nllData()

        if score<bestNLL:
          best = current
          bestNLL = score

        if lastScore is not None and score>lastScore: break
        lastScore = score

    return best
Python
# -*- coding: utf-8 -*-

# Copyright 2011 Tom SF Haines

# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at

#   http://www.apache.org/licenses/LICENSE-2.0

# Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.



import math

import numpy
import numpy.linalg
import numpy.random

from wishart import Wishart
from gaussian import Gaussian
from student_t import StudentT



class GaussianPrior:
  """The conjugate prior for the multivariate Gaussian distribution. Maintains the 4 values and supports various operations of interest - initialisation of prior, Bayesian update, drawing a Gaussian and calculating the probability of a data point comming from a Gaussian drawn from the distribution. Not a particularly efficient implimentation, and it has no numerical protection against extremelly large data sets. Interface is not entirly orthogonal, due to the demands of real world usage."""
  def __init__(self, dims):
    """Initialises with everything zeroed out, such that a prior must added before anything interesting is done. Supports cloning."""
    if isinstance(dims, GaussianPrior):
      # Copy constructor. (is-not-None test rather than !=, as shape is a matrix and elementwise comparison would break the conditional.)...
      self.invShape = dims.invShape.copy()
      self.shape = dims.shape.copy() if dims.shape is not None else None
      self.mu = dims.mu.copy()
      self.n = dims.n
      self.k = dims.k
    else:
      self.invShape = numpy.zeros((dims,dims), dtype=numpy.float32) # The inverse of lambda in the equations.
      self.shape = None # Cached value - inverse is considered primary.
      self.mu = numpy.zeros(dims, dtype=numpy.float32)
      self.n = 0.0
      self.k = 0.0

  def reset(self):
    """Resets as though there is no data, other than the dimensions of course."""
    self.invShape[:] = 0.0
    self.shape = None
    self.mu[:] = 0.0
    self.n = 0.0
    self.k = 0.0


  def addPrior(self, mean, covariance, weight = None):
    """Adds a prior to the structure, as an estimate of the mean and covariance matrix, with a weight which can be interpreted as how many samples that estimate is worth. Note the use of 'add' - you can call this after adding actual samples, or repeatedly. If weight is omitted it defaults to the number of dimensions, as the total weight in the system must match or excede this value before draws etc can be done."""
    if weight is None: weight = float(self.mu.shape[0])

    delta = mean - self.mu
    self.invShape += weight * covariance # *weight converts to a scatter matrix.
    self.invShape += ((self.k*weight)/(self.k+weight)) * numpy.outer(delta,delta)
    self.shape = None
    self.mu += (weight/(self.k+weight)) * delta
    self.n += weight
    self.k += weight


  def addSample(self, sample, weight=1.0):
    """Updates the prior given a single sample drawn from the Gaussian being estimated. Can have a weight provided, in which case it will be equivalent to repetition of that data point, where the repetition count can be fractional."""
    sample = numpy.asarray(sample, dtype=numpy.float32)
    if len(sample.shape)==0: sample.shape = (1,)

    delta = sample - self.mu
    self.invShape += (weight*self.k/(self.k+weight)) * numpy.outer(delta,delta)
    self.shape = None
    self.mu += delta * (weight / (self.k+weight))
    self.n += weight
    self.k += weight

  def remSample(self, sample):
    """Does the inverse of addSample, to in effect remove a previously added sample. Note that the issues of floating point (in-)accuracy mean its not perfect, and removing all samples is bad if there is no prior. Does not support weighting - effectvily removes a sample of weight 1."""
    sample = numpy.asarray(sample, dtype=numpy.float32)
    if len(sample.shape)==0: sample.shape = (1,)

    delta = sample - self.mu
    self.k -= 1.0
    self.n -= 1.0
    self.mu -= delta / self.k
    self.invShape -= ((self.k+1.0)/self.k) * numpy.outer(delta,delta)
    self.shape = None

  def addSamples(self, samples, weight = None):
    """Updates the prior given multiple samples drawn from the Gaussian being estimated. Expects a data matrix ([sample, position in sample]), or an object that numpy.asarray will interpret as such. Note that if you have only a few samples it might be faster to repeatedly call addSample, as this is designed to be efficient for hundreds+ of samples. You can optionally weight the samples, by providing an array to the weight parameter."""
    samples = numpy.asarray(samples, dtype=numpy.float32)

    # Calculate the mean and scatter matrices. (is-None test rather than ==, as weight may be an array.)...
    if weight is None:
      # Unweighted samples...
      d = self.mu.shape[0]
      num = samples.shape[0]
      mean = numpy.average(samples, axis=0)
      delta = samples - mean.reshape((1,-1)) # Fixed: this line was missing, so the next line raised a NameError.
      scatter = numpy.tensordot(delta, delta, ([0],[0]))
    else:
      # Weighted samples...
      d = self.mu.shape[0]
      num = weight.sum()
      mean = numpy.average(samples, axis=0, weights=weight)
      delta = samples - mean.reshape((1,-1))
      scatter = numpy.tensordot(weight.reshape((-1,1))*delta, delta, ([0],[0]))

    # Update parameters...
    delta = mean-self.mu
    self.invShape += scatter
    self.invShape += ((self.k*num)/(self.k+num)) * numpy.outer(delta,delta)
    self.shape = None
    self.mu += (num/(self.k+num)) * delta
    self.n += num
    self.k += num


  def addGP(self, gp):
    """Adds another Gaussian prior, combining the two."""
    delta = gp.mu - self.mu
    self.invShape += gp.invShape
    self.invShape += ((gp.k*self.k)/(gp.k+self.k)) * numpy.outer(delta,delta)
    self.shape = None
    self.mu += (gp.k/(self.k+gp.k)) * delta
    self.n += gp.n
    self.k += gp.k


  def make_safe(self):
    """Checks for a singular inverse shape matrix - if singular replaces it with the identity. Also makes sure n and k are not less than the number of dimensions, clamping them if need be. obviously the result of this is quite arbitary, but its better than getting a crash from bad data."""
    dims = self.mu.shape[0]

    det = math.fabs(numpy.linalg.det(self.invShape))
    if det<1e-3:
      self.invShape = numpy.identity(dims, dtype=numpy.float32)

    if self.n<dims: self.n = dims
    if self.k<1e-3: self.k = 1e-3

  def reweight(self, newN = None, newK = None):
    """A slightly cheaky method that reweights the gp such that it has the new values of n and k, effectivly adjusting the relevant weightings of the samples - can be useful for generating a prior for some GPs using the data stored in those GPs. If a new k is not provided it is set to n; if a new n is not provided it is set to the number of dimensions."""
    if newN is None: newN = float(self.mu.shape[0])
    if newK is None: newK = newN

    self.invShape *= newN / self.n
    self.shape = None
    self.n = newN
    self.k = newK


  def getN(self):
    """Returns n."""
    return self.n

  def getK(self):
    """Returns k."""
    return self.k

  def getMu(self):
    """Returns mu."""
    return self.mu

  def getLambda(self):
    """Returns lambda."""
    # Lazily invert and cache. (is-None test rather than ==, as shape is a matrix once calculated.)...
    if self.shape is None: self.shape = numpy.linalg.inv(self.invShape)
    return self.shape

  def getInverseLambda(self):
    """Returns the inverse of lambda."""
    return self.invShape


  def safe(self):
    """Returns true if it is possible to sample the prior, work out the probability of samples or work out the probability of samples being drawn from a collapsed sample - basically a test that there is enough information."""
    return self.n>=self.mu.shape[0] and self.k>0.0


  def prob(self, gauss):
    """Returns the probability of drawing the provided Gaussian from this prior."""
    d = self.mu.shape[0]
    wishart = Wishart(d)
    gaussian = Gaussian(d)

    wishart.setDof(self.n)
    wishart.setScale(self.getLambda())
    gaussian.setMean(self.mu)
    gaussian.setPrecision(self.k*gauss.getPrecision())

    return wishart.prob(gauss.getPrecision()) * gaussian.prob(gauss.getMean())

  def intProb(self):
    """Returns a multivariate student-t distribution object that gives the probability of drawing a sample from a Gaussian drawn from this prior, with the Gaussian integrated out. You may then call the prob method of this object on each sample obtained."""
    d = self.mu.shape[0]
    st = StudentT(d)

    dof = self.n-d+1.0
    st.setDOF(dof)
    st.setLoc(self.mu)
    mult = self.k*dof / (self.k+1.0)
    st.setInvScale(mult * self.getLambda())

    return st

  def sample(self):
    """Returns a Gaussian, drawn from this prior."""
    d = self.mu.shape[0]
    wishart = Wishart(d)
    gaussian = Gaussian(d)
    ret = Gaussian(d)

    wishart.setDof(self.n)
    wishart.setScale(self.getLambda())
    ret.setPrecision(wishart.sample())

    gaussian.setPrecision(self.k*ret.getPrecision())
    gaussian.setMean(self.mu)
    ret.setMean(gaussian.sample())

    return ret


  def __str__(self):
    return '{n:%f, k:%f, mu:%s, lambda:%s}'%(self.n, self.k, str(self.mu), str(self.getLambda()))
Python
# -*- coding: utf-8 -*-

# Copyright 2011 Tom SF Haines

# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at

#   http://www.apache.org/licenses/LICENSE-2.0

# Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.



import math

import numpy
import numpy.linalg
import scipy.special



class StudentT:
  """A feature incomplete multivariate student-t distribution object - at this time it only supports calculating the probability of a sample, and not the ability to make a draw."""
  def __init__(self, dims):
    """dims is the number of dimensions - initialises it to default values with the degrees of freedom set to 1, the location as the zero vector and the identity matrix for the scale. Supports copy construction."""
    if isinstance(dims, StudentT):
      # Copy construction - 'is not None' rather than '!=None' as scale/invScale are ndarrays, for which != broadcasts...
      self.dof = dims.dof
      self.loc = dims.loc.copy()
      self.scale = dims.scale.copy() if dims.scale is not None else None
      self.invScale = dims.invScale.copy() if dims.invScale is not None else None
      self.norm = dims.norm # A scalar (or None) - immutable, so no copy needed.
    else:
      self.dof = 1.0
      self.loc = numpy.zeros(dims, dtype=numpy.float32)
      self.scale = numpy.identity(dims, dtype=numpy.float32)
      self.invScale = None
      self.norm = None # Actually the log of the normalising constant.


  def setDOF(self, dof):
    """Sets the degrees of freedom."""
    self.dof = dof
    self.norm = None

  def setLoc(self, loc):
    """Sets the location vector."""
    l = numpy.array(loc, dtype=numpy.float32)
    assert(l.shape==self.loc.shape)
    self.loc = l
    # Note: the normalising constant does not depend on the location, so no reset needed.

  def setScale(self, scale):
    """Sets the scale matrix."""
    s = numpy.array(scale, dtype=numpy.float32)
    assert(s.shape==(self.loc.shape[0],self.loc.shape[0]))
    self.scale = s
    self.invScale = None
    self.norm = None

  def setInvScale(self, invScale):
    """Sets the scale matrix by providing its inverse."""
    i = numpy.array(invScale, dtype=numpy.float32)
    assert(i.shape==(self.loc.shape[0],self.loc.shape[0]))
    self.scale = None
    self.invScale = i
    self.norm = None


  def getDOF(self):
    """Returns the degrees of freedom."""
    return self.dof

  def getLoc(self):
    """Returns the location vector."""
    return self.loc

  def getScale(self):
    """Returns the scale matrix - calculated lazily from its inverse if needed."""
    if self.scale is None:
      self.scale = numpy.linalg.inv(self.invScale)
    return self.scale

  def getInvScale(self):
    """Returns the inverse of the scale matrix - calculated lazily if needed."""
    if self.invScale is None:
      self.invScale = numpy.linalg.inv(self.scale)
    return self.invScale


  def getLogNorm(self):
    """Returns the logarithm of the normalising constant of the distribution. Typically for internal use only."""
    if self.norm is None:
      d = self.loc.shape[0]
      self.norm = scipy.special.gammaln(0.5*(self.dof+d))
      self.norm -= scipy.special.gammaln(0.5*self.dof)
      self.norm -= math.log(self.dof*math.pi)*(0.5*d)
      self.norm += 0.5*math.log(numpy.linalg.det(self.getInvScale()))
    return self.norm


  def logProb(self, x):
    """Given a vector x returns the logarithm of the density function evaluated at that point."""
    x = numpy.asarray(x)
    d = self.loc.shape[0]

    delta = x - self.loc
    val = numpy.dot(delta,numpy.dot(self.getInvScale(),delta))
    val = 1.0 + val/self.dof

    return self.getLogNorm() + math.log(val)*(-0.5*(self.dof+d))

  def prob(self, x):
    """Given a vector x evaluates the density function at that point."""
    return math.exp(self.logProb(x)) # Was a duplicate of logProb - delegate instead.


  def batchLogProb(self, dm):
    """Same as batchProb, but returns the logarithm of the probability instead."""
    d = self.loc.shape[0]
    delta = dm - self.loc.reshape((1,d))

    if hasattr(numpy, 'einsum'): # Can go away when scipy older than 1.6 is no longer in use.
      val = numpy.einsum('kj,ij,ik->i', self.getInvScale(), delta, delta)
    else:
      val = ((self.getInvScale().reshape(1,d,d) * delta.reshape(dm.shape[0],1,d)).sum(axis=2) * delta).sum(axis=1)
    val = 1.0 + val/self.dof

    return self.getLogNorm() + numpy.log(val)*(-0.5*(self.dof+d))

  def batchProb(self, dm):
    """Given a data matrix evaluates the density function for each entry and returns the resulting array of probabilities."""
    return numpy.exp(self.batchLogProb(dm)) # Was a duplicate of batchLogProb - delegate instead.


  def __str__(self):
    return '{dof:%f, location:%s, scale:%s}'%(self.getDOF(), str(self.getLoc()), str(self.getScale()))
Python
# -*- coding: utf-8 -*-

# Copyright 2011 Tom SF Haines

# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at

#   http://www.apache.org/licenses/LICENSE-2.0

# Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.



import math
import random

import numpy
import numpy.linalg
import numpy.random
import scipy.special



class Wishart:
  """Simple Wishart distribution class, quite basic really, but has caching to avoid duplicate computation."""
  def __init__(self, dims):
    """dims is the number of dimensions - it initialises with the dof set to 1 and the scale set to the identity matrix. Has copy constructor support."""
    if isinstance(dims, Wishart):
      # Copy construction - 'is not None' rather than '!=None' as these are ndarrays, for which != broadcasts...
      self.dof = dims.dof
      self.scale = dims.scale.copy()
      self.invScale = dims.invScale.copy() if dims.invScale is not None else None
      self.norm = dims.norm
      self.cholesky = dims.cholesky.copy() if dims.cholesky is not None else None
    else:
      self.dof = 1.0
      self.scale = numpy.identity(dims, dtype=numpy.float32)
      self.invScale = None
      self.norm = None
      self.cholesky = None

  def setDof(self, dof):
    """Sets the degrees of freedom of the distribution."""
    self.dof = dof
    self.norm = None

  def setScale(self, scale):
    """Sets the scale matrix, must be symmetric positive definite."""
    ns = numpy.array(scale, dtype=numpy.float32)
    assert(ns.shape==self.scale.shape)
    self.scale = ns
    self.invScale = None
    self.norm = None
    self.cholesky = None

  def getDof(self):
    """Returns the degrees of freedom."""
    return self.dof

  def getScale(self):
    """Returns the scale matrix."""
    return self.scale

  def getInvScale(self):
    """Returns the inverse of the scale matrix - calculated lazily."""
    if self.invScale is None:
      self.invScale = numpy.linalg.inv(self.scale)
    return self.invScale

  def getNorm(self):
    """Returns the normalising constant of the distribution, typically not used by users. 1 / (2^{nd/2} |scale|^{n/2} Gamma_d(n/2))."""
    if self.norm is None:
      d = self.scale.shape[0]
      self.norm = math.pow(2.0,-0.5*self.dof*d)
      self.norm *= math.pow(numpy.linalg.det(self.scale),-0.5*self.dof)
      self.norm *= math.pow(math.pi,-0.25*d*(d-1))
      for i in range(d):
        self.norm /= scipy.special.gamma(0.5*(self.dof-i)) # Was the undefined name 'n' - NameError.
    return self.norm

  def prob(self, mat):
    """Returns the probability of the provided matrix, which must be the same shape as the scale matrix and also symmetric and positive definite."""
    d = self.scale.shape[0]
    val = math.pow(numpy.linalg.det(mat),0.5*(self.dof-1-d)) # Was the undefined name 'n' - NameError.
    val *= math.exp(-0.5 * numpy.trace(numpy.dot(mat,self.getInvScale()))) # numpy.linalg.trace does not exist - it is numpy.trace.
    return self.getNorm() * val

  def sample(self):
    """Returns a draw from the distribution - will be a symmetric positive definite matrix. Uses the Bartlett decomposition."""
    if self.cholesky is None:
      self.cholesky = numpy.linalg.cholesky(self.scale)

    d = self.scale.shape[0]
    a = numpy.zeros((d,d),dtype=numpy.float32)
    for r in range(d):
      if r!=0:
        a[r,:r] = numpy.random.normal(size=(r,))
      # Bartlett: diagonal entry r squared is chi-squared with dof-r degrees of freedom (chi2(k) == gamma(k/2, 2)) - the original used dof-d+1 for every row.
      a[r,r] = math.sqrt(random.gammavariate(0.5*(self.dof-r),2.0))

    return numpy.dot(numpy.dot(numpy.dot(self.cholesky,a),a.T),self.cholesky.T)

  def __str__(self):
    return '{dof:%f, scale:%s}'%(self.dof, str(self.scale))
Python
# -*- coding: utf-8 -*- # Copyright 2011 Tom SF Haines # Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at # http://www.apache.org/licenses/LICENSE-2.0 # Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. from gaussian import Gaussian from gaussian_inc import GaussianInc from wishart import Wishart from student_t import StudentT from gaussian_prior import GaussianPrior
Python
# -*- coding: utf-8 -*-

# Copyright 2011 Tom SF Haines

# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at

#   http://www.apache.org/licenses/LICENSE-2.0

# Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.



import math # Was missing - safe() and makeSafe() use math.fabs/math.copysign, which raised NameError.

import numpy
import numpy.linalg

from gaussian import Gaussian



class GaussianInc:
  """Allows you to incrementally calculate a Gaussian distribution by providing lots of samples."""
  def __init__(self, dims):
    """You provide the number of dimensions - you must add at least dims samples before there is the possibility of extracting a gaussian from this. Can also act as a copy constructor."""
    if isinstance(dims, GaussianInc):
      self.n = dims.n
      self.mean = dims.mean.copy()
      self.scatter = dims.scatter.copy()
    else:
      self.n = 0
      self.mean = numpy.zeros(dims, dtype=numpy.float32)
      self.scatter = numpy.zeros((dims,dims), dtype=numpy.float32)

  def add(self, sample, weight=1.0):
    """Updates the state given a new sample - sample can have a weight, which obviously defaults to 1, but can be set to other values to indicate repetition of a single point, including fractional."""
    sample = numpy.asarray(sample)

    # Sample count goes up...
    self.n += weight

    # Update mean vector...
    delta = sample - self.mean
    self.mean += delta*(weight/float(self.n))

    # Update scatter matrix (Yes, there is duplicated calculation here as it is symmetric, but who cares?)...
    self.scatter += weight * numpy.outer(delta, sample - self.mean)

  def safe(self):
    """Returns True if it has enough data to provide an actual Gaussian, False if it does not."""
    return math.fabs(numpy.linalg.det(self.scatter)) > 1e-6

  def makeSafe(self):
    """Bodges the internal representation so it can provide a non-singular covariance matrix - obviously a total hack, but potentially useful when insufficient information exists. Works by taking the svd, nudging zero entries away from 0 in the diagonal matrix, then multiplying the terms back together again. End result is arbitrary, but won't be inconsistent with the data provided."""
    u, s, v = numpy.linalg.svd(self.scatter)

    epsilon = 1e-5
    for i in range(s.shape[0]): # range rather than xrange - equivalent here, and Python 3 compatible.
      if math.fabs(s[i])<epsilon:
        s[i] = math.copysign(epsilon, s[i])

    self.scatter[:,:] = numpy.dot(u, numpy.dot(numpy.diag(s), v))

  def fetch(self):
    """Returns the Gaussian distribution calculated so far."""
    ret = Gaussian(self.mean.shape[0])
    ret.setMean(self.mean)
    ret.setCovariance(self.scatter/float(self.n))
    return ret
Python
# Copyright (c) 2012, Tom SF Haines # All rights reserved. # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. # * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. from start_cpp import start_cpp # Some basic matrix operations that come in use... matrix_code = start_cpp() + """ #ifndef MATRIX_CODE #define MATRIX_CODE template <typename T> inline void MemSwap(T * lhs, T * rhs, int count = 1) { while(count!=0) { T t = *lhs; *lhs = *rhs; *rhs = t; ++lhs; ++rhs; --count; } } // Calculates the determinant - you give it a pointer to the first elment of the array, and its size (It must be square), plus its stride, which would typically be identical to size, which is the default. 
template <typename T> inline T Determinant(T * pos, int size, int stride = -1) { if (stride==-1) stride = size; if (size==1) return pos[0]; else { if (size==2) return pos[0]*pos[stride+1] - pos[1]*pos[stride]; else { T ret = 0.0; for (int i=0; i<size; i++) { if (i!=0) MemSwap(&pos[0], &pos[stride*i], size-1); T sub = Determinant(&pos[stride], size-1, stride) * pos[stride*i + size-1]; if ((i+size)%2) ret += sub; else ret -= sub; } for (int i=1; i<size; i++) { MemSwap(&pos[(i-1)*stride], &pos[i*stride], size-1); } return ret; } } } // Inverts a square matrix, will fail on singular and very occasionally on // non-singular matrices, returns true on success. Uses Gauss-Jordan elimination // with partial pivoting. // in is the input matrix, out the output matrix, just be aware that the input matrix is trashed. // You have to provide its size (Its square, obviously.), and optionally a stride if different from size. template <typename T> inline bool Inverse(T * in, T * out, int size, int stride = -1) { if (stride==-1) stride = size; for (int r=0; r<size; r++) { for (int c=0; c<size; c++) { out[r*stride + c] = (c==r)?1.0:0.0; } } for (int r=0; r<size; r++) { // Find largest pivot and swap in, fail if best we can get is 0... T max = in[r*stride + r]; int index = r; for (int i=r+1; i<size; i++) { if (fabs(in[i*stride + r])>fabs(max)) { max = in[i*stride + r]; index = i; } } if (index!=r) { MemSwap(&in[index*stride], &in[r*stride], size); MemSwap(&out[index*stride], &out[r*stride], size); } if (fabs(max-0.0)<1e-6) return false; // Divide through the entire row... max = 1.0/max; in[r*stride + r] = 1.0; for (int i=r+1; i<size; i++) in[r*stride + i] *= max; for (int i=0; i<size; i++) out[r*stride + i] *= max; // Row subtract to generate 0's in the current column, so it matches an identity matrix... 
for (int i=0; i<size; i++) { if (i==r) continue; T factor = in[i*stride + r]; in[i*stride + r] = 0.0; for (int j=r+1; j<size; j++) in[i*stride + j] -= factor * in[r*stride + j]; for (int j=0; j<size; j++) out[i*stride + j] -= factor * out[r*stride + j]; } } return true; } #endif """
Python
# -*- coding: utf-8 -*-

# Copyright (c) 2010, Tom SF Haines
# All rights reserved.

# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:

#   * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.

#   * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.

# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.



import sys
import time



class ProgBar:
  """Simple console progress bar class. Note that object creation and destruction matter, as they indicate when processing starts and when it stops."""
  def __init__(self, width = 60, onCallback = None):
    """Records the start time, draws the bar's empty outline and primes the fill level at zero. onCallback, if given, is invoked every time callback is."""
    self.start = time.time()
    self.fill = 0
    self.width = width
    self.onCallback = onCallback

    sys.stdout.write(('_'*self.width)+'\n')
    sys.stdout.flush()

  def __del__(self):
    """Completes the bar and reports the total elapsed time."""
    self.end = time.time()
    self._paint(self.width)

    sys.stdout.write('\nDone - '+str(self.end-self.start)+' seconds\n\n')
    sys.stdout.flush()

  def callback(self, nDone, nToDo):
    """Hand this into the callback of methods to get a progress bar - it works by users repeatedly calling it to indicate how many units of work they have done (nDone) out of the total number of units required (nToDo)."""
    if self.onCallback:
      self.onCallback()

    # Translate the done/to-do ratio into a target fill level, clamped to the bar width, and only ever draw forward...
    target = min(int(float(self.width)*float(nDone)/float(nToDo)), self.width)
    if target>self.fill:
      self._paint(target)

  def _paint(self, n):
    # Emits however many bar characters are needed to advance the fill from its current level to n.
    sys.stdout.write('|'*(n-self.fill))
    sys.stdout.flush()
    self.fill = n
Python
# Copyright (c) 2012, Tom SF Haines # All rights reserved. # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. # * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. import pydoc import inspect class DocGen: """A helper class that is used to generate documentation for the system. Outputs multiple formats simultaneously, specifically html for local reading with a webbrowser and the markup used by the wiki system on Google code.""" def __init__(self, name, title = None, summary = None): """name is the module name - primarilly used for the file names. title is the title used as applicable - if not provide it just uses the name. 
summary is an optional line to go below the title.""" if title==None: title = name if summary==None: summary = title self.doc = pydoc.HTMLDoc() self.html = open('%s.html'%name,'w') self.html.write('<html>\n') self.html.write('<head>\n') self.html.write('<title>%s</title>\n'%title) self.html.write('</head>\n') self.html.write('<body>\n') self.html_variables = '' self.html_functions = '' self.html_classes = '' self.wiki = open('%s.wiki'%name,'w') self.wiki.write('#summary %s\n\n'%summary) self.wiki.write('= %s= \n\n'%title) self.wiki_variables = '' self.wiki_functions = '' self.wiki_classes = '' def __del__(self): if self.html_variables!='': self.html.write(self.doc.bigsection('Synonyms', '#ffffff', '#8d50ff', self.html_variables)) if self.html_functions!='': self.html.write(self.doc.bigsection('Functions', '#ffffff', '#eeaa77', self.html_functions)) if self.html_classes!='': self.html.write(self.doc.bigsection('Classes', '#ffffff', '#ee77aa', self.html_classes)) self.html.write('</body>\n') self.html.write('</html>\n') self.html.close() if self.wiki_variables!='': self.wiki.write('= Variables =\n\n') self.wiki.write(self.wiki_variables) self.wiki.write('\n') if self.wiki_functions!='': self.wiki.write('= Functions =\n\n') self.wiki.write(self.wiki_functions) self.wiki.write('\n') if self.wiki_classes!='': self.wiki.write('= Classes =\n\n') self.wiki.write(self.wiki_classes) self.wiki.write('\n') self.wiki.close() def addFile(self, fn, title, fls = True): """Given a filename and section title adds the contents of said file to the output. 
Various flags influence how this works.""" html = [] wiki = [] for i, line in enumerate(open(fn,'r').readlines()): hl = line.replace('\n', '') if i==0 and fls: hl = '<strong>' + hl + '</strong>' for ext in ['py','txt']: if '.%s - '%ext in hl: s = hl.split('.%s - '%ext, 1) hl = '<i>' + s[0] + '.%s</i> - '%ext + s[1] html.append(hl) wl = line.strip() if i==0 and fls: wl = '*%s*'%wl for ext in ['py','txt']: if '.%s - '%ext in wl: s = wl.split('.%s - '%ext, 1) wl = '`' + s[0] + '.%s` - '%ext + s[1] + '\n' wiki.append(wl) self.html.write(self.doc.bigsection(title, '#ffffff', '#7799ee', '<br/>'.join(html))) self.wiki.write('== %s ==\n'%title) self.wiki.write('\n'.join(wiki)) self.wiki.write('----\n\n') def addVariable(self, var, desc): """Adds a variable to the documentation. Given the nature of this you provide it as a pair of strings - one referencing the variable, the other some kind of description of its use etc..""" self.html_variables += '<strong>%s</strong><br/>'%var self.html_variables += '%s<br/><br/>\n'%desc self.wiki_variables += '*`%s`*\n'%var self.wiki_variables += ' %s\n\n'%desc def addFunction(self, func): """Adds a function to the documentation. You provide the actual function instance.""" self.html_functions += self.doc.docroutine(func).replace('&nbsp;',' ') self.html_functions += '\n' name = func.__name__ args, varargs, keywords, defaults = inspect.getargspec(func) doc = inspect.getdoc(func) if defaults==None: defaults = list() defaults = (len(args)-len(defaults)) * [None] + list(defaults) arg_str = '' if len(args)!=0: arg_str += reduce(lambda a, b: '%s, %s'%(a,b), map(lambda arg, d: arg if d==None else '%s = %s'%(arg,d), args, defaults)) if varargs!=None: arg_str += ', *%s'%varargs if arg_str!='' else '*%s'%varargs if keywords!=None: arg_str += ', **%s'%keywords if arg_str!='' else '**%s'%keywords self.wiki_functions += '*`%s(%s)`*\n'%(name, arg_str) self.wiki_functions += ' %s\n\n'%doc def addClass(self, cls): """Adds a class to the documentation. 
You provide the actual class object.""" self.html_classes += self.doc.docclass(cls).replace('&nbsp;',' ') self.html_classes += '\n' name = cls.__name__ parents = filter(lambda a: a!=cls, inspect.getmro(cls)) doc = inspect.getdoc(cls) par_str = '' if len(parents)!=0: par_str += reduce(lambda a, b: '%s, %s'%(a,b), map(lambda p: p.__name__, parents)) self.wiki_classes += '== %s(%s) ==\n'%(name, par_str) self.wiki_classes += ' %s\n\n'%doc methods = inspect.getmembers(cls, lambda x: inspect.ismethod(x) or inspect.isbuiltin(x) or inspect.isroutine(x)) def method_key(pair): if pair[0]=='__init__': return '___' else: return pair[0] methods.sort(key=method_key) for name, method in methods: if not name.startswith('_%s'%cls.__name__) and (not inspect.ismethod(method) and name[:2]!='__'): if inspect.ismethod(method): args, varargs, keywords, defaults = inspect.getargspec(method) else: args = ['?'] varargs = None keywords = None defaults = None if defaults==None: defaults = list() defaults = (len(args)-len(defaults)) * [None] + list(defaults) arg_str = '' if len(args)!=0: arg_str += reduce(lambda a, b: '%s, %s'%(a,b), map(lambda arg, d: arg if d==None else '%s = %s'%(arg,d), args, defaults)) if varargs!=None: arg_str += ', *%s'%varargs if arg_str!='' else '*%s'%varargs if keywords!=None: arg_str += ', **%s'%keywords if arg_str!='' else '**%s'%keywords def fetch_doc(cls, name): try: method = getattr(cls, name) if method.__doc__!=None: return inspect.getdoc(method) except: pass for parent in filter(lambda a: a!=cls, inspect.getmro(cls)): ret = fetch_doc(parent, name) if ret!=None: return ret return None doc = fetch_doc(cls, name) self.wiki_classes += '*`%s(%s)`*\n'%(name, arg_str) self.wiki_classes += ' %s\n\n'%doc variables = inspect.getmembers(cls, lambda x: inspect.ismemberdescriptor(x) or isinstance(x, int) or isinstance(x, str) or isinstance(x, float)) for name, var in variables: if not name.startswith('__'): if hasattr(var, '__doc__'): d = var.__doc__ else: d = str(var) 
self.wiki_classes += '*`%s`* = %s\n\n'%(name, d)
Python
# Copyright (c) 2011, Tom SF Haines # All rights reserved. # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. # * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. import unittest import random import math from scipy.special import gammaln, psi, polygamma from scipy import weave from utils.start_cpp import start_cpp # Provides various gamma-related functions... gamma_code = start_cpp() + """ #ifndef GAMMA_CODE #define GAMMA_CODE #include <cmath> // Returns the natural logarithm of the Gamma function... // (Uses Lanczos's approximation.) double lnGamma(double z) { static const double coeff[9] = {0.99999999999980993, 676.5203681218851, -1259.1392167224028, 771.32342877765313, -176.61502916214059, 12.507343278686905, -0.13857109526572012, 9.9843695780195716e-6, 1.5056327351493116e-7}; if (z<0.5) { // Use reflection formula, as approximation doesn't work down here... 
return log(M_PI) - log(sin(M_PI*z)) - lnGamma(1.0-z); } else { double x = coeff[0]; for (int i=1;i<9;i++) x += coeff[i]/(z+i-1); double t = z + 6.5; return log(sqrt(2.0*M_PI)) + (z-0.5)*log(t) - t + log(x); } } // Calculates the Digamma function, i.e. the derivative of the log of the Gamma function - uses a partial expansion of an infinite series to 4 terms that is good for high values, and an identity to express lower values in terms of higher values... double digamma(double z) { static const double highVal = 13.0; // A bit of fiddling shows that the last term with this is of the order 1e-10, so we can expect at least 9 digits of accuracy past the decimal point. double ret = 0.0; while (z<highVal) { ret -= 1.0/z; z += 1.0; } double iz1 = 1.0/z; double iz2 = iz1*iz1; double iz4 = iz2*iz2; double iz6 = iz4*iz2; ret += log(z) - iz1/2.0 - iz2/12.0 + iz4/120.0 - iz6/252.0; return ret; } // Calculates the trigamma function - uses a partial expansion of an infinite series that is accurate for large values, and then uses an identity to express lower values in terms of higher values - same approach as for the digamma function basically... double trigamma(double z) { static const double highVal = 8.0; double ret = 0.0; while (z<highVal) { ret += 1.0/(z*z); z += 1.0; } z -= 1.0; double iz1 = 1.0/z; double iz2 = iz1*iz1; double iz3 = iz1*iz2; double iz5 = iz3*iz2; double iz7 = iz5*iz2; double iz9 = iz7*iz2; ret += iz1 - 0.5*iz2 + iz3/6.0 - iz5/30.0 + iz7/42.0 - iz9/30.0; return ret; } #endif """ def lnGamma(z): """Pointless as scipy, a library this is dependent on, defines this, but useful for testing. Returns the logorithm of the gamma function""" code = start_cpp(gamma_code) + """ return_val = lnGamma(z); """ return weave.inline(code, ['z'], support_code=gamma_code) def digamma(z): """Pointless as scipy, a library this is dependent on, defines this, but useful for testing. 
Returns an evaluation of the digamma function""" code = start_cpp(gamma_code) + """ return_val = digamma(z); """ return weave.inline(code, ['z'], support_code=gamma_code) def trigamma(z): """Pointless as scipy, a library this is dependent on, defines this, but useful for testing. Returns an evaluation of the trigamma function""" code = start_cpp(gamma_code) + """ return_val = trigamma(z); """ return weave.inline(code, ['z'], support_code=gamma_code) class TestFuncs(unittest.TestCase): """Test code for the assorted gamma-related functions.""" def test_compile(self): code = start_cpp(gamma_code) + """ """ weave.inline(code, support_code=gamma_code) def test_error_lngamma(self): for _ in xrange(1000): z = random.uniform(0.01, 100.0) own = lnGamma(z) good = gammaln(z) assert(math.fabs(own-good)<1e-12) def test_error_digamma(self): for _ in xrange(1000): z = random.uniform(0.01, 100.0) own = digamma(z) good = psi(z) assert(math.fabs(own-good)<1e-9) def test_error_trigamma(self): for _ in xrange(1000): z = random.uniform(0.01, 100.0) own = trigamma(z) good = polygamma(1,z) assert(math.fabs(own-good)<1e-9) # If this file is run do the unit tests... if __name__ == '__main__': unittest.main()
Python
# -*- coding: utf-8 -*-

# Copyright (c) 2010, Tom SF Haines
# All rights reserved.

# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.

# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.


import inspect
import hashlib


def start_cpp(hash_str = None):
    """Returns a C preprocessor '#line' directive string to prepend to scipy.weave code.

    This does two things. Firstly it adds the correct line numbers to
    scipy.weave code, so compiler errors point back at the Python source
    (good for debugging). Secondly it can optionally embed a hash code of
    some other code into the returned string. This latter feature works
    around the fact that scipy.weave only recompiles if the hash of the
    main code changes, but ignores the support_code - passing the
    support_code into start_cpp puts its hash into the code and forces a
    recompile whenever that code changes.

    hash_str: optional string (typically the support_code) to hash into
    the directive; None to omit.

    Usage is <code variable> = start_cpp([support_code variable]) + <3
    quotations to start big comment with code in, typically going over
    many lines.>"""
    # Identify the *caller's* file and line, so the #line directive refers
    # to the Python source that contains the C code...
    frame = inspect.currentframe().f_back
    info = inspect.getframeinfo(frame)

    # 'is None' rather than '==None' - identity test is the correct (and
    # PEP 8 mandated) way to test for None; '==' can invoke an arbitrary
    # __eq__ on the argument.
    if hash_str is None:
        return '#line %i "%s"\n' % (info[1], info[0])
    else:
        # The hash rides along as a trailing C++ comment, so it changes the
        # code string (forcing weave to recompile) without changing what the
        # compiler actually sees...
        h = hashlib.md5()
        h.update(hash_str)
        hash_val = h.hexdigest()
        return '#line %i "%s" // %s\n' % (info[1], info[0], hash_val)
Python
# -*- coding: utf-8 -*- # Code copied from http://opencv.willowgarage.com/wiki/PythonInterface - license unknown, but presumed to be at least as liberal as bsd (The license for opencv.). import cv import numpy as np def cv2array(im): """Converts a cv array to a numpy array.""" depth2dtype = { cv.IPL_DEPTH_8U: 'uint8', cv.IPL_DEPTH_8S: 'int8', cv.IPL_DEPTH_16U: 'uint16', cv.IPL_DEPTH_16S: 'int16', cv.IPL_DEPTH_32S: 'int32', cv.IPL_DEPTH_32F: 'float32', cv.IPL_DEPTH_64F: 'float64', } arrdtype=im.depth a = np.fromstring( im.tostring(), dtype=depth2dtype[im.depth], count=im.width*im.height*im.nChannels) a.shape = (im.height,im.width,im.nChannels) return a def array2cv(a): """Converts a numpy array to a cv array, if possible.""" dtype2depth = { 'uint8': cv.IPL_DEPTH_8U, 'int8': cv.IPL_DEPTH_8S, 'uint16': cv.IPL_DEPTH_16U, 'int16': cv.IPL_DEPTH_16S, 'int32': cv.IPL_DEPTH_32S, 'float32': cv.IPL_DEPTH_32F, 'float64': cv.IPL_DEPTH_64F, } try: nChannels = a.shape[2] except: nChannels = 1 cv_im = cv.CreateImageHeader((a.shape[1],a.shape[0]), dtype2depth[str(a.dtype)], nChannels) cv.SetData(cv_im, a.tostring(), a.dtype.itemsize*nChannels*a.shape[1]) return cv_im
Python
# -*- coding: utf-8 -*-

# Copyright (c) 2011, Tom SF Haines
# All rights reserved.

# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.

# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.


import multiprocessing as mp
import multiprocessing.synchronize # To make sure we have all the functionality.

import types
import marshal

import unittest


def repeat(x):
    """A generator that repeats the input forever - can be used with the mp_map function to give data to a function that is constant."""
    while True:
        yield x


def run_code(code, args):
    """Internal use function that does the work in each process - rebuilds the function from its marshaled code object and calls it."""
    code = marshal.loads(code)
    func = types.FunctionType(code, globals(), '_')
    return func(*args)


def mp_map(func, *iters, **keywords):
    """A multiprocess version of the map function.

    Note that func must limit itself to the data provided - if it accesses
    anything else (globals, locals to its definition.) it will fail. There is
    a repeat generator provided in this module to work around such issues.
    Note that, unlike map, this iterates the length of the shortest of
    inputs, rather than the longest - whilst this makes it not a perfect
    substitute it makes passing constant arguments easier as they can just
    repeat for infinity. An existing multiprocessing.Pool can be passed in
    via the 'pool' keyword, otherwise one is created (and cleaned up) for
    the duration of the call."""
    # Use the caller's pool if one is provided; otherwise create our own,
    # and remember that we own it so we can shut it down afterwards (the
    # previous version leaked a pool per call)...
    owns_pool = 'pool' not in keywords
    pool = mp.Pool() if owns_pool else keywords['pool']

    try:
        # __code__ rather than the Python-2-only func_code attribute - both
        # spellings work on Python 2.6+, only __code__ works on Python 3...
        code = marshal.dumps(func.__code__)

        jobs = [pool.apply_async(run_code, (code, args)) for args in zip(*iters)]
        return [job.get() for job in jobs]
    finally:
        # Only tidy up a pool we created ourselves - a caller-supplied pool
        # is theirs to manage...
        if owns_pool:
            pool.close()
            pool.join()


class TestMpMap(unittest.TestCase):
    def test_simple1(self):
        data = ['a','b','c','d']
        def noop(data):
            return data
        data_noop = mp_map(noop, data)
        self.assertEqual(data, data_noop)

    def test_simple2(self):
        data = [x for x in range(1000)]
        data_double = mp_map(lambda a: a*2, data)
        self.assertEqual(list(map(lambda a: a*2, data)), data_double)

    def test_gen(self):
        def gen():
            for i in range(100):
                yield i
        data_double = mp_map(lambda a: a*2, gen())
        self.assertEqual(list(map(lambda a: a*2, gen())), data_double)

    def test_repeat(self):
        def mult(a,b):
            return a*b
        data = [x for x in range(50,5000,5)]
        data_triple = mp_map(mult, data, repeat(3))
        self.assertEqual(list(map(lambda a: a*3, data)), data_triple)

    def test_none(self):
        data = []
        data_sqr = mp_map(lambda x: x*x, data)
        self.assertEqual([], data_sqr)


if __name__ == '__main__':
    unittest.main()
Python
# Copyright (c) 2012, Tom SF Haines
# All rights reserved.

# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.

# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.


import sys
import os.path
import tempfile
import shutil

from distutils.core import setup, Extension
import distutils.ccompiler
import distutils.dep_util


# Probe for a usable compiler up front - make_mod raises if there is none.
# 'except Exception' rather than a bare except, so KeyboardInterrupt etc.
# still propagate...
try:
    __default_compiler = distutils.ccompiler.new_compiler()
except Exception:
    __default_compiler = None


def make_mod(name, base, source, openCL = False):
    """Uses distutils to compile a python module - really just a set of hacks
    to allow this to be done 'on demand', so it only compiles if the module
    does not exist or is older than the current source, and after compilation
    the program can continue on its merry way, and immediately import the
    just compiled module. Note that on failure errors can be thrown - its
    your choice to catch them or not.

    name is the modules name, i.e. what you want to use with the import
    statement.
    base is the base directory for the module, which contains the source file
    - often you would want to set this to 'os.path.dirname(__file__)',
    assuming the .py file that imports the module is in the same directory as
    the code. It is this directory that the module is output to.
    source is the filename of the source code to compile, or alternatively a
    list of filenames.
    openCL indicates if OpenCL is used by the module, in which case it does
    all the necessary setup - done like this so these settings can be kept
    centralised, so when they need to be different for a new platform they
    only have to be changed in one place."""
    if __default_compiler is None:
        raise Exception('No compiler!')

    # Work out the various file names - check if we actually need to do anything...
    if not isinstance(source, list):
        source = [source]
    source_path = [os.path.join(base, s) for s in source]
    library_path = os.path.join(base, __default_compiler.shared_object_filename(name))

    # any() short-circuits, unlike the previous reduce(or) which evaluated
    # every term (and blew up on an empty source list)...
    if any(distutils.dep_util.newer(s, library_path) for s in source_path):
        # Backup the argv variable and create a temporary directory to do all
        # work in - both done *before* the try so the finally block never
        # references an unassigned name if either step fails...
        old_argv = sys.argv[:]
        temp_dir = tempfile.mkdtemp()

        try:
            # Prepare the extension - distutils is driven via argv, hence the
            # fake command line...
            sys.argv = ['', 'build_ext', '--build-lib', base, '--build-temp', temp_dir]

            comp_path = [s for s in source_path if not s.endswith('.h')]
            depends = [s for s in source_path if s.endswith('.h')]

            if openCL:
                ext = Extension(name, comp_path, include_dirs=['/usr/local/cuda/include', '/opt/AMDAPP/include'], libraries = ['OpenCL'], library_dirs = ['/usr/lib64/nvidia', '/opt/AMDAPP/lib/x86_64'], depends=depends)
            else:
                ext = Extension(name, comp_path, depends=depends)

            # Compile...
            setup(name=name, version='1.0.0', ext_modules=[ext])
        finally:
            # Cleanup the argv variable and the temporary directory...
            sys.argv = old_argv
            shutil.rmtree(temp_dir, True)
Python
# -*- coding: utf-8 -*-

# Copyright (c) 2011, Tom SF Haines
# All rights reserved.

# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.

# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.


from start_cpp import start_cpp
from numpy_help_cpp import numpy_util_code


# Provides various functions to assist with manipulating python objects from c++ code.
# The value is a C++ source string intended to be passed to scipy.weave as
# support_code - it prepends numpy_util_code (for the Byte1D/Float1D helpers
# used below) plus a #line directive from start_cpp. Every helper fetches a
# named attribute from a python object with PyObject_GetAttrString and
# releases the reference with Py_DECREF before returning.
# NOTE(review): the Get* helpers assume the named attribute exists and has
# the expected type - there is no NULL check on the PyObject_GetAttrString
# result; callers must ensure the attribute is present.
python_obj_code = numpy_util_code + start_cpp() + """
#ifndef PYTHON_OBJ_CODE
#define PYTHON_OBJ_CODE

// Extracts a boolean from an object...
bool GetObjectBoolean(PyObject * obj, const char * name)
{
 PyObject * b = PyObject_GetAttrString(obj, name);
 bool ret = b!=Py_False;
 Py_DECREF(b);
 return ret;
}

// Extracts an int from an object...
int GetObjectInt(PyObject * obj, const char * name)
{
 PyObject * i = PyObject_GetAttrString(obj, name);
 int ret = PyInt_AsLong(i);
 Py_DECREF(i);
 return ret;
}

// Extracts a float from an object...
float GetObjectFloat(PyObject * obj, const char * name)
{
 PyObject * f = PyObject_GetAttrString(obj, name);
 float ret = PyFloat_AsDouble(f);
 Py_DECREF(f);
 return ret;
}

// Extracts an array from an object, returning it as a new[] unsigned char array. You can also pass in a pointer to an int to have the size of the array stored...
unsigned char * GetObjectByte1D(PyObject * obj, const char * name, int * size = 0)
{
 PyArrayObject * nao = (PyArrayObject*)PyObject_GetAttrString(obj, name);
 unsigned char * ret = new unsigned char[nao->dimensions[0]];
 if (size) *size = nao->dimensions[0];
 for (int i=0;i<nao->dimensions[0];i++) ret[i] = Byte1D(nao,i);
 Py_DECREF(nao);
 return ret;
}

// Extracts an array from an object, returning it as a new[] float array. You can also pass in a pointer to an int to have the size of the array stored...
float * GetObjectFloat1D(PyObject * obj, const char * name, int * size = 0)
{
 PyArrayObject * nao = (PyArrayObject*)PyObject_GetAttrString(obj, name);
 float * ret = new float[nao->dimensions[0]];
 if (size) *size = nao->dimensions[0];
 for (int i=0;i<nao->dimensions[0];i++) ret[i] = Float1D(nao,i);
 Py_DECREF(nao);
 return ret;
}

#endif
"""
Python
# -*- coding: utf-8 -*- # Copyright (c) 2011, Tom SF Haines # All rights reserved. # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. # * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. from start_cpp import start_cpp # Defines helper functions for accessing numpy arrays... 
numpy_util_code = start_cpp() + """ #ifndef NUMPY_UTIL_CODE #define NUMPY_UTIL_CODE float & Float1D(PyArrayObject * arr, int index = 0) { return *(float*)(arr->data + index*arr->strides[0]); } float & Float2D(PyArrayObject * arr, int index1 = 0, int index2 = 0) { return *(float*)(arr->data + index1*arr->strides[0] + index2*arr->strides[1]); } float & Float3D(PyArrayObject * arr, int index1 = 0, int index2 = 0, int index3 = 0) { return *(float*)(arr->data + index1*arr->strides[0] + index2*arr->strides[1] + index3*arr->strides[2]); } unsigned char & Byte1D(PyArrayObject * arr, int index = 0) { //assert(arr->strides[0]==sizeof(unsigned char)); return *(unsigned char*)(arr->data + index*arr->strides[0]); } unsigned char & Byte2D(PyArrayObject * arr, int index1 = 0, int index2 = 0) { //assert(arr->strides[0]==sizeof(unsigned char)); return *(unsigned char*)(arr->data + index1*arr->strides[0] + index2*arr->strides[1]); } unsigned char & Byte3D(PyArrayObject * arr, int index1 = 0, int index2 = 0, int index3 = 0) { //assert(arr->strides[0]==sizeof(unsigned char)); return *(unsigned char*)(arr->data + index1*arr->strides[0] + index2*arr->strides[1] + index3*arr->strides[2]); } int & Int1D(PyArrayObject * arr, int index = 0) { //assert(arr->strides[0]==sizeof(int)); return *(int*)(arr->data + index*arr->strides[0]); } int & Int2D(PyArrayObject * arr, int index1 = 0, int index2 = 0) { //assert(arr->strides[0]==sizeof(int)); return *(int*)(arr->data + index1*arr->strides[0] + index2*arr->strides[1]); } int & Int3D(PyArrayObject * arr, int index1 = 0, int index2 = 0, int index3 = 0) { //assert(arr->strides[0]==sizeof(int)); return *(int*)(arr->data + index1*arr->strides[0] + index2*arr->strides[1] + index3*arr->strides[2]); } #endif """
Python
#! /usr/bin/env python
# -*- coding: utf-8 -*-

# Copyright (c) 2011, Tom SF Haines
# All rights reserved.

# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.

# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.


import cvarray
import mp_map
import prog_bar
import numpy_help_cpp
import python_obj_cpp
import matrix_cpp
import gamma_cpp
import setProcName
import start_cpp
import make

import doc_gen


# Setup - generates the documentation for the utils module...
doc = doc_gen.DocGen('utils', 'Utilities/Miscellaneous', 'Library of miscellaneous stuff - most modules depend on this.')
doc.addFile('readme.txt', 'Overview')


# Variables...
doc.addVariable('numpy_help_cpp.numpy_util_code', 'Assorted utility functions for accessing numpy arrays within scipy.weave C++ code.')
doc.addVariable('python_obj_cpp.python_obj_code', 'Assorted utility functions for interfacing with python objects from scipy.weave C++ code.')
doc.addVariable('matrix_cpp.matrix_code', 'Matrix manipulation routines for use in scipy.weave C++')
doc.addVariable('gamma_cpp.gamma_code', 'Gamma and related functions for use in scipy.weave C++')


# Functions... (make.make_mod was previously registered twice - once here and
# once at the end of the list; the duplicate has been removed.)
doc.addFunction(make.make_mod)
doc.addFunction(cvarray.cv2array)
doc.addFunction(cvarray.array2cv)
doc.addFunction(mp_map.repeat)
doc.addFunction(mp_map.mp_map)
doc.addFunction(setProcName.setProcName)
doc.addFunction(start_cpp.start_cpp)


# Classes...
doc.addClass(prog_bar.ProgBar)
doc.addClass(doc_gen.DocGen)
Python
#! /usr/bin/env python
# -*- coding: utf-8 -*-

# Copyright (c) 2010, Tom SF Haines
# All rights reserved.

# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.

# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.


from ctypes import *


def setProcName(name):
    """Sets the process name, linux only - useful for those programs where you might want to do a killall, but don't want to slaughter all the other python processes. Note that there are multiple mechanisms, and that the given new name can be shortened by differing amounts in differing cases."""
    # Call the process control function...
    # (prctl option 15 is PR_SET_NAME on Linux, which renames the task as
    # seen by e.g. killall; the kernel truncates the name it stores -
    # presumably to 15 characters plus NUL - TODO confirm against prctl(2).)
    libc = cdll.LoadLibrary('libc.so.6')
    libc.prctl(15, c_char_p(name), 0, 0, 0)

    # Update argv...
    # Overwrites the original argv[0] string in place, reached through
    # glibc's _dl_argv symbol, so readers of the command line (e.g. ps) see
    # the new name. strncpy with the *old* argv[0]'s length means the new
    # name is clipped to fit the existing buffer - it cannot grow.
    # NOTE(review): _dl_argv is a private glibc symbol; this will break on
    # other libc implementations.
    charPP = POINTER(POINTER(c_char))
    argv = charPP.in_dll(libc,'_dl_argv')
    size = libc.strlen(argv[0])
    libc.strncpy(argv[0],c_char_p(name),size)


if __name__=='__main__':
    # Quick test that it works - show the process list before and after the
    # rename so the change is visible...
    import os
    ps1 = 'ps'
    ps2 = 'ps -f'

    os.system(ps1)
    os.system(ps2)

    setProcName('wibble_wobble')

    os.system(ps1)
    os.system(ps2)
Python
#! /usr/bin/env python # -*- coding: utf-8 -*- # Copyright 2011 Tom SF Haines # Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at # http://www.apache.org/licenses/LICENSE-2.0 # Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. import gcp from utils import doc_gen # Setup... doc = doc_gen.DocGen('gcp', 'Gaussian Conjugate Prior', 'Library of distributions focused on the Gaussian and its conjugate prior') doc.addFile('readme.txt', 'Overview') # Classes... doc.addClass(gcp.Gaussian) doc.addClass(gcp.GaussianInc) doc.addClass(gcp.Wishart) doc.addClass(gcp.StudentT) doc.addClass(gcp.GaussianPrior)
Python
# -*- coding: utf-8 -*-

# Copyright 2011 Tom SF Haines

# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at

# http://www.apache.org/licenses/LICENSE-2.0

# Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.


import math

import numpy
import numpy.linalg
import numpy.random


class Gaussian:
    """A basic multivariate Gaussian class. Has caching to avoid duplicate calculation.

    Note: all None tests use 'is None' / 'is not None'. The previous
    '==None' / '!=None' comparisons are elementwise when the member holds a
    numpy array, and using the resulting array as a truth value raises a
    ValueError - which broke the caching paths and the copy constructor."""

    def __init__(self, dims):
        """dims is the number of dimensions. Initialises with mu at the origin and the identity matrix for the precision/covariance. dims can also be another Gaussian object, in which case it acts as a copy constructor."""
        if isinstance(dims, Gaussian):
            self.mean = dims.mean.copy()
            self.precision = dims.precision.copy() if dims.precision is not None else None
            self.covariance = dims.covariance.copy() if dims.covariance is not None else None
            self.norm = dims.norm
            self.cholesky = dims.cholesky.copy() if dims.cholesky is not None else None
        else:
            self.mean = numpy.zeros(dims, dtype=numpy.float32)
            self.precision = numpy.identity(dims, dtype=numpy.float32)
            self.covariance = None  # Lazily derived from precision when needed.
            self.norm = None        # Cached normalising constant.
            self.cholesky = None    # Cached Cholesky factor of the covariance, for sampling.

    def setMean(self, mean):
        """Sets the mean - you can use anything numpy will interpret as a 1D array of the correct length."""
        nm = numpy.array(mean, dtype=numpy.float32)
        assert(nm.shape == self.mean.shape)
        self.mean = nm

    def setPrecision(self, precision):
        """Sets the precision matrix. Alternatively you can use the setCovariance method."""
        mat = numpy.array(precision, dtype=numpy.float32)
        assert(mat.shape == (self.mean.shape[0], self.mean.shape[0]))
        self.precision = mat
        # Invalidate everything derived from the old precision/covariance...
        self.covariance = None
        self.norm = None
        self.cholesky = None

    def setCovariance(self, covariance):
        """Sets the covariance matrix. Alternatively you can use the setPrecision method."""
        mat = numpy.array(covariance, dtype=numpy.float32)
        assert(mat.shape == (self.mean.shape[0], self.mean.shape[0]))
        self.covariance = mat
        # Invalidate everything derived from the old precision/covariance...
        self.precision = None
        self.norm = None
        self.cholesky = None

    def getMean(self):
        """Returns the mean."""
        return self.mean

    def getPrecision(self):
        """Returns the precision matrix, deriving it from the covariance if it is not cached."""
        if self.precision is None:
            self.precision = numpy.linalg.inv(self.covariance)
        return self.precision

    def getCovariance(self):
        """Returns the covariance matrix, deriving it from the precision if it is not cached."""
        if self.covariance is None:
            self.covariance = numpy.linalg.inv(self.precision)
        return self.covariance

    def getNorm(self):
        """Returns the normalising constant of the distribution. Typically for internal use only."""
        if self.norm is None:
            self.norm = math.pow(2.0*math.pi, -0.5*self.mean.shape[0]) * math.sqrt(numpy.linalg.det(self.getPrecision()))
        return self.norm

    def prob(self, x):
        """Given a vector x evaluates the probability density function at that point."""
        x = numpy.asarray(x)
        offset = x - self.mean
        val = numpy.dot(offset, numpy.dot(self.getPrecision(), offset))
        return self.getNorm() * math.exp(-0.5 * val)

    def sample(self):
        """Draws and returns a sample from the distribution."""
        if self.cholesky is None:
            self.cholesky = numpy.linalg.cholesky(self.getCovariance())
        z = numpy.random.normal(size=self.mean.shape)
        return self.mean + numpy.dot(self.cholesky, z)

    def __str__(self):
        return '{mean:%s, covar:%s}' % (str(self.mean), str(self.getCovariance()))
Python
# Copyright (c) 2012, Tom SF Haines
# All rights reserved.

# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:

# * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.

# * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.

# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.



from start_cpp import start_cpp



# Some basic matrix operations that come in use...
# C++ support code intended to be handed to scipy.weave as support_code (start_cpp
# prefixes a #line directive so compiler errors point back at this file).
# All routines operate in-place on caller-owned row-major buffers; the include
# guard lets several weave snippets share one copy of the definitions.
matrix_code = start_cpp() + """
#ifndef MATRIX_CODE
#define MATRIX_CODE

template <typename T>
inline void MemSwap(T * lhs, T * rhs, int count = 1)
{
 while(count!=0)
 {
  T t = *lhs;
  *lhs = *rhs;
  *rhs = t;
  ++lhs;
  ++rhs;
  --count;
 }
}

// Calculates the determinant - you give it a pointer to the first elment of the array, and its size (It must be square), plus its stride, which would typically be identical to size, which is the default.
template <typename T>
inline T Determinant(T * pos, int size, int stride = -1)
{
 if (stride==-1) stride = size;
 if (size==1) return pos[0];
 else
 {
  if (size==2) return pos[0]*pos[stride+1] - pos[1]*pos[stride];
  else
  {
   T ret = 0.0;
   for (int i=0; i<size; i++)
   {
    if (i!=0) MemSwap(&pos[0], &pos[stride*i], size-1);
    T sub = Determinant(&pos[stride], size-1, stride) * pos[stride*i + size-1];
    if ((i+size)%2) ret += sub;
    else ret -= sub;
   }

   for (int i=1; i<size; i++)
   {
    MemSwap(&pos[(i-1)*stride], &pos[i*stride], size-1);
   }

   return ret;
  }
 }
}

// Inverts a square matrix, will fail on singular and very occasionally on
// non-singular matrices, returns true on success. Uses Gauss-Jordan elimination
// with partial pivoting.
// in is the input matrix, out the output matrix, just be aware that the input matrix is trashed.
// You have to provide its size (Its square, obviously.), and optionally a stride if different from size.
template <typename T>
inline bool Inverse(T * in, T * out, int size, int stride = -1)
{
 if (stride==-1) stride = size;

 for (int r=0; r<size; r++)
 {
  for (int c=0; c<size; c++)
  {
   out[r*stride + c] = (c==r)?1.0:0.0;
  }
 }

 for (int r=0; r<size; r++)
 {
  // Find largest pivot and swap in, fail if best we can get is 0...
  T max = in[r*stride + r];
  int index = r;
  for (int i=r+1; i<size; i++)
  {
   if (fabs(in[i*stride + r])>fabs(max))
   {
    max = in[i*stride + r];
    index = i;
   }
  }

  if (index!=r)
  {
   MemSwap(&in[index*stride], &in[r*stride], size);
   MemSwap(&out[index*stride], &out[r*stride], size);
  }

  if (fabs(max-0.0)<1e-6) return false;

  // Divide through the entire row...
  max = 1.0/max;
  in[r*stride + r] = 1.0;
  for (int i=r+1; i<size; i++) in[r*stride + i] *= max;
  for (int i=0; i<size; i++) out[r*stride + i] *= max;

  // Row subtract to generate 0's in the current column, so it matches an identity matrix...
  for (int i=0; i<size; i++)
  {
   if (i==r) continue;
   T factor = in[i*stride + r];
   in[i*stride + r] = 0.0;
   for (int j=r+1; j<size; j++) in[i*stride + j] -= factor * in[r*stride + j];
   for (int j=0; j<size; j++) out[i*stride + j] -= factor * out[r*stride + j];
  }
 }

 return true;
}

#endif
"""
Python
# -*- coding: utf-8 -*-

# Copyright (c) 2010, Tom SF Haines
# All rights reserved.

# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:

# * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.

# * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.

# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.



import sys
import time



class ProgBar:
  """Simple console progress bar class.
  Note that object creation and destruction matter, as they indicate when processing starts and when it stops."""

  def __init__(self, width = 60, onCallback = None):
    """width is how many characters the bar occupies; onCallback, if given, is invoked (with no arguments) every time callback is called - useful for e.g. keeping a GUI alive."""
    self.start = time.time()
    self.fill = 0
    self.width = width
    self.onCallback = onCallback

    # Draw the outline that the tick marks will fill in underneath...
    sys.stdout.write('_' * self.width + '\n')
    sys.stdout.flush()

  def __del__(self):
    # Destruction marks the end of processing - complete the bar and report the elapsed time...
    self.end = time.time()
    self.__show(self.width)
    sys.stdout.write('\nDone - %s seconds\n\n' % (self.end - self.start))
    sys.stdout.flush()

  def callback(self, nDone, nToDo):
    """Hand this into the callback of methods to get a progress bar - it works by users repeatedly calling it to indicate how many units of work they have done (nDone) out of the total number of units required (nToDo)."""
    if self.onCallback:
      self.onCallback()

    # Convert progress into a character count, clamped to the bar width, and
    # only draw when it has advanced past what is already on screen...
    target = int(float(self.width) * float(nDone) / float(nToDo))
    target = min(target, self.width)
    if target > self.fill:
      self.__show(target)

  def __show(self, n):
    # Emit just the ticks that have not been drawn yet, then remember how far we got...
    sys.stdout.write('|' * (n - self.fill))
    sys.stdout.flush()
    self.fill = n
Python
# Copyright (c) 2012, Tom SF Haines
# All rights reserved.

# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:

# * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.

# * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.

# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.



import pydoc
import inspect



class DocGen:
  """A helper class that is used to generate documentation for the system. Outputs multiple formats simultaneously, specifically html for local reading with a webbrowser and the markup used by the wiki system on Google code."""
  def __init__(self, name, title = None, summary = None):
    """name is the module name - primarilly used for the file names. title is the title used as applicable - if not provide it just uses the name.
    summary is an optional line to go below the title."""
    if title==None: title = name
    if summary==None: summary = title

    # pydoc's HTMLDoc does the heavy lifting for the html output...
    self.doc = pydoc.HTMLDoc()

    # Open <name>.html and write the page preamble; sections accumulate in the
    # three strings below and are flushed to the file by __del__...
    self.html = open('%s.html'%name,'w')
    self.html.write('<html>\n')
    self.html.write('<head>\n')
    self.html.write('<title>%s</title>\n'%title)
    self.html.write('</head>\n')
    self.html.write('<body>\n')
    self.html_variables = ''
    self.html_functions = ''
    self.html_classes = ''

    # Same pattern for the Google-code wiki output, written to <name>.wiki...
    self.wiki = open('%s.wiki'%name,'w')
    self.wiki.write('#summary %s\n\n'%summary)
    self.wiki.write('= %s= \n\n'%title)
    self.wiki_variables = ''
    self.wiki_functions = ''
    self.wiki_classes = ''

  def __del__(self):
    # Destruction finalises both files: the accumulated sections are written
    # out in a fixed order and the files closed.
    if self.html_variables!='':
      self.html.write(self.doc.bigsection('Synonyms', '#ffffff', '#8d50ff', self.html_variables))
    if self.html_functions!='':
      self.html.write(self.doc.bigsection('Functions', '#ffffff', '#eeaa77', self.html_functions))
    if self.html_classes!='':
      self.html.write(self.doc.bigsection('Classes', '#ffffff', '#ee77aa', self.html_classes))
    self.html.write('</body>\n')
    self.html.write('</html>\n')
    self.html.close()

    if self.wiki_variables!='':
      self.wiki.write('= Variables =\n\n')
      self.wiki.write(self.wiki_variables)
      self.wiki.write('\n')
    if self.wiki_functions!='':
      self.wiki.write('= Functions =\n\n')
      self.wiki.write(self.wiki_functions)
      self.wiki.write('\n')
    if self.wiki_classes!='':
      self.wiki.write('= Classes =\n\n')
      self.wiki.write(self.wiki_classes)
      self.wiki.write('\n')
    self.wiki.close()

  def addFile(self, fn, title, fls = True):
    """Given a filename and section title adds the contents of said file to the output.
    Various flags influence how this works."""
    # Each line of the file is rendered twice - once marked up for html (hl)
    # and once for wiki syntax (wl). fls bolds the first line of the file.
    html = []
    wiki = []
    for i, line in enumerate(open(fn,'r').readlines()):
      hl = line.replace('\n', '')
      if i==0 and fls: hl = '<strong>' + hl + '</strong>'
      # Italicise 'something.py - ' / 'something.txt - ' prefixes, as used by
      # the readme files this is fed...
      for ext in ['py','txt']:
        if '.%s - '%ext in hl:
          s = hl.split('.%s - '%ext, 1)
          hl = '<i>' + s[0] + '.%s</i> - '%ext + s[1]
      html.append(hl)

      wl = line.strip()
      if i==0 and fls: wl = '*%s*'%wl
      for ext in ['py','txt']:
        if '.%s - '%ext in wl:
          s = wl.split('.%s - '%ext, 1)
          wl = '`' + s[0] + '.%s` - '%ext + s[1] + '\n'
      wiki.append(wl)

    # File sections are written immediately, unlike variables/functions/classes
    # which are buffered until __del__...
    self.html.write(self.doc.bigsection(title, '#ffffff', '#7799ee', '<br/>'.join(html)))

    self.wiki.write('== %s ==\n'%title)
    self.wiki.write('\n'.join(wiki))
    self.wiki.write('----\n\n')

  def addVariable(self, var, desc):
    """Adds a variable to the documentation. Given the nature of this you provide it as a pair of strings - one referencing the variable, the other some kind of description of its use etc.."""
    self.html_variables += '<strong>%s</strong><br/>'%var
    self.html_variables += '%s<br/><br/>\n'%desc

    self.wiki_variables += '*`%s`*\n'%var
    self.wiki_variables += ' %s\n\n'%desc

  def addFunction(self, func):
    """Adds a function to the documentation. You provide the actual function instance."""
    self.html_functions += self.doc.docroutine(func).replace('&nbsp;',' ')
    self.html_functions += '\n'

    # Rebuild the signature by hand for the wiki output (Python 2
    # inspect.getargspec)...
    name = func.__name__
    args, varargs, keywords, defaults = inspect.getargspec(func)
    doc = inspect.getdoc(func)

    # Left-pad defaults with None so it lines up with args - defaults only
    # covers the trailing arguments...
    if defaults==None: defaults = list()
    defaults = (len(args)-len(defaults)) * [None] + list(defaults)

    arg_str = ''
    if len(args)!=0:
      arg_str += reduce(lambda a, b: '%s, %s'%(a,b), map(lambda arg, d: arg if d==None else '%s = %s'%(arg,d), args, defaults))
    if varargs!=None: arg_str += ', *%s'%varargs if arg_str!='' else '*%s'%varargs
    if keywords!=None: arg_str += ', **%s'%keywords if arg_str!='' else '**%s'%keywords

    self.wiki_functions += '*`%s(%s)`*\n'%(name, arg_str)
    self.wiki_functions += ' %s\n\n'%doc

  def addClass(self, cls):
    """Adds a class to the documentation.
    You provide the actual class object."""
    self.html_classes += self.doc.docclass(cls).replace('&nbsp;',' ')
    self.html_classes += '\n'

    name = cls.__name__
    parents = filter(lambda a: a!=cls, inspect.getmro(cls))
    doc = inspect.getdoc(cls)

    par_str = ''
    if len(parents)!=0:
      par_str += reduce(lambda a, b: '%s, %s'%(a,b), map(lambda p: p.__name__, parents))

    self.wiki_classes += '== %s(%s) ==\n'%(name, par_str)
    self.wiki_classes += ' %s\n\n'%doc

    methods = inspect.getmembers(cls, lambda x: inspect.ismethod(x) or inspect.isbuiltin(x) or inspect.isroutine(x))

    # Sort methods alphabetically, but force __init__ to the front ('___'
    # sorts before any normal identifier)...
    def method_key(pair):
      if pair[0]=='__init__': return '___'
      else: return pair[0]
    methods.sort(key=method_key)

    for name, method in methods:
      # NOTE(review): the 'not inspect.ismethod(method)' clause means ordinary
      # Python (py2 unbound) methods are filtered out here - presumably this is
      # aimed at C-extension classes whose methods are builtins; confirm before
      # changing.
      if not name.startswith('_%s'%cls.__name__) and (not inspect.ismethod(method) and name[:2]!='__'):
        if inspect.ismethod(method):
          args, varargs, keywords, defaults = inspect.getargspec(method)
        else:
          # getargspec can not introspect builtins, so fake a signature...
          args = ['?']
          varargs = None
          keywords = None
          defaults = None

        if defaults==None: defaults = list()
        defaults = (len(args)-len(defaults)) * [None] + list(defaults)

        arg_str = ''
        if len(args)!=0:
          arg_str += reduce(lambda a, b: '%s, %s'%(a,b), map(lambda arg, d: arg if d==None else '%s = %s'%(arg,d), args, defaults))
        if varargs!=None: arg_str += ', *%s'%varargs if arg_str!='' else '*%s'%varargs
        if keywords!=None: arg_str += ', **%s'%keywords if arg_str!='' else '**%s'%keywords

        # Walk the mro looking for a docstring, so inherited documentation is
        # picked up when the subclass does not repeat it...
        def fetch_doc(cls, name):
          try:
            method = getattr(cls, name)
            if method.__doc__!=None: return inspect.getdoc(method)
          except: pass

          for parent in filter(lambda a: a!=cls, inspect.getmro(cls)):
            ret = fetch_doc(parent, name)
            if ret!=None: return ret
          return None
        doc = fetch_doc(cls, name)

        self.wiki_classes += '*`%s(%s)`*\n'%(name, arg_str)
        self.wiki_classes += ' %s\n\n'%doc

    # Simple class-level constants/descriptors get a one line entry each...
    variables = inspect.getmembers(cls, lambda x: inspect.ismemberdescriptor(x) or isinstance(x, int) or isinstance(x, str) or isinstance(x, float))
    for name, var in variables:
      if not name.startswith('__'):
        if hasattr(var, '__doc__'): d = var.__doc__
        else: d = str(var)
        self.wiki_classes += '*`%s`* = %s\n\n'%(name, d)
Python
# Copyright (c) 2011, Tom SF Haines
# All rights reserved.

# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:

# * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.

# * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.

# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.



import unittest
import random
import math

from scipy.special import gammaln, psi, polygamma
from scipy import weave

from utils.start_cpp import start_cpp



# Provides various gamma-related functions...
# C++ support code for scipy.weave; the unit tests below check these against
# the scipy.special equivalents (gammaln/psi/polygamma).
gamma_code = start_cpp() + """
#ifndef GAMMA_CODE
#define GAMMA_CODE

#include <cmath>

// Returns the natural logarithm of the Gamma function...
// (Uses Lanczos's approximation.)
double lnGamma(double z)
{
 static const double coeff[9] = {0.99999999999980993, 676.5203681218851, -1259.1392167224028, 771.32342877765313, -176.61502916214059, 12.507343278686905, -0.13857109526572012, 9.9843695780195716e-6, 1.5056327351493116e-7};

 if (z<0.5)
 {
  // Use reflection formula, as approximation doesn't work down here...
  return log(M_PI) - log(sin(M_PI*z)) - lnGamma(1.0-z);
 }
 else
 {
  double x = coeff[0];
  for (int i=1;i<9;i++) x += coeff[i]/(z+i-1);

  double t = z + 6.5;
  return log(sqrt(2.0*M_PI)) + (z-0.5)*log(t) - t + log(x);
 }
}

// Calculates the Digamma function, i.e. the derivative of the log of the Gamma function - uses a partial expansion of an infinite series to 4 terms that is good for high values, and an identity to express lower values in terms of higher values...
double digamma(double z)
{
 static const double highVal = 13.0; // A bit of fiddling shows that the last term with this is of the order 1e-10, so we can expect at least 9 digits of accuracy past the decimal point.

 double ret = 0.0;
 while (z<highVal)
 {
  ret -= 1.0/z;
  z += 1.0;
 }

 double iz1 = 1.0/z;
 double iz2 = iz1*iz1;
 double iz4 = iz2*iz2;
 double iz6 = iz4*iz2;

 ret += log(z) - iz1/2.0 - iz2/12.0 + iz4/120.0 - iz6/252.0;
 return ret;
}

// Calculates the trigamma function - uses a partial expansion of an infinite series that is accurate for large values, and then uses an identity to express lower values in terms of higher values - same approach as for the digamma function basically...
double trigamma(double z)
{
 static const double highVal = 8.0;

 double ret = 0.0;
 while (z<highVal)
 {
  ret += 1.0/(z*z);
  z += 1.0;
 }
 z -= 1.0;

 double iz1 = 1.0/z;
 double iz2 = iz1*iz1;
 double iz3 = iz1*iz2;
 double iz5 = iz3*iz2;
 double iz7 = iz5*iz2;
 double iz9 = iz7*iz2;

 ret += iz1 - 0.5*iz2 + iz3/6.0 - iz5/30.0 + iz7/42.0 - iz9/30.0;
 return ret;
}

#endif
"""



def lnGamma(z):
  """Pointless as scipy, a library this is dependent on, defines this, but useful for testing. Returns the logorithm of the gamma function"""
  code = start_cpp(gamma_code) + """
  return_val = lnGamma(z);
  """
  return weave.inline(code, ['z'], support_code=gamma_code)

def digamma(z):
  """Pointless as scipy, a library this is dependent on, defines this, but useful for testing.
  Returns an evaluation of the digamma function"""
  code = start_cpp(gamma_code) + """
  return_val = digamma(z);
  """
  return weave.inline(code, ['z'], support_code=gamma_code)

def trigamma(z):
  """Pointless as scipy, a library this is dependent on, defines this, but useful for testing. Returns an evaluation of the trigamma function"""
  code = start_cpp(gamma_code) + """
  return_val = trigamma(z);
  """
  return weave.inline(code, ['z'], support_code=gamma_code)



class TestFuncs(unittest.TestCase):
  """Test code for the assorted gamma-related functions."""
  def test_compile(self):
    # Just checks the support code compiles...
    code = start_cpp(gamma_code) + """
    """
    weave.inline(code, support_code=gamma_code)

  def test_error_lngamma(self):
    # Compare against scipy.special.gammaln at random points...
    for _ in xrange(1000):
      z = random.uniform(0.01, 100.0)
      own = lnGamma(z)
      good = gammaln(z)
      assert(math.fabs(own-good)<1e-12)

  def test_error_digamma(self):
    # Compare against scipy.special.psi at random points...
    for _ in xrange(1000):
      z = random.uniform(0.01, 100.0)
      own = digamma(z)
      good = psi(z)
      assert(math.fabs(own-good)<1e-9)

  def test_error_trigamma(self):
    # Compare against scipy.special.polygamma(1, .) at random points...
    for _ in xrange(1000):
      z = random.uniform(0.01, 100.0)
      own = trigamma(z)
      good = polygamma(1,z)
      assert(math.fabs(own-good)<1e-9)



# If this file is run do the unit tests...
if __name__ == '__main__':
  unittest.main()
Python
# -*- coding: utf-8 -*-

# Copyright (c) 2010, Tom SF Haines
# All rights reserved.

# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:

# * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.

# * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.

# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.



import inspect
import hashlib



def start_cpp(hash_str = None):
  """This method does two things - firstly it adds the correct line numbers to scipy.weave code (Good for debugging) and secondly it can optionaly inserts a hash code of some other code into the code. This latter feature is useful for working around the fact the scipy.weave only recompiles if the hash of the code changes, but ignores the support_code - passing the support_code into start_cpp avoids this problem by putting its hash into the code and forcing a recompile when that code changes.
  Usage is <code variable> = start_cpp([support_code variable]) + <3 quotations to start big comment with code in, typically going over many lines.>"""
  # Inspect the caller's frame so the emitted #line directive points at the
  # file/line where the C++ string literal actually lives...
  frame = inspect.currentframe().f_back
  info = inspect.getframeinfo(frame)  # info[0] = filename, info[1] = line number.

  if hash_str is None:
    return '#line %i "%s"\n'%(info[1],info[0])
  else:
    # Embed an md5 of hash_str (typically the support_code) as a trailing C++
    # comment - changing the support code then changes this string, which
    # forces scipy.weave to recompile...
    h = hashlib.md5()
    h.update(hash_str)
    hash_val = h.hexdigest()
    return '#line %i "%s" // %s\n'%(info[1],info[0],hash_val)
Python
# -*- coding: utf-8 -*- # Code copied from http://opencv.willowgarage.com/wiki/PythonInterface - license unknown, but presumed to be at least as liberal as bsd (The license for opencv.). import cv import numpy as np def cv2array(im): """Converts a cv array to a numpy array.""" depth2dtype = { cv.IPL_DEPTH_8U: 'uint8', cv.IPL_DEPTH_8S: 'int8', cv.IPL_DEPTH_16U: 'uint16', cv.IPL_DEPTH_16S: 'int16', cv.IPL_DEPTH_32S: 'int32', cv.IPL_DEPTH_32F: 'float32', cv.IPL_DEPTH_64F: 'float64', } arrdtype=im.depth a = np.fromstring( im.tostring(), dtype=depth2dtype[im.depth], count=im.width*im.height*im.nChannels) a.shape = (im.height,im.width,im.nChannels) return a def array2cv(a): """Converts a numpy array to a cv array, if possible.""" dtype2depth = { 'uint8': cv.IPL_DEPTH_8U, 'int8': cv.IPL_DEPTH_8S, 'uint16': cv.IPL_DEPTH_16U, 'int16': cv.IPL_DEPTH_16S, 'int32': cv.IPL_DEPTH_32S, 'float32': cv.IPL_DEPTH_32F, 'float64': cv.IPL_DEPTH_64F, } try: nChannels = a.shape[2] except: nChannels = 1 cv_im = cv.CreateImageHeader((a.shape[1],a.shape[0]), dtype2depth[str(a.dtype)], nChannels) cv.SetData(cv_im, a.tostring(), a.dtype.itemsize*nChannels*a.shape[1]) return cv_im
Python
# -*- coding: utf-8 -*-

# Copyright (c) 2011, Tom SF Haines
# All rights reserved.

# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:

# * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.

# * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.

# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.



import multiprocessing as mp
import multiprocessing.synchronize # To make sure we have all the functionality.
import types
import marshal

import unittest



def repeat(x):
  """A generator that repeats the input forever - can be used with the mp_map function to give data to a function that is constant."""
  while True:
    yield x



def run_code(code,args):
  """Internal use function that does the work in each process - rebuilds the function from its marshalled code object and applies it to args."""
  code = marshal.loads(code)
  func = types.FunctionType(code, globals(), '_')
  return func(*args)



def mp_map(func, *iters, **keywords):
  """A multiprocess version of the map function.
  Note that func must limit itself to the data provided - if it accesses anything else (globals, locals to its definition.) it will fail. There is a repeat generator provided in this module to work around such issues. Note that, unlike map, this iterates the length of the shortest of inputs, rather than the longest - whilst this makes it not a perfect substitute it makes passing constant argumenmts easier as they can just repeat for infinity. A 'pool' keyword argument may supply an existing multiprocessing.Pool to use; otherwise a temporary one is created and cleaned up before returning."""
  # Use the caller's pool if one is supplied, otherwise create our own - in
  # which case we are responsible for shutting it down (the original leaked a
  # set of worker processes per call)...
  if 'pool' in keywords:
    pool = keywords['pool']
    own_pool = False
  else:
    pool = mp.Pool()
    own_pool = True

  try:
    # Marshal the function's code object so it can be rebuilt in the workers -
    # this is what restricts func to only using its arguments...
    code = marshal.dumps(func.func_code)

    # Dispatch one job per argument tuple, then collect the results in order...
    jobs = []
    for args in zip(*iters):
      jobs.append(pool.apply_async(run_code,(code,args)))

    for i in xrange(len(jobs)):
      jobs[i] = jobs[i].get()
  finally:
    # Only tear down a pool we created ourselves...
    if own_pool:
      pool.close()
      pool.join()

  return jobs



class TestMpMap(unittest.TestCase):
  def test_simple1(self):
    data = ['a','b','c','d']
    def noop(data): return data
    data_noop = mp_map(noop, data)
    self.assertEqual(data, data_noop)

  def test_simple2(self):
    data = [x for x in xrange(1000)]
    data_double = mp_map(lambda a: a*2, data)
    self.assertEqual(map(lambda a: a*2,data), data_double)

  def test_gen(self):
    def gen():
      for i in xrange(100):
        yield i
    data_double = mp_map(lambda a: a*2, gen())
    self.assertEqual(map(lambda a: a*2,gen()), data_double)

  def test_repeat(self):
    def mult(a,b): return a*b
    data = [x for x in xrange(50,5000,5)]
    data_triple = mp_map(mult, data, repeat(3))
    self.assertEqual(map(lambda a: a*3,data),data_triple)

  def test_none(self):
    data = []
    data_sqr = mp_map(lambda x: x*x, data)
    self.assertEqual([],data_sqr)



if __name__ == '__main__':
  unittest.main()
Python
# Copyright (c) 2012, Tom SF Haines
# All rights reserved.

# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:

# * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.

# * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.

# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.



import sys
import os.path
import tempfile
import shutil

from distutils.core import setup, Extension
import distutils.ccompiler
import distutils.dep_util



# Compiler construction can fail on broken installs - record that rather than
# dying at import time...
try:
  __default_compiler = distutils.ccompiler.new_compiler()
except:
  __default_compiler = None



def make_mod(name, base, source, openCL = False):
  """Uses distutils to compile a python module - really just a set of hacks to allow this to be done 'on demand', so it only compiles if the module does not exist or is older than the current source, and after compilation the program can continue on its merry way, and immediatly import the just compiled module. Note that on failure erros can be thrown - its your choice to catch them or not.
  name is the modules name, i.e. what you want to use with the import statement.
  base is the base directory for the module, which contains the source file - often you would want to set this to 'os.path.dirname(__file__)', assuming the .py file that imports the module is in the same directory as the code. It is this directory that the module is output to.
  source is the filename of the source code to compile, or alternativly a list of filenames.
  openCL indicates if OpenCL is used by the module, in which case it does all the necesary setup - done like this so these setting can be kept centralised, so when they need to be different for a new platform they only have to be changed in one place."""
  if __default_compiler is None:
    raise Exception('No compiler!')

  # Work out the various file names - check if we actually need to do anything...
  if not isinstance(source, list): source = [source]
  source_path = map(lambda s: os.path.join(base, s), source)
  library_path = os.path.join(base, __default_compiler.shared_object_filename(name))

  # any() rather than reduce() so an empty source list is a no-op instead of a
  # TypeError; rebuild if any source file is newer than the compiled module...
  if any(map(lambda s: distutils.dep_util.newer(s, library_path), source_path)):
    # Backup the argv variable and create a temporary directory to do all work
    # in - done before the try so the finally clause never sees unbound names...
    old_argv = sys.argv[:]
    temp_dir = tempfile.mkdtemp()

    try:
      # Prepare the extension - distutils takes its orders via argv, so fake
      # a 'build_ext' command line...
      sys.argv = ['','build_ext','--build-lib', base, '--build-temp', temp_dir]

      comp_path = filter(lambda s: not s.endswith('.h'), source_path)
      depends = filter(lambda s: s.endswith('.h'), source_path)

      if openCL:
        # Include/library paths for the CUDA and AMD APP OpenCL SDKs...
        ext = Extension(name, comp_path, include_dirs=['/usr/local/cuda/include', '/opt/AMDAPP/include'], libraries = ['OpenCL'], library_dirs = ['/usr/lib64/nvidia', '/opt/AMDAPP/lib/x86_64'], depends=depends)
      else:
        ext = Extension(name, comp_path, depends=depends)

      # Compile...
      setup(name=name, version='1.0.0', ext_modules=[ext])
    finally:
      # Cleanup the argv variable and the temporary directory...
      sys.argv = old_argv
      shutil.rmtree(temp_dir, True)
Python
# -*- coding: utf-8 -*-

# Copyright (c) 2011, Tom SF Haines
# All rights reserved.

# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:

# * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.

# * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.

# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.



from start_cpp import start_cpp
from numpy_help_cpp import numpy_util_code



# Provides various functions to assist with manipulating python objects from c++ code.
# C++ support code for scipy.weave (prefixed with numpy_util_code, which supplies
# the Byte1D/Float1D accessors used below). The Get*1D helpers return new[]'d
# buffers - the C++ caller owns them and must delete[] them.
python_obj_code = numpy_util_code + start_cpp() + """
#ifndef PYTHON_OBJ_CODE
#define PYTHON_OBJ_CODE

// Extracts a boolean from an object...
bool GetObjectBoolean(PyObject * obj, const char * name)
{
 PyObject * b = PyObject_GetAttrString(obj, name);
 bool ret = b!=Py_False;
 Py_DECREF(b);
 return ret;
}

// Extracts an int from an object...
int GetObjectInt(PyObject * obj, const char * name)
{
 PyObject * i = PyObject_GetAttrString(obj, name);
 int ret = PyInt_AsLong(i);
 Py_DECREF(i);
 return ret;
}

// Extracts a float from an object...
float GetObjectFloat(PyObject * obj, const char * name)
{
 PyObject * f = PyObject_GetAttrString(obj, name);
 float ret = PyFloat_AsDouble(f);
 Py_DECREF(f);
 return ret;
}

// Extracts an array from an object, returning it as a new[] unsigned char array. You can also pass in a pointer to an int to have the size of the array stored...
unsigned char * GetObjectByte1D(PyObject * obj, const char * name, int * size = 0)
{
 PyArrayObject * nao = (PyArrayObject*)PyObject_GetAttrString(obj, name);
 unsigned char * ret = new unsigned char[nao->dimensions[0]];
 if (size) *size = nao->dimensions[0];

 for (int i=0;i<nao->dimensions[0];i++) ret[i] = Byte1D(nao,i);

 Py_DECREF(nao);
 return ret;
}

// Extracts an array from an object, returning it as a new[] float array. You can also pass in a pointer to an int to have the size of the array stored...
float * GetObjectFloat1D(PyObject * obj, const char * name, int * size = 0)
{
 PyArrayObject * nao = (PyArrayObject*)PyObject_GetAttrString(obj, name);
 float * ret = new float[nao->dimensions[0]];
 if (size) *size = nao->dimensions[0];

 for (int i=0;i<nao->dimensions[0];i++) ret[i] = Float1D(nao,i);

 Py_DECREF(nao);
 return ret;
}

#endif
"""
Python
# -*- coding: utf-8 -*- # Copyright (c) 2011, Tom SF Haines # All rights reserved. # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. # * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. from start_cpp import start_cpp # Defines helper functions for accessing numpy arrays... 
numpy_util_code = start_cpp() + """ #ifndef NUMPY_UTIL_CODE #define NUMPY_UTIL_CODE float & Float1D(PyArrayObject * arr, int index = 0) { return *(float*)(arr->data + index*arr->strides[0]); } float & Float2D(PyArrayObject * arr, int index1 = 0, int index2 = 0) { return *(float*)(arr->data + index1*arr->strides[0] + index2*arr->strides[1]); } float & Float3D(PyArrayObject * arr, int index1 = 0, int index2 = 0, int index3 = 0) { return *(float*)(arr->data + index1*arr->strides[0] + index2*arr->strides[1] + index3*arr->strides[2]); } unsigned char & Byte1D(PyArrayObject * arr, int index = 0) { //assert(arr->strides[0]==sizeof(unsigned char)); return *(unsigned char*)(arr->data + index*arr->strides[0]); } unsigned char & Byte2D(PyArrayObject * arr, int index1 = 0, int index2 = 0) { //assert(arr->strides[0]==sizeof(unsigned char)); return *(unsigned char*)(arr->data + index1*arr->strides[0] + index2*arr->strides[1]); } unsigned char & Byte3D(PyArrayObject * arr, int index1 = 0, int index2 = 0, int index3 = 0) { //assert(arr->strides[0]==sizeof(unsigned char)); return *(unsigned char*)(arr->data + index1*arr->strides[0] + index2*arr->strides[1] + index3*arr->strides[2]); } int & Int1D(PyArrayObject * arr, int index = 0) { //assert(arr->strides[0]==sizeof(int)); return *(int*)(arr->data + index*arr->strides[0]); } int & Int2D(PyArrayObject * arr, int index1 = 0, int index2 = 0) { //assert(arr->strides[0]==sizeof(int)); return *(int*)(arr->data + index1*arr->strides[0] + index2*arr->strides[1]); } int & Int3D(PyArrayObject * arr, int index1 = 0, int index2 = 0, int index3 = 0) { //assert(arr->strides[0]==sizeof(int)); return *(int*)(arr->data + index1*arr->strides[0] + index2*arr->strides[1] + index3*arr->strides[2]); } #endif """
Python
#! /usr/bin/env python
# -*- coding: utf-8 -*-

# Copyright (c) 2011, Tom SF Haines
# All rights reserved.

# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:

# * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.

# * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.

# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.



# Builds the documentation for the utils module by registering its files,
# variables, functions and classes with a DocGen instance.

import cvarray
import mp_map
import prog_bar
import numpy_help_cpp
import python_obj_cpp
import matrix_cpp
import gamma_cpp
import setProcName
import start_cpp
import make

import doc_gen



# Setup...
doc = doc_gen.DocGen('utils', 'Utilities/Miscellaneous', 'Library of miscellaneous stuff - most modules depend on this.')
doc.addFile('readme.txt', 'Overview')


# Variables...
doc.addVariable('numpy_help_cpp.numpy_util_code', 'Assorted utility functions for accessing numpy arrays within scipy.weave C++ code.')
doc.addVariable('python_obj_cpp.python_obj_code', 'Assorted utility functions for interfacing with python objects from scipy.weave C++ code.')
doc.addVariable('matrix_cpp.matrix_code', 'Matrix manipulation routines for use in scipy.weave C++')
doc.addVariable('gamma_cpp.gamma_code', 'Gamma and related functions for use in scipy.weave C++')


# Functions...
# (make.make_mod was previously registered twice, at the start and the end of
# this section - the duplicate registration has been removed.)
doc.addFunction(make.make_mod)
doc.addFunction(cvarray.cv2array)
doc.addFunction(cvarray.array2cv)
doc.addFunction(mp_map.repeat)
doc.addFunction(mp_map.mp_map)
doc.addFunction(setProcName.setProcName)
doc.addFunction(start_cpp.start_cpp)


# Classes...
doc.addClass(prog_bar.ProgBar)
doc.addClass(doc_gen.DocGen)
Python
#! /usr/bin/env python
# -*- coding: utf-8 -*-

# Copyright (c) 2010, Tom SF Haines
# All rights reserved.

# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:

# * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.

# * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.

# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.



from ctypes import *



def setProcName(name):
  """Sets the process name, linux only - useful for those programs where you might want to do a killall, but don't want to slaughter all the other python processes. Note that there are multiple mechanisms, and that the given new name can be shortened by differing amounts in differing cases."""
  # Call the process control function...
  # (15 is PR_SET_NAME, per <linux/prctl.h> - the kernel truncates the name it
  # stores to 15 characters plus the terminator.)
  libc = cdll.LoadLibrary('libc.so.6')
  libc.prctl(15, c_char_p(name), 0, 0, 0)

  # Update argv, so the name also changes where the original command line is
  # read rather than the prctl name...
  # NOTE(review): _dl_argv is a glibc/dynamic-linker internal symbol, not a
  # public API - confirm it is exported by the libc on the target system.
  charPP = POINTER(POINTER(c_char))
  argv = charPP.in_dll(libc,'_dl_argv')
  # Copy at most the length of the existing argv[0], so a longer new name is
  # truncated rather than overflowing the original argument buffer...
  size = libc.strlen(argv[0])
  libc.strncpy(argv[0],c_char_p(name),size)



if __name__=='__main__':
  # Quick test that it works - print the process list before and after
  # renaming, using both mechanisms ps offers, so the change can be eyeballed...
  import os

  ps1 = 'ps'
  ps2 = 'ps -f'

  os.system(ps1)
  os.system(ps2)

  setProcName('wibble_wobble')

  os.system(ps1)
  os.system(ps2)
Python
#! /usr/bin/env python # Copyright 2011 Tom SF Haines # Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at # http://www.apache.org/licenses/LICENSE-2.0 # Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. import dpgmm from utils import doc_gen # Setup... doc = doc_gen.DocGen('dpgmm', 'Dirichlet Process Gaussian Mixture Model', 'Dynamically resizing Gaussian mixture model') doc.addFile('readme.txt', 'Overview') # Classes... doc.addClass(dpgmm.DPGMM)
Python
# Copyright 2011 Tom SF Haines

# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.

# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.

# You should have received a copy of the GNU General Public License
# along with this program.  If not, see <http://www.gnu.org/licenses/>.



import solvers

from solve_shared import Params, State
from document import Document
from dp_conc import PriorConcDP



class Corpus:
  """Contains a set of Document-s, plus parameters for the graphical models priors - everything required as input to build a model, except a Params object."""
  def __init__(self, other = None):
    """Basic setup, sets a whole bunch of stuff to sensible parameters, or a copy constructor if provided with another Corpus."""
    if other!=None:
      # Create the array of documents and support variables...
      # (Deep copy - each Document is re-wrapped; map returns a list here,
      # this being Python 2 code.)
      self.docs = map(lambda d: Document(d), other.docs)
      self.sampleCount = other.sampleCount
      self.wordCount = other.wordCount

      # Behavioural flags/parameters...
      self.dnrDocInsts = other.dnrDocInsts
      self.dnrCluInsts = other.dnrCluInsts
      self.seperateClusterConc = other.seperateClusterConc
      self.seperateDocumentConc = other.seperateDocumentConc
      self.oneCluster = other.oneCluster
      self.calcBeta = other.calcBeta
      self.calcCluBmn = other.calcCluBmn
      self.calcPhi = other.calcPhi
      self.resampleConcs = other.resampleConcs
      self.behSamples = other.behSamples

      # Concentration priors are copied via PriorConcDP's copy constructor;
      # beta/phiConc/phiRatio are plain floats...
      self.alpha = PriorConcDP(other.alpha)
      self.beta = other.beta
      self.gamma = PriorConcDP(other.gamma)
      self.rho = PriorConcDP(other.rho)
      self.mu = PriorConcDP(other.mu)
      self.phiConc = other.phiConc
      self.phiRatio = other.phiRatio

      self.abnorms = dict(other.abnorms)
    else:
      # Create the array of documents and support variables...
      self.docs = []
      self.sampleCount = 0 # How many samples exist in all the documents.
      self.wordCount = 0 # How many types of words exist.

      # Behavioural flags/parameters...
      self.dnrDocInsts = False
      self.dnrCluInsts = True
      self.seperateClusterConc = False
      self.seperateDocumentConc = False
      self.oneCluster = False
      self.calcBeta = True
      self.calcCluBmn = True
      self.calcPhi = True
      self.resampleConcs = True
      self.behSamples = 1024

      # Parameters for the priors in the graphical model...
      self.alpha = PriorConcDP() # Document instance DP
      self.beta = 1.0 # Topic multinomial symmetric Dirichlet distribution prior.
      self.gamma = PriorConcDP() # Topic DP
      self.rho = PriorConcDP() # Cluster instance DP
      self.mu = PriorConcDP() # Cluster creating DP
      self.phiConc = 2.0 # For the prior on behaviour multinomials. (conc is multiplied by the number of entrys.)
      self.phiRatio = 10.0 # "

      # Abnormalities - a dictionary taking each abnormality to a natural number which is its index in the various arrays...
      self.abnorms = dict()

  def setDocInstsDNR(self, val):
    """False to resample the document instances, True to not. Defaults to False, but can be set True to save a bit of computation. Not recomended to be changed, as convergance is poor without it."""
    self.dnrDocInsts = val

  def getDocInstsDNR(self):
    """Returns False if the document instances are going to be resampled, True if they are not."""
    return self.dnrDocInsts

  def setCluInstsDNR(self, val):
    """False to resample the cluster instances, True to not. Defaults to False, but can be set True to save quite a bit of computation. Its debatable if switching this to True causes the results to degrade in any way, but left on by default as indicated in the paper."""
    self.dnrCluInsts = val

  def getCluInstsDNR(self):
    """Returns False if the cluster instances are going to be resampled, True if they are not."""
    return self.dnrCluInsts

  def setSeperateClusterConc(self, val):
    """True if you want clusters to each have their own concentration parameter, False, the default, if you want a single concentration parameter shared between all clusters. Note that setting this True doesn't really work in my experiance."""
    self.seperateClusterConc = val

  def getSeperateClusterConc(self):
    """True if each cluster has its own seperate concentration parameter, False if they are shared."""
    return self.seperateClusterConc

  def setSeperateDocumentConc(self, val):
    """True if you want each document to have its own concentration value, False if you want a single value shared between all documents. Experiance shows that the default, False, is the only sensible option most of the time, though when single cluster is on True can give advantages."""
    self.seperateDocumentConc = val

  def getSeperateDocumentConc(self):
    """True if each document has its own concetration parameter, False if they all share a single concentration parameter."""
    return self.seperateDocumentConc

  def setOneCluster(self, val):
    """Leave as False to keep the default cluster behaviour, but set to True to only have a single cluster - this results in a HDP implimentation that has an extra pointless layer, making a it a bit inefficient, but not really affecting the results relative to a normal HDP implimentation."""
    self.oneCluster = val

  def getOneCluster(self):
    """Returns False for normal behaviour, True if only one cluster will be used - this forces the algorithm to be normal HDP, with an excess level, rather than dual HDP."""
    return self.oneCluster

  def setCalcBeta(self, val):
    """Set False to have beta constant as the algorithm runs, leave as True if you want it recalculated based on the topic multinomials drawn from it."""
    self.calcBeta = val

  def getCalcBeta(self):
    """Returns False to leave the beta prior on topic word multinomials as is, True to indicate that it should be optimised"""
    return self.calcBeta

  def setCalcClusterBMN(self, val):
    """Sets if the per-cluster behaviour multinomial should be resampled."""
    self.calcCluBmn = val

  def getCalcClusterBMN(self):
    """Returns True if it is going to recalculate the per-cluster behaviour distribution, False otherwise."""
    return self.calcCluBmn

  def setCalcPhi(self, val):
    """Set False to have phi constant as the algorithm runs, leave True if you want it recalculated based on the cluster multinomials over behaviour drawn from it."""
    self.calcPhi = val

  def getCalcPhi(self):
    """Returns False if it is going to leave the phi prior as is, True to indicate that it will be optimised."""
    return self.calcPhi

  def setResampleConcs(self, val):
    """Sets True, the default, to resample concentration parameters, False to not."""
    self.resampleConcs = val

  def getResampleConcs(self):
    """Returns True if it will be resampling the concentration parameters, False otherwise."""
    return self.resampleConcs

  def setBehSamples(self, samples):
    """Sets the number of samples to use when integrating the prior over each per-cluster behaviour multinomial."""
    self.behSamples = samples

  def getBehSamples(self):
    """Returns the number of samples to be used by the behaviour multinomial estimator. Defaults to 1024."""
    return self.behSamples

  def setAlpha(self, alpha, beta, conc):
    """Sets the concentration details for the per-document DP from which the topics for words are drawn."""
    self.alpha.alpha = alpha
    self.alpha.beta = beta
    self.alpha.conc = conc

  def getAlpha(self):
    """Returns the PriorConcDP for the alpha parameter."""
    return self.alpha

  def setBeta(self, beta):
    """Parameter for the symmetric Dirichlet prior on the multinomial distribution from which words are drawn, one for each topic."""
    assert(beta>=0.0)
    self.beta = beta

  def getBeta(self):
    """Returns the current beta value. Defaults to 1.0."""
    return self.beta

  def setGamma(self, alpha, beta, conc):
    """Sets the concentration details for the topic DP, from which topics are drawn"""
    self.gamma.alpha = alpha
    self.gamma.beta = beta
    self.gamma.conc = conc

  def getGamma(self):
    """Returns the PriorConcDP for the gamma parameter."""
    return self.gamma

  def setRho(self, alpha, beta, conc):
    """Sets the concentration details used for each cluster instance."""
    self.rho.alpha = alpha
    self.rho.beta = beta
    self.rho.conc = conc

  def getRho(self):
    """Returns the PriorConcDP for the rho parameter."""
    return self.rho

  def setMu(self, alpha, beta, conc):
    """Sets the concentration details used for the DP from which clusters are drawn for documents."""
    self.mu.alpha = alpha
    self.mu.beta = beta
    self.mu.conc = conc

  def getMu(self):
    """Returns the PriorConcDP for the mu parameter."""
    return self.mu

  def setPhi(self, conc, ratio):
    """Sets the weight and ratio for Phi, which is a Dirichlet distribution prior on the multinomial over which behaviour each word belongs to, as stored on a per-cluster basis. conc is the concentration for the distribution, whilst ratio is how many times more likelly normal behaviour is presumed to be than any given abnormal behaviour."""
    self.phiConc = conc
    self.phiRatio = ratio

  def getPhiConc(self):
    """Returns the concentration parameter for the phi prior. Defaults to 2."""
    # (Docstring previously claimed a default of 1; __init__ sets phiConc = 2.0.)
    return self.phiConc

  def getPhiRatio(self):
    """Returns the current phi ratio, which is the ratio of how many times more likelly normal words are than any given abnormal class of words in the prior. Defaults to 10."""
    return self.phiRatio

  def add(self, doc, igIdent = False):
    """Adds a document to the corpus. Unless igIdent is True the document's ident is overwritten with its index in the corpus. Updates the running sample and word counts, and registers any abnormalities the document carries that have not been seen before."""
    if igIdent==False: doc.ident = len(self.docs)
    self.docs.append(doc)
    self.sampleCount += doc.getSampleCount()
    # Word identifiers are assumed densely packed from 0, so the count is one
    # past the largest identifier; assumes the last row of doc.words holds the
    # largest identifier, i.e. the rows are sorted by it - TODO confirm.
    self.wordCount = max((self.wordCount, doc.words[-1,0]+1))
    for abnorm in doc.getAbnorms():
      if abnorm not in self.abnorms:
        num = len(self.abnorms)+1 # +1 to account for normal behaviour being entry 0.
        self.abnorms[abnorm] = num

  def getDocumentCount(self):
    """Number of documents."""
    return len(self.docs)

  def getDocument(self,ident):
    """Returns the Document associated with the given ident."""
    # (Valid because add() assigns idents as list indices, unless igIdent was used.)
    return self.docs[ident]

  def documentList(self):
    """Returns a list of all documents."""
    return self.docs

  def getAbnormDict(self):
    """Returns a dictionary indexed by the abnormalities seen in all the documents added so far. The values of the dictionary are unique natural numbers, starting from 1, which index the abnormalities in the arrays used internally for simulation."""
    return self.abnorms

  def setWordCount(self, wordCount):
    """Because the system autodetects words as being the identifiers 0..max where max is the largest identifier seen it is possible for you to tightly pack words but to want to reserve some past the end. Its also possible for a data set to never contain the last word, creating problems. This allows you to set the number of words, forcing the issue. Note that setting the number less than actually exists will be ignored."""
    self.wordCount = max((self.wordCount, wordCount))

  def getWordCount(self):
    """Number of words as far as a fitter will be concerned; doesn't mean that they have all actually been sampled within documents however."""
    return self.wordCount

  def getSampleCount(self):
    """Returns the number of samples stored in all the documents contained within."""
    return self.sampleCount

  def sampleModel(self, params=None, callback=None, mp=True):
    """Given parameters to run the Gibbs sampling with this does the sampling, and returns the resulting Model object. If params is not provided it uses the default. callback can be a function to report progress, and mp can be set to False if you don't want to make use of multiprocessing."""
    if params==None: params = Params()
    state = State(self,params)
    # Use the multiprocess solver only when requested, worthwhile (more than
    # one run) and actually provided by the solvers module...
    if mp and params.runs>1 and hasattr(solvers,'gibbs_all_mp'):
      solvers.gibbs_all_mp(state, callback)
    else:
      solvers.gibbs_all(state, callback)
    return state.getModel()
Python
# Copyright 2011 Tom SF Haines

# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at

# http://www.apache.org/licenses/LICENSE-2.0

# Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.



class Params(object):
  """Parameters for running the fitter that are universal to all fitters - basically the parameters you would typically associate with Gibbs sampling. Inherits from object (new-style class) so that the property descriptors below work under Python 2 - with an old-style class an assignment such as 'self.runs = x' would silently bypass the setter (and its validation) and shadow the property with an instance attribute, leaving getRuns() out of sync."""
  def __init__(self, toClone = None):
    """Sets the parameters to reasonable defaults - 8 runs, 10 samples per run, a burn in of 1000 and a lag of 100. Will act as a copy constructor if given an instance of this object."""
    if toClone!=None:
      self.__runs = toClone.runs
      self.__samples = toClone.samples
      self.__burnIn = toClone.burnIn
      self.__lag = toClone.lag
    else:
      self.__runs = 8
      self.__samples = 10
      self.__burnIn = 1000
      self.__lag = 100

  def setRuns(self, runs):
    """Sets the number of runs, i.e. how many seperate chains are run."""
    assert(isinstance(runs, int))
    assert(runs>0)
    self.__runs = runs

  def setSamples(self, samples):
    """Number of samples to extract from each chain - total number of samples extracted will hence be samples*runs."""
    assert(isinstance(samples, int))
    assert(samples>0)
    self.__samples = samples

  def setBurnIn(self, burnIn):
    """Number of Gibbs iterations to do for burn in before sampling starts."""
    assert(isinstance(burnIn, int))
    assert(burnIn>=0) # Zero burn in is allowed, unlike the other parameters.
    self.__burnIn = burnIn

  def setLag(self, lag):
    """Number of Gibbs iterations to do between samples."""
    assert(isinstance(lag, int))
    assert(lag>0)
    self.__lag = lag

  def getRuns(self):
    """Returns the number of runs."""
    return self.__runs

  def getSamples(self):
    """Returns the number of samples."""
    return self.__samples

  def getBurnIn(self):
    """Returns the burn in length."""
    return self.__burnIn

  def getLag(self):
    """Returns the lag length."""
    return self.__lag

  runs = property(getRuns, setRuns, None, "Number of seperate chains to run.")
  samples = property(getSamples, setSamples, None, "Number of samples to extract from each chain")
  burnIn = property(getBurnIn, setBurnIn, None, "Number of iterations to do before taking the first sample of a chain.")
  lag = property(getLag, setLag, None, "Number of iterations to do between samples.")

  def fromArgs(self, args, prefix = ''):
    """Extracts from an arg string, typically sys.argv[1:], the parameters, leaving them untouched if not given. Uses --runs, --samples, --burnIn and --lag. Can optionally provide a prefix which is inserted after the '--'"""
    # Each flag is searched for in args[:-1] so that a flag appearing as the
    # very last entry (with no value after it) is ignored rather than indexing
    # past the end. A non-integer value is also skipped silently, as int()
    # raises the same ValueError as a failed index() does.
    try:
      ind = args[:-1].index('--'+prefix+'runs')
      self.runs = int(args[ind+1])
    except ValueError:
      pass

    try:
      ind = args[:-1].index('--'+prefix+'samples')
      self.samples = int(args[ind+1])
    except ValueError:
      pass

    try:
      ind = args[:-1].index('--'+prefix+'burnIn')
      self.burnIn = int(args[ind+1])
    except ValueError:
      pass

    try:
      ind = args[:-1].index('--'+prefix+'lag')
      self.lag = int(args[ind+1])
    except ValueError:
      pass
Python