code
stringlengths
1
1.72M
language
stringclasses
1 value
# Copyright 2011 Tom SF Haines # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. import math import numpy from dp_conc import PriorConcDP from params import Params from smp.smp import FlagIndexArray class DocState: """Helper class to contain the State of Gibbs sampling for a specific document.""" def __init__(self, doc, alphaConc = None, abnormDict = None): if isinstance(doc, DocState): self.cluster = doc.cluster self.use = doc.use.copy() self.conc = doc.conc self.samples = doc.samples.copy() self.behFlags = doc.behFlags.copy() self.behFlagsIndex = doc.behFlagsIndex self.behCounts = doc.behCounts.copy() self.ident = doc.ident else: # Index of the cluster its assigned to, initialised to -1 to indicate it is not currently assigned... self.cluster = -1 # Definition of the documents DP, initialised to be empty, which contains instances of cluster instances. The use array is, as typical, indexed by instance in the first dimension and {0,1,2} in the second, where 0 gives the behaviour, 1 the index of the cluster instance it is instancing and 2 gives the number of users, which at this level will be the number of words. conc provides a sample of the concentration value for the DP... self.use = numpy.empty((0,3), dtype=numpy.int32) self.conc = alphaConc.conc # Contains the documents samples - a 2D array where the first dimension indexes each sample. 
There are then two columns - the first contains the instance index, which indexes the use array, and the second the word index, which indexes the multinomial assigned to each topic. We default to -1 in the instance index column to indicate that it is unassigned... self.samples = numpy.empty((doc.getSampleCount(),2), dtype=numpy.int32) self.samples[:,0] = -1 si = 0 for word, count in map(lambda i: doc.getWord(i), xrange(doc.getWordCount())): for _ in xrange(count): self.samples[si,1] = word si += 1 assert(si==doc.getSampleCount()) # Create the documents behaviour flags, which are an array of {0,1} values indicating if the given behaviour is in the document or not - entry 0 is reserved for normal behaviour, whilst the rest are reserved for abnormalities... self.behFlags = numpy.zeros(1+len(abnormDict), dtype=numpy.uint8) self.behFlags[0] = 1 # Normal behaviour always exists. for abnorm in doc.getAbnorms(): self.behFlags[abnormDict[abnorm]] = 1 # Index associated with the above behFlags - set on initialisation to match the corpus's FlagIndexArray... self.behFlagsIndex = -1 # We also need the behaviour counts - how many samples have been assigned to each behaviour... self.behCounts = numpy.zeros(1+len(abnormDict), dtype=numpy.int32) self.ident = doc.getIdent() class State: """State object, as manipulated by a Gibbs sampler to get samples of the unknown parameters of the model.""" def __init__(self, obj, params = None): """Constructs a state object given either another State object (clone), or a Corpus and a Params object. If the Params object is omitted it uses the default. Also supports construction from a single Document, where it uses lots of defaults but is basically identical to a Corpus with a single Document in - used as a shortcut when fitting a Document to an already learnt model.""" if isinstance(obj, State): # Cloning time... 
self.dnrDocInsts = obj.dnrDocInsts self.dnrCluInsts = obj.dnrCluInsts self.seperateClusterConc = obj.seperateClusterConc self.seperateDocumentConc = obj.seperateDocumentConc self.oneCluster = obj.oneCluster self.calcBeta = obj.calcBeta self.calcCluBmn = obj.calcCluBmn self.calcPhi = obj.calcPhi self.resampleConcs = obj.resampleConcs self.behSamples = obj.behSamples self.alpha = PriorConcDP(obj.alpha) self.beta = obj.beta.copy() self.gamma = PriorConcDP(obj.gamma) self.rho = PriorConcDP(obj.rho) self.mu = PriorConcDP(obj.mu) self.phi = obj.phi.copy() self.topicWord = obj.topicWord.copy() self.topicUse = obj.topicUse.copy() self.topicConc = obj.topicConc self.abnormTopicWord = obj.abnormTopicWord.copy() self.cluster = map(lambda t: (t[0].copy(),t[1],t[2].copy()),obj.cluster) self.clusterUse = obj.clusterUse.copy() self.clusterConc = obj.clusterConc self.doc = map(lambda d: DocState(d), obj.doc) self.abnorms = dict(obj.abnorms) self.fia = FlagIndexArray(obj.fia) self.params = Params(obj.params) self.model = Model(obj.model) elif isinstance(obj, Document): # Construct from a single document... 
self.dnrDocInsts = False self.dnrCluInsts = False self.seperateClusterConc = False self.seperateDocumentConc = False self.oneCluster = False self.calcBeta = False self.calcCluBmn = False self.calcPhi = False self.resampleConcs = False self.behSamples = 1024 wordCount = obj.getWord(obj.getWordCount()-1)[0] self.alpha = PriorConcDP() self.beta = numpy.ones(wordCount, dtype=numpy.float32) self.gamma = PriorConcDP() self.rho = PriorConcDP() self.mu = PriorConcDP() self.phi = numpy.ones(1+len(obj.getAbnorms()), dtype=numpy.float32) self.phi[0] *= 10.0 self.phi /= self.phi.sum() self.topicWord = numpy.zeros((0,wordCount), dtype=numpy.int32) self.topicUse = numpy.zeros(0,dtype=numpy.int32) self.topicConc = self.gamma.conc self.abnormTopicWord = numpy.zeros((1+len(obj.getAbnorms()), wordCount), dtype=numpy.int32) self.cluster = [] self.clusterUse = numpy.zeros(0,dtype=numpy.int32) self.clusterConc = self.mu.conc abnormDict = dict() for i, abnorm in enumerate(obj.getAbnorms()): abnormDict[abnorm] = i+1 self.doc = [DocState(obj,self.alpha,abnormDict)] self.abnorms = dict() for num, abnorm in enumerate(obj.getAbnorms()): self.abnorms[abnorm] = num+1 self.fia = FlagIndexArray(len(self.abnorms)+1) self.fia.addSingles() for doc in self.doc: doc.behFlagsIndex = self.fia.flagIndex(doc.behFlags) if params!=None: self.params = params else: self.params = Params() self.model = Model() else: # Construct from a corpus, as that is the only remaining option... # Behaviour flags... self.dnrDocInsts = obj.getDocInstsDNR() self.dnrCluInsts = obj.getCluInstsDNR() self.seperateClusterConc = obj.getSeperateClusterConc() self.seperateDocumentConc = obj.getSeperateDocumentConc() self.oneCluster = obj.getOneCluster() self.calcBeta = obj.getCalcBeta() self.calcCluBmn = obj.getCalcClusterBMN() self.calcPhi = obj.getCalcPhi() self.resampleConcs = obj.getResampleConcs() self.behSamples = obj.getBehSamples() # Concentration parameters - these are all constant... 
self.alpha = PriorConcDP(obj.getAlpha()) self.beta = numpy.ones(obj.getWordCount(),dtype=numpy.float32) self.beta *= obj.getBeta() self.gamma = PriorConcDP(obj.getGamma()) self.rho = PriorConcDP(obj.getRho()) self.mu = PriorConcDP(obj.getMu()) self.phi = numpy.ones(1+len(obj.getAbnormDict()), dtype=numpy.float32) self.phi[0] *= obj.getPhiRatio() self.phi *= obj.getPhiConc()*self.phi.shape[0] / self.phi.sum() # The topics in the model - consists of three parts - first an array indexed by [topic,word] which gives how many times each word has been drawn from the given topic - this alongside beta allows the relevant Dirichlet posterior to be determined. Additionally we have topicUse, which counts how many times each topic has been instanced in a cluster - this alongside topicConc, which is the sampled concentration, defines the DP from which topics are drawn for inclusion in clusters... self.topicWord = numpy.zeros((0,obj.getWordCount()),dtype=numpy.int32) self.topicUse = numpy.zeros(0,dtype=numpy.int32) self.topicConc = self.gamma.conc # A second topicWord-style matrix, indexed by behaviour and containing the abnormal topics. Entry 0, which is normal, is again an empty dummy... self.abnormTopicWord = numpy.zeros((1+len(obj.getAbnormDict()), obj.getWordCount()), dtype=numpy.int32) # Defines the clusters, as a list of (inst, conc, bmn, bmnPrior). inst is a 2D array, containing all the topic instances that make up the cluster - whilst the first dimension of the array indexes each instance the second has two entrys only, the first the index number for the topic, the second the number of using document instances. conc is the sampled concentration that completes the definition of the DP defined for each cluster. bmn is the multinomial on behaviours associated with the cluster - a 1D array of floats. bmnPrior is the flagSet aligned integer array that is the prior on bmn. 
Additionally we have the DDP from which the specific clusters are drawn - this is defined by clusterUse and clusterConc, just as for the topics... self.cluster = [] self.clusterUse = numpy.zeros(0, dtype=numpy.int32) self.clusterConc = self.mu.conc # List of document objects, to contain the documents - whilst declared immediatly below as an empty list we then proceed to fill it in with the information from the given Corpus... self.doc = [] for doc in obj.documentList(): self.doc.append(DocState(doc, self.alpha, obj.getAbnormDict())) # The abnormality dictionary - need a copy so we can convert from flags to the user provided codes after fitting the model... self.abnorms = dict(obj.getAbnormDict()) # The flag index array - converts each flag combination to an index - required for learning the per-cluster behaviour multinomials... self.fia = FlagIndexArray(len(self.abnorms)+1) self.fia.addSingles() for doc in self.doc: doc.behFlagsIndex = self.fia.flagIndex(doc.behFlags) # Store the parameters... if params!=None: self.params = params else: self.params = Params() # Create a model object, for storing samples into... self.model = Model() def setGlobalParams(self, sample): """Sets a number of parameters for the State after initialisation, taking them from the given Sample object. Designed for use with the addPrior method this allows you to extract all relevant parameters from a Sample. Must be called before any Gibbs sampling takes place.""" self.alpha = PriorConcDP(sample.alpha) self.beta = sample.beta.copy() self.gamma = PriorConcDP(sample.gamma) self.rho = PriorConcDP(sample.rho) self.mu = PriorConcDP(sample.mu) # No correct way of combining - the below seems reasonable enough however, and is correct if they have the same entrys... 
for key,fromIndex in sample.abnorms.iteritems(): if key in self.abnorms: toIndex = self.abnorms[key] self.phi[toIndex] = sample.phi[fromIndex] self.phi /= self.phi.sum() self.topicConc = sample.topicConc self.clusterConc = sample.clusterConc for doc in self.doc: doc.conc = self.alpha.conc def addPrior(self, sample): """Given a Sample object this uses it as a prior - this is primarilly used to sample a single or small number of documents using a model already trainned on another set of documents. It basically works by adding the topics, clusters and behaviours from the sample into this corpus, with the counts all intact so they have the relevant weight and can't be deleted. Note that you could in principle add multiple priors, though that would be quite a strange scenario. If only called once then the topic indices will line up. Note that all the prior parameters are not transfered, though often you would want to - setGlobalParams is provided to do this. Must be called before any Gibbs sampling takes place.""" # Below code has evolved into spagetti, via several other tasty culinary dishes, and needs a rewrite. Or to never be looked at or edited ever again. ################### # Do the topics... offset = self.topicWord.shape[0] if self.topicWord.shape[0]!=0: self.topicWord = numpy.vstack((self.topicWord,sample.topicWord)) else: self.topicWord = sample.topicWord.copy() self.topicUse = numpy.hstack((self.topicUse,sample.topicUse)) # Calculate the new abnormalities dictionary... newAbnorms = dict(sample.abnorms) for key,_ in self.abnorms.iteritems(): if key not in newAbnorms: val = len(newAbnorms)+1 newAbnorms[key] = val # Transfer over the abnormal word counts... 
newAbnormTopicWord = numpy.zeros((1+len(newAbnorms), max((self.abnormTopicWord.shape[1], sample.abnormTopicWord.shape[1]))), dtype=numpy.int32) for abnorm,origin in self.abnorms.iteritems(): dest = newAbnorms[abnorm] limit = self.abnormTopicWord.shape[1] newAbnormTopicWord[dest,:limit] += self.abnormTopicWord[origin,:limit] for abnorm,origin in sample.abnorms.iteritems(): dest = newAbnorms[abnorm] limit = sample.abnormTopicWord.shape[1] newAbnormTopicWord[dest,:limit] += sample.abnormTopicWord[origin,:limit] # Update the document flags/counts for behaviours... for doc in self.doc: newFlags = numpy.zeros(1+len(newAbnorms), dtype=numpy.uint8) newCounts = numpy.zeros(1+len(newAbnorms), dtype=numpy.int32) newFlags[0] = doc.behFlags[0] newCounts[0] = doc.behCounts[0] for abnorm,origin in self.abnorms.iteritems(): dest = newAbnorms[abnorm] newFlags[dest] = doc.behFlags[origin] newCounts[dest] = doc.behCounts[origin] doc.behFlags = newFlags doc.behCounts = newCounts # Update the old clusters behaviour arrays... def mapOldCluster(c): c2 = numpy.ones(1+len(newAbnorms), dtype=numpy.float32) c2 /= c2.sum() c2[0] *= c[2][0] for abnorm,origin in self.abnorms.iteritems(): dest = newAbnorms[abnorm] c2[dest] *= c[2][origin] c2 /= c2.sum() return (c[0],c[1],c2,c[3]) self.cluster = map(mapOldCluster ,self.cluster) origCluCount = len(self.cluster) # Add the new clusters, updating their behaviour arrays and topic indices, plus getting their priors updated with their associated documents... def mapCluster(pair): ci, c = pair c0 = c[0].copy() c0[:,0] += offset c2 = numpy.ones(1+len(newAbnorms), dtype=numpy.float32) c2 /= c2.sum() c2[0] *= c[2][0] for abnorm,origin in sample.abnorms.iteritems(): dest = newAbnorms[abnorm] c2[dest] *= c[2][origin] c2 /= c2.sum() c3 = c[3].copy() for doc in filter(lambda doc: doc.cluster==ci, sample.doc): fi = sample.fia.flagIndex(doc.behFlags, False) if fi>=len(doc.behFlags): # Only bother if the document has abnormalities, of which this is a valid test. 
total = 0 for i in xrange(doc.dp.shape[0]): c3[doc.dp[i,0]] += doc.dp[i,2] total += doc.dp[i,2] c3[fi] -= total + 1 return (c0,c[1],c2,c3) self.cluster += map(mapCluster, enumerate(sample.cluster)) self.clusterUse = numpy.hstack((self.clusterUse, sample.clusterUse)) # Update phi... newPhi = numpy.ones(len(newAbnorms)+1,dtype=numpy.float32) newPhi[0] = 0.5*(self.phi[0]+sample.phi[0]) for abnorm,origin in self.abnorms.iteritems(): dest = newAbnorms[abnorm] newPhi[dest] = self.phi[origin] for abnorm,origin in sample.abnorms.iteritems(): dest = newAbnorms[abnorm] if abnorm not in self.abnorms: newPhi[dest] = sample.phi[origin] else: newPhi[dest] = 0.5*(newPhi[dest] + sample.phi[origin]) self.phi = newPhi self.phi /= self.phi.sum() # Recreate the flag index array... remapOrig = dict() # Old flag positions to new flag positions. remapOrig[0] = 0 for abnorm,origin in self.abnorms.iteritems(): remapOrig[origin] = newAbnorms[abnorm] remapSam = dict() # sample flag positions to new flag positions. remapSam[0] = 0 for abnorm,origin in sample.abnorms.iteritems(): remapSam[origin] = newAbnorms[abnorm] newFia = FlagIndexArray(len(newAbnorms)+1) newFia.addSingles() behIndAdjOrig = newFia.addFlagIndexArray(self.fia,remapOrig) behIndAdjSam = newFia.addFlagIndexArray(sample.fia,remapSam) for doc in self.doc: doc.behFlagsIndex = behIndAdjOrig[doc.behFlagsIndex] # Update cluster priors on bmn arrays... for c in xrange(len(self.cluster)): clu = self.cluster[c] newBmn = numpy.zeros(newFia.flagCount(),dtype=numpy.int32) oldBmn = clu[3].copy() # Transilate from old set... for b in xrange(oldBmn.shape[0]): index = behIndAdjOrig[b] if c<origCluCount else behIndAdjSam[b] newBmn[index] += oldBmn[b] self.cluster[c] = (clu[0], clu[1], clu[2], newBmn) # Replace the old abnormality and fia stuff... 
self.abnormTopicWord = newAbnormTopicWord self.abnorms = newAbnorms self.fia = newFia def sample(self): """Samples the current state, storing the current estimate of the model parameters.""" self.model.sampleState(self) def absorbClone(self,clone): """Given a clone absorb all its samples - used for multiprocessing.""" self.model.absorbModel(clone.model) def getParams(self): """Returns the parameters object.""" return self.params def getModel(self): """Returns the model constructed from all the calls to sample().""" return self.model # Includes at tail of file to resolve circular dependencies... from document import Document from model import Model
Python
# Copyright 2011 Tom SF Haines # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. from scipy import weave import unittest from utils.start_cpp import start_cpp from ds_cpp import ds_code # Provides code for converting from the python to the C++ data structure, and back again - this is so the data can be stored in a suitable form in both situations, though it comes at the expense of a complex conversion... ds_link_code = ds_code + start_cpp() + """ // Helper for extracting a boolean from a Python object... bool GetObjectBool(PyObject * obj, const char * name) { PyObject * boolObj = PyObject_GetAttrString(obj, name); bool ret = boolObj==Py_True; Py_DECREF(boolObj); return ret; } // Helper converter for the Conc class and its python equivalent, PriorConcDP - just need to go one way. Given an object and the name of the variable in the object that is the PriorConcDP object... 
void ConcPyToCpp(PyObject * obj, const char * name, Conc & out) { PyObject * pyConc = PyObject_GetAttrString(obj,name); PyObject * alpha = PyObject_GetAttrString(pyConc,"_PriorConcDP__alpha"); out.alpha = PyFloat_AsDouble(alpha); Py_DECREF(alpha); PyObject * beta = PyObject_GetAttrString(pyConc,"_PriorConcDP__beta"); out.beta = PyFloat_AsDouble(beta); Py_DECREF(beta); PyObject * conc = PyObject_GetAttrString(pyConc,"_PriorConcDP__conc"); out.conc = PyFloat_AsDouble(conc); Py_DECREF(conc); Py_DECREF(pyConc); } // Python -> C++ - given pointers to the State class and a State object... // (The State object should be empty when passed.) void StatePyToCpp(PyObject * from, State * to) { // Extract the flags... to->dnrDocInsts = GetObjectBool(from,"dnrDocInsts"); to->dnrCluInsts = GetObjectBool(from,"dnrCluInsts"); to->seperateClusterConc = GetObjectBool(from,"seperateClusterConc"); to->seperateDocumentConc = GetObjectBool(from,"seperateDocumentConc"); to->oneCluster = GetObjectBool(from,"oneCluster"); to->calcBeta = GetObjectBool(from,"calcBeta"); to->calcCluBmn = GetObjectBool(from,"calcCluBmn"); to->calcPhi = GetObjectBool(from,"calcPhi"); to->resampleConcs = GetObjectBool(from,"resampleConcs"); to->behSamples = GetObjectInt(from,"behSamples"); // Extract all the parameters, though only rho and beta get stored in the state - others get used later when filling out other structures... 
Conc alpha; ConcPyToCpp(from,"alpha",alpha); PyArrayObject * beta = (PyArrayObject*)PyObject_GetAttrString(from,"beta"); to->beta = new float[beta->dimensions[0]]; to->betaSum = 0.0; for (int i=0;i<beta->dimensions[0];i++) { to->beta[i] = Float1D(beta,i); to->betaSum += to->beta[i]; } Py_DECREF(beta); Conc gamma; ConcPyToCpp(from,"gamma",gamma); ConcPyToCpp(from,"rho",to->rho); Conc mu; ConcPyToCpp(from,"mu",mu); PyArrayObject * phi = (PyArrayObject*)PyObject_GetAttrString(from,"phi"); to->phi = new float[phi->dimensions[0]]; for (int i=0;i<phi->dimensions[0];i++) { to->phi[i] = Float1D(phi,i); } Py_DECREF(phi); // Number of behaviours... { PyObject * abnorms = PyObject_GetAttrString(from,"abnorms"); to->behCount = 1 + PyDict_Size(abnorms); Py_DECREF(abnorms); } // Store the flag set matrix (Involves calling a python function.)... { PyObject * fia = PyObject_GetAttrString(from,"fia"); PyObject * func = PyObject_GetAttrString(fia,"getFlagMatrix"); to->flagSets = (PyArrayObject*)PyObject_CallObject(func, 0); Py_DECREF(func); Py_DECREF(fia); } // Create the topic objects... 
PyArrayObject * topicWord = (PyArrayObject*)PyObject_GetAttrString(from,"topicWord"); PyArrayObject * topicUse = (PyArrayObject*)PyObject_GetAttrString(from,"topicUse"); int topicCount = topicWord->dimensions[0]; int wordCount = topicWord->dimensions[1]; to->wordCount = wordCount; ItemRef<Topic,Conc> ** topicArray = new ItemRef<Topic,Conc>*[topicCount]; for (int t=0;t<topicCount;t++) { ItemRef<Topic,Conc> * topic = to->topics.Append(); topicArray[t] = topic; topic->id = t; topic->wc = new int[wordCount]; topic->wcTotal = 0; for (int w=0;w<wordCount;w++) { int val = Int2D(topicWord,t,w); topic->wc[w] = val; topic->wcTotal += val; } topic->beh = 0; topic->IncRef(Int1D(topicUse,t)); } Py_DECREF(topicUse); Py_DECREF(topicWord); PyObject * topicConc = PyObject_GetAttrString(from,"topicConc"); to->topics.Body().alpha = gamma.alpha; to->topics.Body().beta = gamma.beta; to->topics.Body().conc = PyFloat_AsDouble(topicConc); Py_DECREF(topicConc); // Do the abnormal topics... PyArrayObject * abnormTopicWord = (PyArrayObject*)PyObject_GetAttrString(from, "abnormTopicWord"); ItemRef<ClusterInst,Conc> ** abArray = new ItemRef<ClusterInst,Conc>*[to->behCount]; for (int b=0;b<to->behCount;b++) { ItemRef<Topic,Conc> * topic = to->behTopics.Append(); ItemRef<ClusterInst,Conc> * cluInst = to->behCluInsts.Append(); abArray[b] = cluInst; topic->IncRef(); cluInst->IncRef(); cluInst->SetTopic(topic,false); topic->id = -1; topic->wc = new int[wordCount]; topic->wcTotal = 0; for (int w=0;w<wordCount;w++) { int val = Int2D(abnormTopicWord,b,w); topic->wc[w] = val; topic->wcTotal += val; } topic->beh = b; cluInst->id = -1; } Py_DECREF(abnormTopicWord); // Now create the clusters... 
PyObject * cluster = PyObject_GetAttrString(from,"cluster"); PyArrayObject * clusterUse = (PyArrayObject*)PyObject_GetAttrString(from,"clusterUse"); int clusterCount = PyList_Size(cluster); ItemRef<Cluster,Conc> ** clusterArray = new ItemRef<Cluster,Conc>*[clusterCount]; ItemRef<ClusterInst,Conc> *** clusterInstArray = new ItemRef<ClusterInst,Conc>**[clusterCount]; for (int c=0;c<clusterCount;c++) { PyObject * cluEntry = PyList_GetItem(cluster,c); PyArrayObject * cluInst = (PyArrayObject*)PyTuple_GetItem(cluEntry,0); PyObject * cluConc = PyTuple_GetItem(cluEntry,1); PyArrayObject * cluBMN = (PyArrayObject*)PyTuple_GetItem(cluEntry,2); PyArrayObject * cluPriorBMN = (PyArrayObject*)PyTuple_GetItem(cluEntry,3); // Create the cluster instance... ItemRef<Cluster,Conc> * clu = to->clusters.Append(); clu->id = c; clusterArray[c] = clu; clu->IncRef(Int1D(clusterUse,c)); // Create the clusters topic instances, including filling in the counts... clusterInstArray[c] = new ItemRef<ClusterInst,Conc>*[cluInst->dimensions[0]]; for (int ci=0;ci<cluInst->dimensions[0];ci++) { ItemRef<ClusterInst,Conc> * nci = clu->Append(); nci->id = ci; clusterInstArray[c][ci] = nci; int topic = Int2D(cluInst,ci,0); int users = Int2D(cluInst,ci,1); if (topic!=-1) nci->SetTopic(topicArray[topic],false); nci->IncRef(users); } // Fill in the clusters concentration stuff... clu->Body().alpha = to->rho.alpha; clu->Body().beta = to->rho.beta; clu->Body().conc = PyFloat_AsDouble(cluConc); // Do the multinomial... float * bmn = new float[to->behCount]; for (int b=0;b<to->behCount;b++) { bmn[b] = Float1D(cluBMN, b); } clu->SetBMN(bmn); // Do the prior on bmn... 
int * bmnPrior = new int[to->flagSets->dimensions[0]]; for (int fs=0;fs<to->flagSets->dimensions[0];fs++) { bmnPrior[fs] = Int1D(cluPriorBMN, fs); } clu->SetBehCountPrior(bmnPrior); } Py_DECREF(clusterUse); Py_DECREF(cluster); PyObject * clusterConc = PyObject_GetAttrString(from,"clusterConc"); to->clusters.Body().alpha = mu.alpha; to->clusters.Body().beta = mu.beta; to->clusters.Body().conc = PyFloat_AsDouble(clusterConc); Py_DECREF(clusterConc); // Finally, create the documents... PyObject * docList = PyObject_GetAttrString(from,"doc"); to->docCount = PyList_Size(docList); delete[] to->doc; to->doc = new Document[to->docCount]; for (int d=0;d<to->docCount;d++) { // Get the relevant entities... PyObject * fromDoc = PyList_GetItem(docList,d); Document & toDoc = to->doc[d]; // Setup the link to the cluster... PyObject * clusterIndex = PyObject_GetAttrString(fromDoc,"cluster"); int cluIndex = PyInt_AsLong(clusterIndex); Py_DECREF(clusterIndex); if (cluIndex!=-1) toDoc.SetCluster(clusterArray[cluIndex],false); // Prep the documents DP... PyArrayObject * use = (PyArrayObject*)PyObject_GetAttrString(fromDoc,"use"); ItemRef<DocInst,Conc> ** docInstArray = new ItemRef<DocInst,Conc>*[use->dimensions[0]]; for (int di=0;di<use->dimensions[0];di++) { ItemRef<DocInst,Conc> * docInst = toDoc.Append(); docInst->id = di; docInstArray[di] = docInst; int ciBeh = Int2D(use,di,0); int ciIndex = Int2D(use,di,1); int ciUse = Int2D(use,di,2); if (ciBeh!=-1) { if (ciBeh==0) { docInst->SetClusterInst(clusterInstArray[cluIndex][ciIndex],false); } else { docInst->SetClusterInst(abArray[ciBeh]); } } docInst->IncRef(ciUse); } Py_DECREF(use); PyObject * docConc = PyObject_GetAttrString(fromDoc,"conc"); toDoc.Body().alpha = alpha.alpha; toDoc.Body().beta = alpha.beta; toDoc.Body().conc = PyFloat_AsDouble(docConc); Py_DECREF(docConc); // Store the samples... 
PyArrayObject * samples = (PyArrayObject*)PyObject_GetAttrString(fromDoc,"samples"); Sample * sArray = new Sample[samples->dimensions[0]]; for (int s=0;s<samples->dimensions[0];s++) { int di = Int2D(samples,s,0); if (di!=-1) sArray[s].SetDocInst(docInstArray[di],false); sArray[s].SetWord(Int2D(samples,s,1)); } toDoc.SetSamples(samples->dimensions[0],sArray); Py_DECREF(samples); // Do the abnormality vectors... PyArrayObject * behFlags = (PyArrayObject*)PyObject_GetAttrString(fromDoc,"behFlags"); PyArrayObject * behCounts = (PyArrayObject*)PyObject_GetAttrString(fromDoc,"behCounts"); unsigned char * bFlags = new unsigned char[behFlags->dimensions[0]]; int * bCounts = new int[behCounts->dimensions[0]]; for (int b=0;b<behFlags->dimensions[0];b++) { bFlags[b] = Byte1D(behFlags,b); bCounts[b] = Int1D(behCounts,b); } toDoc.SetBehFlags(bFlags); toDoc.SetFlagIndex(GetObjectInt(fromDoc,"behFlagsIndex")); toDoc.SetBehCounts(bCounts); Py_DECREF(behCounts); Py_DECREF(behFlags); // Clean up... delete[] docInstArray; } Py_DECREF(docList); // Some temporary storage... to->tempWord = new int[to->wordCount]; // Clean up... for (int c=0;c<clusterCount;c++) delete[] clusterInstArray[c]; delete[] clusterInstArray; delete[] clusterArray; delete[] abArray; delete[] topicArray; } // C++ -> Python - given pointers to the State class and a State object... // Note that this assumes that the State object was created from the PyObject in the first place - if not it will almost certainly break. void StateCppToPy(State * from, PyObject * to) { // Update the initial values of alpha, gamma, rho and mu to the current values... 
{ float alpha = from->doc[0].Body().conc; float gamma = from->topics.Body().conc; float rho = from->rho.conc; float mu = from->clusters.Body().conc; PyObject * pyAlpha = PyFloat_FromDouble(alpha); PyObject * pyGamma = PyFloat_FromDouble(gamma); PyObject * pyRho = PyFloat_FromDouble(rho); PyObject * pyMu = PyFloat_FromDouble(mu); PyObject * alphaStore = PyObject_GetAttrString(to, "alpha"); PyObject * gammaStore = PyObject_GetAttrString(to, "gamma"); PyObject * rhoStore = PyObject_GetAttrString(to, "rho"); PyObject * muStore = PyObject_GetAttrString(to, "mu"); PyObject_SetAttrString(alphaStore, "conc", pyAlpha); PyObject_SetAttrString(gammaStore, "conc", pyGamma); PyObject_SetAttrString(rhoStore, "conc", pyRho); PyObject_SetAttrString(muStore, "conc", pyMu); Py_DECREF(pyAlpha); Py_DECREF(pyGamma); Py_DECREF(pyRho); Py_DECREF(pyMu); } // Extract beta - it could of been updated... npy_intp size[2]; size[0] = from->wordCount; PyArrayObject * beta = (PyArrayObject*)PyArray_SimpleNew(1,size,NPY_FLOAT); for (int i=0;i<from->wordCount;i++) { Float1D(beta,i) = from->beta[i]; } PyObject_SetAttrString(to,"beta",(PyObject*)beta); Py_DECREF(beta); // Extract phi - same as for beta... size[0] = from->behCount; PyArrayObject * phi = (PyArrayObject*)PyArray_SimpleNew(1,size,NPY_FLOAT); for (int i=0;i<from->behCount;i++) { Float1D(phi,i) = from->phi[i]; } PyObject_SetAttrString(to,"phi",(PyObject*)phi); Py_DECREF(phi); // Update the topics information - replace current... 
size[0] = from->topics.Size(); size[1] = from->wordCount; PyArrayObject * topicWord = (PyArrayObject*)PyArray_SimpleNew(2,size,NPY_INT); PyArrayObject * topicUse = (PyArrayObject*)PyArray_SimpleNew(1,size,NPY_INT); { ItemRef<Topic,Conc> * targ = from->topics.First(); for (int t=0;t<topicWord->dimensions[0];t++) { targ->id = t; for (int w=0;w<topicWord->dimensions[1];w++) { Int2D(topicWord,t,w) = targ->wc[w]; } Int1D(topicUse,t) = targ->RefCount(); targ = targ->Next(); } } PyObject_SetAttrString(to,"topicUse",(PyObject*)topicUse); PyObject_SetAttrString(to,"topicWord",(PyObject*)topicWord); Py_DECREF(topicUse); Py_DECREF(topicWord); PyObject * topicConc = PyFloat_FromDouble(from->topics.Body().conc); PyObject_SetAttrString(to,"topicConc",topicConc); Py_DECREF(topicConc); // Update the abnormal topic information - treat as an update... PyArrayObject * abnormTopicWord = (PyArrayObject*)PyObject_GetAttrString(to, "abnormTopicWord"); { ItemRef<Topic,Conc> * topic = from->behTopics.First(); while (topic->Valid()) { for (int w=0;w<abnormTopicWord->dimensions[1];w++) { Int2D(abnormTopicWord,topic->beh,w) = topic->wc[w]; } topic = topic->Next(); } } Py_DECREF(abnormTopicWord); // Update the clusters information - replace current... 
size[0] = from->clusters.Size(); PyObject * cluster = PyList_New(size[0]); PyArrayObject * clusterUse = (PyArrayObject*)PyArray_SimpleNew(1,size,NPY_INT); { ItemRef<Cluster,Conc> * clu = from->clusters.First(); for (int c=0;c<from->clusters.Size();c++) { clu->id = c; PyObject * tup = PyTuple_New(4); PyList_SetItem(cluster,c,tup); size[0] = clu->Size(); size[1] = 2; PyArrayObject * clusterInstance = (PyArrayObject*)PyArray_SimpleNew(2,size,NPY_INT); size[0] = from->behCount; PyArrayObject * behMultinomial = (PyArrayObject*)PyArray_SimpleNew(1,size,NPY_FLOAT); size[0] = from->flagSets->dimensions[0]; PyArrayObject * behPriorMulti = (PyArrayObject*)PyArray_SimpleNew(1,size,NPY_INT); PyTuple_SetItem(tup, 0, (PyObject*)clusterInstance); PyTuple_SetItem(tup, 1, PyFloat_FromDouble(clu->Body().conc)); PyTuple_SetItem(tup, 2, (PyObject*)behMultinomial); PyTuple_SetItem(tup, 3, (PyObject*)behPriorMulti); ItemRef<ClusterInst,Conc> * cluInst = clu->First(); for (int ci=0;ci<clu->Size();ci++) { cluInst->id = ci; if (cluInst->GetTopic()) Int2D(clusterInstance,ci,0) = cluInst->GetTopic()->id; else Int2D(clusterInstance,ci,0) = -1; Int2D(clusterInstance,ci,1) = cluInst->RefCount(); cluInst = cluInst->Next(); } for (int b=0;b<from->behCount;b++) { Float1D(behMultinomial,b) = clu->GetBMN()[b]; } if (clu->GetBehCountPrior()) // Easier to introduce it here - lets the rest of the code be null pointer safe as I made it that due to incrimental implimentation anyway. 
{ for (int fs=0;fs<from->flagSets->dimensions[0];fs++) { Int1D(behPriorMulti,fs) = clu->GetBehCountPrior()[fs]; } } else { for (int fs=0;fs<from->flagSets->dimensions[0];fs++) { Int1D(behPriorMulti,fs) = 0; } } Int1D(clusterUse,c) = clu->RefCount(); clu = clu->Next(); } } PyObject_SetAttrString(to,"clusterUse",(PyObject*)clusterUse); PyObject_SetAttrString(to,"cluster",cluster); Py_DECREF(clusterUse); Py_DECREF(cluster); PyObject * clusterConc = PyFloat_FromDouble(from->clusters.Body().conc); PyObject_SetAttrString(to,"clusterConc",clusterConc); Py_DECREF(clusterConc); // Update the documents information - keep it simple by just overwriting cluster and sample assignments whilst replacing the per-document DP... PyObject * docList = PyObject_GetAttrString(to,"doc"); for (int d=0;d<from->docCount;d++) { Document & fromDoc = from->doc[d]; PyObject * toDoc = PyList_GetItem(docList,d); // Set cluster... int clusterID = -1; if (fromDoc.GetCluster()) clusterID = fromDoc.GetCluster()->id; PyObject * cluID = PyInt_FromLong(clusterID); PyObject_SetAttrString(toDoc,"cluster",cluID); Py_DECREF(cluID); // Replace DP... size[0] = fromDoc.Size(); size[1] = 3; PyArrayObject * use = (PyArrayObject*)PyArray_SimpleNew(2,size,NPY_INT); ItemRef<DocInst,Conc> * docInst = fromDoc.First(); for (int di=0;di<size[0];di++) { docInst->id = di; if (docInst->GetClusterInst()) { Int2D(use,di,0) = docInst->GetClusterInst()->GetTopic()->beh; Int2D(use,di,1) = docInst->GetClusterInst()->id; } else { Int2D(use,di,0) = -1; Int2D(use,di,1) = -1; } Int2D(use,di,2) = docInst->RefCount(); docInst = docInst->Next(); } PyObject_SetAttrString(toDoc,"use",(PyObject*)use); Py_DECREF(use); PyObject * conc = PyFloat_FromDouble(fromDoc.Body().conc); PyObject_SetAttrString(toDoc,"conc",conc); Py_DECREF(conc); // Update samples DP assignments... 
{ PyArrayObject * samples = (PyArrayObject*)PyObject_GetAttrString(toDoc,"samples"); for (int s=0;s<fromDoc.SampleCount();s++) { Sample & sam = fromDoc.GetSample(s); if (sam.GetDocInst()) Int2D(samples,s,0) = sam.GetDocInst()->id; else Int2D(samples,s,0) = -1; } Py_DECREF(samples); } // Update behaviour counts... { PyArrayObject * behCounts = (PyArrayObject*)PyObject_GetAttrString(toDoc,"behCounts"); for (int b=0;b<from->behCount;b++) { Int1D(behCounts,b) = fromDoc.GetBehCounts()[b]; } Py_DECREF(behCounts); } } Py_DECREF(docList); } """ class TestDSLink(unittest.TestCase): """Test code for the data structure.""" def test_compile(self): code = start_cpp(dual_hdp_ds_link) + """ """ weave.inline(code, support_code=dual_hdp_ds_link) # If this file is run do the unit tests... if __name__ == '__main__': unittest.main()
Python
#! /usr/bin/env python

# Copyright 2011 Tom SF Haines

# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.

# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.

# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.


# Demonstration/test script for the Sparse Multinomial Posterior (SMP):
# draws sparse count vectors from a known multinomial, feeds them to the
# SMP estimator, and prints how far the estimated mean is from the truth.
# (Python 2 script - uses print statements and xrange.)

import random

import numpy
import numpy.random
import numpy.random.mtrand

from smp import FlagIndexArray, SMP



# Parameters...
size = 4           # Number of entries in the multinomial being estimated.
n_to_keep = 2      # How many entries each sparse sample actually provides counts for.
draws = 30000      # Number of draws in each draw taken from the multinomial.
samples = 8        # Number of draws from multinomial.
hold_first = True  # If True entry 0 is forced into every sample's kept set (see below).
samCount = 1024    # Number of samples the SMP uses to approximate its integral.



# Generate a multinomial...
mn = numpy.random.mtrand.dirichlet(numpy.ones(size))

print 'Actual multinomial ='
print mn


# Draw some samples, with only a finite number of entrys each...
sam = []
for _ in xrange(samples):
  # Select which entries this sample provides counts for; with hold_first
  # entry 0 is always first in the list and hence always kept by the
  # truncation below...
  pos_to_use = range(size)
  if hold_first:
    pos_to_use = pos_to_use[1:]
    random.shuffle(pos_to_use)
    pos_to_use = [0] + pos_to_use
  else:
    random.shuffle(pos_to_use)
  pos_to_use = pos_to_use[:n_to_keep]

  # Draw counts from the multinomial restricted (and renormalised) to the
  # selected entries...
  draw_mn = mn[numpy.array(pos_to_use)]
  draw_mn /= draw_mn.sum()
  draw = numpy.random.multinomial(draws,draw_mn)

  # Scatter the counts back into full-size arrays; flags marks which entries
  # of counts are actually provided by this sample...
  counts = numpy.zeros(size,dtype=numpy.int32)
  flags = numpy.zeros(size,dtype=numpy.uint8)
  for a,b in enumerate(pos_to_use):
    counts[b] = draw[a]
    flags[b] = 1

  sam.append((counts,flags))


# Construct the flag index array, put relevant index into each sample...
fia = FlagIndexArray(size)
fia.addSingles()

for i in xrange(len(sam)):
  ind = fia.flagIndex(sam[i][1])
  sam[i] = (sam[i][0],sam[i][1],ind)  # Each sample becomes (counts, flags, flag index).


# Construct the SMP object...
smp = SMP(fia)
smp.setSampleCount(samCount)

for s in sam:
  smp.add(s[2],s[0])


# Get and print out the mean and its distance from the actual multinomial...
mean = smp.mean()

print 'Mean =', mean
print 'error =', numpy.fabs(mn-mean).sum()/mn.shape[0]
print 'cError =\n', numpy.fabs(mn-mean)
Python
# Copyright 2011 Tom SF Haines

# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.

# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.

# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.



import numpy

from scipy import weave
from utils.start_cpp import start_cpp

from flag_index_array import FlagIndexArray
from smp_cpp import smp_code



class SMP:
  """Impliments a Python wrapper around the C++ code for the Sparse Multinomial Posterior. Estimates the multinomial distribution from which various samples have been drawn, where those samples are sparse, i.e. not all counts are provided."""

  def __init__(self, fia):
    """Initialises with a FlagIndexArray object (Which must of had the addSingles method correctly called.) - this specifies the various combinations of counts being provided that are allowed."""
    # Flag matrix - one row per allowed combination of provided counts, one column per multinomial entry.
    self.flagMat = fia.getFlagMatrix()
    # Accumulated exponents of the posterior - one entry per flag-matrix row; see add().
    self.power = numpy.zeros(self.flagMat.shape[0], dtype=numpy.int32)
    # Number of samples used to approximate the integral in mean().
    self.sampleCount = 1024

    # Dirichlet prior, stored as a multinomial plus concentration; defaults to
    # uniform with zero concentration, i.e. no prior influence...
    self.priorMN = numpy.ones(self.flagMat.shape[1], dtype=numpy.float32)
    self.priorMN /= self.flagMat.shape[1]
    self.priorConc = 0.0

  def setSampleCount(self, count):
    """Sets the number of samples to use when approximating the integral."""
    self.sampleCount = count

  def setPrior(self, conc, mn = None):
    """Sets the prior, as a Dirichlet distribution represented by a concentration and a multinomial distribution. Can leave out the multinomial to just update the concentration."""
    # Bug fix: this was 'if mn!=None:', which for a numpy array performs an
    # elementwise comparison whose truth value is ambiguous (raises ValueError).
    if mn is not None:
      self.priorMN[:] = mn
    self.priorConc = conc

  def reset(self):
    """Causes a reset, so you may add a new set of samples."""
    self.power[:] = 0

  def add(self, fi, counts):
    """Given the flag index returned from the relevant fia and an array of counts this adds it to the smp."""
    # Counts masked to the entries this flag combination actually provides...
    c = counts * self.flagMat[fi,:]
    # Per-entry exponents go into the leading (singles) region; the combined
    # normalisation term is accumulated against the flag combination itself...
    self.power[:self.flagMat.shape[1]] += c
    self.power[fi] -= c.sum() + 1

  def mean(self):
    """Returns an estimate of the mean for each value of the multinomial, as an array, given the evidence provided. (Will itself sum to one - a necesary consequence of being an average of points constrained to the simplex.)"""
    code = start_cpp(smp_code) + """
    srand48(time(0));

    SMP smp(NflagMat[1],NflagMat[0]);
    smp.SetFIA(flagMat);
    smp.SetSampleCount(sampleCount);
    smp.SetPrior(priorMN, priorConc);
    smp.Add(power_array);

    smp.Mean(out);
    """

    flagMat = self.flagMat
    sampleCount = self.sampleCount
    priorMN = self.priorMN
    priorConc = self.priorConc
    power = self.power
    out = numpy.empty(flagMat.shape[1], dtype=numpy.float32)

    weave.inline(code, ['flagMat', 'sampleCount', 'priorMN', 'priorConc', 'power', 'out'], support_code=smp_code)

    return out
Python
# Copyright (c) 2011, Tom SF Haines
# All rights reserved.

# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:

#  * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.

#  * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.

# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.



from utils.start_cpp import start_cpp



# Code for sampling from various distributions, including some very specific situations involving Dirichlet processes...
# NOTE(review): this is a C++ support-code string intended to be concatenated
# into scipy.weave inline code; it relies on drand48/lrand48, so the caller is
# presumably expected to seed with srand48 - confirm at the call sites.
sampling_code = start_cpp() + """
#ifndef SAMPLING_CODE
#define SAMPLING_CODE

#include <stdlib.h>
#include <math.h>

const double gamma_approx = 32.0; // Threshold between the two methods of doing a gamma draw.



// Returns a sample from the natural numbers [0,n)...
int sample_nat(int n)
{
 return lrand48()%n;
}

// Returns a sample from [0.0,1.0)...
double sample_uniform()
{
 return drand48();
 //return double(random())/(double(RAND_MAX)+1.0);
}

// Samples from a normal distribution with a mean of 0 and a standard deviation of 1...
double sample_standard_normal()
{
 double u = 1.0-sample_uniform();
 double v = 1.0-sample_uniform();
 return sqrt(-2.0*log(u)) * cos(2.0*M_PI*v);
}

// Samples from a normal distribution with the given mean and standard deviation...
double sample_normal(double mean, double sd)
{
 return mean + sd*sample_standard_normal();
}



// Samples from the Gamma distribution, base version that has no scaling parameter...
/*double sample_gamma(double alpha)
{
 // Check if the alpha value is high enough to approximate via a normal distribution...
 if (alpha>gamma_approx)
 {
  while (true)
  {
   double ret = sample_normal(alpha, sqrt(alpha));
   if (ret<0.0) continue;
   return ret;
  }
 }

 // First do the integer part of gamma(alpha)...
 double ret = 0.0; // 1.0
 while (alpha>=1.0)
 {
  alpha -= 1.0;
  //ret /= 1.0 - sample_uniform();
  ret -= log(1.0-sample_uniform());
 }
 //ret = log(ret);

 // Now do the remaining fractional part and sum it in - uses rejection sampling...
 if (alpha>1e-4)
 {
  while (true)
  {
   double u1 = 1.0 - sample_uniform();
   double u2 = 1.0 - sample_uniform();
   double u3 = 1.0 - sample_uniform();

   double frac, point;
   if (u1<=(M_E/(M_E+alpha)))
   {
    frac = pow(u2,1.0/alpha);
    point = u3*pow(frac,alpha-1.0);
   }
   else
   {
    frac = 1.0 - log(u2);
    point = u3*exp(-frac);
   }

   if (point<=(pow(frac,alpha-1.0)*exp(-frac)))
   {
    ret += frac;
    break;
   }
  }
 }

 // Finally return...
 return ret;
}*/

// As above, but faster...
double sample_gamma(double alpha)
{
 // Check if the alpha value is high enough to approximate via a normal distribution...
 if (alpha>gamma_approx)
 {
  while (true)
  {
   double ret = sample_normal(alpha, sqrt(alpha));
   if (ret<0.0) continue;
   return ret;
  }
 }

 // If alpha is one, within tolerance, just use an exponential distribution...
 if (fabs(alpha-1.0)<1e-4)
 {
  return -log(1.0-sample_uniform());
 }

 if (alpha>1.0)
 {
  // If alpha is 1 or greater use the Cheng/Feast method...
  while (true)
  {
   double u1 = sample_uniform();
   double u2 = sample_uniform();

   double v = ((alpha - 1.0/(6.0*alpha))*u1) / ((alpha-1.0)*u2);

   double lt2 = 2.0*(u2-1.0)/(alpha-1) + v + 1.0/v;
   if (lt2<=2.0)
   {
    return (alpha-1.0)*v;
   }

   double lt1 = 2.0*log(u2)/(alpha-1.0) - log(v) + v;
   if (lt1<=1.0)
   {
    return (alpha-1.0)*v;
   }
  }
 }
 else
 {
  // If alpha is less than 1 use a rejection sampling method...
  while (true)
  {
   double u1 = 1.0 - sample_uniform();
   double u2 = 1.0 - sample_uniform();
   double u3 = 1.0 - sample_uniform();

   double frac, point;
   if (u1<=(M_E/(M_E+alpha)))
   {
    frac = pow(u2,1.0/alpha);
    point = u3*pow(frac,alpha-1.0);
   }
   else
   {
    frac = 1.0 - log(u2);
    point = u3*exp(-frac);
   }

   if (point<=(pow(frac,alpha-1.0)*exp(-frac)))
   {
    return frac;
    break;
   }
  }
 }
}

// Samples from the Gamma distribution, version that has a scaling parameter...
double sample_gamma(double alpha, double beta)
{
 return sample_gamma(alpha)/beta;
}

// Samples from the Beta distribution...
double sample_beta(double alpha, double beta)
{
 double g1 = sample_gamma(alpha);
 double g2 = sample_gamma(beta);
 return g1 / (g1 + g2);
}

#endif
"""
Python
# Copyright (c) 2011, Tom SF Haines # All rights reserved. # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. # * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. from utils.python_obj_cpp import python_obj_code from linked_list_cpp import linked_list_gc_code from utils.gamma_cpp import gamma_code from sampling_cpp import sampling_code from conc_cpp import conc_code from dir_est_cpp import dir_est_code # Put all the suplied code together into one easy to use include... dp_utils_code = python_obj_code + linked_list_gc_code + gamma_code + sampling_code + conc_code + dir_est_code
Python
# Copyright (c) 2011, Tom SF Haines
# All rights reserved.

# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:

#  * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.

#  * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.

# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.



from utils.start_cpp import start_cpp



# C++ support code for Gibbs resampling the concentration parameter of a
# Dirichlet process given a Gamma prior. Requires sampling_code (sample_beta,
# sample_gamma, sample_uniform) to be included before it.
# NOTE(review): SampleConcDP::SetPrior is commented as resetting the entire
# class, but it does not reset prev_conc - callers presumably follow it with
# SetPrevConc; confirm at the call sites.
conc_code = start_cpp() + """
// This funky little function is used to resample the concentration parameter of a Dirichlet process, using the previous parameter - allows this parameter to be Gibbs sampled. Also works for any level of a HDP, due to the limited interactions.
// Parameters are:
// pcp - previous concentration parameter.
// n - number of samples taken from the Dirichlet process
// k - number of discretly different samples, i.e. table count in the Chinese restaurant process.
// prior_alpha - alpha value of the Gamma prior on the concentration parameter.
// prior_beta - beta value of the Gamma prior on the concentration parameter.
double sample_dirichlet_proc_conc(double pcp, double n, double k, double prior_alpha = 1.01, double prior_beta = 0.01)
{
 if ((n<(1.0-1e-6))||(k<(2.0-1e-6)))
 {
  return pcp; // Doesn't work in this case, so just repeat.
 }

 double nn = sample_beta(pcp+1.0, n);
 double log_nn = log(nn);

 double f_alpha = prior_alpha + k;
 double f_beta = prior_beta - log_nn;

 double pi_n_mod = (f_alpha - 1.0) / (n * f_beta);
 double r = sample_uniform();
 double r_mod = r / (1.0 - r);
 if (r_mod>=pi_n_mod) f_alpha -= 1.0;

 double ret = sample_gamma(f_alpha, f_beta);
 if (ret<1e-3) ret = 1e-3;
 return ret;
}



// Class to represent the concentration parameter associated with a DP - consists of the prior and the previous/current value...
struct Conc
{
 float alpha; // Parameter for Gamma prior.
 float beta; // "
 float conc; // Previously sampled concentration value - needed for next sample, and for output/use.

 // Resamples the concentration value, assuming only a single DP is using it. n = number of samples from DP, k = number of unique samples, i.e. respectivly RefTotal() and Size() for a ListRef.
 void ResampleConc(int n, int k)
 {
  conc = sample_dirichlet_proc_conc(conc, n, k, alpha, beta);
  if (conc<1e-3) conc = 1e-3;
 }
};



// This class is the generalisation of the above for when multiple Dirichlet processes share a single concentration parameter - again allows a new concentration parameter to be drawn given the previous one and a Gamma prior, but takes multiple pairs of sample count/discrete sample counts, hence the class interface to allow it to accumilate the relevant information.
class SampleConcDP
{
 public:
  SampleConcDP():f_alpha(1.0),f_beta(1.0),prev_conc(1.0) {}
  ~SampleConcDP() {}

  // Sets the prior and resets the entire class....
  void SetPrior(double alpha, double beta)
  {
   f_alpha = alpha;
   f_beta = beta;
  }

  // Set the previous concetration parameter - must be called before any DP stats are added...
  void SetPrevConc(double prev)
  {
   prev_conc = prev;
  }

  // Call once for each DP that is using the concentration parameter...
  // (n is the number of samples drawn, k the number of discretly different samples.)
  void AddDP(double n, double k)
  {
   if (k>1.0)
   {
    double s = 0.0;
    if (sample_uniform()>(1.0/(1.0+n/prev_conc))) s = 1.0;
    double w = sample_beta(prev_conc+1.0,n);

    f_alpha += k - s;
    f_beta -= log(w);
   }
  }

  // Once all DP have been added call this to draw a new concentration value...
  double Sample()
  {
   double ret = sample_gamma(f_alpha, f_beta);
   if (ret<1e-3) ret = 1e-3;
   return ret;
  }

 private:
  double f_alpha;
  double f_beta;
  double prev_conc;
};
"""
Python
# Copyright (c) 2011, Tom SF Haines
# All rights reserved.

# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:

#  * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.

#  * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.

# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.



from scipy import weave
import unittest
from utils.start_cpp import start_cpp



# Defines code for a doubly linked list - simple but works as expected... (Includes its data via templated inheritance - a little strange, but neat and saves on memory thrashing.)
# The list header doubles as a dummy sentinel node: iteration ends when
# Valid() is false, i.e. when the walk arrives back at the header.
linked_list_code = start_cpp() + """
// Predefinitions...
template <typename ITEM, typename BODY> class Item;
template <typename ITEM, typename BODY> class List;

// Useful default...
struct Empty {};



// Item for the linked list data structure - simply inherits extra data stuff...
template <typename ITEM = Empty, typename BODY = Empty>
class Item : public ITEM
{
 public:
  Item(List<ITEM,BODY> * head):head(head),next(this),prev(this) {}
  ~Item() {}

  Item<ITEM,BODY> * Next() {return next;}
  Item<ITEM,BODY> * Prev() {return prev;}
  List<ITEM,BODY> * GetList() {return head;}

  bool Valid() {return static_cast< Item<ITEM,BODY>* >(head)!=this;}
  bool IsDummy() {return static_cast< Item<ITEM,BODY>* >(head)==this;}

  Item<ITEM,BODY> * PreNew() // Adds a new item before this one.
  {
   Item<ITEM,BODY> * ret = new Item<ITEM,BODY>(head);
   head->size += 1;

   ret->prev = this->prev;
   ret->next = this;
   ret->prev->next = ret;
   ret->next->prev = ret;

   return ret;
  }

  Item<ITEM,BODY> * PostNew() // Adds a new item after this one.
  {
   Item<ITEM,BODY> * ret = new Item<ITEM,BODY>(head);
   head->size += 1;

   ret->prev = this;
   ret->next = this->next;
   ret->prev->next = ret;
   ret->next->prev = ret;

   return ret;
  }

  void Suicide() // Removes this node from its list and makes it delete itself.
  {
   head->size -= 1;

   next->prev = prev;
   prev->next = next;

   delete this;
  }

 protected:
  List<ITEM,BODY> * head;
  Item<ITEM,BODY> * next;
  Item<ITEM,BODY> * prev;
};



// Simple totally inline doubly linked list structure, where the header is itself a (dummy) Item...
template <typename ITEM = Empty, typename BODY = Empty>
class List : protected Item<ITEM,BODY>
{
 public:
  List():Item<ITEM,BODY>(this),size(0) {}
  ~List()
  {
   while(this->size!=0)
   {
    this->next->Suicide();
   }
  }

  Item<ITEM,BODY> * Append() {return this->PreNew();}
  Item<ITEM,BODY> * Prepend() {return this->PostNew();}

  Item<ITEM,BODY> * First() {return this->next;}
  Item<ITEM,BODY> * Last() {return this->prev;}

  int Size() {return this->size;}
  BODY & Body() {return body;}

  Item<ITEM,BODY> * Index(int i)
  {
   Item<ITEM,BODY> * ret = this->next;
   while(i>0)
   {
    ret = ret->next;
    i -= 1;
   }
   return ret;
  }

 protected:
  friend class Item<ITEM,BODY>;

  int size;
  BODY body;
};
"""



class TestLinkedList(unittest.TestCase):
  """Test code for the linked list."""

  def test_compile(self):
    # Bug fix: these tests referenced the undefined name 'linked_list';
    # the support-code variable is 'linked_list_code' (as test_loop uses).
    code = start_cpp(linked_list_code) + """
    """
    weave.inline(code, support_code=linked_list_code)

  def test_size(self):
    code = start_cpp(linked_list_code) + """
    int errors = 0;

    List<> wibble;
    if (wibble.Size()!=0) errors += 1;

    Item<> * it = wibble.Append();
    if (wibble.Size()!=1) errors += 1;

    it->Suicide();
    if (wibble.Size()!=0) errors += 1;

    return_val = errors;
    """
    errors = weave.inline(code, support_code=linked_list_code)
    self.assertEqual(errors,0)

  def test_loop(self):
    extra = """
    struct Number
    {
     int num;
    };
    """
    code = start_cpp(linked_list_code+extra) + """
    int errors = 0;

    List<Number> wibble;
    for (int i=0;i<10;i++)
    {
     Item<Number> * it = wibble.Append();
     it->num = i;
    }
    if (wibble.Size()!=10) errors += 1;

    int i = 0;
    for (Item<Number> * targ = wibble.First(); targ->Valid(); targ = targ->Next())
    {
     if (i!=targ->num) errors += 1;
     i += 1;
    }

    return_val = errors;
    """
    errors = weave.inline(code, support_code=linked_list_code+extra)
    self.assertEqual(errors,0)



# Code for a linked list with garbage collection - each entry has a reference count, and it also allows access of the reference counts and the total number of reference counts for all entrys. This structure is very useful for modelling a Dirichlet process as a direct consequence, as it has all its properties...
linked_list_gc_code = linked_list_code + start_cpp() + """
// Predefinitions...
template <typename ITEM, typename BODY> class ItemRef;
template <typename ITEM, typename BODY> class ListRef;



// Item for the linked list data structure - simply inherits extra data stuff...
template <typename ITEM = Empty, typename BODY = Empty>
class ItemRef : public ITEM
{
 public:
  ItemRef(ListRef<ITEM,BODY> * head):head(head),next(this),prev(this),refCount(0) {}
  ~ItemRef() {}

  ItemRef<ITEM,BODY> * Next() {return next;}
  ItemRef<ITEM,BODY> * Prev() {return prev;}
  ListRef<ITEM,BODY> * GetList() {return head;}

  bool Valid() {return static_cast< ItemRef<ITEM,BODY>* >(head)!=this;}
  bool IsDummy() {return static_cast< ItemRef<ITEM,BODY>* >(head)==this;}

  ItemRef<ITEM,BODY> * PreNew() // Adds a new item before this one.
  {
   ItemRef<ITEM,BODY> * ret = new ItemRef<ITEM,BODY>(head);
   head->size += 1;

   ret->prev = this->prev;
   ret->next = this;
   ret->prev->next = ret;
   ret->next->prev = ret;

   return ret;
  }

  ItemRef<ITEM,BODY> * PostNew() // Adds a new item after this one.
  {
   ItemRef<ITEM,BODY> * ret = new ItemRef<ITEM,BODY>(head);
   head->size += 1;

   ret->prev = this;
   ret->next = this->next;
   ret->prev->next = ret;
   ret->next->prev = ret;

   return ret;
  }

  void Suicide() // Removes this node from its list and makes it delete itself.
  {
   head->size -= 1;
   head->refTotal -= refCount;

   next->prev = prev;
   prev->next = next;

   delete this;
  }

  void IncRef(int amount = 1)
  {
   this->refCount += amount;
   head->refTotal += amount;
  }

  void DecRef(int amount = 1) // If the ref count reaches zero the object will delete itself.
  {
   this->refCount -= amount;
   head->refTotal -= amount;
   if (refCount<=0) this->Suicide();
  }

  int RefCount() {return refCount;}

 protected:
  ListRef<ITEM,BODY> * head;
  ItemRef<ITEM,BODY> * next;
  ItemRef<ITEM,BODY> * prev;
  int refCount;
};



// Simple totally inline doubly linked list structure...
template <typename ITEM = Empty, typename BODY = Empty>
class ListRef : protected ItemRef<ITEM,BODY>
{
 public:
  ListRef():ItemRef<ITEM,BODY>(this),size(0),refTotal(0) {}
  ~ListRef()
  {
   while(this->size!=0)
   {
    this->next->Suicide();
   }
  }

  ItemRef<ITEM,BODY> * Append() {return this->PreNew();}
  ItemRef<ITEM,BODY> * Prepend() {return this->PostNew();}

  ItemRef<ITEM,BODY> * First() {return this->next;}
  ItemRef<ITEM,BODY> * Last() {return this->prev;}

  int Size() {return this->size;}
  int RefTotal() {return this->refTotal;}
  BODY & Body() {return body;}

  ItemRef<ITEM,BODY> * Index(int i)
  {
   ItemRef<ITEM,BODY> * ret = this->next;
   while(i>0)
   {
    ret = ret->Next();
    i -= 1;
   }
   return ret;
  }

 protected:
  friend class ItemRef<ITEM,BODY>;

  int size;
  int refTotal;
  BODY body;
};
"""



class TestLinkedListGC(unittest.TestCase):
  """Test code for the linked list with garbage collection."""

  def test_compile(self):
    # Bug fix: referenced the undefined name 'linked_list_gc'; the
    # support-code variable is 'linked_list_gc_code' (as test_size_gc uses).
    code = start_cpp(linked_list_gc_code) + """
    """
    weave.inline(code, support_code=linked_list_gc_code)

  def test_size_gc(self):
    code = start_cpp(linked_list_gc_code) + """
    int errors = 0;

    ListRef<> wibble;
    if (wibble.Size()!=0) errors += 1;

    ItemRef<> * it = wibble.Append();
    if (wibble.Size()!=1) errors += 1;
    if (wibble.RefTotal()!=0) errors += 1;

    it->IncRef();
    it->IncRef();
    if (it->RefCount()!=2) errors += 1;
    if (wibble.RefTotal()!=2) errors += 1;

    it->DecRef();
    it->DecRef();
    if (wibble.RefTotal()!=0) errors += 1;
    if (wibble.Size()!=0) errors += 1;

    return_val = errors;
    """
    errors = weave.inline(code, support_code=linked_list_gc_code)
    self.assertEqual(errors,0)



# If this file is run do the unit tests...
if __name__ == '__main__':
  unittest.main()
Python
# Copyright (c) 2011, Tom SF Haines
# All rights reserved.

# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:

#  * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.

#  * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.

# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.



from utils.start_cpp import start_cpp



# Provides code for estimating the Dirichlet distribution from which a number of multinomial distributions were drawn from, given those multinomials...
# (Requires digamma/trigamma to be provided by previously-included support code.)
dir_est_code = start_cpp() + """
// Defined as a class - you then add each multinomial before requesting a maximum likelihood update of the Dirichlet distribution. It uses Newton-Raphson iterations, and so needs a starting point - you provide a vector to be updated, which can of course save time if it is already close...
class EstimateDir
{
 public:
  EstimateDir(int vecSize):size(vecSize), samples(0), meanLog(new double[vecSize]), grad(new double[vecSize]), qq(new double[vecSize])
  {
   for (int i=0;i<vecSize;i++) meanLog[i] = 0.0;
  }

  ~EstimateDir() {delete[] meanLog; delete[] grad; delete[] qq;}

  // Adds a multinomial sample - only its component-wise log is retained, as an incremental mean (the sufficient statistics of the update)...
  void Add(float * mn)
  {
   samples += 1;
   for (int i=0;i<size;i++)
   {
    meanLog[i] += (log(mn[i]) - meanLog[i]) / double(samples);
   }
  }

  void Add(double * mn)
  {
   samples += 1;
   for (int i=0;i<size;i++)
   {
    meanLog[i] += (log(mn[i]) - meanLog[i]) / double(samples);
   }
  }

  // Newton-Raphson update of dir, in place; the Hessian is diagonal plus a constant, hence the b/qq trick...
  void Update(float * dir, int maxIter = 64, float epsilon = 1e-3, float cap = 1e6)
  {
   for (int iter=0;iter<maxIter;iter++)
   {
    // We will need the sum of the dir vector...
    double dirSum = 0.0;
    for (int i=0;i<size;i++) {dirSum += dir[i];}

    // Check for NaN/inf - if so reset to basic value...
    // (Bug fix: was 'dirSum==dirSum', which is true for every non-NaN value
    // and so reset dir to all-ones on every single iteration, whilst letting
    // an actual NaN - which fails both tests - straight through.)
    if ((dirSum!=dirSum) || (dirSum>1e100))
    {
     for (int i=0;i<size;i++) dir[i] = 1.0;
     dirSum = size;
    }

    // Safety - don't let it get too precise, that probably means its being crazy (Can happen with too few samples.)...
    if (dirSum>cap)
    {
     float mult = cap / dirSum;
     for (int i=0;i<size;i++) {dir[i] *= mult;}
     dirSum = cap;
    }

    // Calculate the gradiant and the Hessian 'matrix', except its actually diagonal...
    double digDirSum = digamma(dirSum);
    for (int i=0;i<size;i++)
    {
     grad[i] = samples * (digDirSum - digamma(dir[i]) + meanLog[i]);
     qq[i] = -samples * trigamma(dir[i]);
    }

    // Calculate b...
    double b = 0.0;
    double bDiv = 1.0 / (samples*trigamma(dirSum));
    for (int i=0;i<size;i++)
    {
     b += grad[i]/qq[i];
     bDiv += 1.0/qq[i];
    }
    b /= bDiv;

    // Do the update, sum the change...
    double change = 0.0;
    for (int i=0;i<size;i++)
    {
     double delta = (grad[i] - b) / qq[i];
     dir[i] -= delta;
     if (dir[i]<1e-3) dir[i] = 1e-3;
     change += fabs(delta);
    }

    // Break if no change...
    if (change<epsilon) break;
   }
  }

  void Update(double * dir, int maxIter = 64, double epsilon = 1e-6, double cap = 1e6)
  {
   for (int iter=0;iter<maxIter;iter++)
   {
    // We will need the sum of the dir vector...
    double dirSum = 0.0;
    for (int i=0;i<size;i++) {dirSum += dir[i];}

    // Check for NaN/inf - if so reset to basic value...
    // (Same inverted-test bug fix as the float overload.)
    if ((dirSum!=dirSum) || (dirSum>1e100))
    {
     for (int i=0;i<size;i++) dir[i] = 1.0;
     dirSum = size;
    }

    // Safety - don't let it get too precise, that probably means its being crazy (Can happen with too few samples.)...
    double mult; // Was float - kept at double precision to match this overload.
    if (dirSum>cap)
    {
     mult = cap / dirSum;
     for (int i=0;i<size;i++) {dir[i] *= mult;}
     dirSum = cap;
    }

    // Calculate the gradiant and the Hessian 'matrix', except its actually diagonal...
    double digDirSum = digamma(dirSum);
    for (int i=0;i<size;i++)
    {
     grad[i] = samples * (digDirSum - digamma(dir[i]) + meanLog[i]);
     qq[i] = -samples * trigamma(dir[i]);
    }

    // Calculate b...
    double b = 0.0;
    double bDiv = 1.0 / (samples*trigamma(dirSum));
    for (int i=0;i<size;i++)
    {
     b += grad[i]/qq[i];
     bDiv += 1.0/qq[i];
    }
    b /= bDiv;

    // Do the update, sum the change...
    double change = 0.0;
    for (int i=0;i<size;i++)
    {
     double delta = (grad[i] - b) / qq[i];
     dir[i] -= delta;
     change += fabs(delta);
    }

    // Break if no change...
    if (change<epsilon) break;
   }
  }

 private:
  int size;
  int samples;
  double * meanLog; // Vector of length size, contains the component-wise mean of the log of each of the samples - consititutes the sufficient statistics required to do the update.

  double * grad; // Temporary during update.
  double * qq; // Temporary during update.
};
"""
Python
# Copyright (c) 2012, Tom SF Haines
# All rights reserved.

# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:

#  * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
#  * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.

# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.


from start_cpp import start_cpp


# Some basic matrix operations that come in use...
# C++ support code, meant to be passed to scipy.weave as support_code (hence the
# start_cpp() prefix, which adds a #line directive for sane compiler errors).
# Provides: MemSwap (element swap between arrays), Determinant (recursive
# cofactor expansion - exponential cost, so only sensible for small matrices)
# and Inverse (Gauss-Jordan elimination with partial pivoting).
# NOTE(review): Inverse treats a pivot with |pivot| < 1e-6 as singular - this
# threshold is scale-dependent; confirm it suits the magnitudes actually used.
matrix_code = start_cpp() + """
#ifndef MATRIX_CODE
#define MATRIX_CODE

template <typename T>
inline void MemSwap(T * lhs, T * rhs, int count = 1)
{
 while(count!=0)
 {
  T t = *lhs;
  *lhs = *rhs;
  *rhs = t;
  ++lhs;
  ++rhs;
  --count;
 }
}

// Calculates the determinant - you give it a pointer to the first elment of the array, and its size (It must be square), plus its stride, which would typically be identical to size, which is the default.
template <typename T>
inline T Determinant(T * pos, int size, int stride = -1)
{
 if (stride==-1) stride = size;

 if (size==1) return pos[0];
 else
 {
  if (size==2) return pos[0]*pos[stride+1] - pos[1]*pos[stride];
  else
  {
   T ret = 0.0;
   for (int i=0; i<size; i++)
   {
    if (i!=0) MemSwap(&pos[0], &pos[stride*i], size-1);
    T sub = Determinant(&pos[stride], size-1, stride) * pos[stride*i + size-1];
    if ((i+size)%2) ret += sub;
    else ret -= sub;
   }

   for (int i=1; i<size; i++)
   {
    MemSwap(&pos[(i-1)*stride], &pos[i*stride], size-1);
   }

   return ret;
  }
 }
}

// Inverts a square matrix, will fail on singular and very occasionally on
// non-singular matrices, returns true on success. Uses Gauss-Jordan elimination
// with partial pivoting.
// in is the input matrix, out the output matrix, just be aware that the input matrix is trashed.
// You have to provide its size (Its square, obviously.), and optionally a stride if different from size.
template <typename T>
inline bool Inverse(T * in, T * out, int size, int stride = -1)
{
 if (stride==-1) stride = size;

 for (int r=0; r<size; r++)
 {
  for (int c=0; c<size; c++)
  {
   out[r*stride + c] = (c==r)?1.0:0.0;
  }
 }

 for (int r=0; r<size; r++)
 {
  // Find largest pivot and swap in, fail if best we can get is 0...
  T max = in[r*stride + r];
  int index = r;
  for (int i=r+1; i<size; i++)
  {
   if (fabs(in[i*stride + r])>fabs(max))
   {
    max = in[i*stride + r];
    index = i;
   }
  }

  if (index!=r)
  {
   MemSwap(&in[index*stride], &in[r*stride], size);
   MemSwap(&out[index*stride], &out[r*stride], size);
  }

  if (fabs(max-0.0)<1e-6) return false;

  // Divide through the entire row...
  max = 1.0/max;
  in[r*stride + r] = 1.0;
  for (int i=r+1; i<size; i++) in[r*stride + i] *= max;
  for (int i=0; i<size; i++) out[r*stride + i] *= max;

  // Row subtract to generate 0's in the current column, so it matches an identity matrix...
  for (int i=0; i<size; i++)
  {
   if (i==r) continue;
   T factor = in[i*stride + r];
   in[i*stride + r] = 0.0;
   for (int j=r+1; j<size; j++) in[i*stride + j] -= factor * in[r*stride + j];
   for (int j=0; j<size; j++) out[i*stride + j] -= factor * out[r*stride + j];
  }
 }

 return true;
}

#endif
"""
Python
# -*- coding: utf-8 -*-

# Copyright (c) 2010, Tom SF Haines
# All rights reserved.

# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:

#  * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
#  * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.

# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.


import sys
import time


class ProgBar:
    """Simple console progress bar class.

    Note that object creation and destruction matter, as they indicate when processing starts and when it stops."""

    def __init__(self, width = 60, onCallback = None):
        # Remember when work started and draw the empty bar (a row of underscores)...
        self.start = time.time()
        self.fill = 0
        self.width = width
        self.onCallback = onCallback
        sys.stdout.write(('_'*self.width)+'\n')
        sys.stdout.flush()

    def __del__(self):
        # Destruction marks the end of processing - complete the bar and report the elapsed time...
        self.end = time.time()
        self.__show(self.width)
        sys.stdout.write('\nDone - '+str(self.end-self.start)+' seconds\n\n')
        sys.stdout.flush()

    def callback(self, nDone, nToDo):
        """Hand this into the callback of methods to get a progress bar - it works by users repeatedly calling it to indicate how many units of work they have done (nDone) out of the total number of units required (nToDo)."""
        if self.onCallback:
            self.onCallback()

        # How many bar characters should be visible for this much progress, clamped to the bar width...
        target = int(float(self.width)*float(nDone)/float(nToDo))
        if target>self.width:
            target = self.width

        # Only ever draw forwards - the console can't take characters back...
        if target>self.fill:
            self.__show(target)

    def __show(self, upto):
        # Emit just the bar characters that are missing, then record the new fill level...
        sys.stdout.write('|'*(upto-self.fill))
        self.fill = upto
        sys.stdout.flush()
Python
# Copyright (c) 2012, Tom SF Haines
# All rights reserved.

# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:

#  * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
#  * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.

# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.


import pydoc
import inspect


class DocGen:
    """A helper class that is used to generate documentation for the system. Outputs multiple formats simultaneously, specifically html for local reading with a webbrowser and the markup used by the wiki system on Google code.

    Writes <name>.html and <name>.wiki in the current directory; both files are finalised and closed when this object is destroyed, so keep it alive until all add* calls have been made."""

    def __init__(self, name, title = None, summary = None):
        """name is the module name - primarily used for the file names. title is the title used as applicable - if not provided it just uses the name. summary is an optional line to go below the title."""
        if title==None: title = name
        if summary==None: summary = title

        # pydoc's html generator does the heavy lifting for the html output...
        self.doc = pydoc.HTMLDoc()

        # Open the html output and emit the fixed page header...
        self.html = open('%s.html'%name,'w')
        self.html.write('<html>\n')
        self.html.write('<head>\n')
        self.html.write('<title>%s</title>\n'%title)
        self.html.write('</head>\n')
        self.html.write('<body>\n')

        # Sections are accumulated in strings and only written out on destruction...
        self.html_variables = ''
        self.html_functions = ''
        self.html_classes = ''

        # Open the wiki output and emit its header...
        self.wiki = open('%s.wiki'%name,'w')
        self.wiki.write('#summary %s\n\n'%summary)
        self.wiki.write('= %s= \n\n'%title)

        self.wiki_variables = ''
        self.wiki_functions = ''
        self.wiki_classes = ''

    def __del__(self):
        # NOTE(review): finalisation happens in __del__ - the accumulated sections
        # are only flushed and the files closed when this object is collected;
        # confirm users do not rely on the files being complete earlier.
        if self.html_variables!='':
            self.html.write(self.doc.bigsection('Synonyms', '#ffffff', '#8d50ff', self.html_variables))
        if self.html_functions!='':
            self.html.write(self.doc.bigsection('Functions', '#ffffff', '#eeaa77', self.html_functions))
        if self.html_classes!='':
            self.html.write(self.doc.bigsection('Classes', '#ffffff', '#ee77aa', self.html_classes))
        self.html.write('</body>\n')
        self.html.write('</html>\n')
        self.html.close()

        if self.wiki_variables!='':
            self.wiki.write('= Variables =\n\n')
            self.wiki.write(self.wiki_variables)
            self.wiki.write('\n')
        if self.wiki_functions!='':
            self.wiki.write('= Functions =\n\n')
            self.wiki.write(self.wiki_functions)
            self.wiki.write('\n')
        if self.wiki_classes!='':
            self.wiki.write('= Classes =\n\n')
            self.wiki.write(self.wiki_classes)
            self.wiki.write('\n')
        self.wiki.close()

    def addFile(self, fn, title, fls = True):
        """Given a filename and section title adds the contents of said file to the output. Various flags influence how this works. fls=True renders the first line of the file in bold/strong."""
        html = []
        wiki = []
        for i, line in enumerate(open(fn,'r').readlines()):
            # html version - strip the newline, embolden the first line, and
            # italicise any '<module>.py - ' / '<file>.txt - ' prefix...
            hl = line.replace('\n', '')
            if i==0 and fls: hl = '<strong>' + hl + '</strong>'
            for ext in ['py','txt']:
                if '.%s - '%ext in hl:
                    s = hl.split('.%s - '%ext, 1)
                    hl = '<i>' + s[0] + '.%s</i> - '%ext + s[1]
            html.append(hl)

            # wiki version - same idea, using wiki markup instead of tags...
            wl = line.strip()
            if i==0 and fls: wl = '*%s*'%wl
            for ext in ['py','txt']:
                if '.%s - '%ext in wl:
                    s = wl.split('.%s - '%ext, 1)
                    wl = '`' + s[0] + '.%s` - '%ext + s[1] + '\n'
            wiki.append(wl)

        self.html.write(self.doc.bigsection(title, '#ffffff', '#7799ee', '<br/>'.join(html)))

        self.wiki.write('== %s ==\n'%title)
        self.wiki.write('\n'.join(wiki))
        self.wiki.write('----\n\n')

    def addVariable(self, var, desc):
        """Adds a variable to the documentation. Given the nature of this you provide it as a pair of strings - one referencing the variable, the other some kind of description of its use etc.."""
        self.html_variables += '<strong>%s</strong><br/>'%var
        self.html_variables += '%s<br/><br/>\n'%desc

        self.wiki_variables += '*`%s`*\n'%var
        self.wiki_variables += ' %s\n\n'%desc

    def addFunction(self, func):
        """Adds a function to the documentation. You provide the actual function instance."""
        self.html_functions += self.doc.docroutine(func).replace('&nbsp;',' ')
        self.html_functions += '\n'

        # For the wiki version reconstruct the signature by hand from getargspec...
        name = func.__name__
        args, varargs, keywords, defaults = inspect.getargspec(func)
        doc = inspect.getdoc(func)

        # Left-pad defaults with None so it lines up one-to-one with args...
        if defaults==None: defaults = list()
        defaults = (len(args)-len(defaults)) * [None] + list(defaults)

        arg_str = ''
        if len(args)!=0:
            arg_str += reduce(lambda a, b: '%s, %s'%(a,b), map(lambda arg, d: arg if d==None else '%s = %s'%(arg,d), args, defaults))
        if varargs!=None: arg_str += ', *%s'%varargs if arg_str!='' else '*%s'%varargs
        if keywords!=None: arg_str += ', **%s'%keywords if arg_str!='' else '**%s'%keywords

        self.wiki_functions += '*`%s(%s)`*\n'%(name, arg_str)
        self.wiki_functions += ' %s\n\n'%doc

    def addClass(self, cls):
        """Adds a class to the documentation. You provide the actual class object."""
        self.html_classes += self.doc.docclass(cls).replace('&nbsp;',' ')
        self.html_classes += '\n'

        name = cls.__name__
        parents = filter(lambda a: a!=cls, inspect.getmro(cls))
        doc = inspect.getdoc(cls)

        par_str = ''
        if len(parents)!=0:
            par_str += reduce(lambda a, b: '%s, %s'%(a,b), map(lambda p: p.__name__, parents))

        self.wiki_classes += '== %s(%s) ==\n'%(name, par_str)
        self.wiki_classes += ' %s\n\n'%doc

        # Collect the methods, sorting __init__ first ('___' sorts before
        # ordinary lowercase identifiers)...
        methods = inspect.getmembers(cls, lambda x: inspect.ismethod(x) or inspect.isbuiltin(x) or inspect.isroutine(x))
        def method_key(pair):
            if pair[0]=='__init__': return '___'
            else: return pair[0]
        methods.sort(key=method_key)

        for name, method in methods:
            # NOTE(review): this guard excludes anything for which ismethod() is
            # True, yet the branch below then tests ismethod() again - that inner
            # branch looks unreachable as written; confirm the intended condition.
            if not name.startswith('_%s'%cls.__name__) and (not inspect.ismethod(method) and name[:2]!='__'):
                if inspect.ismethod(method):
                    args, varargs, keywords, defaults = inspect.getargspec(method)
                else:
                    # Builtins/C routines don't expose an argspec - show a placeholder...
                    args = ['?']
                    varargs = None
                    keywords = None
                    defaults = None

                if defaults==None: defaults = list()
                defaults = (len(args)-len(defaults)) * [None] + list(defaults)

                arg_str = ''
                if len(args)!=0:
                    arg_str += reduce(lambda a, b: '%s, %s'%(a,b), map(lambda arg, d: arg if d==None else '%s = %s'%(arg,d), args, defaults))
                if varargs!=None: arg_str += ', *%s'%varargs if arg_str!='' else '*%s'%varargs
                if keywords!=None: arg_str += ', **%s'%keywords if arg_str!='' else '**%s'%keywords

                # Walk up the mro looking for a docstring if the method itself lacks one...
                def fetch_doc(cls, name):
                    try:
                        method = getattr(cls, name)
                        if method.__doc__!=None:
                            return inspect.getdoc(method)
                    except: pass

                    for parent in filter(lambda a: a!=cls, inspect.getmro(cls)):
                        ret = fetch_doc(parent, name)
                        if ret!=None: return ret
                    return None
                doc = fetch_doc(cls, name)

                self.wiki_classes += '*`%s(%s)`*\n'%(name, arg_str)
                self.wiki_classes += ' %s\n\n'%doc

        # Document simple class-level variables as well...
        variables = inspect.getmembers(cls, lambda x: inspect.ismemberdescriptor(x) or isinstance(x, int) or isinstance(x, str) or isinstance(x, float))
        for name, var in variables:
            if not name.startswith('__'):
                if hasattr(var, '__doc__'): d = var.__doc__
                else: d = str(var)
                self.wiki_classes += '*`%s`* = %s\n\n'%(name, d)
Python
# Copyright (c) 2011, Tom SF Haines
# All rights reserved.

# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:

#  * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
#  * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.

# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.


import unittest
import random
import math

from scipy.special import gammaln, psi, polygamma
from scipy import weave

from utils.start_cpp import start_cpp


# Provides various gamma-related functions...
# C++ support code for scipy.weave: lnGamma (Lanczos approximation, with the
# reflection formula below 0.5), digamma and trigamma (asymptotic series, with
# the recurrence used to push small arguments up to where the series is
# accurate). The unit tests below compare each against its scipy equivalent,
# so accuracy claims in the C++ comments are checked empirically.
gamma_code = start_cpp() + """
#ifndef GAMMA_CODE
#define GAMMA_CODE

#include <cmath>

// Returns the natural logarithm of the Gamma function...
// (Uses Lanczos's approximation.)
double lnGamma(double z)
{
 static const double coeff[9] = {0.99999999999980993, 676.5203681218851, -1259.1392167224028, 771.32342877765313, -176.61502916214059, 12.507343278686905, -0.13857109526572012, 9.9843695780195716e-6, 1.5056327351493116e-7};
 if (z<0.5)
 {
  // Use reflection formula, as approximation doesn't work down here...
  return log(M_PI) - log(sin(M_PI*z)) - lnGamma(1.0-z);
 }
 else
 {
  double x = coeff[0];
  for (int i=1;i<9;i++) x += coeff[i]/(z+i-1);

  double t = z + 6.5;
  return log(sqrt(2.0*M_PI)) + (z-0.5)*log(t) - t + log(x);
 }
}

// Calculates the Digamma function, i.e. the derivative of the log of the Gamma function - uses a partial expansion of an infinite series to 4 terms that is good for high values, and an identity to express lower values in terms of higher values...
double digamma(double z)
{
 static const double highVal = 13.0; // A bit of fiddling shows that the last term with this is of the order 1e-10, so we can expect at least 9 digits of accuracy past the decimal point.
 double ret = 0.0;
 while (z<highVal)
 {
  ret -= 1.0/z;
  z += 1.0;
 }

 double iz1 = 1.0/z;
 double iz2 = iz1*iz1;
 double iz4 = iz2*iz2;
 double iz6 = iz4*iz2;
 ret += log(z) - iz1/2.0 - iz2/12.0 + iz4/120.0 - iz6/252.0;
 return ret;
}

// Calculates the trigamma function - uses a partial expansion of an infinite series that is accurate for large values, and then uses an identity to express lower values in terms of higher values - same approach as for the digamma function basically...
double trigamma(double z)
{
 static const double highVal = 8.0;
 double ret = 0.0;
 while (z<highVal)
 {
  ret += 1.0/(z*z);
  z += 1.0;
 }
 z -= 1.0;

 double iz1 = 1.0/z;
 double iz2 = iz1*iz1;
 double iz3 = iz1*iz2;
 double iz5 = iz3*iz2;
 double iz7 = iz5*iz2;
 double iz9 = iz7*iz2;
 ret += iz1 - 0.5*iz2 + iz3/6.0 - iz5/30.0 + iz7/42.0 - iz9/30.0;
 return ret;
}

#endif
"""


def lnGamma(z):
    """Pointless as scipy, a library this is dependent on, defines this, but useful for testing. Returns the logorithm of the gamma function"""
    code = start_cpp(gamma_code) + """
    return_val = lnGamma(z);
    """
    return weave.inline(code, ['z'], support_code=gamma_code)


def digamma(z):
    """Pointless as scipy, a library this is dependent on, defines this, but useful for testing. Returns an evaluation of the digamma function"""
    code = start_cpp(gamma_code) + """
    return_val = digamma(z);
    """
    return weave.inline(code, ['z'], support_code=gamma_code)


def trigamma(z):
    """Pointless as scipy, a library this is dependent on, defines this, but useful for testing. Returns an evaluation of the trigamma function"""
    code = start_cpp(gamma_code) + """
    return_val = trigamma(z);
    """
    return weave.inline(code, ['z'], support_code=gamma_code)


class TestFuncs(unittest.TestCase):
    """Test code for the assorted gamma-related functions."""

    def test_compile(self):
        # Just verifies the support code compiles on its own...
        code = start_cpp(gamma_code) + """
        """
        weave.inline(code, support_code=gamma_code)

    def test_error_lngamma(self):
        # Compare against scipy's gammaln over a spread of random inputs...
        for _ in xrange(1000):
            z = random.uniform(0.01, 100.0)
            own = lnGamma(z)
            good = gammaln(z)
            assert(math.fabs(own-good)<1e-12)

    def test_error_digamma(self):
        # scipy's psi is the digamma function...
        for _ in xrange(1000):
            z = random.uniform(0.01, 100.0)
            own = digamma(z)
            good = psi(z)
            assert(math.fabs(own-good)<1e-9)

    def test_error_trigamma(self):
        # polygamma(1,z) is the trigamma function...
        for _ in xrange(1000):
            z = random.uniform(0.01, 100.0)
            own = trigamma(z)
            good = polygamma(1,z)
            assert(math.fabs(own-good)<1e-9)


# If this file is run do the unit tests...
if __name__ == '__main__':
    unittest.main()
Python
# -*- coding: utf-8 -*-

# Copyright (c) 2010, Tom SF Haines
# All rights reserved.

# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:

#  * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
#  * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.

# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.


import inspect
import hashlib


def start_cpp(hash_str = None):
    """This method does two things - firstly it adds the correct line numbers to scipy.weave code (Good for debugging) and secondly it can optionally insert a hash code of some other code into the code. This latter feature is useful for working around the fact that scipy.weave only recompiles if the hash of the code changes, but ignores the support_code - passing the support_code into start_cpp avoids this problem by putting its hash into the code and forcing a recompile when that code changes.

    Usage is <code variable> = start_cpp([support_code variable]) + <3 quotations to start big comment with code in, typically going over many lines.>"""
    # Identify where the caller is, so the #line directive points the compiler
    # at the real python source file...
    caller = inspect.getframeinfo(inspect.currentframe().f_back)
    filename = caller[0]
    line_no = caller[1]

    if hash_str is None:
        return '#line %i "%s"\n' % (line_no, filename)

    # Embed the hash of the extra code as a trailing comment, so a change to it
    # alters this string and hence triggers weave's recompile...
    digest = hashlib.md5()
    digest.update(hash_str)
    return '#line %i "%s" // %s\n' % (line_no, filename, digest.hexdigest())
Python
# -*- coding: utf-8 -*-

# Code copied from http://opencv.willowgarage.com/wiki/PythonInterface - license unknown, but presumed to be at least as liberal as bsd (The license for opencv.).


import cv
import numpy as np


def cv2array(im):
    """Converts a cv array (IplImage) to a numpy array.

    The returned array always has 3 dimensions - (height, width, nChannels) - with the dtype selected to match the image's depth."""
    # Map OpenCV depth constants to the matching numpy dtype name...
    depth2dtype = {
        cv.IPL_DEPTH_8U: 'uint8',
        cv.IPL_DEPTH_8S: 'int8',
        cv.IPL_DEPTH_16U: 'uint16',
        cv.IPL_DEPTH_16S: 'int16',
        cv.IPL_DEPTH_32S: 'int32',
        cv.IPL_DEPTH_32F: 'float32',
        cv.IPL_DEPTH_64F: 'float64',
    }

    # Copy the raw pixel data out and reshape it to (height, width, channels)...
    a = np.fromstring(
        im.tostring(),
        dtype=depth2dtype[im.depth],
        count=im.width*im.height*im.nChannels)
    a.shape = (im.height, im.width, im.nChannels)
    return a


def array2cv(a):
    """Converts a numpy array to a cv array (IplImage), if possible.

    2D arrays become single channel images; 3D arrays use the last dimension as the channel count. The array's dtype selects the image depth."""
    dtype2depth = {
        'uint8': cv.IPL_DEPTH_8U,
        'int8': cv.IPL_DEPTH_8S,
        'uint16': cv.IPL_DEPTH_16U,
        'int16': cv.IPL_DEPTH_16S,
        'int32': cv.IPL_DEPTH_32S,
        'float32': cv.IPL_DEPTH_32F,
        'float64': cv.IPL_DEPTH_64F,
    }

    # A 2D array has no third axis - treat it as a single channel image.
    # (Only IndexError can occur here; a bare except would hide real bugs.)
    try:
        nChannels = a.shape[2]
    except IndexError:
        nChannels = 1

    cv_im = cv.CreateImageHeader((a.shape[1],a.shape[0]), dtype2depth[str(a.dtype)], nChannels)
    cv.SetData(cv_im, a.tostring(), a.dtype.itemsize*nChannels*a.shape[1])
    return cv_im
Python
# -*- coding: utf-8 -*-

# Copyright (c) 2011, Tom SF Haines
# All rights reserved.

# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:

#  * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
#  * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.

# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.


import multiprocessing as mp
import multiprocessing.synchronize # To make sure we have all the functionality.
import types
import marshal

import unittest


def repeat(x):
    """A generator that repeats the input forever - can be used with the mp_map function to give data to a function that is constant."""
    while True:
        yield x


def run_code(code, args):
    """Internal use function that does the work in each process."""
    # Rebuild a callable from the marshalled code object. Note that only the
    # code itself is carried across - the function is recreated with this
    # module's globals and no closure, hence the restriction documented on
    # mp_map that func may only use what is passed in as arguments...
    code = marshal.loads(code)
    func = types.FunctionType(code, globals(), '_')
    return func(*args)


def mp_map(func, *iters, **keywords):
    """A multiprocess version of the map function. Note that func must limit itself to the data provided - if it accesses anything else (globals, locals to its definition.) it will fail. There is a repeat generator provided in this module to work around such issues. Note that, unlike map, this iterates the length of the shortest of inputs, rather than the longest - whilst this makes it not a perfect substitute it makes passing constant argumenmts easier as they can just repeat for infinity.

    Optionally accepts a 'pool' keyword to reuse an existing multiprocessing.Pool."""
    # Use a caller-supplied pool if given, otherwise create a fresh one.
    # NOTE(review): a pool created here is never close()d/join()ed - it is left
    # to garbage collection; confirm that is acceptable for heavy use.
    if 'pool' in keywords:
        pool = keywords['pool']
    else:
        pool = mp.Pool()

    # Marshal the function's code object so it can be shipped to the workers...
    code = marshal.dumps(func.func_code)

    # Submit one asynchronous job per argument tuple - zip stops at the
    # shortest input, which is what makes repeat() usable...
    jobs = []
    for args in zip(*iters):
        jobs.append(pool.apply_async(run_code, (code, args)))

    # Collect the results in submission order (get() re-raises any worker exception)...
    for i in xrange(len(jobs)):
        jobs[i] = jobs[i].get()

    return jobs


class TestMpMap(unittest.TestCase):
    # Exercises mp_map with lists, lambdas, generators, repeat() and empty input.

    def test_simple1(self):
        data = ['a','b','c','d']
        def noop(data): return data
        data_noop = mp_map(noop, data)
        self.assertEqual(data, data_noop)

    def test_simple2(self):
        data = [x for x in xrange(1000)]
        data_double = mp_map(lambda a: a*2, data)
        self.assertEqual(map(lambda a: a*2,data), data_double)

    def test_gen(self):
        def gen():
            for i in xrange(100):
                yield i
        data_double = mp_map(lambda a: a*2, gen())
        self.assertEqual(map(lambda a: a*2,gen()), data_double)

    def test_repeat(self):
        def mult(a,b): return a*b
        data = [x for x in xrange(50,5000,5)]
        data_triple = mp_map(mult, data, repeat(3))
        self.assertEqual(map(lambda a: a*3,data),data_triple)

    def test_none(self):
        data = []
        data_sqr = mp_map(lambda x: x*x, data)
        self.assertEqual([],data_sqr)


if __name__ == '__main__':
    unittest.main()
Python
# Copyright (c) 2012, Tom SF Haines
# All rights reserved.

# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:

#  * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
#  * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.

# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.


import sys
import os.path
import tempfile
import shutil

from distutils.core import setup, Extension
import distutils.ccompiler
import distutils.dep_util


# Create a compiler up front, just to query platform naming conventions; record
# failure so make_mod can report it cleanly instead of crashing at import time...
try:
    __default_compiler = distutils.ccompiler.new_compiler()
except:
    __default_compiler = None


def make_mod(name, base, source, openCL = False):
    """Uses distutils to compile a python module - really just a set of hacks to allow this to be done 'on demand', so it only compiles if the module does not exist or is older than the current source, and after compilation the program can continue on its merry way, and immediatly import the just compiled module. Note that on failure erros can be thrown - its your choice to catch them or not.

    name is the modules name, i.e. what you want to use with the import statement. base is the base directory for the module, which contains the source file - often you would want to set this to 'os.path.dirname(__file__)', assuming the .py file that imports the module is in the same directory as the code. It is this directory that the module is output to. source is the filename of the source code to compile, or alternativly a list of filenames. openCL indicates if OpenCL is used by the module, in which case it does all the necesary setup - done like this so these setting can be kept centralised, so when they need to be different for a new platform they only have to be changed in one place."""
    if __default_compiler==None: raise Exception('No compiler!')

    # Work out the various file names - check if we actually need to do anything...
    if not isinstance(source, list): source = [source]
    source_path = map(lambda s: os.path.join(base, s), source)
    library_path = os.path.join(base, __default_compiler.shared_object_filename(name))

    # Rebuild if any source file is newer than the compiled module. any() also
    # behaves sensibly (no rebuild) for an empty source list, where the previous
    # reduce-based version would have thrown...
    if any(map(lambda s: distutils.dep_util.newer(s, library_path), source_path)):
        # Backup argv and create the temporary directory *before* entering the
        # try block, so the finally clause can never see them undefined if
        # mkdtemp itself fails...
        old_argv = sys.argv[:]
        temp_dir = tempfile.mkdtemp()
        try:
            # Prepare the extension - argv is how parameters are smuggled into setup()...
            sys.argv = ['','build_ext','--build-lib', base, '--build-temp', temp_dir]
            comp_path = filter(lambda s: not s.endswith('.h'), source_path)
            depends = filter(lambda s: s.endswith('.h'), source_path)
            if openCL:
                # Include/library paths for both the nVidia and AMD OpenCL SDKs...
                ext = Extension(name, comp_path, include_dirs=['/usr/local/cuda/include', '/opt/AMDAPP/include'], libraries = ['OpenCL'], library_dirs = ['/usr/lib64/nvidia', '/opt/AMDAPP/lib/x86_64'], depends=depends)
            else:
                ext = Extension(name, comp_path, depends=depends)

            # Compile...
            setup(name=name, version='1.0.0', ext_modules=[ext])
        finally:
            # Cleanup the argv variable and the temporary directory...
            sys.argv = old_argv
            shutil.rmtree(temp_dir, True)
Python
# -*- coding: utf-8 -*-

# Copyright (c) 2011, Tom SF Haines
# All rights reserved.

# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:

#  * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
#  * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.

# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.


from start_cpp import start_cpp
from numpy_help_cpp import numpy_util_code


# Provides various functions to assist with manipulating python objects from c++ code.
# C++ support code for scipy.weave. Each helper fetches a named attribute from a
# python object via the C API and converts it to a C type; the array variants
# return new[]-allocated buffers the caller must delete[].
# NOTE(review): these helpers do not check PyObject_GetAttrString for NULL - a
# missing attribute would crash; confirm callers only pass objects with the
# expected attributes.
python_obj_code = numpy_util_code + start_cpp() + """
#ifndef PYTHON_OBJ_CODE
#define PYTHON_OBJ_CODE

// Extracts a boolean from an object...
bool GetObjectBoolean(PyObject * obj, const char * name)
{
 PyObject * b = PyObject_GetAttrString(obj, name);
 bool ret = b!=Py_False;
 Py_DECREF(b);
 return ret;
}

// Extracts an int from an object...
int GetObjectInt(PyObject * obj, const char * name)
{
 PyObject * i = PyObject_GetAttrString(obj, name);
 int ret = PyInt_AsLong(i);
 Py_DECREF(i);
 return ret;
}

// Extracts a float from an object...
float GetObjectFloat(PyObject * obj, const char * name)
{
 PyObject * f = PyObject_GetAttrString(obj, name);
 float ret = PyFloat_AsDouble(f);
 Py_DECREF(f);
 return ret;
}

// Extracts an array from an object, returning it as a new[] unsigned char array. You can also pass in a pointer to an int to have the size of the array stored...
unsigned char * GetObjectByte1D(PyObject * obj, const char * name, int * size = 0)
{
 PyArrayObject * nao = (PyArrayObject*)PyObject_GetAttrString(obj, name);
 unsigned char * ret = new unsigned char[nao->dimensions[0]];
 if (size) *size = nao->dimensions[0];
 for (int i=0;i<nao->dimensions[0];i++) ret[i] = Byte1D(nao,i);
 Py_DECREF(nao);
 return ret;
}

// Extracts an array from an object, returning it as a new[] float array. You can also pass in a pointer to an int to have the size of the array stored...
float * GetObjectFloat1D(PyObject * obj, const char * name, int * size = 0)
{
 PyArrayObject * nao = (PyArrayObject*)PyObject_GetAttrString(obj, name);
 float * ret = new float[nao->dimensions[0]];
 if (size) *size = nao->dimensions[0];
 for (int i=0;i<nao->dimensions[0];i++) ret[i] = Float1D(nao,i);
 Py_DECREF(nao);
 return ret;
}

#endif
"""
Python
# -*- coding: utf-8 -*- # Copyright (c) 2011, Tom SF Haines # All rights reserved. # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. # * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. from start_cpp import start_cpp # Defines helper functions for accessing numpy arrays... 
numpy_util_code = start_cpp() + """ #ifndef NUMPY_UTIL_CODE #define NUMPY_UTIL_CODE float & Float1D(PyArrayObject * arr, int index = 0) { return *(float*)(arr->data + index*arr->strides[0]); } float & Float2D(PyArrayObject * arr, int index1 = 0, int index2 = 0) { return *(float*)(arr->data + index1*arr->strides[0] + index2*arr->strides[1]); } float & Float3D(PyArrayObject * arr, int index1 = 0, int index2 = 0, int index3 = 0) { return *(float*)(arr->data + index1*arr->strides[0] + index2*arr->strides[1] + index3*arr->strides[2]); } unsigned char & Byte1D(PyArrayObject * arr, int index = 0) { //assert(arr->strides[0]==sizeof(unsigned char)); return *(unsigned char*)(arr->data + index*arr->strides[0]); } unsigned char & Byte2D(PyArrayObject * arr, int index1 = 0, int index2 = 0) { //assert(arr->strides[0]==sizeof(unsigned char)); return *(unsigned char*)(arr->data + index1*arr->strides[0] + index2*arr->strides[1]); } unsigned char & Byte3D(PyArrayObject * arr, int index1 = 0, int index2 = 0, int index3 = 0) { //assert(arr->strides[0]==sizeof(unsigned char)); return *(unsigned char*)(arr->data + index1*arr->strides[0] + index2*arr->strides[1] + index3*arr->strides[2]); } int & Int1D(PyArrayObject * arr, int index = 0) { //assert(arr->strides[0]==sizeof(int)); return *(int*)(arr->data + index*arr->strides[0]); } int & Int2D(PyArrayObject * arr, int index1 = 0, int index2 = 0) { //assert(arr->strides[0]==sizeof(int)); return *(int*)(arr->data + index1*arr->strides[0] + index2*arr->strides[1]); } int & Int3D(PyArrayObject * arr, int index1 = 0, int index2 = 0, int index3 = 0) { //assert(arr->strides[0]==sizeof(int)); return *(int*)(arr->data + index1*arr->strides[0] + index2*arr->strides[1] + index3*arr->strides[2]); } #endif """
Python
#! /usr/bin/env python
# -*- coding: utf-8 -*-

# Copyright (c) 2011, Tom SF Haines
# All rights reserved.

# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:

#  * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
#  * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.

# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.


# Documentation generator script for the utils module - builds the html and
# wiki documentation by registering the module's files, variables, functions
# and classes with a DocGen instance (the output is written when doc is
# garbage collected).

import cvarray
import mp_map
import prog_bar

import numpy_help_cpp
import python_obj_cpp
import matrix_cpp
import gamma_cpp

import setProcName
import start_cpp
import make

import doc_gen



# Setup...
doc = doc_gen.DocGen('utils', 'Utilities/Miscellaneous', 'Library of miscellaneous stuff - most modules depend on this.')
doc.addFile('readme.txt', 'Overview')


# Variables...
doc.addVariable('numpy_help_cpp.numpy_util_code', 'Assorted utility functions for accessing numpy arrays within scipy.weave C++ code.')
doc.addVariable('python_obj_cpp.python_obj_code', 'Assorted utility functions for interfacing with python objects from scipy.weave C++ code.')
doc.addVariable('matrix_cpp.matrix_code', 'Matrix manipulation routines for use in scipy.weave C++')
doc.addVariable('gamma_cpp.gamma_code', 'Gamma and related functions for use in scipy.weave C++')


# Functions...
# (make.make_mod was previously registered twice - once here and once at the
# end of the list - which duplicated its entry in the generated docs.)
doc.addFunction(make.make_mod)
doc.addFunction(cvarray.cv2array)
doc.addFunction(cvarray.array2cv)
doc.addFunction(mp_map.repeat)
doc.addFunction(mp_map.mp_map)
doc.addFunction(setProcName.setProcName)
doc.addFunction(start_cpp.start_cpp)


# Classes...
doc.addClass(prog_bar.ProgBar)
doc.addClass(doc_gen.DocGen)
Python
#! /usr/bin/env python
# -*- coding: utf-8 -*-

# Copyright (c) 2010, Tom SF Haines
# All rights reserved.

# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:

#  * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
#  * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.

# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.


from ctypes import *



def setProcName(name):
  """Sets the process name, linux only - useful for those programs where you might want to do a killall, but don't want to slaughter all the other python processes. Note that there are multiple mechanisms, and that the given new name can be shortened by differing amounts in differing cases."""

  # Call the process control function...
  # 15 is PR_SET_NAME from <linux/prctl.h> - sets the kernel's task name,
  # which is what tools such as killall match (the kernel truncates it to
  # 16 bytes including the terminator - see prctl(2)).
  libc = cdll.LoadLibrary('libc.so.6')
  libc.prctl(15, c_char_p(name), 0, 0, 0)

  # Update argv...
  # Overwrite argv[0] in place as well, so commands that read the command
  # line (e.g. ps) also show the new name.
  # NOTE(review): _dl_argv is a glibc-internal dynamic-linker symbol, so
  # this half only works under glibc; strncpy bounds the copy by the OLD
  # argv[0]'s strlen, so a longer replacement name gets truncated to fit.
  charPP = POINTER(POINTER(c_char))
  argv = charPP.in_dll(libc,'_dl_argv')
  size = libc.strlen(argv[0])
  libc.strncpy(argv[0],c_char_p(name),size)



if __name__=='__main__':
  # Quick test that it works...
  # Show the process table before and after the rename, so the change can be
  # eyeballed in both the short (ps) and full (ps -f) listings.
  import os

  ps1 = 'ps'
  ps2 = 'ps -f'

  os.system(ps1)
  os.system(ps2)

  setProcName('wibble_wobble')

  os.system(ps1)
  os.system(ps2)
Python
#! /usr/bin/env python

# Copyright (c) 2011, Tom SF Haines
# All rights reserved.

# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:

#  * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
#  * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.

# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.


# Documentation generator script for the dp_utils module - registers its
# overview file and C++-code-string variables with a DocGen instance, which
# writes the html/wiki output on cleanup.
# (Fixes typos in the generated descriptions: 'distrbution', 'implimentation'
# and 'garabge'.)

import dp_utils
from utils import doc_gen



# Setup...
doc = doc_gen.DocGen('dp_utils', 'Dirichlet Process Utilities', 'Utility library for handling Dirichlet processes')
doc.addFile('readme.txt', 'Overview')


# Variables...
doc.addVariable('sampling_code', 'Code for sampling from various distributions - uniform, Gaussian, gamma and beta.')
doc.addVariable('conc_code', 'Contains code to sample a concentration parameter and two classes - one to represent the status of a concentration parameter - its prior and its estimated value, and another to do the same thing for when a concentration parameter is shared between multiple Dirichlet processes.')
doc.addVariable('dir_est_code', 'Contains a class for doing maximum likelihood estimation of a Dirichlet distribution given multinomials that have been drawn from it.')
doc.addVariable('linked_list_code', 'A linked list implementation - doubly linked, adds data via templated inheritance.')
doc.addVariable('linked_list_gc_code', 'A linked list with reference counting and garbage collection for its entries. Happens to be very good at representing a Dirichlet process.')
doc.addVariable('dp_utils_code', 'Combines all of the code provided in this module into a single variable.')
Python
# Copyright 2011 Tom SF Haines

# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.

# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.

# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.


from utils.start_cpp import start_cpp
from utils.numpy_help_cpp import numpy_util_code
from dp_utils.sampling_cpp import sampling_code


# C++ implementation (as a source string for inline compilation, prepended
# with the numpy accessors and sampling code it uses) of a Sparse Multinomial
# Posterior estimator. An SMP accumulates, per flag list, integer 'power'
# terms from observed counts, and Mean() then estimates the posterior mean
# multinomial by importance-weighting a pre-drawn set of Dirichlet(1,...,1)
# samples (SetSampleCount) against those powers plus a Dirichlet prior
# (SetPrior). The flag index array (SetFIA) defines which flags each flag
# list includes, matching FlagIndexArray.getFlagMatrix.
#
# NOTE(review): Add(int fli, const int * counts) accumulates into power[f]
# with f a *flag* index, and Mean()'s prior loop reads sam[a*fliLen + f]
# the same way, even though both arrays are documented as indexed by flag
# *list* index. This relies on FlagIndexArray.addSingles having been called,
# which makes flag index and flag-list index coincide for the singleton
# lists (and guarantees fliLen >= flagLen) - confirm SetFIA input was built
# that way, as the Init comment requires.
smp_code = numpy_util_code + sampling_code + start_cpp() + """
#ifndef SMP_CODE
#define SMP_CODE

class SMP
{
 public:
  // Basic constructor - after construction before anything else the Init method must be called...
   SMP()
   :fia(0),priorMN(0),sam(0),samPos(0),samTemp(0),power(0),temp(0)
   {}

  // Constructor that calls the init method...
   SMP(int flagSize, int fliSize)
   :fia(0),priorMN(0),sam(0),samPos(0),samTemp(0),power(0),temp(0)
   {
    Init(flagSize, fliSize);
   }

  // Cleans up...
   ~SMP()
   {
    delete[] temp;
    delete[] power;
    delete[] samTemp;
    delete[] samPos;
    delete[] sam;
    delete[] priorMN;
    delete[] fia;
   }

  // Initialises the Sparse Multinomial Posterior object with the length of each flag sequence as flagSize and the number of such flag sequences in the system with fliSize. Note that the flag list must be provided by a flag index array that has had its addSingles method correctly called...
   void Init(int flagSize, int fliSize)
   {
    // Clean up...
     delete[] temp;
     delete[] power;
     delete[] samTemp;
     delete[] samPos;
     delete[] sam;
     delete[] priorMN;
     delete[] fia;

    // Store sizes...
     flagLen = flagSize;
     fliLen = fliSize;

    // Initialise the flag index array - its filled in later...
     fia = new unsigned char[flagLen*fliLen];

    // Setup the prior - by default a uniform...
     priorMN = new float[flagLen];
     for (int f=0;f<flagLen;f++) priorMN[f] = 1.0/flagLen;
     priorConc = flagLen;

    // Zero out the sampling set - user has to add some samples before use...
     samLen = 0;
     sam = 0;
     samPos = 0;
     samTemp = 0;

    // The power counting array - stores the exponent term for each flag list...
     power = new int[fliLen];
     for (int s=0;s<fliLen;s++) power[s] = 0;

    // The temporary vector, which gets so many uses...
     temp = new float[flagLen];
   }

  // Fills in the flag index array - must be called in practise immediatly after the constructor. Input is the output of calling getFlagMatrix on a FlagIndexArray.
   void SetFIA(PyArrayObject * arr)
   {
    for (int s=0;s<fliLen;s++)
    {
     for (int f=0;f<flagLen;f++)
     {
      fia[s*flagLen + f] = Byte2D(arr,s,f);
     }
    }
   }

  // For if you have the fia as an array of unsigned char's instead...
   void SetFIA(unsigned char * arr)
   {
    for (int s=0;s<fliLen;s++)
    {
     for (int f=0;f<flagLen;f++)
     {
      fia[s*flagLen + f] = arr[s*flagLen + f];
     }
    }
   }

  // Sets the number of samples to use for the estimation - basically draws a large number of positions from a uniform Dirichlet distribution and then pre-calculates the values required such that the calculation of the mean given samples is trivial. Must be called before sampling occurs.
   void SetSampleCount(int count = 1024)
   {
    // Handle memory...
     samLen = count;
     delete[] sam; sam = new float[samLen*fliLen];
     delete[] samPos; samPos = new float[samLen*flagLen];
     delete[] samTemp; samTemp = new float[samLen];

    // Generate the samples...
     for (int a=0;a<samLen;a++)
     {
      // Draw a distribution from the uniform Dirichlet - we are going to integrate by the classic summing of lots of uniform samples approach...
       float sum = 0.0;
       for (int f=0;f<flagLen;f++)
       {
        temp[f] = -log(1.0 - sample_uniform()); // Identical to sample_gamma(1), but without the code to deal with values other than 1!
        sum += temp[f];
       }
       for (int f=0;f<flagLen;f++) temp[f] /= sum;

      // Calculate and store the log of each of the sums implied by the flag array - this makes the integration sampling nice and efficient...
       for (int s=0;s<fliLen;s++)
       {
        float * out = &sam[a*fliLen + s];
        *out = 0.0;
        for (int f=0;f<flagLen;f++)
        {
         if (fia[s*flagLen + f]!=0) *out += temp[f];
        }
        *out = log(*out);
       }

      // Also fill in the samPos array...
       for (int f=0;f<flagLen;f++)
       {
        samPos[a*flagLen + f] = temp[f];
       }
     }
   }

  // Sets the Dirichlet prior, using a vector that sums to unity and a concentration...
   void SetPrior(float * mn, float conc)
   {
    for (int f=0;f<flagLen;f++) priorMN[f] = mn[f];
    priorConc = conc;
   }

  // Sets the Dirichlet prior, using a vector that sums to the concentration...
   void SetPrior(float * dir)
   {
    priorConc = 0.0;
    for (int f=0;f<flagLen;f++)
    {
     priorMN[f] = dir[f];
     priorConc += priorMN[f];
    }
    for (int f=0;f<flagLen;f++) priorMN[f] /= priorConc;
   }

  // This version takes python objects - a numpy array of floats and a python float for the concentration...
   void SetPrior(PyArrayObject * mn, PyObject * conc)
   {
    for (int f=0;f<flagLen;f++) priorMN[f] = Float1D(mn,f);
    priorConc = PyFloat_AsDouble(conc);
   }

  // Resets the counts, ready to add a bunch of new samples for a new estimate...
   void Reset()
   {
    for (int s=0;s<fliLen;s++) power[s] = 0;
   }

  // Given a flag list index indicating which counts are valid and a set of counts indicating the sample counts drawn from the unknown multinomial. Updates the model accordingly...
   void Add(int fli, const int * counts)
   {
    int total = 0;
    for (int f=0;f<flagLen;f++)
    {
     if (fia[fli*flagLen + f]!=0)
     {
      power[f] += counts[f];
      total += counts[f];
     }
    }
    power[fli] -= total + 1;
   }

  // An alternate add method - adds the return value of Power(), allowing the combining of samples stored in seperate SMP objects...
   void Add(const int * pow)
   {
    for (int s=0;s<fliLen;s++) power[s] += pow[s];
   }

  // For incase you have a power vector as a numpy array...
   void Add(PyArrayObject * pow)
   {
    for (int s=0;s<fliLen;s++) power[s] += Int1D(pow,s);
   }

  // These return the dimensions of the entities...
   int FlagSize() {return flagLen;}
   int FlagIndexSize() {return fliLen;}

  // These return info about the prior...
   float * GetPriorMN() {return priorMN;}
   float GetPriorConc() {return priorConc;}

  // Returns the power vector - can be used to combine SMP objects, assuming the same fia...
   const int * Power() {return power;}

  // Calculates the mean of the multinomial drawn from the prior - has lots of sexy optimisations to make it fast. out must be of flagSize() and will have the estimate of the mean written into it...
   void Mean(float * out)
   {
    // First calculate the log probability of each sample, including the prior, storing into samTemp...
     float maxVal = -1e100;
     for (int a=0;a<samLen;a++)
     {
      samTemp[a] = 0.0;

      // Prior...
       for (int f=0;f<flagLen;f++)
       {
        samTemp[a] += priorConc * priorMN[f] * sam[a*fliLen + f];
       }

      // Informaiton provided by samples...
       for (int s=0;s<fliLen;s++)
       {
        samTemp[a] += power[s] * sam[a*fliLen + s];
       }

      // Keep the maximum, for the next bit...
       if (samTemp[a]>maxVal) maxVal = samTemp[a];
     }

    // Convert samTemp into an array of weights that sum to one - done in a numerically stable way, as the logs will represent extremelly small probabilities...
     float sum = 0.0;
     for (int a=0;a<samLen;a++)
     {
      samTemp[a] = exp(samTemp[a] - maxVal);
      sum += samTemp[a];
     }
     for (int a=0;a<samLen;a++) samTemp[a] /= sum;

    // Calculate the mean by suming the weights multiplied by the sample draws into the output...
     for (int f=0;f<flagLen;f++) out[f] = 0.0;
     for (int a=0;a<samLen;a++)
     {
      for (int f=0;f<flagLen;f++)
      {
       out[f] += samTemp[a] * samPos[a*flagLen + f];
      }
     }
   }

 private:
  int flagLen; // Length of each flag list.
  int fliLen; // Number of flag lists in system.

  unsigned char * fia; // Array indexed [fli * flagLen + flagIndex] of {0,1} indicating inclusion in each flag list.

  float * priorMN; // Multinomial of prior.
  float priorConc; // Concentration of prior.

  int samLen; // Number of samples used when sampling the mean.
  float * sam; // Array indexed by [sam * fliLen + fli], giving the log of each sum of terms of a draw from Dirichlet(1,...,1).
  float * samPos; // Array indexed by [sam * flagLen + flag], giving the not-log of the above for the single flag entries.
  float * samTemp; // Temporary of length samLen.

  int * power; // Count array, indexed by fli, of power terms for current distribution.

  float * temp; // Temporary array of length flagLen.
};

#endif
"""
Python
# Copyright 2011 Tom SF Haines

# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.

# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.

# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.


import numpy



class FlagIndexArray:
  """Provides a register for flag lists - given a list of true/false flags gives a unique number for each combination. Requesting the number associated with a combination that has already been entered will always return the same number. All flag lists should be the same length and you can obtain a numpy matrix of {0,1} valued unsigned chars where each row corresponds to the flag list with that index. Also has a function to add the flags for each case of only one flag being on, which if called before anything else puts them so the index of the flag and the index of the flag list correspond - a trick required by the rest of the system."""

  def __init__(self, length, addSingles = False):
    """Requires the length of the flag lists. Alternatively it can clone another FlagIndexArray. Will call the addSingles method for you if the flag is set."""
    if isinstance(length, FlagIndexArray):
      # Clone constructor - copy the register of the other instance...
      self.length = length.length
      self.flags = dict(length.flags)
    else:
      self.length = length
      self.flags = dict() # Dictionary from flag lists to integers. Flag lists are represented with tuples of {0,1}.

    if addSingles:
      self.addSingles()

  def getLength(self):
    """Return the length that all flag lists should be."""
    return self.length

  def addSingles(self):
    """Adds the entries where only a single flag is set, with the index of the flag list set to match the index of the flag that is set. Must be called first, before flagIndex is ever called."""
    # Registering singleton i with index i makes flag index and flag list
    # index coincide for the single-flag cases - the rest of the system
    # depends on this alignment.
    for i in range(self.length):
      single = tuple(1 if j==i else 0 for j in range(self.length))
      self.flags[single] = i

  def flagIndex(self, flags, create = True):
    """Given a flag list returns its index - if it has been previously supplied then it will be the same index, otherwise a new one. Can be passed any entity that can be indexed via [] to get the integers {0,1}. Returns a natural. If the create flag is set to False in the event of a previously unseen flag list it will raise an exception instead of assigning it a new natural."""
    # Normalise to a canonical {0,1} tuple so any truthy entry counts as set.
    key = tuple(1 if flags[i]!=0 else 0 for i in range(self.length))

    if key in self.flags:
      return self.flags[key]

    if not create:
      raise Exception('Unrecognised flag list')

    # New flag lists get the next free natural.
    index = len(self.flags)
    self.flags[key] = index
    return index

  def addFlagIndexArray(self, fia, remap = None):
    """Given a flag index array this merges its flags into the new flags, returning a dictionary indexed by fia's indices that converts them to the new indices in self. remap is optionally a dictionary converting flag indices in fia to flag indexes in self - remap[fia index] = self index."""
    def adjust(fi):
      # Scatter fia's flag entries into this object's flag positions.
      fo = [0]*self.length
      for i in range(fia.length):
        fo[remap[i]] = fi[i]
      return tuple(fo)

    ret = dict()
    for f, index in fia.flags.items(): # .items() rather than py2-only .iteritems() - identical behaviour, also runs under python 3.
      if remap:
        f = adjust(f)
      ret[index] = self.flagIndex(f)

    return ret

  def flagCount(self):
    """Returns the number of flag lists that are in the system."""
    return len(self.flags)

  def getFlagMatrix(self):
    """Returns a 2D numpy array of type numpy.uint8 containing {0,1}, indexed by [flag index,flag entry] - basically all the flags stacked into a single matrix and indexed by the entries returned by flagIndex. Often refered to as a 'flag index array' (fia)."""
    ret = numpy.zeros((len(self.flags),self.length), dtype=numpy.uint8)
    for flags, row in self.flags.items():
      ret[row,:] = flags # Whole-row assignment - entries are already {0,1}.
    return ret
Python
# Copyright (c) 2012, Tom SF Haines
# All rights reserved.

# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:

#  * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
#  * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.

# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.


from start_cpp import start_cpp


# Some basic matrix operations that come in use...
#
# C++ source fragment (a Python string for inline compilation) with three
# templated helpers: MemSwap (element-wise swap of two ranges), Determinant
# (recursive expansion - cost grows factorially with size, so only suitable
# for small matrices; it temporarily permutes rows via MemSwap and restores
# them afterwards), and Inverse (Gauss-Jordan elimination with partial
# pivoting; note it treats a pivot below the hard-coded 1e-6 threshold as
# singular, and it destroys the input matrix while writing the inverse to
# the output).
matrix_code = start_cpp() + """
#ifndef MATRIX_CODE
#define MATRIX_CODE

template <typename T>
inline void MemSwap(T * lhs, T * rhs, int count = 1)
{
 while(count!=0)
 {
  T t = *lhs;
  *lhs = *rhs;
  *rhs = t;

  ++lhs;
  ++rhs;
  --count;
 }
}

// Calculates the determinant - you give it a pointer to the first elment of the array, and its size (It must be square), plus its stride, which would typically be identical to size, which is the default.
template <typename T>
inline T Determinant(T * pos, int size, int stride = -1)
{
 if (stride==-1) stride = size;

 if (size==1) return pos[0];
 else
 {
  if (size==2) return pos[0]*pos[stride+1] - pos[1]*pos[stride];
  else
  {
   T ret = 0.0;
   for (int i=0; i<size; i++)
   {
    if (i!=0) MemSwap(&pos[0], &pos[stride*i], size-1);
    T sub = Determinant(&pos[stride], size-1, stride) * pos[stride*i + size-1];
    if ((i+size)%2) ret += sub;
              else ret -= sub;
   }

   for (int i=1; i<size; i++)
   {
    MemSwap(&pos[(i-1)*stride], &pos[i*stride], size-1);
   }

   return ret;
  }
 }
}

// Inverts a square matrix, will fail on singular and very occasionally on
// non-singular matrices, returns true on success. Uses Gauss-Jordan elimination
// with partial pivoting.
// in is the input matrix, out the output matrix, just be aware that the input matrix is trashed.
// You have to provide its size (Its square, obviously.), and optionally a stride if different from size.
template <typename T>
inline bool Inverse(T * in, T * out, int size, int stride = -1)
{
 if (stride==-1) stride = size;

 for (int r=0; r<size; r++)
 {
  for (int c=0; c<size; c++)
  {
   out[r*stride + c] = (c==r)?1.0:0.0;
  }
 }

 for (int r=0; r<size; r++)
 {
  // Find largest pivot and swap in, fail if best we can get is 0...
   T max = in[r*stride + r];
   int index = r;
   for (int i=r+1; i<size; i++)
   {
    if (fabs(in[i*stride + r])>fabs(max))
    {
     max = in[i*stride + r];
     index = i;
    }
   }

   if (index!=r)
   {
    MemSwap(&in[index*stride], &in[r*stride], size);
    MemSwap(&out[index*stride], &out[r*stride], size);
   }

   if (fabs(max-0.0)<1e-6) return false;

  // Divide through the entire row...
   max = 1.0/max;
   in[r*stride + r] = 1.0;
   for (int i=r+1; i<size; i++) in[r*stride + i] *= max;
   for (int i=0; i<size; i++) out[r*stride + i] *= max;

  // Row subtract to generate 0's in the current column, so it matches an identity matrix...
   for (int i=0; i<size; i++)
   {
    if (i==r) continue;

    T factor = in[i*stride + r];
    in[i*stride + r] = 0.0;
    for (int j=r+1; j<size; j++) in[i*stride + j] -= factor * in[r*stride + j];
    for (int j=0; j<size; j++) out[i*stride + j] -= factor * out[r*stride + j];
   }
 }

 return true;
}

#endif
"""
Python
# -*- coding: utf-8 -*-

# Copyright (c) 2010, Tom SF Haines
# All rights reserved.

# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:

#  * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
#  * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.

# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.


import sys
import time



class ProgBar:
  """Simple console progress bar class. Note that object creation and destruction matter, as they indicate when processing starts and when it stops."""

  def __init__(self, width = 60, onCallback = None):
    """Records the start time, draws the empty bar (a line of underscores) and remembers an optional hook to invoke on every callback."""
    self.start = time.time()
    self.fill = 0
    self.width = width
    self.onCallback = onCallback

    sys.stdout.write(('_'*self.width)+'\n')
    sys.stdout.flush()

  def __del__(self):
    """Completes the bar, then reports the elapsed wall-clock time."""
    self.end = time.time()
    self.__advance(self.width)
    sys.stdout.write('\nDone - '+str(self.end-self.start)+' seconds\n\n')
    sys.stdout.flush()

  def callback(self, nDone, nToDo):
    """Hand this into the callback of methods to get a progress bar - it works by users repeatedly calling it to indicate how many units of work they have done (nDone) out of the total number of units required (nToDo)."""
    if self.onCallback:
      self.onCallback()

    # Map the completed fraction onto the bar width, clamped to the bar, and
    # only ever extend the bar - never shrink it.
    target = min(int((self.width*float(nDone))/float(nToDo)), self.width)
    if target > self.fill:
      self.__advance(target)

  def __advance(self, upto):
    """Extends the drawn bar out to the given column and records the new fill level."""
    sys.stdout.write('|'*(upto-self.fill))
    sys.stdout.flush()
    self.fill = upto
Python
# Copyright (c) 2012, Tom SF Haines
# All rights reserved.

# Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met:

# * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.

# * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.

# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.



import pydoc
import inspect



class DocGen:
  """A helper class that is used to generate documentation for the system. Outputs multiple formats simultaneously, specifically html for local reading with a webbrowser and the markup used by the wiki system on Google code. The file headers are written on construction, addFile sections are written immediately, whilst the variable/function/class sections are buffered and only flushed, with the footers, when the object is destructed (__del__) - the object therefore has to be garbage collected for the output to be complete."""
  def __init__(self, name, title = None, summary = None):
    """name is the module name - primarily used for the file names (<name>.html and <name>.wiki are opened for writing in the current directory). title is the title used as applicable - if not provided it just uses the name. summary is an optional line to go below the title - defaults to the title."""
    if title==None: title = name
    if summary==None: summary = title
    
    # pydoc does the heavy lifting for the html rendering of functions/classes...
    self.doc = pydoc.HTMLDoc()
    
    # html output - header written now, per-kind buffers flushed by __del__...
    self.html = open('%s.html'%name,'w')
    self.html.write('<html>\n')
    self.html.write('<head>\n')
    self.html.write('<title>%s</title>\n'%title)
    self.html.write('</head>\n')
    self.html.write('<body>\n')
    self.html_variables = ''
    self.html_functions = ''
    self.html_classes = ''
    
    # wiki output - same arrangement as the html output...
    self.wiki = open('%s.wiki'%name,'w')
    self.wiki.write('#summary %s\n\n'%summary)
    self.wiki.write('= %s= \n\n'%title)
    self.wiki_variables = ''
    self.wiki_functions = ''
    self.wiki_classes = ''
  
  
  def __del__(self):
    """Flushes the buffered variable/function/class sections, writes the footers and closes both output files."""
    # html - note the variables section is titled 'Synonyms' in this output...
    if self.html_variables!='':
      self.html.write(self.doc.bigsection('Synonyms', '#ffffff', '#8d50ff', self.html_variables))
    if self.html_functions!='':
      self.html.write(self.doc.bigsection('Functions', '#ffffff', '#eeaa77', self.html_functions))
    if self.html_classes!='':
      self.html.write(self.doc.bigsection('Classes', '#ffffff', '#ee77aa', self.html_classes))
    self.html.write('</body>\n')
    self.html.write('</html>\n')
    self.html.close()
    
    # wiki...
    if self.wiki_variables!='':
      self.wiki.write('= Variables =\n\n')
      self.wiki.write(self.wiki_variables)
      self.wiki.write('\n')
    if self.wiki_functions!='':
      self.wiki.write('= Functions =\n\n')
      self.wiki.write(self.wiki_functions)
      self.wiki.write('\n')
    if self.wiki_classes!='':
      self.wiki.write('= Classes =\n\n')
      self.wiki.write(self.wiki_classes)
      self.wiki.write('\n')
    self.wiki.close()
  
  
  def addFile(self, fn, title, fls = True):
    """Given a filename and section title adds the contents of said file to the output, immediately. If fls is True the first line of the file is emphasised (bold in html, *bold* in wiki). Any line containing '.py - ' or '.txt - ' has the filename part styled as code/italic in both outputs."""
    html = []
    wiki = []
    
    for i, line in enumerate(open(fn,'r').readlines()):
      # html version of the line...
      hl = line.replace('\n', '')
      if i==0 and fls: hl = '<strong>' + hl + '</strong>'
      for ext in ['py','txt']:
        if '.%s - '%ext in hl:
          s = hl.split('.%s - '%ext, 1)
          hl = '<i>' + s[0] + '.%s</i> - '%ext + s[1]
      html.append(hl)
      
      # wiki version of the line...
      wl = line.strip()
      if i==0 and fls: wl = '*%s*'%wl
      for ext in ['py','txt']:
        if '.%s - '%ext in wl:
          s = wl.split('.%s - '%ext, 1)
          wl = '`' + s[0] + '.%s` - '%ext + s[1] + '\n'
      wiki.append(wl)
    
    self.html.write(self.doc.bigsection(title, '#ffffff', '#7799ee', '<br/>'.join(html)))
    
    self.wiki.write('== %s ==\n'%title)
    self.wiki.write('\n'.join(wiki))
    self.wiki.write('----\n\n')
  
  
  def addVariable(self, var, desc):
    """Adds a variable to the documentation. Given the nature of this you provide it as a pair of strings - one referencing the variable, the other some kind of description of its use etc.."""
    self.html_variables += '<strong>%s</strong><br/>'%var
    self.html_variables += '%s<br/><br/>\n'%desc
    
    self.wiki_variables += '*`%s`*\n'%var
    self.wiki_variables += ' %s\n\n'%desc
  
  
  def addFunction(self, func):
    """Adds a function to the documentation. You provide the actual function instance."""
    self.html_functions += self.doc.docroutine(func).replace('&nbsp;',' ')
    self.html_functions += '\n'
    
    # For the wiki build the signature by hand from the argspec...
    name = func.__name__
    args, varargs, keywords, defaults = inspect.getargspec(func)
    doc = inspect.getdoc(func)
    
    # Left-pad defaults with None so it lines up with args (only trailing args can have defaults)...
    if defaults==None: defaults = list()
    defaults = (len(args)-len(defaults)) * [None] + list(defaults)
    
    arg_str = ''
    if len(args)!=0:
      arg_str += reduce(lambda a, b: '%s, %s'%(a,b), map(lambda arg, d: arg if d==None else '%s = %s'%(arg,d), args, defaults))
    if varargs!=None: arg_str += ', *%s'%varargs if arg_str!='' else '*%s'%varargs
    if keywords!=None: arg_str += ', **%s'%keywords if arg_str!='' else '**%s'%keywords
    
    self.wiki_functions += '*`%s(%s)`*\n'%(name, arg_str)
    self.wiki_functions += ' %s\n\n'%doc
  
  
  def addClass(self, cls):
    """Adds a class to the documentation. You provide the actual class object."""
    self.html_classes += self.doc.docclass(cls).replace('&nbsp;',' ')
    self.html_classes += '\n'
    
    # Wiki class header - name plus its base classes (full mro minus the class itself)...
    name = cls.__name__
    parents = filter(lambda a: a!=cls, inspect.getmro(cls))
    doc = inspect.getdoc(cls)
    
    par_str = ''
    if len(parents)!=0:
      par_str += reduce(lambda a, b: '%s, %s'%(a,b), map(lambda p: p.__name__, parents))
    
    self.wiki_classes += '== %s(%s) ==\n'%(name, par_str)
    self.wiki_classes += ' %s\n\n'%doc
    
    # Document the methods...
    methods = inspect.getmembers(cls, lambda x: inspect.ismethod(x) or inspect.isbuiltin(x) or inspect.isroutine(x))
    
    def method_key(pair):
      # '___' sorts before any normal identifier, so __init__ is listed first...
      if pair[0]=='__init__': return '___'
      else: return pair[0]
    methods.sort(key=method_key)
    
    for name, method in methods:
      # Skip name-mangled private methods and dunder methods; the ismethod test means only non-method routines (e.g. builtins from C extensions) pass...
      # NOTE(review): given 'not inspect.ismethod(method)' here the inner ismethod branch below looks unreachable - getargspec is presumably never used; confirm against the method kinds actually fed in.
      if not name.startswith('_%s'%cls.__name__) and (not inspect.ismethod(method) and name[:2]!='__'):
        if inspect.ismethod(method):
          args, varargs, keywords, defaults = inspect.getargspec(method)
        else:
          # No introspectable signature available...
          args = ['?']
          varargs = None
          keywords = None
          defaults = None
        
        # Same signature-building dance as addFunction...
        if defaults==None: defaults = list()
        defaults = (len(args)-len(defaults)) * [None] + list(defaults)
        
        arg_str = ''
        if len(args)!=0:
          arg_str += reduce(lambda a, b: '%s, %s'%(a,b), map(lambda arg, d: arg if d==None else '%s = %s'%(arg,d), args, defaults))
        if varargs!=None: arg_str += ', *%s'%varargs if arg_str!='' else '*%s'%varargs
        if keywords!=None: arg_str += ', **%s'%keywords if arg_str!='' else '**%s'%keywords
        
        def fetch_doc(cls, name):
          # Returns the docstring for the named method, walking up the mro to inherit documentation from a parent if the class itself has none...
          try:
            method = getattr(cls, name)
            if method.__doc__!=None:
              return inspect.getdoc(method)
          except: pass
          
          for parent in filter(lambda a: a!=cls, inspect.getmro(cls)):
            ret = fetch_doc(parent, name)
            if ret!=None: return ret
          return None
        
        doc = fetch_doc(cls, name)
        
        self.wiki_classes += '*`%s(%s)`*\n'%(name, arg_str)
        self.wiki_classes += ' %s\n\n'%doc
    
    # Document simple class-level variables (descriptors and int/str/float constants)...
    variables = inspect.getmembers(cls, lambda x: inspect.ismemberdescriptor(x) or isinstance(x, int) or isinstance(x, str) or isinstance(x, float))
    for name, var in variables:
      if not name.startswith('__'):
        if hasattr(var, '__doc__'): d = var.__doc__
        else: d = str(var)
        self.wiki_classes += '*`%s`* = %s\n\n'%(name, d)
Python
# Copyright (c) 2011, Tom SF Haines
# All rights reserved.

# Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met:

# * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.

# * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.

# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.



import unittest
import random
import math

from scipy.special import gammaln, psi, polygamma
from scipy import weave

from utils.start_cpp import start_cpp



# Provides various gamma-related functions - C++ source for use as scipy.weave support_code (pass through start_cpp so dependants recompile when it changes)...
gamma_code = start_cpp() + """
#ifndef GAMMA_CODE
#define GAMMA_CODE

#include <cmath>

// Returns the natural logarithm of the Gamma function...
// (Uses Lanczos's approximation.)
double lnGamma(double z)
{
 static const double coeff[9] = {0.99999999999980993, 676.5203681218851, -1259.1392167224028, 771.32342877765313, -176.61502916214059, 12.507343278686905, -0.13857109526572012, 9.9843695780195716e-6, 1.5056327351493116e-7};

 if (z<0.5)
 {
  // Use reflection formula, as approximation doesn't work down here...
  return log(M_PI) - log(sin(M_PI*z)) - lnGamma(1.0-z);
 }
 else
 {
  double x = coeff[0];
  for (int i=1;i<9;i++) x += coeff[i]/(z+i-1);

  double t = z + 6.5;
  return log(sqrt(2.0*M_PI)) + (z-0.5)*log(t) - t + log(x);
 }
}

// Calculates the Digamma function, i.e. the derivative of the log of the Gamma function - uses a partial expansion of an infinite series to 4 terms that is good for high values, and an identity to express lower values in terms of higher values...
double digamma(double z)
{
 static const double highVal = 13.0; // A bit of fiddling shows that the last term with this is of the order 1e-10, so we can expect at least 9 digits of accuracy past the decimal point.

 double ret = 0.0;
 while (z<highVal)
 {
  ret -= 1.0/z;
  z += 1.0;
 }

 double iz1 = 1.0/z;
 double iz2 = iz1*iz1;
 double iz4 = iz2*iz2;
 double iz6 = iz4*iz2;

 ret += log(z) - iz1/2.0 - iz2/12.0 + iz4/120.0 - iz6/252.0;

 return ret;
}

// Calculates the trigamma function - uses a partial expansion of an infinite series that is accurate for large values, and then uses an identity to express lower values in terms of higher values - same approach as for the digamma function basically...
double trigamma(double z)
{
 static const double highVal = 8.0;

 double ret = 0.0;
 while (z<highVal)
 {
  ret += 1.0/(z*z);
  z += 1.0;
 }
 z -= 1.0;

 double iz1 = 1.0/z;
 double iz2 = iz1*iz1;
 double iz3 = iz1*iz2;
 double iz5 = iz3*iz2;
 double iz7 = iz5*iz2;
 double iz9 = iz7*iz2;

 ret += iz1 - 0.5*iz2 + iz3/6.0 - iz5/30.0 + iz7/42.0 - iz9/30.0;

 return ret;
}

#endif
"""



# Python wrappers around the C++ implementations - each weave.inline call compiles on first use...

def lnGamma(z):
  """Pointless as scipy, a library this is dependent on, defines this, but useful for testing. Returns the logarithm of the gamma function."""
  code = start_cpp(gamma_code) + """
  return_val = lnGamma(z);
  """
  return weave.inline(code, ['z'], support_code=gamma_code)


def digamma(z):
  """Pointless as scipy, a library this is dependent on, defines this, but useful for testing. Returns an evaluation of the digamma function."""
  code = start_cpp(gamma_code) + """
  return_val = digamma(z);
  """
  return weave.inline(code, ['z'], support_code=gamma_code)


def trigamma(z):
  """Pointless as scipy, a library this is dependent on, defines this, but useful for testing. Returns an evaluation of the trigamma function."""
  code = start_cpp(gamma_code) + """
  return_val = trigamma(z);
  """
  return weave.inline(code, ['z'], support_code=gamma_code)



class TestFuncs(unittest.TestCase):
  """Test code for the assorted gamma-related functions - each accuracy test compares the C++ implementation against the scipy equivalent at 1000 random points in (0.01, 100)."""
  def test_compile(self):
    # Just verifies the support code compiles...
    code = start_cpp(gamma_code) + """
    """
    weave.inline(code, support_code=gamma_code)
  
  def test_error_lngamma(self):
    for _ in xrange(1000):
      z = random.uniform(0.01, 100.0)
      own = lnGamma(z)
      good = gammaln(z)
      assert(math.fabs(own-good)<1e-12)
  
  def test_error_digamma(self):
    for _ in xrange(1000):
      z = random.uniform(0.01, 100.0)
      own = digamma(z)
      good = psi(z)
      assert(math.fabs(own-good)<1e-9)
  
  def test_error_trigamma(self):
    for _ in xrange(1000):
      z = random.uniform(0.01, 100.0)
      own = trigamma(z)
      good = polygamma(1,z)
      assert(math.fabs(own-good)<1e-9)



# If this file is run do the unit tests...
if __name__ == '__main__':
  unittest.main()
Python
# -*- coding: utf-8 -*-

# Copyright (c) 2010, Tom SF Haines
# All rights reserved.

# Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met:

# * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.

# * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.

# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.



import inspect
import hashlib



def start_cpp(hash_str = None):
  """Returns a C++ '#line' directive identifying the file and line of the caller, for prepending to scipy.weave code so compiler errors reference the correct python source location. Optionally takes hash_str (typically the support_code that will be handed to weave) and embeds its md5 in a trailing comment - scipy.weave only recompiles when the hash of the inlined code changes and ignores the support_code, so including the support code's hash forces a recompile whenever that code changes. Usage is <code variable> = start_cpp([support_code variable]) + <3 quotations to start big comment with code in, typically going over many lines.>"""
  # Location of whoever called us - one frame up...
  caller = inspect.currentframe().f_back
  fname, line_no = inspect.getframeinfo(caller)[:2]
  
  if hash_str!=None:
    # Fold the dependent code's hash into the directive as a comment, so weave sees a change...
    digest = hashlib.md5()
    digest.update(hash_str)
    return '#line %i "%s" // %s\n'%(line_no, fname, digest.hexdigest())
  
  return '#line %i "%s"\n'%(line_no, fname)
Python
# -*- coding: utf-8 -*-

# Code copied from http://opencv.willowgarage.com/wiki/PythonInterface - license unknown, but presumed to be at least as liberal as bsd (The license for opencv.).



import cv
import numpy as np



def cv2array(im):
  """Converts a cv array (IplImage) to a numpy array. The pixel data is copied; the result has shape (height, width, nChannels) and a dtype matching the image depth."""
  # Map from OpenCV depth constants to numpy dtype names...
  depth2dtype = {
    cv.IPL_DEPTH_8U: 'uint8',
    cv.IPL_DEPTH_8S: 'int8',
    cv.IPL_DEPTH_16U: 'uint16',
    cv.IPL_DEPTH_16S: 'int16',
    cv.IPL_DEPTH_32S: 'int32',
    cv.IPL_DEPTH_32F: 'float32',
    cv.IPL_DEPTH_64F: 'float64',
  }
  
  # Copy out the raw pixel buffer and shape it into (height, width, channels)...
  a = np.fromstring(
        im.tostring(),
        dtype=depth2dtype[im.depth],
        count=im.width*im.height*im.nChannels)
  a.shape = (im.height,im.width,im.nChannels)
  return a


def array2cv(a):
  """Converts a numpy array to a cv array (IplImage), if possible - the dtype must be one of the supported image depths. The pixel data is copied (via tostring), so the result does not alias the input array."""
  # Map from numpy dtype names to OpenCV depth constants...
  dtype2depth = {
    'uint8': cv.IPL_DEPTH_8U,
    'int8': cv.IPL_DEPTH_8S,
    'uint16': cv.IPL_DEPTH_16U,
    'int16': cv.IPL_DEPTH_16S,
    'int32': cv.IPL_DEPTH_32S,
    'float32': cv.IPL_DEPTH_32F,
    'float64': cv.IPL_DEPTH_64F,
  }
  
  # A 2D array is a single channel image; a 3D array uses the third dimension as the channel count...
  try:
    nChannels = a.shape[2]
  except IndexError:
    nChannels = 1
  
  cv_im = cv.CreateImageHeader((a.shape[1],a.shape[0]), dtype2depth[str(a.dtype)], nChannels)
  cv.SetData(cv_im, a.tostring(), a.dtype.itemsize*nChannels*a.shape[1])
  return cv_im
Python
# -*- coding: utf-8 -*-

# Copyright (c) 2011, Tom SF Haines
# All rights reserved.

# Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met:

# * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.

# * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.

# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.



import multiprocessing as mp
import multiprocessing.synchronize # To make sure we have all the functionality.
import types
import marshal

import unittest



def repeat(x):
  """A generator that repeats the input forever - can be used with the mp_map function to give data to a function that is constant."""
  while True:
    yield x


def run_code(code,args):
  """Internal use function that does the work in each process - rebuilds the function from its marshalled code object and applies it to the given argument tuple."""
  code = marshal.loads(code)
  func = types.FunctionType(code, globals(), '_')
  return func(*args)


def mp_map(func, *iters, **keywords):
  """A multiprocess version of the map function. Note that func must limit itself to the data provided - if it accesses anything else (globals, locals to its definition.) it will fail. There is a repeat generator provided in this module to work around such issues. Note that, unlike map, this iterates the length of the shortest of inputs, rather than the longest - whilst this makes it not a perfect substitute it makes passing constant arguments easier as they can just repeat for infinity. An existing multiprocessing.Pool can be supplied via the keyword argument 'pool'; otherwise a default Pool is created."""
  # Use the caller-provided pool if given, otherwise create one with the default process count...
  if 'pool' in keywords:
    pool = keywords['pool']
  else:
    pool = mp.Pool()
  
  # Functions (lambdas, closures, nested defs) are not picklable, but their (Python 2) code objects can be marshalled and rebuilt in each worker by run_code - this is why func must not touch its enclosing scope...
  code = marshal.dumps(func.func_code)
  
  # One async job per argument tuple - zip truncates at the shortest input...
  jobs = []
  for args in zip(*iters):
    jobs.append(pool.apply_async(run_code,(code,args)))
  
  # Swap each job handle for its result, preserving input order (get blocks until done)...
  for i in xrange(len(jobs)):
    jobs[i] = jobs[i].get()
  
  return jobs



class TestMpMap(unittest.TestCase):
  """Unit tests for mp_map."""
  def test_simple1(self):
    data = ['a','b','c','d']
    def noop(data): return data
    data_noop = mp_map(noop, data)
    self.assertEqual(data, data_noop)
  
  def test_simple2(self):
    data = [x for x in xrange(1000)]
    data_double = mp_map(lambda a: a*2, data)
    self.assertEqual(map(lambda a: a*2,data), data_double)
  
  def test_gen(self):
    # Inputs can be arbitrary iterables, not just lists...
    def gen():
      for i in xrange(100):
        yield i
    data_double = mp_map(lambda a: a*2, gen())
    self.assertEqual(map(lambda a: a*2,gen()), data_double)
  
  def test_repeat(self):
    # The repeat generator supplies a constant second argument...
    def mult(a,b): return a*b
    data = [x for x in xrange(50,5000,5)]
    data_triple = mp_map(mult, data, repeat(3))
    self.assertEqual(map(lambda a: a*3,data),data_triple)
  
  def test_none(self):
    # Empty input gives empty output...
    data = []
    data_sqr = mp_map(lambda x: x*x, data)
    self.assertEqual([],data_sqr)



if __name__ == '__main__':
  unittest.main()
Python
# Copyright (c) 2012, Tom SF Haines
# All rights reserved.

# Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met:

# * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.

# * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.

# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.



import sys
import os.path
import tempfile
import shutil

from distutils.core import setup, Extension
import distutils.ccompiler
import distutils.dep_util



try:
  # Best effort - if no compiler can be constructed record that, and only complain if make_mod is actually called...
  __default_compiler = distutils.ccompiler.new_compiler()
except:
  __default_compiler = None



def make_mod(name, base, source, openCL = False):
  """Uses distutils to compile a python module - really just a set of hacks to allow this to be done 'on demand', so it only compiles if the module does not exist or is older than the current source, and after compilation the program can continue on its merry way, and immediately import the just compiled module. Note that on failure errors can be thrown - its your choice to catch them or not.

  name is the modules name, i.e. what you want to use with the import statement. base is the base directory for the module, which contains the source file - often you would want to set this to 'os.path.dirname(__file__)', assuming the .py file that imports the module is in the same directory as the code. It is this directory that the module is output to. source is the filename of the source code to compile, or alternatively a list of filenames. openCL indicates if OpenCL is used by the module, in which case it does all the necessary setup - done like this so these settings can be kept centralised, so when they need to be different for a new platform they only have to be changed in one place."""
  if __default_compiler==None:
    raise Exception('No compiler!')

  # Work out the various file names - check if we actually need to do anything...
  if not isinstance(source, list): source = [source]
  source_path = map(lambda s: os.path.join(base, s), source)
  library_path = os.path.join(base, __default_compiler.shared_object_filename(name))

  # Only rebuild if any source file is newer than the compiled module (or the module does not exist)...
  if reduce(lambda a,b: a or b, map(lambda s: distutils.dep_util.newer(s, library_path), source_path)):
    # Backup the argv variable and create a temporary directory to do all work in...
    # (Both are assigned before the try, so the finally block can never reference an unbound name if mkdtemp throws.)
    old_argv = sys.argv[:]
    temp_dir = tempfile.mkdtemp()

    try:
      # Prepare the extension - distutils is driven via argv, so fake a 'build_ext' command line that outputs straight into base...
      sys.argv = ['','build_ext','--build-lib', base, '--build-temp', temp_dir]

      # Headers are dependencies rather than compilation units...
      comp_path = filter(lambda s: not s.endswith('.h'), source_path)
      depends = filter(lambda s: s.endswith('.h'), source_path)

      if openCL:
        # Include/library paths cover both the nVidia (CUDA) and AMD (APP) SDK install locations...
        ext = Extension(name, comp_path, include_dirs=['/usr/local/cuda/include', '/opt/AMDAPP/include'], libraries = ['OpenCL'], library_dirs = ['/usr/lib64/nvidia', '/opt/AMDAPP/lib/x86_64'], depends=depends)
      else:
        ext = Extension(name, comp_path, depends=depends)

      # Compile...
      setup(name=name, version='1.0.0', ext_modules=[ext])
    finally:
      # Cleanup the argv variable and the temporary directory...
      sys.argv = old_argv
      shutil.rmtree(temp_dir, True)
Python
# -*- coding: utf-8 -*-

# Copyright (c) 2011, Tom SF Haines
# All rights reserved.

# Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met:

# * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.

# * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.

# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.



from start_cpp import start_cpp
from numpy_help_cpp import numpy_util_code



# Provides various functions to assist with manipulating python objects from c++ code.
# This is C++ source for scipy.weave support_code - numpy_util_code is prepended so the Byte1D/Float1D accessors used below resolve, and start_cpp() adds a #line marker. The Get* helpers read a named attribute off a python object; none of them check that the attribute exists or has the expected type, and the 1D variants return new[]'d buffers the caller must delete[]...
python_obj_code = numpy_util_code + start_cpp() + """
#ifndef PYTHON_OBJ_CODE
#define PYTHON_OBJ_CODE

// Extracts a boolean from an object...
bool GetObjectBoolean(PyObject * obj, const char * name)
{
 PyObject * b = PyObject_GetAttrString(obj, name);
 bool ret = b!=Py_False;
 Py_DECREF(b);
 return ret;
}

// Extracts an int from an object...
int GetObjectInt(PyObject * obj, const char * name)
{
 PyObject * i = PyObject_GetAttrString(obj, name);
 int ret = PyInt_AsLong(i);
 Py_DECREF(i);
 return ret;
}

// Extracts a float from an object...
float GetObjectFloat(PyObject * obj, const char * name)
{
 PyObject * f = PyObject_GetAttrString(obj, name);
 float ret = PyFloat_AsDouble(f);
 Py_DECREF(f);
 return ret;
}

// Extracts an array from an object, returning it as a new[] unsigned char array. You can also pass in a pointer to an int to have the size of the array stored...
unsigned char * GetObjectByte1D(PyObject * obj, const char * name, int * size = 0)
{
 PyArrayObject * nao = (PyArrayObject*)PyObject_GetAttrString(obj, name);

 unsigned char * ret = new unsigned char[nao->dimensions[0]];
 if (size) *size = nao->dimensions[0];

 for (int i=0;i<nao->dimensions[0];i++) ret[i] = Byte1D(nao,i);

 Py_DECREF(nao);
 return ret;
}

// Extracts an array from an object, returning it as a new[] float array. You can also pass in a pointer to an int to have the size of the array stored...
float * GetObjectFloat1D(PyObject * obj, const char * name, int * size = 0)
{
 PyArrayObject * nao = (PyArrayObject*)PyObject_GetAttrString(obj, name);

 float * ret = new float[nao->dimensions[0]];
 if (size) *size = nao->dimensions[0];

 for (int i=0;i<nao->dimensions[0];i++) ret[i] = Float1D(nao,i);

 Py_DECREF(nao);
 return ret;
}

#endif
"""
Python
# -*- coding: utf-8 -*- # Copyright (c) 2011, Tom SF Haines # All rights reserved. # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. # * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. from start_cpp import start_cpp # Defines helper functions for accessing numpy arrays... 
numpy_util_code = start_cpp() + """ #ifndef NUMPY_UTIL_CODE #define NUMPY_UTIL_CODE float & Float1D(PyArrayObject * arr, int index = 0) { return *(float*)(arr->data + index*arr->strides[0]); } float & Float2D(PyArrayObject * arr, int index1 = 0, int index2 = 0) { return *(float*)(arr->data + index1*arr->strides[0] + index2*arr->strides[1]); } float & Float3D(PyArrayObject * arr, int index1 = 0, int index2 = 0, int index3 = 0) { return *(float*)(arr->data + index1*arr->strides[0] + index2*arr->strides[1] + index3*arr->strides[2]); } unsigned char & Byte1D(PyArrayObject * arr, int index = 0) { //assert(arr->strides[0]==sizeof(unsigned char)); return *(unsigned char*)(arr->data + index*arr->strides[0]); } unsigned char & Byte2D(PyArrayObject * arr, int index1 = 0, int index2 = 0) { //assert(arr->strides[0]==sizeof(unsigned char)); return *(unsigned char*)(arr->data + index1*arr->strides[0] + index2*arr->strides[1]); } unsigned char & Byte3D(PyArrayObject * arr, int index1 = 0, int index2 = 0, int index3 = 0) { //assert(arr->strides[0]==sizeof(unsigned char)); return *(unsigned char*)(arr->data + index1*arr->strides[0] + index2*arr->strides[1] + index3*arr->strides[2]); } int & Int1D(PyArrayObject * arr, int index = 0) { //assert(arr->strides[0]==sizeof(int)); return *(int*)(arr->data + index*arr->strides[0]); } int & Int2D(PyArrayObject * arr, int index1 = 0, int index2 = 0) { //assert(arr->strides[0]==sizeof(int)); return *(int*)(arr->data + index1*arr->strides[0] + index2*arr->strides[1]); } int & Int3D(PyArrayObject * arr, int index1 = 0, int index2 = 0, int index3 = 0) { //assert(arr->strides[0]==sizeof(int)); return *(int*)(arr->data + index1*arr->strides[0] + index2*arr->strides[1] + index3*arr->strides[2]); } #endif """
Python
#! /usr/bin/env python
# -*- coding: utf-8 -*-

# Copyright (c) 2011, Tom SF Haines
# All rights reserved.

# Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met:

# * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.

# * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.

# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.



# Documentation generator for the utils module - running it writes utils.html and utils.wiki into the current directory (DocGen flushes the buffered sections when the doc object is garbage collected).

import cvarray
import mp_map
import prog_bar
import numpy_help_cpp
import python_obj_cpp
import matrix_cpp
import gamma_cpp
import setProcName
import start_cpp
import make

import doc_gen



# Setup...
doc = doc_gen.DocGen('utils', 'Utilities/Miscellaneous', 'Library of miscellaneous stuff - most modules depend on this.')
doc.addFile('readme.txt', 'Overview')

# Variables...
doc.addVariable('numpy_help_cpp.numpy_util_code', 'Assorted utility functions for accessing numpy arrays within scipy.weave C++ code.')
doc.addVariable('python_obj_cpp.python_obj_code', 'Assorted utility functions for interfacing with python objects from scipy.weave C++ code.')
doc.addVariable('matrix_cpp.matrix_code', 'Matrix manipulation routines for use in scipy.weave C++')
doc.addVariable('gamma_cpp.gamma_code', 'Gamma and related functions for use in scipy.weave C++')

# Functions - each public function exactly once...
doc.addFunction(make.make_mod)
doc.addFunction(cvarray.cv2array)
doc.addFunction(cvarray.array2cv)
doc.addFunction(mp_map.repeat)
doc.addFunction(mp_map.mp_map)
doc.addFunction(setProcName.setProcName)
doc.addFunction(start_cpp.start_cpp)

# Classes...
doc.addClass(prog_bar.ProgBar)
doc.addClass(doc_gen.DocGen)
Python
#! /usr/bin/env python
# -*- coding: utf-8 -*-

# Copyright (c) 2010, Tom SF Haines
# All rights reserved.

# Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met:

# * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.

# * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.

# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.



from ctypes import *



def setProcName(name):
  """Sets the process name, linux only - useful for those programs where you might want to do a killall, but don't want to slaughter all the other python processes. Note that there are multiple mechanisms, and that the given new name can be shortened by differing amounts in differing cases. name should be a (Python 2) byte string."""
  # Call the process control function - 15 is PR_SET_NAME, which sets the name reported by e.g. plain 'ps' (the kernel truncates it to 16 bytes)...
  libc = cdll.LoadLibrary('libc.so.6')
  libc.prctl(15, c_char_p(name), 0, 0, 0)
  
  # Update argv, so tools that read /proc/<pid>/cmdline (e.g. 'ps -f') also see the new name...
  # NOTE(review): _dl_argv is a glibc dynamic-linker internal, not a public symbol - fragile; verify it exists on the target libc version.
  charPP = POINTER(POINTER(c_char))
  argv = charPP.in_dll(libc,'_dl_argv')
  size = libc.strlen(argv[0])
  # Copy at most the original argv[0] length, so the in-place overwrite never runs past the existing buffer (the name may be truncated as a result)...
  libc.strncpy(argv[0],c_char_p(name),size)



if __name__=='__main__':
  # Quick test that it works - dump the process list before and after the rename, via both mechanisms...
  import os
  
  ps1 = 'ps'
  ps2 = 'ps -f'
  
  os.system(ps1)
  os.system(ps2)
  
  setProcName('wibble_wobble')
  
  os.system(ps1)
  os.system(ps2)
Python
#! /usr/bin/env python # Copyright 2011 Tom SF Haines # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. import smp from utils import doc_gen # Setup... doc = doc_gen.DocGen('smp', 'Sparse Multinomial Posterior', 'Estimate a multinomial distribution, given sparse draws') doc.addFile('readme.txt', 'Overview') # Variables... doc.addVariable('smp_code', 'String containing the C++ code that does the actual work for the system.') # Classes... doc.addClass(smp.SMP) doc.addClass(smp.FlagIndexArray)
Python
# Copyright 2011 Tom SF Haines

# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.

# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.

# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.


import time

import multiprocessing as mp
import multiprocessing.synchronize # To make sure we have all the functionality.

from solve_shared import Params, State
from solve_weave import gibbs_run
from model import DocModel


def gibbs_run_wrap(state, doneIters):
    """Wrapper around gibbs_run to make it suitable for multiprocessing.

    state is the State to Gibbs sample (mutated in place and returned, so the
    parent process gets the worker's copy back via the pool result); doneIters
    is a manager-backed integer Value that the worker increments to report
    progress to the parent."""
    # NOTE: 'next' shadows the builtin, but only within this function - it is
    # the progress reporter handed to gibbs_run.
    def next(amount = 1):
        doneIters.value += amount
    gibbs_run(state, next)
    return state


def gibbs_all_mp(state, callback = None):
    """Identical to gibbs_all, except it does each run in a different process, to fully stress the computer.

    state is shared setup; callback, if given, is called repeatedly as callback(done, total) while waiting on the workers. Each run's result is folded back into state via absorbClone."""
    # Need the parameters object so we do the correct amount of work...
    params = state.getParams()

    # Create a pool of worker processes...
    pool = mp.Pool()

    # Create a value for sub-processes to report back their progress with...
    manager = mp.Manager()
    doneIters = manager.Value('i',0)
    totalIters = params.runs * (max((params.burnIn,params.lag)) + params.samples + (params.samples-1)*params.lag)

    # Create a callback for when a job completes - runs in this process, and
    # merges the returned clone into the master state...
    def onComplete(s):
        state.absorbClone(s)

    # Create all the jobs, wait for their completion, report progress...
    try:
        jobs = []
        for r in xrange(params.runs):
            # Each run gets its own copy of the state, so workers never share...
            jobs.append(pool.apply_async(gibbs_run_wrap,(State(state),doneIters), callback = onComplete))
    finally:
        # Close the pool and wait for all the jobs to complete...
        pool.close()

        # Poll-and-sleep loop, so progress can be reported while waiting...
        while len(jobs)!=0:
            if jobs[0].ready():
                del jobs[0]
                continue
            time.sleep(0.01)
            if callback!=None:
                callback(doneIters.value,totalIters)

        pool.join()


def gibbs_doc_mp(model, doc, params = None, callback = None):
    """Runs Gibbs iterations on a single document, by sampling with a prior constructed from each sample in the given Model. params applies to each sample, so should probably be much more limited than usual - the default if its undefined is to use 1 run and 1 sample and a burn in of only 500. Returns a DocModel with all the relevant samples in."""
    # Initialisation stuff - handle params, create the state and the DocModel object, plus a reporter...
    if params==None:
        params = Params()
        params.runs = 1
        params.samples = 1
        params.burnIn = 500

    state = State(doc, params)
    dm = DocModel()

    # Create a pool of worker processes...
    pool = mp.Pool()

    # Create a value for sub-processes to report back their progress with...
    manager = mp.Manager()
    doneIters = manager.Value('i',0)
    totalIters = model.sampleCount() * params.runs * (params.burnIn + params.samples + (params.samples-1)*params.lag)

    # Create a callback for when a job completes - accumulates each worker's
    # model into the returned DocModel...
    def onComplete(s):
        dm.addFrom(s.getModel())

    # Create all the jobs, wait for their completion, report progress...
    try:
        jobs = []
        for sample in model.sampleList():
            # One job per model sample - each seeds a fresh copy of the state
            # with that sample as its prior...
            tempState = State(state)
            tempState.setGlobalParams(sample)
            tempState.addPrior(sample)
            jobs.append(pool.apply_async(gibbs_run_wrap,(tempState,doneIters), callback = onComplete))
    finally:
        # Close the pool and wait for all the jobs to complete...
        pool.close()

        # Poll-and-sleep loop, so progress can be reported while waiting...
        while len(jobs)!=0:
            if jobs[0].ready():
                del jobs[0]
                continue
            time.sleep(0.01)
            if callback!=None:
                callback(doneIters.value,totalIters)

        pool.join()

    # Return...
    return dm
Python
# Copyright (c) 2011, Tom SF Haines
# All rights reserved.

# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#  * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
#  * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.

# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.


from utils.start_cpp import start_cpp


# Code for sampling from various distributions, including some very specific situations involving Dirichlet processes...
# (C++ source held as a string, compiled elsewhere via scipy.weave; randomness
# comes from lrand48/drand48, so seeding is the caller's responsibility.)
sampling_code = start_cpp() + """
#ifndef SAMPLING_CODE
#define SAMPLING_CODE

#include <stdlib.h>
#include <math.h>

const double gamma_approx = 32.0; // Threshold between the two methods of doing a gamma draw.

// Returns a sample from the natural numbers [0,n)...
// (Uses modulo, so for large n there is a slight bias towards low values.)
int sample_nat(int n)
{
  return lrand48()%n;
}

// Returns a sample from [0.0,1.0)...
double sample_uniform()
{
  return drand48();
  //return double(random())/(double(RAND_MAX)+1.0);
}

// Samples from a normal distribution with a mean of 0 and a standard deviation of 1...
// (Box-Muller; 1.0-u keeps the value strictly positive so log() is safe.)
double sample_standard_normal()
{
  double u = 1.0-sample_uniform();
  double v = 1.0-sample_uniform();
  return sqrt(-2.0*log(u)) * cos(2.0*M_PI*v);
}

// Samples from a normal distribution with the given mean and standard deviation...
double sample_normal(double mean, double sd)
{
  return mean + sd*sample_standard_normal();
}

// Samples from the Gamma distribution, base version that has no scaling parameter...
/*double sample_gamma(double alpha)
{
  // Check if the alpha value is high enough to approximate via a normal distribution...
  if (alpha>gamma_approx)
  {
    while (true)
    {
      double ret = sample_normal(alpha, sqrt(alpha));
      if (ret<0.0) continue;
      return ret;
    }
  }

  // First do the integer part of gamma(alpha)...
  double ret = 0.0; // 1.0
  while (alpha>=1.0)
  {
    alpha -= 1.0;
    //ret /= 1.0 - sample_uniform();
    ret -= log(1.0-sample_uniform());
  }
  //ret = log(ret);

  // Now do the remaining fractional part and sum it in - uses rejection sampling...
  if (alpha>1e-4)
  {
    while (true)
    {
      double u1 = 1.0 - sample_uniform();
      double u2 = 1.0 - sample_uniform();
      double u3 = 1.0 - sample_uniform();

      double frac, point;
      if (u1<=(M_E/(M_E+alpha)))
      {
        frac = pow(u2,1.0/alpha);
        point = u3*pow(frac,alpha-1.0);
      }
      else
      {
        frac = 1.0 - log(u2);
        point = u3*exp(-frac);
      }

      if (point<=(pow(frac,alpha-1.0)*exp(-frac)))
      {
        ret += frac;
        break;
      }
    }
  }

  // Finally return...
  return ret;
}*/

// As above, but faster...
double sample_gamma(double alpha)
{
  // Check if the alpha value is high enough to approximate via a normal distribution...
  if (alpha>gamma_approx)
  {
    while (true)
    {
      double ret = sample_normal(alpha, sqrt(alpha));
      if (ret<0.0) continue;
      return ret;
    }
  }

  // If alpha is one, within tolerance, just use an exponential distribution...
  if (fabs(alpha-1.0)<1e-4)
  {
    return -log(1.0-sample_uniform());
  }

  if (alpha>1.0)
  {
    // If alpha is 1 or greater use the Cheng/Feast method...
    // (lt2 is the cheap squeeze test; lt1 the exact log-based acceptance test.)
    while (true)
    {
      double u1 = sample_uniform();
      double u2 = sample_uniform();

      double v = ((alpha - 1.0/(6.0*alpha))*u1) / ((alpha-1.0)*u2);

      double lt2 = 2.0*(u2-1.0)/(alpha-1) + v + 1.0/v;
      if (lt2<=2.0) {return (alpha-1.0)*v;}

      double lt1 = 2.0*log(u2)/(alpha-1.0) - log(v) + v;
      if (lt1<=1.0) {return (alpha-1.0)*v;}
    }
  }
  else
  {
    // If alpha is less than 1 use a rejection sampling method...
    while (true)
    {
      double u1 = 1.0 - sample_uniform();
      double u2 = 1.0 - sample_uniform();
      double u3 = 1.0 - sample_uniform();

      double frac, point;
      if (u1<=(M_E/(M_E+alpha)))
      {
        frac = pow(u2,1.0/alpha);
        point = u3*pow(frac,alpha-1.0);
      }
      else
      {
        frac = 1.0 - log(u2);
        point = u3*exp(-frac);
      }

      if (point<=(pow(frac,alpha-1.0)*exp(-frac)))
      {
        return frac;
        break;
      }
    }
  }
}

// Samples from the Gamma distribution, version that has a scaling parameter...
// (beta is a rate - larger beta gives smaller draws.)
double sample_gamma(double alpha, double beta)
{
  return sample_gamma(alpha)/beta;
}

// Samples from the Beta distribution...
// (Standard construction from two gamma draws.)
double sample_beta(double alpha, double beta)
{
  double g1 = sample_gamma(alpha);
  double g2 = sample_gamma(beta);
  return g1 / (g1 + g2);
}

#endif
"""
Python
# Copyright (c) 2011, Tom SF Haines # All rights reserved. # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. # * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. from utils.python_obj_cpp import python_obj_code from linked_list_cpp import linked_list_gc_code from utils.gamma_cpp import gamma_code from sampling_cpp import sampling_code from conc_cpp import conc_code from dir_est_cpp import dir_est_code # Put all the suplied code together into one easy to use include... dp_utils_code = python_obj_code + linked_list_gc_code + gamma_code + sampling_code + conc_code + dir_est_code
Python
# Copyright (c) 2011, Tom SF Haines
# All rights reserved.

# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#  * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
#  * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.

# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.


from utils.start_cpp import start_cpp


# C++ code for Gibbs sampling the concentration parameter of a Dirichlet
# process - depends on sample_beta/sample_gamma/sample_uniform from
# sampling_code, so that snippet must be included first.
conc_code = start_cpp() + """
// This funky little function is used to resample the concentration parameter of a Dirichlet process, using the previous parameter - allows this parameter to be Gibbs sampled. Also works for any level of a HDP, due to the limited interactions.
// Parameters are:
//  pcp - previous concentration parameter.
//  n - number of samples taken from the Dirichlet process
//  k - number of discretly different samples, i.e. table count in the Chinese restaurant process.
//  prior_alpha - alpha value of the Gamma prior on the concentration parameter.
//  prior_beta - beta value of the Gamma prior on the concentration parameter.
double sample_dirichlet_proc_conc(double pcp, double n, double k, double prior_alpha = 1.01, double prior_beta = 0.01)
{
  if ((n<(1.0-1e-6))||(k<(2.0-1e-6)))
  {
    return pcp; // Doesn't work in this case, so just repeat.
  }

  // Auxiliary variable draw, then a gamma draw from a two-component mixture
  // whose member is chosen by the r_mod>=pi_n_mod test below...
  double nn = sample_beta(pcp+1.0, n);
  double log_nn = log(nn);

  double f_alpha = prior_alpha + k;
  double f_beta = prior_beta - log_nn;

  double pi_n_mod = (f_alpha - 1.0) / (n * f_beta);

  double r = sample_uniform();
  double r_mod = r / (1.0 - r);
  if (r_mod>=pi_n_mod) f_alpha -= 1.0;

  double ret = sample_gamma(f_alpha, f_beta);
  if (ret<1e-3) ret = 1e-3; // Floor, to keep later computation numerically safe.
  return ret;
}

// Class to represent the concentration parameter associated with a DP - consists of the prior and the previous/current value...
struct Conc
{
  float alpha; // Parameter for Gamma prior.
  float beta; // "
  float conc; // Previously sampled concentration value - needed for next sample, and for output/use.

  // Resamples the concentration value, assuming only a single DP is using it. n = number of samples from DP, k = number of unique samples, i.e. respectivly RefTotal() and Size() for a ListRef.
  void ResampleConc(int n, int k)
  {
    conc = sample_dirichlet_proc_conc(conc, n, k, alpha, beta);
    if (conc<1e-3) conc = 1e-3;
  }
};

// This class is the generalisation of the above for when multiple Dirichlet processes share a single concentration parameter - again allows a new concentration parameter to be drawn given the previous one and a Gamma prior, but takes multiple pairs of sample count/discrete sample counts, hence the class interface to allow it to accumilate the relevant information.
class SampleConcDP
{
 public:
  SampleConcDP():f_alpha(1.0),f_beta(1.0),prev_conc(1.0) {}
  ~SampleConcDP() {}

  // Sets the prior and resets the entire class....
  void SetPrior(double alpha, double beta)
  {
    f_alpha = alpha;
    f_beta = beta;
  }

  // Set the previous concetration parameter - must be called before any DP stats are added...
  void SetPrevConc(double prev)
  {
    prev_conc = prev;
  }

  // Call once for each DP that is using the concentration parameter...
  // (n is the number of samples drawn, k the number of discretly different samples.)
  void AddDP(double n, double k)
  {
    if (k>1.0)
    {
      // s and w are the per-DP auxiliary variables; they adjust the
      // posterior Gamma's shape and rate respectively...
      double s = 0.0;
      if (sample_uniform()>(1.0/(1.0+n/prev_conc))) s = 1.0;
      double w = sample_beta(prev_conc+1.0,n);

      f_alpha += k - s;
      f_beta -= log(w);
    }
  }

  // Once all DP have been added call this to draw a new concentration value...
  double Sample()
  {
    double ret = sample_gamma(f_alpha, f_beta);
    if (ret<1e-3) ret = 1e-3; // Floor, to keep later computation numerically safe.
    return ret;
  }

 private:
  double f_alpha; // Accumulated shape of the posterior Gamma.
  double f_beta; // Accumulated rate of the posterior Gamma.
  double prev_conc; // Concentration value from the previous Gibbs iteration.
};
"""
Python
# Copyright (c) 2011, Tom SF Haines
# All rights reserved.

# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#  * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
#  * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.

# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.


from scipy import weave
import unittest
from utils.start_cpp import start_cpp


# Defines code for a doubly linked list - simple but works as expected... (Includes its data via templated inheritance - a little strange, but neat and saves on memory thrashing.)
linked_list_code = start_cpp() + """
// Predefinitions...
template <typename ITEM, typename BODY> class Item;
template <typename ITEM, typename BODY> class List;

// Useful default...
struct Empty {};

// Item for the linked list data structure - simply inherits extra data stuff...
template <typename ITEM = Empty, typename BODY = Empty>
class Item : public ITEM
{
 public:
  Item(List<ITEM,BODY> * head):head(head),next(this),prev(this) {}
  ~Item() {}

  Item<ITEM,BODY> * Next() {return next;}
  Item<ITEM,BODY> * Prev() {return prev;}
  List<ITEM,BODY> * GetList() {return head;}

  // The list's dummy node is its own head, hence these identity tests...
  bool Valid() {return static_cast< Item<ITEM,BODY>* >(head)!=this;}
  bool IsDummy() {return static_cast< Item<ITEM,BODY>* >(head)==this;}

  Item<ITEM,BODY> * PreNew() // Adds a new item before this one.
  {
   Item<ITEM,BODY> * ret = new Item<ITEM,BODY>(head);
   head->size += 1;

   ret->prev = this->prev;
   ret->next = this;
   ret->prev->next = ret;
   ret->next->prev = ret;

   return ret;
  }

  Item<ITEM,BODY> * PostNew() // Adds a new item after this one.
  {
   Item<ITEM,BODY> * ret = new Item<ITEM,BODY>(head);
   head->size += 1;

   ret->prev = this;
   ret->next = this->next;
   ret->prev->next = ret;
   ret->next->prev = ret;

   return ret;
  }

  void Suicide() // Removes this node from its list and makes it delete itself.
  {
   head->size -= 1;
   next->prev = prev;
   prev->next = next;
   delete this;
  }

 protected:
  List<ITEM,BODY> * head;
  Item<ITEM,BODY> * next;
  Item<ITEM,BODY> * prev;
};

// Simple totally inline doubly linked list structure...
template <typename ITEM = Empty, typename BODY = Empty>
class List : protected Item<ITEM,BODY>
{
 public:
  List():Item<ITEM,BODY>(this),size(0) {}
  ~List()
  {
   while(this->size!=0) {this->next->Suicide();}
  }

  Item<ITEM,BODY> * Append() {return this->PreNew();}
  Item<ITEM,BODY> * Prepend() {return this->PostNew();}

  Item<ITEM,BODY> * First() {return this->next;}
  Item<ITEM,BODY> * Last() {return this->prev;}

  int Size() {return this->size;}
  BODY & Body() {return body;}

  Item<ITEM,BODY> * Index(int i) // O(i) walk from the front - no bounds check.
  {
   Item<ITEM,BODY> * ret = this->next;
   while(i>0)
   {
    ret = ret->next;
    i -= 1;
   }
   return ret;
  }

 protected:
  friend class Item<ITEM,BODY>;
  int size;
  BODY body;
};
"""


class TestLinkedList(unittest.TestCase):
    """Test code for the linked list."""

    def test_compile(self):
        # Fixed: previously referenced the undefined name 'linked_list' - the
        # module-level string is called linked_list_code.
        code = start_cpp(linked_list_code) + """
        """
        weave.inline(code, support_code=linked_list_code)

    def test_size(self):
        # Fixed: same undefined-name bug as test_compile.
        code = start_cpp(linked_list_code) + """
        int errors = 0;

        List<> wibble;
        if (wibble.Size()!=0) errors += 1;

        Item<> * it = wibble.Append();
        if (wibble.Size()!=1) errors += 1;

        it->Suicide();
        if (wibble.Size()!=0) errors += 1;

        return_val = errors;
        """
        errors = weave.inline(code, support_code=linked_list_code)
        self.assertEqual(errors,0)

    def test_loop(self):
        extra = """
        struct Number
        {
         int num;
        };
        """

        code = start_cpp(linked_list_code+extra) + """
        int errors = 0;

        List<Number> wibble;
        for (int i=0;i<10;i++)
        {
         Item<Number> * it = wibble.Append();
         it->num = i;
        }
        if (wibble.Size()!=10) errors += 1;

        int i = 0;
        for (Item<Number> * targ = wibble.First(); targ->Valid(); targ = targ->Next())
        {
         if (i!=targ->num) errors += 1;
         i += 1;
        }

        return_val = errors;
        """
        errors = weave.inline(code, support_code=linked_list_code+extra)
        self.assertEqual(errors,0)


# Code for a linked list with garbage collection - each entry has a reference count, and it also allows access of the reference counts and the total number of reference counts for all entrys. This structure is very useful for modelling a Dirichlet process as a direct consequence, as it has all its properties...
linked_list_gc_code = linked_list_code + start_cpp() + """
// Predefinitions...
template <typename ITEM, typename BODY> class ItemRef;
template <typename ITEM, typename BODY> class ListRef;

// Item for the linked list data structure - simply inherits extra data stuff...
template <typename ITEM = Empty, typename BODY = Empty>
class ItemRef : public ITEM
{
 public:
  ItemRef(ListRef<ITEM,BODY> * head):head(head),next(this),prev(this),refCount(0) {}
  ~ItemRef() {}

  ItemRef<ITEM,BODY> * Next() {return next;}
  ItemRef<ITEM,BODY> * Prev() {return prev;}
  ListRef<ITEM,BODY> * GetList() {return head;}

  bool Valid() {return static_cast< ItemRef<ITEM,BODY>* >(head)!=this;}
  bool IsDummy() {return static_cast< ItemRef<ITEM,BODY>* >(head)==this;}

  ItemRef<ITEM,BODY> * PreNew() // Adds a new item before this one.
  {
   ItemRef<ITEM,BODY> * ret = new ItemRef<ITEM,BODY>(head);
   head->size += 1;

   ret->prev = this->prev;
   ret->next = this;
   ret->prev->next = ret;
   ret->next->prev = ret;

   return ret;
  }

  ItemRef<ITEM,BODY> * PostNew() // Adds a new item after this one.
  {
   ItemRef<ITEM,BODY> * ret = new ItemRef<ITEM,BODY>(head);
   head->size += 1;

   ret->prev = this;
   ret->next = this->next;
   ret->prev->next = ret;
   ret->next->prev = ret;

   return ret;
  }

  void Suicide() // Removes this node from its list and makes it delete itself.
  {
   head->size -= 1;
   head->refTotal -= refCount;
   next->prev = prev;
   prev->next = next;
   delete this;
  }

  void IncRef(int amount = 1)
  {
   this->refCount += amount;
   head->refTotal += amount;
  }

  void DecRef(int amount = 1) // If the ref count reaches zero the object will delete itself.
  {
   this->refCount -= amount;
   head->refTotal -= amount;
   if (refCount<=0) this->Suicide();
  }

  int RefCount() {return refCount;}

 protected:
  ListRef<ITEM,BODY> * head;
  ItemRef<ITEM,BODY> * next;
  ItemRef<ITEM,BODY> * prev;
  int refCount;
};

// Simple totally inline doubly linked list structure...
template <typename ITEM = Empty, typename BODY = Empty>
class ListRef : protected ItemRef<ITEM,BODY>
{
 public:
  ListRef():ItemRef<ITEM,BODY>(this),size(0),refTotal(0) {}
  ~ListRef()
  {
   while(this->size!=0) {this->next->Suicide();}
  }

  ItemRef<ITEM,BODY> * Append() {return this->PreNew();}
  ItemRef<ITEM,BODY> * Prepend() {return this->PostNew();}

  ItemRef<ITEM,BODY> * First() {return this->next;}
  ItemRef<ITEM,BODY> * Last() {return this->prev;}

  int Size() {return this->size;}
  int RefTotal() {return this->refTotal;}
  BODY & Body() {return body;}

  ItemRef<ITEM,BODY> * Index(int i)
  {
   ItemRef<ITEM,BODY> * ret = this->next;
   while(i>0)
   {
    ret = ret->Next();
    i -= 1;
   }
   return ret;
  }

 protected:
  friend class ItemRef<ITEM,BODY>;
  int size;
  int refTotal;
  BODY body;
};
"""


class TestLinkedListGC(unittest.TestCase):
    """Test code for the linked list with garbage collection."""

    def test_compile(self):
        # Fixed: previously referenced the undefined name 'linked_list_gc' -
        # the module-level string is called linked_list_gc_code.
        code = start_cpp(linked_list_gc_code) + """
        """
        weave.inline(code, support_code=linked_list_gc_code)

    def test_size_gc(self):
        code = start_cpp(linked_list_gc_code) + """
        int errors = 0;

        ListRef<> wibble;
        if (wibble.Size()!=0) errors += 1;

        ItemRef<> * it = wibble.Append();
        if (wibble.Size()!=1) errors += 1;
        if (wibble.RefTotal()!=0) errors += 1;

        it->IncRef();
        it->IncRef();
        if (it->RefCount()!=2) errors += 1;
        if (wibble.RefTotal()!=2) errors += 1;

        it->DecRef();
        it->DecRef();
        if (wibble.RefTotal()!=0) errors += 1;
        if (wibble.Size()!=0) errors += 1;

        return_val = errors;
        """
        errors = weave.inline(code, support_code=linked_list_gc_code)
        self.assertEqual(errors,0)


# If this file is run do the unit tests...
if __name__ == '__main__':
    unittest.main()
Python
# Copyright (c) 2011, Tom SF Haines
# All rights reserved.

# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#  * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
#  * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.

# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.


from utils.start_cpp import start_cpp


# Provides code for estimating the Dirichlet distribution from which a number of multinomial distributions were drawn from, given those multinomials...
# (Requires digamma/trigamma, supplied by gamma_code when assembled in dp_utils_cpp.)
dir_est_code = start_cpp() + """
// Defined as a class - you then add each multinomial before requesting a maximum likelihood update of the Dirichlet distribution. It uses Newton-Raphson iterations, and so needs a starting point - you provide a vector to be updated, which can of course save time if it is already close...
class EstimateDir
{
 public:
  // vecSize is the dimension of the Dirichlet to estimate.
  EstimateDir(int vecSize):size(vecSize), samples(0), meanLog(new double[vecSize]), grad(new double[vecSize]), qq(new double[vecSize])
  {
   for (int i=0;i<vecSize;i++) meanLog[i] = 0.0;
  }
  ~EstimateDir() {delete[] meanLog; delete[] grad; delete[] qq;}

  // Folds one multinomial (length size, entries must be strictly positive so log is finite) into the running mean-of-logs, which is the sufficient statistic...
  void Add(float * mn)
  {
   samples += 1;
   for (int i=0;i<size;i++)
   {
    meanLog[i] += (log(mn[i]) - meanLog[i]) / double(samples);
   }
  }

  void Add(double * mn)
  {
   samples += 1;
   for (int i=0;i<size;i++)
   {
    meanLog[i] += (log(mn[i]) - meanLog[i]) / double(samples);
   }
  }

  // Newton-Raphson refinement of dir (length size) towards the ML Dirichlet given the added multinomials; the Hessian is diagonal-plus-constant, so each step is O(size)...
  void Update(float * dir, int maxIter = 64, float epsilon = 1e-3, float cap = 1e6)
  {
   for (int iter=0;iter<maxIter;iter++)
   {
    // We will need the sum of the dir vector...
    double dirSum = 0.0;
    for (int i=0;i<size;i++) {dirSum += dir[i];}

    // Check for NaN/inf - if so reset to basic value...
    // (BUG FIX: was dirSum==dirSum, which is true for every normal number
    // and hence reset the estimate on every iteration; x!=x is the NaN test.)
    if ((dirSum!=dirSum) || (dirSum>1e100))
    {
     for (int i=0;i<size;i++) dir[i] = 1.0;
     dirSum = size;
    }

    // Safety - don't let it get too precise, that probably means its being crazy (Can happen with too few samples.)...
    if (dirSum>cap)
    {
     float mult = cap / dirSum;
     for (int i=0;i<size;i++) {dir[i] *= mult;}
     dirSum = cap;
    }

    // Calculate the gradiant and the Hessian 'matrix', except its actually diagonal...
    double digDirSum = digamma(dirSum);
    for (int i=0;i<size;i++)
    {
     grad[i] = samples * (digDirSum - digamma(dir[i]) + meanLog[i]);
     qq[i] = -samples * trigamma(dir[i]);
    }

    // Calculate b...
    double b = 0.0;
    double bDiv = 1.0 / (samples*trigamma(dirSum));
    for (int i=0;i<size;i++)
    {
     b += grad[i]/qq[i];
     bDiv += 1.0/qq[i];
    }
    b /= bDiv;

    // Do the update, sum the change...
    double change = 0.0;
    for (int i=0;i<size;i++)
    {
     double delta = (grad[i] - b) / qq[i];
     dir[i] -= delta;
     if (dir[i]<1e-3) dir[i] = 1e-3; // Keep strictly positive - digamma/trigamma need it.
     change += fabs(delta);
    }

    // Break if no change...
    if (change<epsilon) break;
   }
  }

  void Update(double * dir, int maxIter = 64, double epsilon = 1e-6, double cap = 1e6)
  {
   for (int iter=0;iter<maxIter;iter++)
   {
    // We will need the sum of the dir vector...
    double dirSum = 0.0;
    for (int i=0;i<size;i++) {dirSum += dir[i];}

    // Check for NaN/inf - if so reset to basic value...
    // (BUG FIX: was dirSum==dirSum - see the float version above.)
    if ((dirSum!=dirSum) || (dirSum>1e100))
    {
     for (int i=0;i<size;i++) dir[i] = 1.0;
     dirSum = size;
    }

    // Safety - don't let it get too precise, that probably means its being crazy (Can happen with too few samples.)...
    if (dirSum>cap)
    {
     double mult = cap / dirSum;
     for (int i=0;i<size;i++) {dir[i] *= mult;}
     dirSum = cap;
    }

    // Calculate the gradiant and the Hessian 'matrix', except its actually diagonal...
    double digDirSum = digamma(dirSum);
    for (int i=0;i<size;i++)
    {
     grad[i] = samples * (digDirSum - digamma(dir[i]) + meanLog[i]);
     qq[i] = -samples * trigamma(dir[i]);
    }

    // Calculate b...
    double b = 0.0;
    double bDiv = 1.0 / (samples*trigamma(dirSum));
    for (int i=0;i<size;i++)
    {
     b += grad[i]/qq[i];
     bDiv += 1.0/qq[i];
    }
    b /= bDiv;

    // Do the update, sum the change...
    double change = 0.0;
    for (int i=0;i<size;i++)
    {
     double delta = (grad[i] - b) / qq[i];
     dir[i] -= delta;
     if (dir[i]<1e-3) dir[i] = 1e-3; // Clamp added for parity with the float version - prevents non-positive alphas.
     change += fabs(delta);
    }

    // Break if no change...
    if (change<epsilon) break;
   }
  }

 private:
  int size; // Dimension of the Dirichlet.
  int samples; // Number of multinomials folded in so far.
  double * meanLog; // Vector of length size, contains the component-wise mean of the log of each of the samples - consititutes the sufficient statistics required to do the update.
  double * grad; // Temporary during update.
  double * qq; // Temporary during update.
};
"""
Python
# Copyright (c) 2012, Tom SF Haines
# All rights reserved.

# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:

# * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.

# * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.

# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.


from start_cpp import start_cpp


# Some basic matrix operations that come in use...
# NOTE: the triple-quoted string below is C++ source handed to scipy.weave as
# support code (start_cpp() prefixes a #line directive for debuggable errors).
# Its contents are runtime data compiled by weave, not Python - do not edit
# casually; the include guard lets several weave snippets share it safely.
matrix_code = start_cpp() + """
#ifndef MATRIX_CODE
#define MATRIX_CODE

template <typename T>
inline void MemSwap(T * lhs, T * rhs, int count = 1)
{
 while(count!=0)
 {
  T t = *lhs;
  *lhs = *rhs;
  *rhs = t;
  ++lhs;
  ++rhs;
  --count;
 }
}

// Calculates the determinant - you give it a pointer to the first elment of the array, and its size (It must be square), plus its stride, which would typically be identical to size, which is the default.
template <typename T>
inline T Determinant(T * pos, int size, int stride = -1)
{
 if (stride==-1) stride = size;
 if (size==1) return pos[0];
 else
 {
  if (size==2) return pos[0]*pos[stride+1] - pos[1]*pos[stride];
  else
  {
   T ret = 0.0;
   for (int i=0; i<size; i++)
   {
    if (i!=0) MemSwap(&pos[0], &pos[stride*i], size-1);
    T sub = Determinant(&pos[stride], size-1, stride) * pos[stride*i + size-1];
    if ((i+size)%2) ret += sub;
               else ret -= sub;
   }

   for (int i=1; i<size; i++)
   {
    MemSwap(&pos[(i-1)*stride], &pos[i*stride], size-1);
   }

   return ret;
  }
 }
}

// Inverts a square matrix, will fail on singular and very occasionally on
// non-singular matrices, returns true on success. Uses Gauss-Jordan elimination
// with partial pivoting.
// in is the input matrix, out the output matrix, just be aware that the input matrix is trashed.
// You have to provide its size (Its square, obviously.), and optionally a stride if different from size.
template <typename T>
inline bool Inverse(T * in, T * out, int size, int stride = -1)
{
 if (stride==-1) stride = size;

 for (int r=0; r<size; r++)
 {
  for (int c=0; c<size; c++)
  {
   out[r*stride + c] = (c==r)?1.0:0.0;
  }
 }

 for (int r=0; r<size; r++)
 {
  // Find largest pivot and swap in, fail if best we can get is 0...
  T max = in[r*stride + r];
  int index = r;
  for (int i=r+1; i<size; i++)
  {
   if (fabs(in[i*stride + r])>fabs(max))
   {
    max = in[i*stride + r];
    index = i;
   }
  }

  if (index!=r)
  {
   MemSwap(&in[index*stride], &in[r*stride], size);
   MemSwap(&out[index*stride], &out[r*stride], size);
  }

  if (fabs(max-0.0)<1e-6) return false;

  // Divide through the entire row...
  max = 1.0/max;
  in[r*stride + r] = 1.0;
  for (int i=r+1; i<size; i++) in[r*stride + i] *= max;
  for (int i=0; i<size; i++) out[r*stride + i] *= max;

  // Row subtract to generate 0's in the current column, so it matches an identity matrix...
  for (int i=0; i<size; i++)
  {
   if (i==r) continue;
   T factor = in[i*stride + r];
   in[i*stride + r] = 0.0;
   for (int j=r+1; j<size; j++) in[i*stride + j] -= factor * in[r*stride + j];
   for (int j=0; j<size; j++) out[i*stride + j] -= factor * out[r*stride + j];
  }
 }

 return true;
}

#endif
"""
Python
# -*- coding: utf-8 -*-

# Copyright (c) 2010, Tom SF Haines
# All rights reserved.

# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:

# * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.

# * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.

# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.


import sys
import time


class ProgBar:
  """Simple console progress bar class.

  Note that object creation and destruction matter, as they indicate when processing starts and when it stops."""

  def __init__(self, width = 60, onCallback = None):
    # Remember when we started so __del__ can report the elapsed time...
    self.start = time.time()
    self.fill = 0  # How many bar characters have been emitted so far.
    self.width = width
    self.onCallback = onCallback

    # Print a ruler of the full width, so the user can judge progress against it...
    ruler = '_' * self.width
    sys.stdout.write(ruler + '\n')
    sys.stdout.flush()

  def __del__(self):
    # Complete the bar, then report how long everything took...
    self.end = time.time()
    self.__show(self.width)

    elapsed = self.end - self.start
    sys.stdout.write('\nDone - ' + str(elapsed) + ' seconds\n\n')
    sys.stdout.flush()

  def callback(self, nDone, nToDo):
    """Hand this into the callback of methods to get a progress bar - it works by users repeatedly calling it to indicate how many units of work they have done (nDone) out of the total number of units required (nToDo)."""
    if self.onCallback:
      self.onCallback()

    # Work out how many bar characters should now be visible, clamped to the
    # bar width, and only draw if that is more than already shown...
    target = int(float(self.width) * float(nDone) / float(nToDo))
    target = min((target, self.width))
    if target > self.fill:
      self.__show(target)

  def __show(self, n):
    # Emit just the bar characters that have not been printed yet...
    sys.stdout.write('|' * (n - self.fill))
    sys.stdout.flush()
    self.fill = n
Python
# Copyright (c) 2012, Tom SF Haines
# All rights reserved.

# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:

# * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.

# * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.

# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.


# NOTE: this module is Python 2 code - it relies on the builtin reduce() and
# on map()/filter()/inspect.getmembers() returning lists.
import pydoc
import inspect


class DocGen:
  """A helper class that is used to generate documentation for the system. Outputs multiple formats simultaneously, specifically html for local reading with a webbrowser and the markup used by the wiki system on Google code."""
  def __init__(self, name, title = None, summary = None):
    """name is the module name - primarily used for the file names. title is the title used as applicable - if not provided it just uses the name.
    summary is an optional line to go below the title."""
    if title==None: title = name
    if summary==None: summary = title

    # pydoc's HTML renderer does the heavy lifting for the html output...
    self.doc = pydoc.HTMLDoc()

    # Open '<name>.html' and write the page header; the body sections are
    # accumulated in the *_variables/_functions/_classes strings and only
    # flushed out in __del__...
    self.html = open('%s.html'%name,'w')
    self.html.write('<html>\n')
    self.html.write('<head>\n')
    self.html.write('<title>%s</title>\n'%title)
    self.html.write('</head>\n')
    self.html.write('<body>\n')
    self.html_variables = ''
    self.html_functions = ''
    self.html_classes = ''

    # Same pattern for the Google-code wiki output, written to '<name>.wiki'...
    self.wiki = open('%s.wiki'%name,'w')
    self.wiki.write('#summary %s\n\n'%summary)
    self.wiki.write('= %s= \n\n'%title)
    self.wiki_variables = ''
    self.wiki_functions = ''
    self.wiki_classes = ''

  def __del__(self):
    # Destruction finalises both files: flush the accumulated sections in a
    # fixed order (variables, functions, classes) then close them. This means
    # the object must be garbage collected for the output to be complete...
    if self.html_variables!='':
      self.html.write(self.doc.bigsection('Synonyms', '#ffffff', '#8d50ff', self.html_variables))
    if self.html_functions!='':
      self.html.write(self.doc.bigsection('Functions', '#ffffff', '#eeaa77', self.html_functions))
    if self.html_classes!='':
      self.html.write(self.doc.bigsection('Classes', '#ffffff', '#ee77aa', self.html_classes))

    self.html.write('</body>\n')
    self.html.write('</html>\n')
    self.html.close()

    if self.wiki_variables!='':
      self.wiki.write('= Variables =\n\n')
      self.wiki.write(self.wiki_variables)
      self.wiki.write('\n')
    if self.wiki_functions!='':
      self.wiki.write('= Functions =\n\n')
      self.wiki.write(self.wiki_functions)
      self.wiki.write('\n')
    if self.wiki_classes!='':
      self.wiki.write('= Classes =\n\n')
      self.wiki.write(self.wiki_classes)
      self.wiki.write('\n')
    self.wiki.close()

  def addFile(self, fn, title, fls = True):
    """Given a filename and section title adds the contents of said file to the output.
    Various flags influence how this works: fls bolds the file's first line."""
    html = []
    wiki = []

    for i, line in enumerate(open(fn,'r').readlines()):
      # html version of the line: strip the newline, bold the first line if
      # requested, and italicise any '<file>.py - ' / '<file>.txt - ' prefix...
      hl = line.replace('\n', '')
      if i==0 and fls: hl = '<strong>' + hl + '</strong>'
      for ext in ['py','txt']:
        if '.%s - '%ext in hl:
          s = hl.split('.%s - '%ext, 1)
          hl = '<i>' + s[0] + '.%s</i> - '%ext + s[1]
      html.append(hl)

      # wiki version of the same line, using wiki bold/code markup instead...
      wl = line.strip()
      if i==0 and fls: wl = '*%s*'%wl
      for ext in ['py','txt']:
        if '.%s - '%ext in wl:
          s = wl.split('.%s - '%ext, 1)
          wl = '`' + s[0] + '.%s` - '%ext + s[1] + '\n'
      wiki.append(wl)

    # Unlike the add* methods below this writes straight to the files...
    self.html.write(self.doc.bigsection(title, '#ffffff', '#7799ee', '<br/>'.join(html)))

    self.wiki.write('== %s ==\n'%title)
    self.wiki.write('\n'.join(wiki))
    self.wiki.write('----\n\n')

  def addVariable(self, var, desc):
    """Adds a variable to the documentation. Given the nature of this you provide it as a pair of strings - one referencing the variable, the other some kind of description of its use etc.."""
    self.html_variables += '<strong>%s</strong><br/>'%var
    self.html_variables += '%s<br/><br/>\n'%desc

    self.wiki_variables += '*`%s`*\n'%var
    self.wiki_variables += ' %s\n\n'%desc

  def addFunction(self, func):
    """Adds a function to the documentation. You provide the actual function instance."""
    # html output comes straight from pydoc...
    self.html_functions += self.doc.docroutine(func).replace('&nbsp;',' ')
    self.html_functions += '\n'

    # For the wiki output reconstruct the signature by hand from getargspec...
    name = func.__name__
    args, varargs, keywords, defaults = inspect.getargspec(func)
    doc = inspect.getdoc(func)

    # Left-pad defaults with None so it lines up 1:1 with args...
    if defaults==None: defaults = list()
    defaults = (len(args)-len(defaults)) * [None] + list(defaults)

    arg_str = ''
    if len(args)!=0:
      arg_str += reduce(lambda a, b: '%s, %s'%(a,b), map(lambda arg, d: arg if d==None else '%s = %s'%(arg,d), args, defaults))
    if varargs!=None: arg_str += ', *%s'%varargs if arg_str!='' else '*%s'%varargs
    if keywords!=None: arg_str += ', **%s'%keywords if arg_str!='' else '**%s'%keywords

    self.wiki_functions += '*`%s(%s)`*\n'%(name, arg_str)
    self.wiki_functions += ' %s\n\n'%doc

  def addClass(self, cls):
    """Adds a class to the documentation.
    You provide the actual class object."""
    # html output comes straight from pydoc...
    self.html_classes += self.doc.docclass(cls).replace('&nbsp;',' ')
    self.html_classes += '\n'

    # Wiki heading: class name plus its bases (excluding itself)...
    name = cls.__name__
    parents = filter(lambda a: a!=cls, inspect.getmro(cls))
    doc = inspect.getdoc(cls)

    par_str = ''
    if len(parents)!=0:
      par_str += reduce(lambda a, b: '%s, %s'%(a,b), map(lambda p: p.__name__, parents))

    self.wiki_classes += '== %s(%s) ==\n'%(name, par_str)
    self.wiki_classes += ' %s\n\n'%doc

    # Document the methods, sorted alphabetically but with __init__ forced to
    # the front (the '___' key sorts before any identifier)...
    methods = inspect.getmembers(cls, lambda x: inspect.ismethod(x) or inspect.isbuiltin(x) or inspect.isroutine(x))
    def method_key(pair):
      if pair[0]=='__init__': return '___'
      else: return pair[0]
    methods.sort(key=method_key)

    for name, method in methods:
      # NOTE(review): this condition looks suspect - 'not inspect.ismethod(method)'
      # excludes ordinary bound/unbound methods, making the 'if inspect.ismethod'
      # branch below unreachable. Possibly meant to be 'or'; confirm against
      # intended output before changing.
      if not name.startswith('_%s'%cls.__name__) and (not inspect.ismethod(method) and name[:2]!='__'):
        if inspect.ismethod(method):
          args, varargs, keywords, defaults = inspect.getargspec(method)
        else:
          # Builtins/C routines have no introspectable signature...
          args = ['?']
          varargs = None
          keywords = None
          defaults = None

        # Same signature reconstruction as addFunction...
        if defaults==None: defaults = list()
        defaults = (len(args)-len(defaults)) * [None] + list(defaults)

        arg_str = ''
        if len(args)!=0:
          arg_str += reduce(lambda a, b: '%s, %s'%(a,b), map(lambda arg, d: arg if d==None else '%s = %s'%(arg,d), args, defaults))
        if varargs!=None: arg_str += ', *%s'%varargs if arg_str!='' else '*%s'%varargs
        if keywords!=None: arg_str += ', **%s'%keywords if arg_str!='' else '**%s'%keywords

        # Walk up the mro to find a docstring for the method if the class
        # itself does not provide one...
        def fetch_doc(cls, name):
          try:
            method = getattr(cls, name)
            if method.__doc__!=None:
              return inspect.getdoc(method)
          except: pass

          for parent in filter(lambda a: a!=cls, inspect.getmro(cls)):
            ret = fetch_doc(parent, name)
            if ret!=None: return ret

          return None
        doc = fetch_doc(cls, name)

        self.wiki_classes += '*`%s(%s)`*\n'%(name, arg_str)
        self.wiki_classes += ' %s\n\n'%doc

    # Document simple class-level variables (descriptors and int/str/float constants)...
    variables = inspect.getmembers(cls, lambda x: inspect.ismemberdescriptor(x) or isinstance(x, int) or isinstance(x, str) or isinstance(x, float))
    for name, var in variables:
      if not name.startswith('__'):
        if hasattr(var, '__doc__'): d = var.__doc__
        else: d = str(var)
        self.wiki_classes += '*`%s`* = %s\n\n'%(name, d)
Python
# Copyright (c) 2011, Tom SF Haines
# All rights reserved.

# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:

# * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.

# * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.

# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.


# NOTE: this module is Python 2 only - scipy.weave does not exist for Python 3
# and the tests below use xrange.
import unittest
import random
import math

from scipy.special import gammaln, psi, polygamma
from scipy import weave
from utils.start_cpp import start_cpp


# Provides various gamma-related functions...
# The string below is C++ support code for scipy.weave; it is runtime data and
# the scipy functions imported above are the reference implementations the
# unit tests compare it against.
gamma_code = start_cpp() + """
#ifndef GAMMA_CODE
#define GAMMA_CODE

#include <cmath>

// Returns the natural logarithm of the Gamma function...
// (Uses Lanczos's approximation.)
double lnGamma(double z)
{
 static const double coeff[9] = {0.99999999999980993, 676.5203681218851, -1259.1392167224028, 771.32342877765313, -176.61502916214059, 12.507343278686905, -0.13857109526572012, 9.9843695780195716e-6, 1.5056327351493116e-7};

 if (z<0.5)
 {
  // Use reflection formula, as approximation doesn't work down here...
  return log(M_PI) - log(sin(M_PI*z)) - lnGamma(1.0-z);
 }
 else
 {
  double x = coeff[0];
  for (int i=1;i<9;i++) x += coeff[i]/(z+i-1);

  double t = z + 6.5;
  return log(sqrt(2.0*M_PI)) + (z-0.5)*log(t) - t + log(x);
 }
}

// Calculates the Digamma function, i.e. the derivative of the log of the Gamma function - uses a partial expansion of an infinite series to 4 terms that is good for high values, and an identity to express lower values in terms of higher values...
double digamma(double z)
{
 static const double highVal = 13.0; // A bit of fiddling shows that the last term with this is of the order 1e-10, so we can expect at least 9 digits of accuracy past the decimal point.

 double ret = 0.0;
 while (z<highVal)
 {
  ret -= 1.0/z;
  z += 1.0;
 }

 double iz1 = 1.0/z;
 double iz2 = iz1*iz1;
 double iz4 = iz2*iz2;
 double iz6 = iz4*iz2;

 ret += log(z) - iz1/2.0 - iz2/12.0 + iz4/120.0 - iz6/252.0;

 return ret;
}

// Calculates the trigamma function - uses a partial expansion of an infinite series that is accurate for large values, and then uses an identity to express lower values in terms of higher values - same approach as for the digamma function basically...
double trigamma(double z)
{
 static const double highVal = 8.0;

 double ret = 0.0;
 while (z<highVal)
 {
  ret += 1.0/(z*z);
  z += 1.0;
 }
 z -= 1.0;

 double iz1 = 1.0/z;
 double iz2 = iz1*iz1;
 double iz3 = iz1*iz2;
 double iz5 = iz3*iz2;
 double iz7 = iz5*iz2;
 double iz9 = iz7*iz2;

 ret += iz1 - 0.5*iz2 + iz3/6.0 - iz5/30.0 + iz7/42.0 - iz9/30.0;

 return ret;
}

#endif
"""


def lnGamma(z):
  """Pointless as scipy, a library this is dependent on, defines this, but useful for testing. Returns the logarithm of the gamma function."""
  # Passing gamma_code to start_cpp embeds its hash, forcing weave to
  # recompile when the support code changes...
  code = start_cpp(gamma_code) + """
  return_val = lnGamma(z);
  """
  return weave.inline(code, ['z'], support_code=gamma_code)


def digamma(z):
  """Pointless as scipy, a library this is dependent on, defines this, but useful for testing.
  Returns an evaluation of the digamma function."""
  code = start_cpp(gamma_code) + """
  return_val = digamma(z);
  """
  return weave.inline(code, ['z'], support_code=gamma_code)


def trigamma(z):
  """Pointless as scipy, a library this is dependent on, defines this, but useful for testing.
  Returns an evaluation of the trigamma function."""
  code = start_cpp(gamma_code) + """
  return_val = trigamma(z);
  """
  return weave.inline(code, ['z'], support_code=gamma_code)


class TestFuncs(unittest.TestCase):
  """Test code for the assorted gamma-related functions - each C++ implementation is compared against the matching scipy.special function on random inputs."""
  def test_compile(self):
    # Just checks the support code compiles at all...
    code = start_cpp(gamma_code) + """
    """
    weave.inline(code, support_code=gamma_code)

  def test_error_lngamma(self):
    for _ in xrange(1000):
      z = random.uniform(0.01, 100.0)
      own = lnGamma(z)
      good = gammaln(z)
      assert(math.fabs(own-good)<1e-12)

  def test_error_digamma(self):
    for _ in xrange(1000):
      z = random.uniform(0.01, 100.0)
      own = digamma(z)
      good = psi(z)
      assert(math.fabs(own-good)<1e-9)

  def test_error_trigamma(self):
    for _ in xrange(1000):
      z = random.uniform(0.01, 100.0)
      own = trigamma(z)
      good = polygamma(1,z)
      assert(math.fabs(own-good)<1e-9)


# If this file is run do the unit tests...
if __name__ == '__main__':
  unittest.main()
Python
# -*- coding: utf-8 -*-

# Copyright (c) 2010, Tom SF Haines
# All rights reserved.

# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:

# * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.

# * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.

# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.


import inspect
import hashlib


def start_cpp(hash_str = None):
  """This method does two things - firstly it adds the correct line numbers to scipy.weave code (Good for debugging) and secondly it can optionally insert a hash code of some other code into the code. This latter feature is useful for working around the fact that scipy.weave only recompiles if the hash of the code changes, but ignores the support_code - passing the support_code into start_cpp avoids this problem by putting its hash into the code and forcing a recompile when that code changes.
  Usage is <code variable> = start_cpp([support_code variable]) + <3 quotations to start big comment with code in, typically going over many lines.>"""
  # Grab the caller's frame so the emitted #line directive points at the
  # python file/line where the C++ string literal actually lives...
  caller = inspect.currentframe().f_back
  info = inspect.getframeinfo(caller)
  filename = info[0]
  lineno = info[1]

  if hash_str==None:
    return '#line %i "%s"\n'%(lineno, filename)

  # Embed an md5 of the given code as a trailing comment, so a change to the
  # support code alters the main code's hash and triggers a weave recompile...
  digest = hashlib.md5()
  digest.update(hash_str)
  return '#line %i "%s" // %s\n'%(lineno, filename, digest.hexdigest())
Python
# -*- coding: utf-8 -*-

# Code copied from http://opencv.willowgarage.com/wiki/PythonInterface - license unknown, but presumed to be at least as liberal as bsd (The license for opencv.).


import cv
import numpy as np


def cv2array(im):
  """Converts a cv array (IplImage) to a numpy array.

  The returned array always has shape (height, width, nChannels), with the dtype chosen to match the image depth."""
  depth2dtype = {
        cv.IPL_DEPTH_8U: 'uint8',
        cv.IPL_DEPTH_8S: 'int8',
        cv.IPL_DEPTH_16U: 'uint16',
        cv.IPL_DEPTH_16S: 'int16',
        cv.IPL_DEPTH_32S: 'int32',
        cv.IPL_DEPTH_32F: 'float32',
        cv.IPL_DEPTH_64F: 'float64',
    }

  # (Fix: removed the unused local 'arrdtype' that was assigned and never read.)
  a = np.fromstring(
       im.tostring(),
       dtype=depth2dtype[im.depth],
       count=im.width*im.height*im.nChannels)
  a.shape = (im.height,im.width,im.nChannels)
  return a


def array2cv(a):
  """Converts a numpy array to a cv array (IplImage), if possible - the array dtype must appear in the mapping below."""
  dtype2depth = {
        'uint8':   cv.IPL_DEPTH_8U,
        'int8':    cv.IPL_DEPTH_8S,
        'uint16':  cv.IPL_DEPTH_16U,
        'int16':   cv.IPL_DEPTH_16S,
        'int32':   cv.IPL_DEPTH_32S,
        'float32': cv.IPL_DEPTH_32F,
        'float64': cv.IPL_DEPTH_64F,
    }

  # A 2D array is treated as a single channel image; only a missing third
  # dimension is expected here, so catch just IndexError rather than the
  # original bare except, which could hide real errors...
  try:
    nChannels = a.shape[2]
  except IndexError:
    nChannels = 1

  cv_im = cv.CreateImageHeader((a.shape[1],a.shape[0]), dtype2depth[str(a.dtype)], nChannels)
  cv.SetData(cv_im, a.tostring(), a.dtype.itemsize*nChannels*a.shape[1])
  return cv_im
Python
# -*- coding: utf-8 -*-

# Copyright (c) 2011, Tom SF Haines
# All rights reserved.

# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:

# * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.

# * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.

# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.


# NOTE: Python 2 only - uses func.func_code and xrange. Functions are shipped
# to worker processes by marshalling their code objects, which is why closures
# and globals are unavailable inside the mapped function.
import multiprocessing as mp
import multiprocessing.synchronize # To make sure we have all the functionality.
import types
import marshal

import unittest


def repeat(x):
  """A generator that repeats the input forever - can be used with the mp_map function to give data to a function that is constant."""
  while True: yield x


def run_code(code, args):
  """Internal use function that does the work in each process."""
  # Rebuild a callable from the marshalled code object; it gets this module's
  # globals, not those of its original definition site...
  code = marshal.loads(code)
  func = types.FunctionType(code, globals(), '_')
  return func(*args)


def mp_map(func, *iters, **keywords):
  """A multiprocess version of the map function.
  Note that func must limit itself to the data provided - if it accesses anything else (globals, locals to its definition.) it will fail. There is a repeat generator provided in this module to work around such issues. Note that, unlike map, this iterates the length of the shortest of inputs, rather than the longest - whilst this makes it not a perfect substitute it makes passing constant arguments easier as they can just repeat for infinity.

  Recognised keyword: pool - an existing multiprocessing.Pool to reuse; a fresh one is created otherwise."""
  if 'pool' in keywords: pool = keywords['pool']
  else: pool = mp.Pool()

  # Marshal the function's code object so it can cross the process boundary
  # (plain pickling of arbitrary functions does not work)...
  code = marshal.dumps(func.func_code)

  # Fire off every job asynchronously...
  jobs = []
  for args in zip(*iters):
    jobs.append(pool.apply_async(run_code,(code,args)))

  # Collect the results in submission order, preserving map() semantics...
  for i in xrange(len(jobs)):
    jobs[i] = jobs[i].get()

  return jobs


class TestMpMap(unittest.TestCase):
  """Unit tests for mp_map - each exercises a different input pattern."""
  def test_simple1(self):
    data = ['a','b','c','d']
    def noop(data): return data
    data_noop = mp_map(noop, data)
    self.assertEqual(data, data_noop)

  def test_simple2(self):
    data = [x for x in xrange(1000)]
    data_double = mp_map(lambda a: a*2, data)
    self.assertEqual(map(lambda a: a*2,data), data_double)

  def test_gen(self):
    # Generators are acceptable input, not just sequences...
    def gen():
      for i in xrange(100): yield i
    data_double = mp_map(lambda a: a*2, gen())
    self.assertEqual(map(lambda a: a*2,gen()), data_double)

  def test_repeat(self):
    # The infinite repeat generator supplies a constant second argument;
    # iteration stops with the shortest input...
    def mult(a,b): return a*b
    data = [x for x in xrange(50,5000,5)]
    data_triple = mp_map(mult, data, repeat(3))
    self.assertEqual(map(lambda a: a*3,data),data_triple)

  def test_none(self):
    # Empty input gives an empty output...
    data = []
    data_sqr = mp_map(lambda x: x*x, data)
    self.assertEqual([],data_sqr)


if __name__ == '__main__':
  unittest.main()
Python
# Copyright (c) 2012, Tom SF Haines
# All rights reserved.

# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:

# * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.

# * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.

# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.


import sys
import os.path
import tempfile
import shutil

from distutils.core import setup, Extension
import distutils.ccompiler
import distutils.dep_util


# A compiler is needed to predict the output filename; if none is available
# make_mod reports it by raising...
try:
  __default_compiler = distutils.ccompiler.new_compiler()
except:
  __default_compiler = None


def make_mod(name, base, source, openCL = False):
  """Uses distutils to compile a python module - really just a set of hacks to allow this to be done 'on demand', so it only compiles if the module does not exist or is older than the current source, and after compilation the program can continue on its merry way, and immediately import the just compiled module. Note that on failure errors can be thrown - its your choice to catch them or not.

  name is the modules name, i.e. what you want to use with the import statement.
  base is the base directory for the module, which contains the source file - often you would want to set this to 'os.path.dirname(__file__)', assuming the .py file that imports the module is in the same directory as the code. It is this directory that the module is output to.
  source is the filename of the source code to compile, or alternatively a list of filenames.
  openCL indicates if OpenCL is used by the module, in which case it does all the necessary setup - done like this so these settings can be kept centralised, so when they need to be different for a new platform they only have to be changed in one place."""
  if __default_compiler is None: raise Exception('No compiler!')

  # Work out the various file names - check if we actually need to do anything...
  if not isinstance(source, list): source = [source]
  source_path = [os.path.join(base, s) for s in source]
  library_path = os.path.join(base, __default_compiler.shared_object_filename(name))

  # Recompile if any source file is newer than the compiled module...
  # (Fix: replaced reduce(or)/map with the clearer, short-circuiting any().)
  if any(distutils.dep_util.newer(s, library_path) for s in source_path):
    # (Fix: removed a leftover debug statement - print 'b' - that was here.)
    # (Fix: back up argv and create the temporary directory *before* entering
    # the try block, so the finally clause can never hit a NameError if
    # mkdtemp fails.)
    old_argv = sys.argv[:]
    temp_dir = tempfile.mkdtemp()
    try:
      # distutils is driven through argv - fake a 'build_ext' command line...
      sys.argv = ['','build_ext','--build-lib', base, '--build-temp', temp_dir]

      # Headers are dependencies rather than compilation units...
      comp_path = [s for s in source_path if not s.endswith('.h')]
      depends = [s for s in source_path if s.endswith('.h')]

      if openCL:
        # Include/library paths cover both the nvidia and AMD SDK install locations...
        ext = Extension(name, comp_path, include_dirs=['/usr/local/cuda/include', '/opt/AMDAPP/include'], libraries = ['OpenCL'], library_dirs = ['/usr/lib64/nvidia', '/opt/AMDAPP/lib/x86_64'], depends=depends)
      else:
        ext = Extension(name, comp_path, depends=depends)

      # Compile...
      setup(name=name, version='1.0.0', ext_modules=[ext])
    finally:
      # Cleanup the argv variable and the temporary directory...
      sys.argv = old_argv
      shutil.rmtree(temp_dir, True)
Python
# -*- coding: utf-8 -*-

# Copyright (c) 2011, Tom SF Haines
# All rights reserved.

# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:

# * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.

# * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.

# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.


from start_cpp import start_cpp
from numpy_help_cpp import numpy_util_code


# Provides various functions to assist with manipulating python objects from c++ code.
# The string below is C++ support code for scipy.weave (runtime data, compiled
# not interpreted here); it is prefixed with numpy_util_code because the 1D
# extractors use its Byte1D/Float1D accessors. Each GetObject* helper reads an
# attribute off a python object via the C API and converts it; the Byte1D and
# Float1D variants return new[]-allocated buffers the caller must delete[].
python_obj_code = numpy_util_code + start_cpp() + """
#ifndef PYTHON_OBJ_CODE
#define PYTHON_OBJ_CODE

// Extracts a boolean from an object...
bool GetObjectBoolean(PyObject * obj, const char * name)
{
 PyObject * b = PyObject_GetAttrString(obj, name);
 bool ret = b!=Py_False;
 Py_DECREF(b);
 return ret;
}

// Extracts an int from an object...
int GetObjectInt(PyObject * obj, const char * name)
{
 PyObject * i = PyObject_GetAttrString(obj, name);
 int ret = PyInt_AsLong(i);
 Py_DECREF(i);
 return ret;
}

// Extracts a float from an object...
float GetObjectFloat(PyObject * obj, const char * name)
{
 PyObject * f = PyObject_GetAttrString(obj, name);
 float ret = PyFloat_AsDouble(f);
 Py_DECREF(f);
 return ret;
}

// Extracts an array from an object, returning it as a new[] unsigned char array. You can also pass in a pointer to an int to have the size of the array stored...
unsigned char * GetObjectByte1D(PyObject * obj, const char * name, int * size = 0)
{
 PyArrayObject * nao = (PyArrayObject*)PyObject_GetAttrString(obj, name);

 unsigned char * ret = new unsigned char[nao->dimensions[0]];
 if (size) *size = nao->dimensions[0];
 for (int i=0;i<nao->dimensions[0];i++) ret[i] = Byte1D(nao,i);

 Py_DECREF(nao);
 return ret;
}

// Extracts an array from an object, returning it as a new[] float array. You can also pass in a pointer to an int to have the size of the array stored...
float * GetObjectFloat1D(PyObject * obj, const char * name, int * size = 0)
{
 PyArrayObject * nao = (PyArrayObject*)PyObject_GetAttrString(obj, name);

 float * ret = new float[nao->dimensions[0]];
 if (size) *size = nao->dimensions[0];
 for (int i=0;i<nao->dimensions[0];i++) ret[i] = Float1D(nao,i);

 Py_DECREF(nao);
 return ret;
}

#endif
"""
Python
# -*- coding: utf-8 -*- # Copyright (c) 2011, Tom SF Haines # All rights reserved. # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. # * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. from start_cpp import start_cpp # Defines helper functions for accessing numpy arrays... 
numpy_util_code = start_cpp() + """ #ifndef NUMPY_UTIL_CODE #define NUMPY_UTIL_CODE float & Float1D(PyArrayObject * arr, int index = 0) { return *(float*)(arr->data + index*arr->strides[0]); } float & Float2D(PyArrayObject * arr, int index1 = 0, int index2 = 0) { return *(float*)(arr->data + index1*arr->strides[0] + index2*arr->strides[1]); } float & Float3D(PyArrayObject * arr, int index1 = 0, int index2 = 0, int index3 = 0) { return *(float*)(arr->data + index1*arr->strides[0] + index2*arr->strides[1] + index3*arr->strides[2]); } unsigned char & Byte1D(PyArrayObject * arr, int index = 0) { //assert(arr->strides[0]==sizeof(unsigned char)); return *(unsigned char*)(arr->data + index*arr->strides[0]); } unsigned char & Byte2D(PyArrayObject * arr, int index1 = 0, int index2 = 0) { //assert(arr->strides[0]==sizeof(unsigned char)); return *(unsigned char*)(arr->data + index1*arr->strides[0] + index2*arr->strides[1]); } unsigned char & Byte3D(PyArrayObject * arr, int index1 = 0, int index2 = 0, int index3 = 0) { //assert(arr->strides[0]==sizeof(unsigned char)); return *(unsigned char*)(arr->data + index1*arr->strides[0] + index2*arr->strides[1] + index3*arr->strides[2]); } int & Int1D(PyArrayObject * arr, int index = 0) { //assert(arr->strides[0]==sizeof(int)); return *(int*)(arr->data + index*arr->strides[0]); } int & Int2D(PyArrayObject * arr, int index1 = 0, int index2 = 0) { //assert(arr->strides[0]==sizeof(int)); return *(int*)(arr->data + index1*arr->strides[0] + index2*arr->strides[1]); } int & Int3D(PyArrayObject * arr, int index1 = 0, int index2 = 0, int index3 = 0) { //assert(arr->strides[0]==sizeof(int)); return *(int*)(arr->data + index1*arr->strides[0] + index2*arr->strides[1] + index3*arr->strides[2]); } #endif """
Python
#! /usr/bin/env python
# -*- coding: utf-8 -*-

# Copyright (c) 2011, Tom SF Haines
# All rights reserved.

# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:

# * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.

# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.


# Documentation-generation script for the utils module: registers every public
# variable, function and class of the package with a DocGen instance, which
# emits the documentation files as a side effect of the calls below.

import cvarray
import mp_map
import prog_bar
import numpy_help_cpp
import python_obj_cpp
import matrix_cpp
import gamma_cpp
import setProcName
import start_cpp
import make
import doc_gen



# Setup...
doc = doc_gen.DocGen('utils', 'Utilities/Miscellaneous', 'Library of miscellaneous stuff - most modules depend on this.')
doc.addFile('readme.txt', 'Overview')


# Variables...
doc.addVariable('numpy_help_cpp.numpy_util_code', 'Assorted utility functions for accessing numpy arrays within scipy.weave C++ code.')
doc.addVariable('python_obj_cpp.python_obj_code', 'Assorted utility functions for interfacing with python objects from scipy.weave C++ code.')
doc.addVariable('matrix_cpp.matrix_code', 'Matrix manipulation routines for use in scipy.weave C++')
doc.addVariable('gamma_cpp.gamma_code', 'Gamma and related functions for use in scipy.weave C++')


# Functions...
# (Fix: make.make_mod was previously registered a second time at the end of
# this list, which would duplicate its entry in the generated documentation.)
doc.addFunction(make.make_mod)
doc.addFunction(cvarray.cv2array)
doc.addFunction(cvarray.array2cv)
doc.addFunction(mp_map.repeat)
doc.addFunction(mp_map.mp_map)
doc.addFunction(setProcName.setProcName)
doc.addFunction(start_cpp.start_cpp)


# Classes...
doc.addClass(prog_bar.ProgBar)
doc.addClass(doc_gen.DocGen)
Python
#! /usr/bin/env python
# -*- coding: utf-8 -*-

# Copyright (c) 2010, Tom SF Haines
# All rights reserved.

# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:

# * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.

# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.



from ctypes import *



def setProcName(name):
  """Sets the process name, linux only - useful for those programs where you might want to do a killall, but don't want to slaughter all the other python processes. Note that there are multiple mechanisms, and that the given new name can be shortened by differing amounts in differing cases."""
  # c_char_p requires bytes - under Python 3 passing a str raises a TypeError,
  # whilst under Python 2 str is already bytes, so this encode is a no-op
  # there. Keeps the interface backwards compatible with both str and bytes...
  if not isinstance(name, bytes):
    name = name.encode('utf-8')

  # Call the process control function (15 == PR_SET_NAME - sets the kernel's
  # per-thread comm name, as seen by e.g. killall)...
  libc = cdll.LoadLibrary('libc.so.6')
  libc.prctl(15, c_char_p(name), 0, 0, 0)

  # Update argv as well, so tools that read the command line see the new name.
  # strncpy is bounded by the old name's length, so we never write past the
  # original argv[0] buffer - the new name may therefore be truncated...
  charPP = POINTER(POINTER(c_char))
  argv = charPP.in_dll(libc, '_dl_argv')
  size = libc.strlen(argv[0])
  libc.strncpy(argv[0], c_char_p(name), size)



if __name__=='__main__':
  # Quick test that it works - show the process list before and after the
  # rename, so the change can be eyeballed...
  import os
  ps1 = 'ps'
  ps2 = 'ps -f'

  os.system(ps1)
  os.system(ps2)

  setProcName('wibble_wobble')

  os.system(ps1)
  os.system(ps2)
Python
#! /usr/bin/env python

# Copyright (c) 2011, Tom SF Haines
# All rights reserved.

# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:

# * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.

# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.


# Documentation-generation script for the dp_utils module: registers the
# module's code-string variables with a DocGen instance, which emits the
# documentation files as a side effect of the calls below.
# (Fix: corrected typos in the user-facing descriptions - 'distrbution',
# 'implimentation' and 'garabge'.)

import dp_utils
from utils import doc_gen



# Setup...
doc = doc_gen.DocGen('dp_utils', 'Dirichlet Process Utilities', 'Utility library for handling Dirichlet processes')
doc.addFile('readme.txt', 'Overview')


# Variables...
doc.addVariable('sampling_code', 'Code for sampling from various distributions - uniform, Gaussian, gamma and beta.')
doc.addVariable('conc_code', 'Contains code to sample a concentration parameter and two classes - one to represent the status of a concentration parameter - its prior and its estimated value, and another to do the same thing for when a concentration parameter is shared between multiple Dirichlet processes.')
doc.addVariable('dir_est_code', 'Contains a class for doing maximum likelihood estimation of a Dirichlet distribution given multinomials that have been drawn from it.')
doc.addVariable('linked_list_code', 'A linked list implementation - doubly linked, adds data via templated inheritance.')
doc.addVariable('linked_list_gc_code', 'A linked list with reference counting and garbage collection for its entries. Happens to be very good at representing a Dirichlet process.')
doc.addVariable('dp_utils_code', 'Combines all of the code provided in this module into a single variable.')
Python
# Copyright 2011 Tom SF Haines # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. import unittest import math from params import Params from solve_shared import State from model import DocModel from utils.start_cpp import start_cpp from ds_link_cpp import ds_link_code from scipy import weave # Shared code used to Gibbs sample the model - provides operations used repeatedly by the sampling code. Note that this contains all the heavy code used by the system - the rest is basically just loops. Additionally the data structure code is prepended to this, so this is the only shared code... shared_code = ds_link_code + start_cpp() + """ #include <sys/time.h> // Code for resampling a documents cluster assignment... void ResampleDocumentCluster(State & state, Document & doc) { // If the document does not currently have a cluster then create one for it - let 'em cluster in non-initialisation iterations... 
if (doc.GetCluster()==0) { ItemRef<Cluster,Conc> * newC = state.clusters.Append(); newC->Body().alpha = state.rho.alpha; newC->Body().beta = state.rho.beta; newC->Body().conc = state.rho.conc; float * bmn = new float[state.behCount]; float bmnDiv = 0.0; for (int b=0;b<state.behCount;b++) { bmn[b] = state.phi[b]; bmnDiv += state.phi[b]; } for (int b=0;b<state.behCount;b++) bmn[b] /= bmnDiv; newC->SetBMN(bmn); doc.SetCluster(newC); return; } // Fill probAux of the topics with the counts of how many of each topic exist in the document whilst at the same time detaching the cluster instances from the document instances... { ItemRef<Topic,Conc> * topic = state.topics.First(); while (topic->Valid()) { topic->probAux = 0.0; topic = topic->Next(); } } int normalDocInst = 0; { ItemRef<DocInst,Conc> * docInst = doc.First(); while (docInst->Valid()) { docInst->topic = docInst->GetClusterInst()->GetTopic(); if (docInst->topic->beh==0) // Only need to redo the normal ones, as that is all resampling the cluster affects. { docInst->topic->IncRef(); // Could be that this is the last (indirect) reference to the topic, and the next line could delete it - would be bad. docInst->SetClusterInst(0); docInst->topic->probAux += 1.0; normalDocInst += 1; } docInst = docInst->Next(); } } // Detach the document from its current cluster... doc.SetCluster(0); // Work out the log probabilities of assigning one of the known clusters to the document - store them in the cluster prob values. Uses the topic prob values as intermediates, for the probability of drawing each topic from the cluster... float maxLogProb = -1e100; { ItemRef<Cluster,Conc> * cluster = state.clusters.First(); while (cluster->Valid()) { // We need the probability of drawing each topic from the cluster, which we write into the prob variable of the topics... // Zero out the prob values of the topics... 
{ ItemRef<Topic,Conc> * topic = state.topics.First(); while (topic->Valid()) { topic->prob = 0.0; topic = topic->Next(); } } // Count how many times each topic has been drawn from the cluster, storing in the topic prob values... { ItemRef<ClusterInst,Conc> * cluInst = cluster->First(); while (cluInst->Valid()) { cluInst->GetTopic()->prob += cluInst->RefCount(); cluInst = cluInst->Next(); } } // Normalise whilst adding in the probability of drawing the given topic... // (There is some cleverness here to account for the extra references to the topics obtained from the document being resampled.) { ItemRef<Topic,Conc> * topic = state.topics.First(); while (topic->Valid()) { topic->prob += cluster->Body().conc * float(topic->RefCount()-topic->probAux) / (state.topics.RefTotal() - normalDocInst + state.topics.Body().conc); topic->prob /= cluster->RefTotal() + cluster->Body().conc; topic = topic->Next(); } } // Now calculate the log probability of the cluster - involves a loop over the topics plus the inclusion of the probability of drawing this cluster... cluster->prob = log(cluster->RefCount()); //cluster->prob -= log(state.clusters.RefTotal() + state.clusters.Body().conc); { ItemRef<Topic,Conc> * topic = state.topics.First(); while (topic->Valid()) { cluster->prob += topic->probAux * log(topic->prob); topic = topic->Next(); } } // Factor in the probability of the clusters distribution over behaviours... 
float bmnDiv = cluster->GetBMN()[0]; bool hasAbnorm = false; for (int b=1;b<state.behCount;b++) { if (doc.GetBehFlags()[b]!=0) { bmnDiv += cluster->GetBMN()[b]; hasAbnorm = true; } } if (hasAbnorm) { cluster->prob += lnGamma(doc.SampleCount()+1.0); for (int b=0;b<state.behCount;b++) { if (doc.GetBehFlags()[b]!=0) { cluster->prob += doc.GetBehCounts()[b] * log(cluster->GetBMN()[b]/bmnDiv); cluster->prob -= lnGamma(doc.GetBehCounts()[b]+1.0); } } } if (cluster->prob>maxLogProb) maxLogProb = cluster->prob; cluster = cluster->Next(); } } // Calculate the log probability of assigning a new cluster - involves quite a few terms, including a loop over the topics to get many of them... float probNew = log(state.clusters.Body().conc); //probNew -= log(state.clusters.RefTotal() + state.clusters.Body().conc); probNew += lnGamma(doc.Body().conc); probNew -= lnGamma(doc.Body().conc + doc.Size()); { ItemRef<Topic,Conc> * topic = state.topics.First(); while (topic->Valid()) { float tProb = float(topic->RefCount()-topic->probAux) / (state.topics.RefTotal() - normalDocInst + state.topics.Body().conc); float tWeight = doc.Body().conc * tProb; probNew += lnGamma(tWeight + topic->probAux); probNew -= lnGamma(tWeight); topic = topic->Next(); } } { float phiDiv = state.phi[0]; bool hasAbnorm = false; for (int b=1;b<state.behCount;b++) { if (doc.GetBehFlags()[b]!=0) { phiDiv += state.phi[b]; hasAbnorm = true; } } if (hasAbnorm) { probNew += lnGamma(doc.SampleCount()+1.0); for (int b=0;b<state.behCount;b++) { if (doc.GetBehFlags()[b]!=0) { probNew += doc.GetBehCounts()[b] * log(state.phi[b]/phiDiv); probNew -= lnGamma(doc.GetBehCounts()[b]+1.0); } } } } if (probNew>maxLogProb) maxLogProb = probNew; // Convert from logs to actual probabilities, with partial normalisation and summing for implicit precise normalisation later... 
float sumProb = 0.0; probNew = exp(probNew - maxLogProb); sumProb += probNew; { ItemRef<Cluster,Conc> * cluster = state.clusters.First(); while (cluster->Valid()) { cluster->prob = exp(cluster->prob - maxLogProb); sumProb += cluster->prob; cluster = cluster->Next(); } } // Draw which cluster we are to assign; in the event of a new cluster create it... ItemRef<Cluster,Conc> * selected = 0; { float rand = sample_uniform() * sumProb; ItemRef<Cluster,Conc> * cluster = state.clusters.First(); while (cluster->Valid()) { rand -= cluster->prob; if (rand<0.0) { selected = cluster; break; } cluster = cluster->Next(); } } if (selected==0) { selected = state.clusters.Append(); selected->Body().alpha = state.rho.alpha; selected->Body().beta = state.rho.beta; selected->Body().conc = state.rho.conc; float * bmn = new float[state.behCount]; float bmnDiv = 0.0; for (int b=0;b<state.behCount;b++) { bmn[b] = state.phi[b]; bmnDiv += state.phi[b]; } for (int b=0;b<state.behCount;b++) bmn[b] /= bmnDiv; selected->SetBMN(bmn); } // Update the document with its new cluster - consists of setting the documents cluster and updating the document instances to use the new cluster, which requires more sampling... doc.SetCluster(selected); ItemRef<DocInst,Conc> * docInst = doc.First(); while(docInst->Valid()) { if (docInst->topic->beh==0) { // Update the cluster instance for this document instance - treat as a draw from the cluster DP with a hard requiremement that we draw an instance with the same topic as currently (What to do here is not given by the dual-hdp paper - this is just one option amung many, choosen for being good for convergance and relativly easy to impliment.)... // Sum weights from the cluster instances, but only when they are the correct topic; also add in the probability of creating a new cluster instance with the relevant topic... 
float probSum = selected->Body().conc * float(docInst->topic->RefCount()) / (state.topics.RefTotal() + state.topics.Body().conc); { ItemRef<ClusterInst,Conc> * targ2 = selected->First(); while (targ2->Valid()) { if (targ2->GetTopic()==docInst->topic) probSum += targ2->RefCount(); targ2 = targ2->Next(); } } // Select the relevant one... ItemRef<ClusterInst,Conc> * relevant = 0; { float rand = sample_uniform() * probSum; ItemRef<ClusterInst,Conc> * cluInst = selected->First(); while (cluInst->Valid()) { if (cluInst->GetTopic()==docInst->topic) { rand -= cluInst->RefCount(); if (rand<0.0) { relevant = cluInst; break; } } cluInst = cluInst->Next(); } } if (relevant==0) { relevant = selected->Append(); relevant->SetTopic(docInst->topic); } // Assign it... docInst->SetClusterInst(relevant); // Temporary with topic in is no longer needed - decriment the reference... docInst->topic->DecRef(); } docInst = docInst->Next(); } } // Code for resampling the topics associated with cluster instances - single function that does them all - designed this way for efficiency reasons... void ResampleClusterInstances(State & state) { // First construct a linked list in each ClusterInst of all samples currently assigned to that ClusterInst, ready for the next bit - quite an involved process due to the multiple levels... { ItemRef<Cluster,Conc> * cluster = state.clusters.First(); while (cluster->Valid()) { ItemRef<ClusterInst,Conc> * cluInst = cluster->First(); while (cluInst->Valid()) { cluInst->first = 0; cluInst = cluInst->Next(); } cluster = cluster->Next(); } } for (int d=0;d<state.docCount;d++) { Document & doc = state.doc[d]; for (int s=0;s<doc.SampleCount();s++) { Sample & sam = doc.GetSample(s); ItemRef<ClusterInst,Conc> * ci = sam.GetDocInst()->GetClusterInst(); sam.next = ci->first; // Note that doing this for abnormal cluster instances makes no sense, but causes no harm either, hence leaving it with the simpler code. 
ci->first = &sam; } } // Now iterate all the cluster instances and resample each in turn... { ItemRef<Cluster,Conc> * cluster = state.clusters.First(); while (cluster->Valid()) { ItemRef<ClusterInst,Conc> * cluInst = cluster->First(); while (cluInst->Valid()) { // First decriment the topic word counts for all the using samples and remove its topic... { Sample * sam = cluInst->first; while (sam) { ItemRef<Topic,Conc> * topic = sam->GetDocInst()->GetClusterInst()->GetTopic(); topic->wc[sam->GetWord()] -= 1; topic->wcTotal -= 1; sam = sam->next; } } cluInst->SetTopic(0); // Count the number of each word type used by all the children of the cluster instance... { for (int w=0;w<state.wordCount;w++) state.tempWord[w] = 0; Sample * sam = cluInst->first; while (sam) { state.tempWord[sam->GetWord()] += 1; sam = sam->next; } } // Iterate the topics and calculate the log probability of each, find maximum log probability... float maxLogProb = -1e100; { ItemRef<Topic,Conc> * topic = state.topics.First(); while (topic->Valid()) { topic->prob = log(topic->RefCount()); float samDiv = log(topic->wcTotal + state.betaSum); for (int w=0;w<state.wordCount;w++) { if (state.tempWord[w]!=0) { topic->prob += state.tempWord[w]*(log(topic->wc[w] + state.beta[w]) - samDiv); } } if (topic->prob>maxLogProb) maxLogProb = topic->prob; topic = topic->Next(); } } // Calculate the log probability of a new topic; maintain maximum... float probNew = log(state.topics.Body().conc); { for (int w=0;w<state.wordCount;w++) { if (state.tempWord[w]!=0) { probNew += state.tempWord[w]*log(state.beta[w]/state.betaSum); } } } if (probNew>maxLogProb) maxLogProb = probNew; // Convert log probabilities to actual probabilities in a numerically safe way, and sum them up for selection... 
float probSum = 0.0; probNew = exp(probNew-maxLogProb); probSum += probNew; { ItemRef<Topic,Conc> * topic = state.topics.First(); while (topic->Valid()) { topic->prob = exp(topic->prob-maxLogProb); probSum += topic->prob; topic = topic->Next(); } } // Select the resampled topic, creating a new one if required... ItemRef<Topic,Conc> * nt = 0; float rand = probSum * sample_uniform(); { ItemRef<Topic,Conc> * topic = state.topics.First(); while (topic->Valid()) { rand -= topic->prob; if (rand<0.0) { nt = topic; break; } topic = topic->Next(); } } if (nt==0) { nt = state.topics.Append(); nt->wc = new int[state.wordCount]; for (int w=0;w<state.wordCount;w++) nt->wc[w] = 0; nt->wcTotal = 0; nt->beh = 0; } // Finally set its topic and sum back in the topic usage by its using samples... cluInst->SetTopic(nt); { Sample * sam = cluInst->first; while (sam) { ItemRef<Topic,Conc> * topic = sam->GetDocInst()->GetClusterInst()->GetTopic(); topic->wc[sam->GetWord()] += 1; topic->wcTotal += 1; sam = sam->next; } } cluInst = cluInst->Next(); } cluster = cluster->Next(); } } } // Code for resampling a document instance's cluster instance - actually does all document instances for a single document with each call, for efficiency reasons... void ResampleDocumentInstances(State & state, Document & doc) { // Calculate the normaliser for the behaviour multinomial and the log probability of normal behaviour... float bmnNorm = 0.0; for (int b=0;b<state.behCount;b++) { if (doc.GetBehFlags()[b]!=0) bmnNorm += doc.GetCluster()->GetBMN()[b]; } float logProbNorm = log(doc.GetCluster()->GetBMN()[0]/bmnNorm); // Construct a linked list in each DocInst of the samples contained within - needed to do the next task efficiently... 
{ ItemRef<DocInst,Conc> * docInst = doc.First(); while (docInst->Valid()) { docInst->first = 0; docInst = docInst->Next(); } } for (int s=0;s<doc.SampleCount();s++) { Sample & sam = doc.GetSample(s); sam.next = sam.GetDocInst()->first; sam.GetDocInst()->first = &sam; } // Now iterate all DocInst in the document, resampling each in turn... { ItemRef<DocInst,Conc> * docInst = doc.First(); while (docInst->Valid()) { // Detach from its cluster instance, removing all topic references at the same time, also, count how many words it has... { for (int w=0;w<state.wordCount;w++) state.tempWord[w] = 0; Sample * sample = docInst->first; while (sample) { ItemRef<Topic,Conc> * topic = sample->GetDocInst()->GetClusterInst()->GetTopic(); topic->wc[sample->GetWord()] -= 1; state.tempWord[sample->GetWord()] += 1; topic->wcTotal -= 1; doc.GetBehCounts()[topic->beh] -= 1; sample = sample->next; } } docInst->SetClusterInst(0); // Iterate the topics and determine the log probability of each topic for the samples in probAux and the log probability of drawing a new cluster instance with the given topic in prob. The latter has its max recorded for numerically stable normalisation later... float maxLogProb = -1e100; float logTopicNorm = log(state.topics.RefTotal() + state.topics.Body().conc); float logCluNorm = log(doc.GetCluster()->RefTotal() + doc.GetCluster()->Body().conc); { float baseTopicLogProb = logProbNorm + log(doc.GetCluster()->Body().conc) - logCluNorm - logTopicNorm; ItemRef<Topic,Conc> * topic = state.topics.First(); while (topic->Valid()) { topic->probAux = 0.0; float samDiv = log(topic->wcTotal + state.betaSum); for (int w=0;w<state.wordCount;w++) { if (state.tempWord[w]!=0) { topic->probAux += state.tempWord[w]*(logf(topic->wc[w] + state.beta[w]) - samDiv); // Don't normalise for arbitrary order, as same constant for all. 
} } topic->prob = baseTopicLogProb + logf(topic->RefCount()) + topic->probAux; if (topic->prob>maxLogProb) maxLogProb = topic->prob; topic = topic->Next(); } } // Iterate the cluster instances and calculate their log probabilities, maintaining knowledge of the maximum... { ItemRef<ClusterInst,Conc> * cluInst = doc.GetCluster()->First(); while (cluInst->Valid()) { cluInst->prob = logProbNorm + logf(cluInst->RefCount()) - logCluNorm + cluInst->GetTopic()->probAux; if (cluInst->prob>maxLogProb) maxLogProb = cluInst->prob; cluInst = cluInst->Next(); } } // Calculate the log probability of a new topic and new cluster instance, factor into the maximum... float probAllNew = logProbNorm + log(doc.GetCluster()->Body().conc) - logCluNorm + log(state.topics.Body().conc) - logTopicNorm; { for (int w=0;w<state.wordCount;w++) { if (state.tempWord[w]!=0) { probAllNew += state.tempWord[w]*logf(state.beta[w]/state.betaSum); // Ignore ordering irrelevance normalisation, as done throughout due to being constant. } } } if (probAllNew>maxLogProb) maxLogProb = probAllNew; // Do all the abnormal topics - same idea as previously... { ItemRef<Topic,Conc> * topic = state.behTopics.First()->Next(); while (topic->Valid()) { if (doc.GetBehFlags()[topic->beh]!=0) { topic->prob = log(doc.GetCluster()->GetBMN()[topic->beh]/bmnNorm); float samDiv = log(topic->wcTotal + state.betaSum); for (int w=0;w<state.wordCount;w++) { if (state.tempWord[w]!=0) { topic->prob += state.tempWord[w]*(logf(topic->wc[w] + state.beta[w]) - samDiv); // Don't normalise for arbitrary ordering, as same constant for all. } } if (topic->prob>maxLogProb) maxLogProb = topic->prob; } topic = topic->Next(); } } // Use the maximum log probability to convert all values to normal probabilities in a numerically safe way, storing a sum ready for drawing from the various options... 
float probSum = 0.0; probAllNew = exp(probAllNew-maxLogProb); probSum += probAllNew; { ItemRef<Topic,Conc> * topic = state.topics.First(); while (topic->Valid()) { topic->prob = exp(topic->prob-maxLogProb); probSum += topic->prob; topic = topic->Next(); } } { ItemRef<ClusterInst,Conc> * cluInst = doc.GetCluster()->First(); while (cluInst->Valid()) { cluInst->prob = exp(cluInst->prob-maxLogProb); probSum += cluInst->prob; cluInst = cluInst->Next(); } } { ItemRef<Topic,Conc> * topic = state.behTopics.First()->Next(); while (topic->Valid()) { if (doc.GetBehFlags()[topic->beh]!=0) { topic->prob = exp(topic->prob-maxLogProb); probSum += topic->prob; } topic = topic->Next(); } } // Draw the new cluster instance - can involve creating a new one and even creating a new topic... ItemRef<ClusterInst,Conc> * nci = 0; float rand = sample_uniform() * probSum; // Is it a normal cluster instance that already exists?.. { ItemRef<ClusterInst,Conc> * cluInst = doc.GetCluster()->First(); while (cluInst->Valid()) { rand -= cluInst->prob; if (rand<0.0) { nci = cluInst; break; } cluInst = cluInst->Next(); } } // Is it an abnormal topic?.. if (nci==0) { ItemRef<ClusterInst,Conc> * cluInst = state.behCluInsts.First()->Next(); while (cluInst->Valid()) { if (doc.GetBehFlags()[cluInst->GetTopic()->beh]!=0) { rand -= cluInst->GetTopic()->prob; if (rand<0.0) { nci = cluInst; break; } } cluInst = cluInst->Next(); } } // Is it a new cluster instance?.. if (nci==0) { nci = doc.GetCluster()->Append(); ItemRef<Topic,Conc> * topic = state.topics.First(); while (topic->Valid()) { rand -= topic->prob; if (rand<0.0) { nci->SetTopic(topic); break; } topic = topic->Next(); } } // Is it a new topic as well as a new cluster instance?.. 
if (nci->GetTopic()==0) { ItemRef<Topic,Conc> * nt = state.topics.Append(); nt->wc = new int[state.wordCount]; for (int w=0;w<state.wordCount;w++) nt->wc[w] = 0; nt->wcTotal = 0; nt->beh = 0; nci->SetTopic(nt); } // Reattach its resampled cluster instance, and incriment the topic word counts... docInst->SetClusterInst(nci); { Sample * sample = docInst->first; while (sample) { ItemRef<Topic,Conc> * topic = sample->GetDocInst()->GetClusterInst()->GetTopic(); topic->wc[sample->GetWord()] += 1; topic->wcTotal += 1; doc.GetBehCounts()[topic->beh] += 1; sample = sample->next; } } docInst = docInst->Next(); } } } // Helper for below, seperated out as required seperate for the left to right algorithm later on. Returns the sum of all the probabilities of the options for the sample just calculated, and leaves correct values in all the relevant ->prob variables... float CalcSampleProb(State & state, Document & doc, Sample & sam) { float pSum = 0.0; // Calculate the normalising constant for the associated clusters behaviour multinomial given the documents behaviour flags... float bmvDiv = 0.0; for (int b=0;b<state.behCount;b++) { if (doc.GetBehFlags()[b]!=0) bmvDiv += doc.GetCluster()->GetBMN()[b]; } // Probability of going for something normal... float probNormal = doc.GetCluster()->GetBMN()[0] / bmvDiv; // Calculate the probabilities of various 'new' events... float probNewDocInst = doc.Body().conc / (doc.RefTotal() + doc.Body().conc); float probNewCluInst = probNewDocInst * doc.GetCluster()->Body().conc / (doc.GetCluster()->RefTotal() + doc.GetCluster()->Body().conc); float probNewTopic = probNewCluInst * state.topics.Body().conc / (state.topics.RefTotal() + state.topics.Body().conc); // The probability of a new topic... pSum += probNormal * probNewTopic * state.beta[sam.GetWord()] / state.betaSum; // The topics - keep the probabilities of drawing the word in question from the topic in the aux variables, to save computation in the following steps... 
float betaWeight = state.beta[sam.GetWord()]; { ItemRef<Topic,Conc> * topic = state.topics.First(); float base = probNormal * probNewCluInst / (state.topics.RefTotal() + state.topics.Body().conc); while (topic->Valid()) { topic->probAux = (topic->wc[sam.GetWord()] + betaWeight) / (topic->wcTotal + state.betaSum); topic->prob = topic->probAux * topic->RefCount() * base; pSum += topic->prob; topic = topic->Next(); } } // The abnormal topics... { ItemRef<Topic,Conc> * topic = state.behTopics.First()->Next(); while (topic->Valid()) { if (doc.GetBehFlags()[topic->beh]!=0) { topic->probAux = (topic->wc[sam.GetWord()] + betaWeight) / (topic->wcTotal + state.betaSum); float probBeh = doc.GetCluster()->GetBMN()[topic->beh] / bmvDiv; topic->prob = probBeh * probNewDocInst * topic->probAux; pSum += topic->prob; } topic = topic->Next(); } } // The cluster instances... { ItemRef<ClusterInst,Conc> * cluInst = doc.GetCluster()->First(); float base = probNormal * probNewDocInst / (doc.GetCluster()->RefTotal() + doc.GetCluster()->Body().conc); while (cluInst->Valid()) { cluInst->prob = cluInst->GetTopic()->probAux * cluInst->RefCount() * base; pSum += cluInst->prob; cluInst = cluInst->Next(); } } // The document instances... { ItemRef<DocInst,Conc> * docInst = doc.First(); float divisor = doc.RefTotal() + doc.Body().conc; while (docInst->Valid()) { docInst->prob = docInst->GetClusterInst()->GetTopic()->probAux * docInst->RefCount() / divisor; pSum += docInst->prob; docInst = docInst->Next(); } } return pSum; } // Code for resampling a samples topic instance assignment... // (Everything must be assigned - no null pointers on the chain from sample to topic.) // (You can seperatly call CalcSampleProb and put its return value in pSum if you want, though that requires that you really, really know what your doing.) void ResampleSample(State & state, Document & doc, Sample & sam, float pSum = -1.0) { // Remove the samples current assignment... 
if (sam.GetDocInst()) { int beh = sam.GetDocInst()->GetClusterInst()->GetTopic()->beh; doc.GetBehCounts()[beh] -= 1; sam.SetDocInst(0); } // Assign probabilities to the various possibilities - there are temporary variables in the data structure to make this elegant. Sum up the total probability ready for the sampling phase. In all cases an entity is assigned the probability of using that entity with everything below it being created from scratch... if (pSum<0.0) { pSum = CalcSampleProb(state, doc, sam); } // Now draw from the distribution and assign the result, creating new entities as required. The checking is done in order of (typically) largest to smallest, to maximise the chance of an early bail out... // Draw the random uniform, scaled by the pSum - we will repeatedly subtract from this random variable for each item - when it becomes negative we have found the item to draw... float rand = sample_uniform() * pSum; // Check the document instances... { ItemRef<DocInst,Conc> * docInst = doc.First(); while (docInst->Valid()) { rand -= docInst->prob; if (rand<0.0) { // A document instance has been selected - simplest reassignment case... sam.SetDocInst(docInst); int beh = sam.GetDocInst()->GetClusterInst()->GetTopic()->beh; doc.GetBehCounts()[beh] += 1; return; } docInst = docInst->Next(); } } // Check the cluster instances - would involve a new document instance... { ItemRef<ClusterInst,Conc> * cluInst = doc.GetCluster()->First(); while (cluInst->Valid()) { rand -= cluInst->prob; if (rand<0.0) { // A cluster instance has been selected - need to create a new document instance... ItemRef<DocInst,Conc> * ndi = doc.Append(); ndi->SetClusterInst(cluInst); sam.SetDocInst(ndi); doc.GetBehCounts()[0] += 1; return; } cluInst = cluInst->Next(); } } // Check the abnormal topics... 
{ ItemRef<ClusterInst,Conc> * cluInst = state.behCluInsts.First()->Next(); while (cluInst->Valid()) { if (doc.GetBehFlags()[cluInst->GetTopic()->beh]!=0) { rand -= cluInst->GetTopic()->prob; if (rand<0.0) { // An abnormal topic has been selected - need a new document instance... ItemRef<DocInst,Conc> * ndi = doc.Append(); ndi->SetClusterInst(cluInst); sam.SetDocInst(ndi); doc.GetBehCounts()[cluInst->GetTopic()->beh] += 1; return; } } cluInst = cluInst->Next(); } } // Check the topics - would involve both a new cluster and document instance... { ItemRef<Topic,Conc> * topic = state.topics.First(); while (topic->Valid()) { rand -= topic->prob; if (rand<0.0) { // A topic has been selected - need a new cluster and a new document instance... ItemRef<ClusterInst,Conc> * nci = doc.GetCluster()->Append(); nci->SetTopic(topic); ItemRef<DocInst,Conc> * ndi = doc.Append(); ndi->SetClusterInst(nci); sam.SetDocInst(ndi); doc.GetBehCounts()[0] += 1; return; } topic = topic->Next(); } } // If we have got this far then its a new topic, with a new cluster and document instance as well... ItemRef<Topic,Conc> * nt = state.topics.Append(); nt->wc = new int[state.wordCount]; for (int w=0;w<state.wordCount;w++) nt->wc[w] = 0; nt->wcTotal = 0; nt->beh = 0; ItemRef<ClusterInst,Conc> * nci = doc.GetCluster()->Append(); nci->SetTopic(nt); ItemRef<DocInst,Conc> * ndi = doc.Append(); ndi->SetClusterInst(nci); sam.SetDocInst(ndi); doc.GetBehCounts()[0] += 1; } // Code for resampling all the concentration parameters - just have to iterate through and call all the resampling methods... void ResampleConcs(State & state, bool doClu = true, bool doDoc = true) { // Concentrations for DPs from which topics and clusters are drawn... state.topics.Body().ResampleConc(state.topics.RefTotal(), state.topics.Size()); state.clusters.Body().ResampleConc(state.clusters.RefTotal(), state.clusters.Size()); // Concentrations for clusters... 
if (doClu) { if (state.seperateClusterConc) { ItemRef<Cluster,Conc> * cluster = state.clusters.First(); while (cluster->Valid()) { cluster->Body().ResampleConc(cluster->RefTotal(), cluster->Size()); cluster = cluster->Next(); } } else { if (state.clusters.Size()>0) { SampleConcDP scdp; scdp.SetPrior(state.rho.alpha,state.rho.beta); scdp.SetPrevConc(state.clusters.First()->Body().conc); ItemRef<Cluster,Conc> * cluster = state.clusters.First(); while (cluster->Valid()) { scdp.AddDP(cluster->RefTotal(), cluster->Size()); cluster = cluster->Next(); } double newConc = scdp.Sample(); cluster = state.clusters.First(); while (cluster->Valid()) { cluster->Body().conc = newConc; cluster = cluster->Next(); } state.rho.conc = newConc; } } } // Concentrations for documents... if (doDoc) { if (state.seperateDocumentConc) { for (int d=0;d<state.docCount;d++) { state.doc[d].Body().ResampleConc(state.doc[d].RefTotal(), state.doc[d].Size()); } } else { SampleConcDP scdp; scdp.SetPrior(state.doc[0].Body().alpha,state.doc[0].Body().beta); scdp.SetPrevConc(state.doc[0].Body().conc); for (int d=0;d<state.docCount;d++) { scdp.AddDP(state.doc[d].RefTotal(), state.doc[d].Size()); } double newConc = scdp.Sample(); for (int d=0;d<state.docCount;d++) { state.doc[d].Body().conc = newConc; } } } } // Helper function used during the left to right algorithm - a comparator for the qsort function for sorting an array of ints... int compareInt(const void * lhs, const void * rhs) { return *(int*)lhs - *(int*)rhs; } // Helper function for timming... float micro_seconds() { static double prev = 0.0; timeval tv; gettimeofday(&tv,0); double now = tv.tv_sec + (tv.tv_usec/1e6); float ret = (now-prev)*1e6; prev = now; return ret; } """ # The actual function for Gibbs iterating the data structure - takes as input the State object as 'state' and the number of iterations to do as 'iters'... gibbs_code = start_cpp(shared_code) + """ // State... 
State s; StatePyToCpp(state, &s); // Declare some stuff for efficiency... float * mn = new float[s.wordCount]; SMP smp(s.flagSets->dimensions[1], s.flagSets->dimensions[0]); smp.SetFIA(s.flagSets); smp.SetSampleCount(s.behSamples); // If there is only one behaviour force disable bmn and phi estimation - things go pear-shaped otherwise... if (s.flagSets->dimensions[1]<2) s.calcCluBmn = false; // No point resampling phi if not resampling the bmn's... if (s.calcCluBmn==false) s.calcPhi = false; // Iterations... bool verbose = false; for (int iter=0;iter<iters;iter++) { if (verbose) printf("iter %i | %f\\n", iter, micro_seconds()); // Iterate the documents... for (int d=0;d<s.docCount;d++) { if (verbose) printf("iter %i, doc %i | %f\\n", iter, d, micro_seconds()); // Resample the documents cluster... if (s.doc[d].GetCluster()==0) { if (s.clusters.Size()==0) { ItemRef<Cluster,Conc> * newC = s.clusters.Append(); newC->Body().alpha = s.rho.alpha; newC->Body().beta = s.rho.beta; newC->Body().conc = s.rho.conc; float * bmn = new float[s.behCount]; for (int b=0;b<s.behCount;b++) bmn[b] = s.phi[b]; newC->SetBMN(bmn); s.doc[d].SetCluster(newC); } else { s.doc[d].SetCluster(s.clusters.First()); } } else { if (!s.oneCluster) { ResampleDocumentCluster(s, s.doc[d]); } } if (verbose) printf("resampled cluster | %f\\n", micro_seconds()); // Resample the documents samples (words)... for (int ss=0;ss<s.doc[d].SampleCount();ss++) { ResampleSample(s, s.doc[d], s.doc[d].GetSample(ss)); } if (verbose) printf("resampled words | %f\\n", micro_seconds()); // Resample the cluster instance that each document instance is assigned to... 
if (!s.dnrDocInsts) { ResampleDocumentInstances(s,s.doc[d]); } if (verbose) printf("resampled doc instances | %f\\n", micro_seconds()); // Resample the many concentration parameters every document - need to do this regularly to make sure the initialisation values don't cause the algorithm to get stuck (Plus its such a cheap operation that it doesn't matter if its done too frequently.)... if (s.resampleConcs) { ResampleConcs(s); } if (verbose) printf("resampled concentrations | %f\\n", micro_seconds()); } // Resample the cluster instances assigned topics... if (!s.dnrCluInsts) { if (verbose) printf("resampling cluster instances... | %f\\n", micro_seconds()); ResampleClusterInstances(s); } // Resample each clusters bmn... if (s.calcCluBmn) { if (verbose) printf("resampling cluster bmn's... | %f\\n", micro_seconds()); // Update the prior for the smp object from phi... smp.SetPrior(s.phi); // Go through the documents and construct a list of documents belonging to each cluster... ItemRef<Cluster,Conc> * targClu = s.clusters.First(); while (targClu->Valid()) { targClu->first = 0; targClu = targClu->Next(); } for (int d=0;d<s.docCount;d++) { targClu = s.doc[d].GetCluster(); s.doc[d].next = targClu->first; targClu->first = &s.doc[d]; } // Iterate and do the calculation for each cluster... targClu = s.clusters.First(); while (targClu->Valid()) { // Reset the smp object, add the prior... smp.Reset(); int * priorPower = targClu->GetBehCountPrior(); if (priorPower) smp.Add(priorPower); // Add samples by iterating the relevant documents... Document * targ = targClu->first; while (targ) { if (targ->GetFlagIndex()>=s.flagSets->dimensions[1]) { smp.Add(targ->GetFlagIndex(), targ->GetBehCounts()); } targ = targ->next; } // Extract the estimate... smp.Mean(targClu->GetBMN()); targClu = targClu->Next(); } } // Resample phi, the prior on the cluster bmn-s... if (s.calcPhi) { if (verbose) printf("resampling phi... 
| %f\\n", micro_seconds()); EstimateDir ed(s.behCount); ItemRef<Cluster,Conc> * cluster = s.clusters.First(); while (cluster->Valid()) { ed.Add(cluster->GetBMN()); // Not actually correct - see below with beta for reason/justification. cluster = cluster->Next(); } ed.Update(s.phi); } // If requested recalculate beta... if (s.calcBeta&&((s.topics.Size()+s.behTopics.Size()-1)>1)) { if (verbose) printf("resampling beta... | %f\\n", micro_seconds()); EstimateDir ed(s.wordCount); ItemRef<Topic,Conc> * topic = s.topics.First(); while (topic->Valid()) { float div = 0.0; for (int i=0;i<s.wordCount;i++) { mn[i] = topic->wc[i] + s.beta[i]; div += mn[i]; } for (int i=0;i<s.wordCount;i++) mn[i] /= div; ed.Add(mn); // Not actually correct - we are using the mean of the distribution from which we should draw the multinomial, rather than actually drawing. This is easier however, and not that unreasonable. topic = topic->Next(); } topic = s.behTopics.First()->Next(); // Skip the normal behaviour dummy. while (topic->Valid()) { float div = 0.0; for (int i=0;i<s.wordCount;i++) { mn[i] = topic->wc[i] + s.beta[i]; div += mn[i]; } for (int i=0;i<s.wordCount;i++) mn[i] /= div; ed.Add(mn); topic = topic->Next(); } ed.Update(s.beta); s.betaSum = 0.0; for (int i=0;i<s.wordCount;i++) s.betaSum += s.beta[i]; } // Verify the state is consistant - for debugging (Only works when there is no prior)... 
 //VerifyState(s);
 }

 delete[] mn;

 StateCppToPy(&s, state);
"""



class ProgReporter:
  """Class to allow progress to be reported."""
  def __init__(self,params,callback,mult = 1):
    # Total work per run: the burn in (of which lag iterations are covered by the
    # per-sample gibbs calls - see gibbs_run), lag iterations before each of the
    # samples, plus one extra tick per sample taken. mult scales the whole thing,
    # for when the same params are applied several times (e.g. gibbs_doc).
    self.doneIters = 0
    self.totalIters = mult * params.runs * (max((params.burnIn,params.lag)) + params.samples + (params.samples-1)*params.lag)
    self.callback = callback
    if self.callback:
      self.callback(self.doneIters,self.totalIters)

  def next(self, amount = 1):
    # Advance the count of completed iterations and report progress, if a
    # callback was provided.
    self.doneIters += amount
    if self.callback:
      self.callback(self.doneIters,self.totalIters)



def gibbs(state, total_iters, next, step = 64):
  """Does the requested number of Gibbs iterations to the passed in state. If state has not been initialised the first iteration will be an incrimental construction."""
  # The C++ code is invoked in chunks of at most step iterations, with the
  # progress function, next, called with the chunk size after each chunk.
  while total_iters>0:
    iters = total_iters
    if iters>step:
      iters = step
    total_iters -= iters
    weave.inline(gibbs_code, ['state', 'iters'], support_code=shared_code)
    next(iters)


def gibbs_run(state, next):
  """Does a single run on the given state object, adding the relevant samples."""
  params = state.getParams()

  # Burn in - only burnIn-lag iterations here, as each sample below is preceded
  # by lag iterations, which provides the remainder...
  if params.burnIn>params.lag:
    gibbs(state, params.burnIn-params.lag,next)

  # Collect the requested number of samples, with lag iterations before each...
  for s in xrange(params.samples):
    gibbs(state, params.lag,next)
    state.sample()
    next()


def gibbs_all(state, callback = None):
  """Does all the runs requested by a states params object, collating all the samples into the State."""
  params = state.getParams()
  reporter = ProgReporter(params,callback)

  # Each run operates on a copy of the given state, with the resulting samples
  # then absorbed back into it...
  for r in xrange(params.runs):
    tempState = State(state)
    gibbs_run(tempState,reporter.next)
    state.absorbClone(tempState)


def gibbs_doc(model, doc, params = None, callback = None):
  """Runs Gibbs iterations on a single document, by sampling with a prior constructed from each sample in the given Model. params applies to each sample, so should probably be much more limited than usual - the default if its undefined is to use 1 run and 1 sample and a burn in of only 500.
  Returns a DocModel with all the relevant samples in."""
  # Initialisation stuff - handle params, create the state and the DocModel object, plus a reporter...
  if params==None:
    params = Params()
    params.runs = 1
    params.samples = 1
    params.burnIn = 500

  state = State(doc, params)
  dm = DocModel()
  reporter = ProgReporter(params,callback,model.sampleCount())

  # Iterate and run for each sample in the model...
  for sample in model.sampleList():
    tempState = State(state)
    tempState.setGlobalParams(sample)
    tempState.addPrior(sample)
    gibbs_run(tempState,reporter.next)
    dm.addFrom(tempState.getModel())

  # Return...
  return dm



def leftRightNegLogProbWord(sample, doc, cluster, particles, cap):
  """Does a left to right estimate of the negative log probability of the words in the given document, given a sample, the documents abnormalities and a cluster assignment. cap defines a cap on the number of documents resampled before each word is sampled for inclusion - set to a negative number for no cap, but be warned that the algorithm is then O(n^2) with regard to the number of words in the document. Should be set quite high in practise for a reasonable trade off between quality and run-time."""
  code = start_cpp(shared_code) + """
  // Setup - create the state, extract the document, set its cluster...
  State state;
  StatePyToCpp(stateIn, &state);
  Document & doc = state.doc[0];

  if (cluster>=0)
  {
   // Existing cluster...
   doc.SetCluster(state.clusters.Index(cluster));
  }
  else
  {
   // New cluster...
   ItemRef<Cluster,Conc> * newC = state.clusters.Append();
   newC->Body().alpha = state.rho.alpha;
   newC->Body().beta = state.rho.beta;
   newC->Body().conc = state.rho.conc;

   float * bmn = new float[state.behCount];
   float bmnDiv = 0.0;
   for (int b=0;b<state.behCount;b++)
   {
    bmn[b] = state.phi[b];
    bmnDiv += state.phi[b];
   }
   for (int b=0;b<state.behCount;b++) bmn[b] /= bmnDiv;
   newC->SetBMN(bmn);

   doc.SetCluster(newC);
  }

  // If the cap is negative set it to include all words, otherwise we need some storage...
  int * samIndex = 0;
  if (cap<0) cap = doc.SampleCount();
  else
  {
   samIndex = new int[cap];
  }

  // Create some memory for storing the results into, zeroed out...
  float * samProb = new float[doc.SampleCount()];
  for (int s=0;s<doc.SampleCount();s++) samProb[s] = 0.0;

  // Do all the particles, summing the results into the samProb array...
  for (int p=0;p<particles;p++)
  {
   // Reset the document to have no assignments to words...
   for (int s=0;s<doc.SampleCount();s++)
   {
    doc.GetSample(s).SetDocInst(0);
   }

   // Iterate and factor in the result from each sample...
   for (int s=0;s<doc.SampleCount();s++)
   {
    // Resample preceding samples - 3 scenarios with regards to the cap...
    // (Note that duplication is allowed in the random sample selection - whilst strictly forbidden the situation is such that it can not cause any issues.)
    if (s<=cap)
    {
     // Less or equal number of samples than the cap - do them all...
     for (int s2=0;s2<s;s2++)
     {
      ResampleSample(state, doc, doc.GetSample(s2));
     }
    }
    else
    {
     if (s<=cap*2)
     {
      // Need to miss some samples out, but due to numbers its best to randomly select the ones to miss rather than the ones to do...
      int missCount = s-cap;
      for (int m=0;m<missCount;m++) samIndex[m] = sample_nat(s);
      qsort(samIndex, missCount, sizeof(int), compareInt);

      for (int s2=0;s2<samIndex[0];s2++)
      {
       ResampleSample(state, doc, doc.GetSample(s2));
      }
      for (int m=0;m<missCount-1;m++)
      {
       for (int s2=samIndex[m]+1;s2<samIndex[m+1];s2++)
       {
        ResampleSample(state, doc, doc.GetSample(s2));
       }
      }
      for (int s2=samIndex[missCount-1]+1;s2<s;s2++)
      {
       ResampleSample(state, doc, doc.GetSample(s2));
      }
     }
     else
     {
      // Need to select a subset of samples to do...
      for (int m=0;m<cap;m++) samIndex[m] = sample_nat(s);
      qsort(samIndex, cap, sizeof(int), compareInt);

      for (int m=0;m<cap;m++)
      {
       ResampleSample(state, doc, doc.GetSample(samIndex[m]));
      }
     }
    }

    // Calculate the contribution of this sample, whilst simultaneously filling out so we can make a draw from them...
    float pSum = CalcSampleProb(state, doc, doc.GetSample(s));
    samProb[s] += (pSum - samProb[s]) / (p+1);

    // Draw an assignment for the current sample, ready for the next iteration...
    ResampleSample(state, doc, doc.GetSample(s), pSum);
   }
  }

  // Sumarise the results buffer into a single log probability and return it...
  float ret = 0.0;
  for (int s=0;s<doc.SampleCount();s++) ret += log(samProb[s]);
  return_val = ret;

  // Clean up...
  delete[] samIndex;
  delete[] samProb;
  """

  # Build the state the C++ operates on - just the given document, with the
  # provided model sample folded in as a prior (presumably so the estimate is
  # conditioned on that sample - the same pattern as gibbs_doc above).
  stateIn = State(doc, Params())
  stateIn.setGlobalParams(sample)
  stateIn.addPrior(sample)

  ret = weave.inline(code,['stateIn','cluster','particles','cap'] , support_code=shared_code)

  return -ret # Convert to negative log on the return - before then stick to positive.



class TestShared(unittest.TestCase):
  """Test code for the data structure."""
  def test_compile(self):
    # Just checks the shared c++ support code compiles.
    code = start_cpp(shared_code) + """
    """
    weave.inline(code, support_code=shared_code)



# If this file is run do the unit tests...
if __name__ == '__main__':
  unittest.main()
# Python
# Copyright 2011 Tom SF Haines

# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at

#   http://www.apache.org/licenses/LICENSE-2.0

# Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.



class PriorConcDP(object):
  """Contains the parameters required for the concentration parameter of a DP - specifically its Gamma prior and initial concentration value. (Must inherit from object: under Python 2 property only works on new-style classes - with an old-style class assignment to alpha/beta/conc would silently bypass the setters and their validation.)"""
  def __init__(self, other = None):
    """Copy constructs from other if given, otherwise defaults to a Gamma(1,1) prior with an initial concentration of 16."""
    if other!=None:
      self.__alpha = other.alpha
      self.__beta = other.beta
      self.__conc = other.conc
    else:
      self.__alpha = 1.0
      self.__beta = 1.0
      self.__conc = 16.0

  def getAlpha(self):
    """Getter for alpha."""
    return self.__alpha

  def getBeta(self):
    """Getter for beta."""
    return self.__beta

  def getConc(self):
    """Getter for the initial concentration."""
    return self.__conc

  def setAlpha(self, alpha):
    """Setter for alpha - must be strictly positive."""
    assert(alpha>0.0)
    self.__alpha = alpha

  def setBeta(self, beta):
    """Setter for beta - must be strictly positive."""
    assert(beta>0.0)
    self.__beta = beta

  def setConc(self, conc):
    """Setter for the initial concentration - must be non-negative."""
    assert(conc>=0.0)
    self.__conc = conc

  alpha = property(getAlpha, setAlpha, None, "The alpha parameter of the Gamma prior over the concentration parameter.")
  beta = property(getBeta, setBeta, None, "The beta parameter of the Gamma prior over the concentration parameter.")
  conc = property(getConc, setConc, None, "The starting value of the concentration parameter, to be updated.")
# Python
# Copyright 2011 Tom SF Haines # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. from scipy import weave import unittest from utils.start_cpp import start_cpp from dp_utils.dp_utils import dp_utils_code from smp.smp_cpp import smp_code # Data structure for storing the state of the model, for use with the c++ Gibbs sampling code. A custom structure is used for speed and to keep the code clean... ds_code = dp_utils_code + smp_code + start_cpp() + """ // Details specific for a topic - basically its multinomial and some helper stuff... class Topic { public: Topic():wc(0) {} ~Topic() {delete[] wc;} // Persistant variables... int * wc; // Indexed by word id this contains the count of how many words with that id are assigned to this topic - from the prior the multinomial can hence be worked out. int wcTotal; // Sum of above. int beh; // Index of which behaviour it is from - 0==normal. // Temporary variables... int id; // Used when moving this data structure to and from python. float prob; // Helper for resampling. float probAux; // " }; class Sample; // Predeclaration required for below. class Document; // " // Stuff for the clustering - basically everything that goes into a cluster, including its DP... 
class ClusterInst { public: ClusterInst():topic(0) {} ~ClusterInst() { if (topic) topic->DecRef(); } ItemRef<Topic,Conc> * GetTopic() {return topic;} void SetTopic(ItemRef<Topic,Conc> * nt, bool safe=true) { if (safe&&nt) nt->IncRef(); if (safe&&topic) topic->DecRef(); topic = nt; } int id; // Used when moving this data structure to and from python. float prob; // Helper for resampling. Sample * first; // For a temporary linked list when resampling the topic. protected: ItemRef<Topic,Conc> * topic; }; class Cluster : public ListRef<ClusterInst,Conc> { public: Cluster():bmn(0),behCountPrior(0) {} ~Cluster() {delete[] bmn; delete[] behCountPrior;} float * GetBMN() {return bmn;} void SetBMN(float * newBMN) { delete[] bmn; bmn = newBMN; } int * GetBehCountPrior() {return behCountPrior;} void SetBehCountPrior(int * newBCP) { delete[] behCountPrior; behCountPrior = newBCP; } // Temporarys... int id; // Used when moving this data structure to and from python. float prob; // Helper for resampling. Document * first; // For recalculating the bmn - temporary linked list of using documents. private: // The multinomial from which behaviours are drawn in documents that inherit from this cluster. (It is actually the mean of the distribution over this multinomial)... float * bmn; // Array with an entry for each flag set giving a counter offset as a prior... int * behCountPrior; }; class DocInst { public: DocInst():clusterInst(0) {} ~DocInst() { if (clusterInst) clusterInst->DecRef(); } ItemRef<ClusterInst,Conc> * GetClusterInst() {return clusterInst;} void SetClusterInst(ItemRef<ClusterInst,Conc> * nci, bool safe=true) { if (safe&&nci) nci->IncRef(); if (safe&&clusterInst) clusterInst->DecRef(); clusterInst = nci; } int id; // Used when moving this data structure to and from python. float prob; // Helper for resampling. ItemRef<Topic,Conc> * topic; // Temporary value, used to store the topic whilst disconnected from a cluster inst during the cluster resampling process. 
Sample * first; // For a temporary linked list when resampling the cluster instance. protected: ItemRef<ClusterInst,Conc> * clusterInst; }; class Sample { public: Sample():word(-1),docInst(0) {} ~Sample() { if (docInst) docInst->DecRef(); } int GetWord() {return word;} void SetWord(int w) {word = w;} ItemRef<DocInst,Conc> * GetDocInst() {return docInst;} void SetDocInst(ItemRef<DocInst,Conc> * ndi, bool safe=true) { if (safe&&ndi) { ndi->IncRef(); ItemRef<Topic,Conc> * topic = ndi->GetClusterInst()->GetTopic(); topic->wcTotal += 1; topic->wc[word] += 1; } if (safe&&docInst) { ItemRef<Topic,Conc> * topic = docInst->GetClusterInst()->GetTopic(); topic->wcTotal -= 1; topic->wc[word] -= 1; docInst->DecRef(); } docInst = ndi; } Sample * next; // Used for a temporary linked list whilst resampling higher up the hierachy. protected: int word; ItemRef<DocInst,Conc> * docInst; }; class Document : public ListRef<DocInst,Conc> { public: Document():cluster(0),sampleCount(0),sample(0),behFlags(0),behCounts(0),flagIndex(-1) {} ~Document() { if (cluster) cluster->DecRef(); delete[] behCounts; delete[] behFlags; delete[] sample; } ItemRef<Cluster,Conc> * GetCluster() {return cluster;} void SetCluster(ItemRef<Cluster,Conc> * nc, bool safe=true) { if (safe&&nc) nc->IncRef(); if (safe&&cluster) cluster->DecRef(); cluster = nc; } int SampleCount() {return sampleCount;} Sample & GetSample(int i) {return sample[i];} void SetSamples(int count,Sample * array) // Takes owenership of the given array, must be declared with new[] { sampleCount = count; delete[] sample; sample = array; } unsigned char * GetBehFlags() {return behFlags;} void SetBehFlags(unsigned char * newFlags) // newFlags must be declared with new[] { delete[] behFlags; behFlags = newFlags; } int * GetBehCounts() {return behCounts;} void SetBehCounts(int * newCounts) // newCounts must be declared with new[] { delete[] behCounts; behCounts = newCounts; } int GetFlagIndex() {return flagIndex;} void SetFlagIndex(int newFlagIndex) 
{flagIndex = newFlagIndex;} // Temporary variable, used for a linked list of documents belonging to a cluster... Document * next; protected: ItemRef<Cluster,Conc> * cluster; int sampleCount; Sample * sample; // Declared with new[] unsigned char * behFlags; int * behCounts; int flagIndex; }; // Final State object - represents an entire model... class State { public: State():seperateClusterConc(false), seperateDocumentConc(false), oneCluster(false), calcBeta(false), beta(0), betaSum(0.0), phi(0), docCount(0), doc(0), flagSets(0), tempWord(0) {} ~State() { for (int d=0;d<docCount;d++) { doc[d].SetSamples(0,0); while (doc[d].Size()!=0) doc[d].First()->Suicide(); doc[d].SetCluster(0); } delete[] doc; while (clusters.Size()!=0) { ItemRef<Cluster,Conc> * victim = clusters.First(); while (victim->Size()!=0) victim->First()->Suicide(); victim->Suicide(); } while (topics.Size()!=0) topics.First()->Suicide(); while (behCluInsts.Size()!=0) behCluInsts.First()->Suicide(); while (behTopics.Size()!=0) behTopics.First()->Suicide(); delete[] phi; delete[] beta; Py_XDECREF(flagSets); delete[] tempWord; } // Algorithm behavioural flags, indicate if concentration parameters for clusters and documents are shared or calculated on a per entity basis, and if we should fix it to a single cluster to acheive HDP-like behaviour... bool dnrDocInsts; bool dnrCluInsts; bool seperateClusterConc; bool seperateDocumentConc; bool oneCluster; bool calcBeta; bool calcCluBmn; bool calcPhi; bool resampleConcs; // Parameters - only need these ones as most can be stored where they are needed... // (new[] used for beta and phi) float * beta; float betaSum; Conc rho; // Needed for new clusters. float * phi; int wordCount; // Number of unique word types. int behCount; // Number of behaviours - 1 + # of abnormalities. int behSamples; // Samples used when integrating for the per-cluster bmn. // Basic DP that provides topics, contains multinomial distributions etc... 
ListRef<Topic,Conc> topics; // DDP that provides clusters - you draw DP's from this... ListRef<Cluster,Conc> clusters; // List containing the topics for each behaviour. Note that behaviour 0 is normal and comes from elsewhere, so entry 0 is basically a dummy. Done in this slightly crazy way to avoid having crazyness in the document instances, i.e. so the same pointer type is used for normal and abnormal... ListRef<Topic,Conc> behTopics; ListRef<ClusterInst,Conc> behCluInsts; // All the documents... int docCount; Document * doc; // Declared with new[] // A little bit of python - pass through of the flagSets provided by a python-side FlagIndexArray object - the documents then index this array so estimation of the per-cluster multinomials on behaviour may be estimated - note that it will be freeded on the destruction of this object... PyArrayObject * flagSets; // A temporary array of ints, with the same number of entrys as there are words - used to optimise some loops... int * tempWord; }; // Goes through the given State object and verifies that the ref counts match the number of references - for debugging. (Obviously no good if there is a prior.) printf's out any errors... void VerifyState(State & state) { // Verify topic counts... 
int * counts = new int[state.topics.Size()]; { ItemRef<Topic,Conc> * targ = state.topics.First(); int id = 0; while (targ->Valid()) { targ->id = id; counts[id] = 0; id += 1; targ = targ->Next(); } if (id!=state.topics.Size()) printf("Size of topics is incorrect\\n"); } { ItemRef<Cluster,Conc> * targ = state.clusters.First(); while (targ->Valid()) { ItemRef<ClusterInst,Conc> * targ2 = targ->First(); while (targ2->Valid()) { if (targ2->GetTopic()) counts[targ2->GetTopic()->id] += 1; targ2 = targ2->Next(); } targ = targ->Next(); } } { ItemRef<Topic,Conc> * targ = state.topics.First(); int total = 0; while (targ->Valid()) { total += targ->RefCount(); if (counts[targ->id]!=targ->RefCount()) { printf("Topic %i has the wrong refcount\\n",targ->id); } targ = targ->Next(); } if (total!=state.topics.RefTotal()) printf("Topics ref-total is incorrect\\n"); } delete[] counts; // Verify cluster counts... counts = new int[state.clusters.Size()]; { ItemRef<Cluster,Conc> * targ = state.clusters.First(); int id = 0; while (targ->Valid()) { targ->id = id; counts[id] = 0; id += 1; targ = targ->Next(); } if (id!=state.clusters.Size()) printf("Size of clusters is incorrect\\n"); } for (int d=0;d<state.docCount;d++) { if (state.doc[d].GetCluster()) { counts[state.doc[d].GetCluster()->id] += 1; } } { ItemRef<Cluster,Conc> * targ = state.clusters.First(); int total = 0; while (targ->Valid()) { total += targ->RefCount(); if (counts[targ->id]!=targ->RefCount()) { printf("Cluster %i has the wrong refcount\\n",targ->id); } targ = targ->Next(); } if (total!=state.clusters.RefTotal()) printf("Clusters ref-total is incorrect\\n"); } delete[] counts; // Verify cluster instance counts... 
int cluInstSum = 0; { ItemRef<Cluster,Conc> * targ = state.clusters.First(); while (targ->Valid()) { cluInstSum += targ->Size(); targ = targ->Next(); } } counts = new int[cluInstSum]; { ItemRef<Cluster,Conc> * targ = state.clusters.First(); int id = 0; while (targ->Valid()) { ItemRef<ClusterInst,Conc> * targ2 = targ->First(); int startId = id; while (targ2->Valid()) { targ2->id = id; counts[id] = 0; id += 1; targ2 = targ2->Next(); } if ((id-startId)!=targ->Size()) printf("Size of cluster instance %i is incorrect\\n",targ->id); targ = targ->Next(); } } for (int d=0;d<state.docCount;d++) { ItemRef<DocInst,Conc> * targ = state.doc[d].First(); while (targ->Valid()) { if (targ->GetClusterInst()) { counts[targ->GetClusterInst()->id] += 1; } targ = targ->Next(); } } { ItemRef<Cluster,Conc> * targ = state.clusters.First(); while (targ->Valid()) { int total = 0; ItemRef<ClusterInst,Conc> * targ2 = targ->First(); while (targ2->Valid()) { total += targ2->RefCount(); if (targ2->RefCount()!=counts[targ2->id]) { printf("Cluster instance %i of cluster %i has a bad refcount\\n",targ2->id,targ->id); } targ2 = targ2->Next(); } if (total!=targ->RefTotal()) printf("Cluster instance %i has a bad ref total\\n",targ->id); targ = targ->Next(); } } delete[] counts; // Verify document instance counts... 
 for (int d=0;d<state.docCount;d++)
 {
  counts = new int[state.doc[d].Size()];
  {
   ItemRef<DocInst,Conc> * targ = state.doc[d].First();
   int id = 0;
   while (targ->Valid())
   {
    targ->id = id;
    counts[id] = 0;
    id += 1;
    targ = targ->Next();
   }
   if (id!=state.doc[d].Size()) printf("Doc %i has an invalid size\\n",d);
  }

  for (int s=0;s<state.doc[d].SampleCount();s++)
  {
   Sample & sam = state.doc[d].GetSample(s);
   if (sam.GetDocInst())
   {
    counts[sam.GetDocInst()->id] += 1;
   }
  }

  {
   ItemRef<DocInst,Conc> * targ = state.doc[d].First();
   int total = 0;
   while (targ->Valid())
   {
    total += targ->RefCount();
    if (targ->RefCount()!=counts[targ->id])
    {
     printf("Document %i, instance %i has a bad ref count\\n",d,targ->id);
    }
    targ = targ->Next();
   }
   if (total!=state.doc[d].RefTotal()) printf("Doc %i has an invalid ref total\\n",d);
  }

  delete[] counts;
 }
}
"""



class TestDS(unittest.TestCase):
  """Test code for the data structure - checks that the C++ code in dual_hdp_ds is at least well formed."""

  def test_compile(self):
    """Compiles the C++ data structure code by inlining a trivial usage of it via weave - presumably scipy.weave, imported above this view; a compile failure raises from weave.inline."""
    code = start_cpp(dual_hdp_ds) + """
    State state;
    """
    weave.inline(code, support_code=dual_hdp_ds)



# If this file is run do the unit tests...
if __name__ == '__main__':
  unittest.main()
Python
# Copyright 2011 Tom SF Haines

# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.

# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.

# You should have received a copy of the GNU General Public License
# along with this program.  If not, see <http://www.gnu.org/licenses/>.



# Loads solvers....

# Load the most basic solver, but also load a mp one if possible...
try:
  from solve_weave import gibbs_all, gibbs_doc, leftRightNegLogProbWord
  __fitter = 'weave'
except ImportError:
  # Narrowed from a bare except - only a failed import should be reported this way; any other error inside solve_weave now propagates with its real traceback.
  raise Exception('Could not load basic weave solver')

try:
  from solve_weave_mp import gibbs_all_mp, gibbs_doc_mp
  __fitter = 'multiprocess weave'
except ImportError:
  # The multiprocess solver is optional - silently fall back to the basic one. (Narrowed from a bare except so genuine bugs in solve_weave_mp are not swallowed.)
  pass



# Function to get the best fitter available...
def getAlgorithm():
  """Returns a text string indicating which implementation of the fitting algorithm is being used by default, which will be the best available."""
  return __fitter
Python
# Copyright 2011 Tom SF Haines # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # This loads in the entire library and provides the interface - only import needed by a user... # Load in the solvers (Done fist to avoid include loop issues.)... from solvers import * # Load in all the data structure types... from params import Params from solve_shared import State from model import Model, Sample, DocSample, DocModel from dp_conc import PriorConcDP from corpus import Corpus from document import Document
Python
# Copyright 2011 Tom SF Haines # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. import math import numpy import scipy.special import collections import solvers from smp.smp import FlagIndexArray from utils.mp_map import * class DocSample: """Stores the sample information for a given document - the DP from which topics are drawn and which cluster it is a member of. Also calculates and stores the negative log liklihood of the document.""" def __init__(self, doc): """Given the specific DocState object this copies the relevant information. Note that it doesn't calculate the nll - another method will do that. It also supports cloning.""" if isinstance(doc, DocSample): # Code for clonning self.cluster = doc.cluster self.dp = doc.dp.copy() self.conc = doc.conc self.samples = doc.samples.copy() self.behFlags = doc.behFlags.copy() self.nll = doc.nll self.ident = doc.ident else: # Extract the model information... self.cluster = doc.cluster self.dp = doc.use.copy() self.conc = doc.conc self.samples = doc.samples.copy() self.behFlags = doc.behFlags.copy() self.nll = 0.0 self.ident = doc.ident def calcNLL(self, doc, state): """Calculates the negative log likelihood of the document, given the relevant information. This is the DocState object again, but this time with the entire state object as well. Probability (Expressed as negative log likelihood.) 
is specificly calculated using all terms that contain a variable in the document, but none that would be identical for all documents. That is, it contains the probability of the cluster, the probability of the dp given the cluster, and the probability of the samples, which factors in both the drawing of the topic and the drawing of the word. The ordering of the samples is considered irrelevant, with both the topic and word defining uniqueness. Some subtle approximation is made - see if you can spot it in the code!""" self.nll = 0.0 # Probability of drawing the cluster... self.nll -= math.log(state.clusterUse[doc.cluster]) self.nll += math.log(state.clusterUse.sum()+state.clusterConc) # Probability of drawing the documents dp from its cluster, taking into account the abnormal entrys... cl = state.cluster[doc.cluster] logBMN = numpy.log(cl[2] / (cl[2]*numpy.asfarray(doc.behFlags)).sum()) behInstCounts = numpy.zeros(doc.behFlags.shape[0], dtype=numpy.int32) instCounts = numpy.zeros(cl[0].shape[0], dtype=numpy.int32) for ii in xrange(doc.use.shape[0]): behInstCounts[doc.use[ii,0]] += 1 if doc.use[ii,0]==0: instCounts[doc.use[ii,1]] += 1 self.nll -= (logBMN * behInstCounts).sum() self.nll -= scipy.special.gammaln(behInstCounts.sum() + 1.0) self.nll += scipy.special.gammaln(behInstCounts + 1.0).sum() norm = cl[0][:,1].sum() + cl[1] self.nll -= (numpy.log(numpy.asfarray(cl[0][:,1])/norm)*instCounts).sum() self.nll -= scipy.special.gammaln(instCounts.sum() + 1.0) # Cancels with a term from the above - can optimise, but would rather have neat code. self.nll += scipy.special.gammaln(instCounts + 1.0).sum() # Count the numbers of word/topic instance pairs in the data structure - sum using a dictionary... 
samp_count = collections.defaultdict(int) # [instance,word] for s in xrange(doc.samples.shape[0]): samp_count[doc.samples[s,0],doc.samples[s,1]] += 1 # Calculate the probability distribution of drawing each topic instance and the probability of drawing each word/topic assignment... inst = numpy.asfarray(doc.use[:,2]) inst /= inst.sum() + doc.conc topicWord = numpy.asfarray(state.topicWord) + state.beta topicWord = (topicWord.T/topicWord.sum(axis=1)).T abnormTopicWord = numpy.asfarray(state.abnormTopicWord) + state.beta abnormTopicWord = (abnormTopicWord.T/abnormTopicWord.sum(axis=1)).T instLog = numpy.log(inst) wordLog = numpy.log(topicWord) abnormLog = numpy.log(abnormTopicWord) # Now sum into nll the probability of drawing the samples that have been drawn - gets a tad complex as includes the probability of drawing the topic from the documents dp and then the probability of drawing the word from the topic, except I've merged them such that it doesn't look like that is what is happening... 
self.nll -= scipy.special.gammaln(doc.samples.shape[0]+1.0) for pair, count in samp_count.iteritems(): inst, word = pair beh = doc.use[inst,0] if beh==0: topic = cl[0][doc.use[inst,1],0] self.nll -= count * (wordLog[topic,word] + instLog[inst]) else: self.nll -= count * (abnormLog[beh,word] + instLog[inst]) self.nll += scipy.special.gammaln(count+1.0) def getCluster(self): """Returns the sampled cluster assignment.""" return self.cluster def getInstCount(self): """Returns the number of cluster instances in the documents model.""" return self.dp.shape[0] def getInstBeh(self, i): """Returns the behaviour index for the given instance.""" return self.dp[i,0] def getInstTopic(self, i): """Returns the topic index for the given instance.""" return self.dp[i,1] def getInstWeight(self, i): """Returns the number of samples that have been assigned to the given topic instance.""" return self.dp[i,2] def getInstAll(self): """Returns a 2D numpy array of integers where the first dimension indexes the topic instances for the document and the the second dimension has three entries, the first (0) the behaviour index, the second (1) the topic index and the third (2) the number of samples assigned to the given topic instance. Do not edit the return value for this method - copy it first.""" return self.dp def getInstConc(self): """Returns the sampled concentration parameter.""" return self.conc def getBehFlags(self): """Returns the behavioural flags - a 1D array of {0,1} as type unsigned char where 1 indicates that it has the behaviour with that index, 0 that it does not. Entry 0 will map to normal behaviour, and will always be 1. 
Do not edit - copy first.""" return self.behFlags def getNLL(self): """Returns the negative log liklihood of the document given the model.""" return self.nll def getIdent(self): """Returns the ident of the document, as passed through from the input document so they may be matched up.""" return self.ident class Sample: """Stores a single sample drawn from the model - the topics, clusters and each document being sampled over. Stores counts and parameters required to make them into distributions, rather than final distributions. Has clonning capability.""" def __init__(self, state, calcNLL = True, priorsOnly = False): """Given a state this draws a sample from it, as a specific parametrisation of the model. Also a copy constructor, with a slight modification - if the priorsOnly flag is set it will only copy across the priors, and initialise to an empty model.""" if isinstance(state, Sample): # Code for clonning. self.alpha = state.alpha self.beta = state.beta.copy() self.gamma = state.gamma self.rho = state.rho self.mu = state.mu self.phi = state.phi.copy() if not priorsOnly: self.topicWord = state.topicWord.copy() self.topicUse = state.topicUse.copy() else: self.topicWord = numpy.zeros((0,state.topicWord.shape[1]), dtype=numpy.int32) self.topicUse = numpy.zeros(0,dtype=numpy.int32) self.topicConc = state.topicConc self.abnormTopicWord = state.abnormTopicWord.copy() self.abnorms = dict(state.abnorms) self.fia = FlagIndexArray(state.fia) if not priorsOnly: self.cluster = map(lambda t: (t[0].copy(),t[1],t[2].copy(),t[3].copy()), state.cluster) self.clusterUse = state.clusterUse.copy() else: self.cluster = [] self.clusterUse = numpy.zeros(0,dtype=numpy.int32) self.clusterConc = state.clusterConc if not priorsOnly: self.doc = map(lambda ds: DocSample(ds), state.doc) else: self.doc = [] else: # Normal initialisation code. 
self.alpha = state.alpha self.beta = state.beta.copy() self.gamma = state.gamma self.rho = state.rho self.mu = state.mu self.phi = state.phi.copy() # Topic stuff... self.topicWord = state.topicWord.copy() self.topicUse = state.topicUse.copy() self.topicConc = state.topicConc # Abnormality stuff... self.abnormTopicWord = state.abnormTopicWord.copy() self.abnorms = dict(state.abnorms) self.fia = FlagIndexArray(state.fia) # Cluster stuff... self.cluster = map(lambda t: (t[0].copy(),t[1],t[2].copy(),t[3].copy()), state.cluster) self.clusterUse = state.clusterUse.copy() self.clusterConc = state.clusterConc # The details for each document... self.doc = [] for d in xrange(len(state.doc)): self.doc.append(DocSample(state.doc[d])) # Second pass through documents to fill in the negative log liklihoods - need some data structures for this... if calcNLL: for d in xrange(len(state.doc)): self.doc[d].calcNLL(state.doc[d],state) def merge(self, other): """Given a sample this merges it into this sample. Works under the assumption that the new sample was learnt with this sample as its only prior, and ends up as though both the prior and the sample were drawn whilst simultaneously being modeled. Trashes the given sample - do not continue to use.""" # Update the old documents - there are potentially more behaviours in the new sample, which means adjusting the behaviour flags... if self.fia.getLength()!=other.fia.getLength(): for doc in self.doc: newBehFlags = numpy.zeros(other.fia.getLength(), dtype=numpy.uint8) newBehFlags[0] = doc.behFlags[0] for abnorm, index in self.abnorms: newIndex = other.abnorms[abnorm] newBehFlags[newIndex] = doc.behFlags[index] doc.behFlags = newBehFlags # Replace the basic parameters... 
self.alpha = other.alpha self.beta = other.beta self.gamma = other.gamma self.rho = other.rho self.mu = other.mu self.phi = other.phi self.topicWord = other.topicWord self.topicUse = other.topicUse self.topicConc = other.topicConc self.abnormTopicWord = other.abnormTopicWord self.abnorms = other.abnorms self.fia = other.fia self.cluster = other.cluster self.clusterUse = other.clusterUse self.clusterConc = other.clusterConc # Add in the (presumably) new documents... for doc in other.doc: self.doc.append(doc) def getAlphaPrior(self): """Returns the PriorConcDP that was used for the alpha parameter, which is the concentration parameter for the DP in each document.""" return self.alpha def getBeta(self): """Returns the beta prior, which is a vector representing a Dirichlet distribution from which the multinomials for each topic are drawn, from which words are drawn.""" return self.beta def getGammaPrior(self): """Returns the PriorConcDP that was used for the gamma parameter, which is the concentration parameter for the global DP from which topics are drawn.""" return self.gamma def getRhoPrior(self): """Returns the PriorConcDP that was used for the rho parameter, which is the concentration parameter for each specific clusters DP.""" return self.rho def getMuPrior(self): """Returns the PriorConcDP that was used for the mu parameter, which is the concentration parameter for the DP from which clusters are drawn.""" return self.mu def getPhi(self): """Returns the phi Dirichlet distribution prior on the behavioural multinomial for each cluster.""" return self.phi def getTopicCount(self): """Returns the number of topics in the sample.""" return self.topicWord.shape[0] def getWordCount(self): """Returns the number of words in the topic multinomial.""" return self.topicWord.shape[1] def getTopicUseWeight(self, t): """Returns how many times the given topic has been instanced in a cluster.""" return self.topicUse[t] def getTopicUseWeights(self): """Returns an array, indexed by 
topic id, that contains how many times each topic has been instanciated in a cluster. Do not edit the return value - copy it first.""" return self.topicUse def getTopicConc(self): """Returns the sampled concentration parameter for drawing topic instances from the global DP.""" return self.topicConc def getTopicWordCount(self, t): """Returns the number of samples assigned to each word for the given topic, as an integer numpy array. Do not edit the return value - make a copy first.""" return self.topicWord[t,:] def getTopicWordCounts(self, t): """Returns the number of samples assigned to each word for all topics, indexed [topic, word], as an integer numpy array. Do not edit the return value - make a copy first.""" return self.topicWord def getTopicMultinomial(self, t): """Returns the calculated multinomial for a given topic ident.""" ret = self.beta.copy() ret += self.topicWord[t,:] ret /= ret.sum() return ret def getTopicMultinomials(self): """Returns the multinomials for all topics, in a single array - indexed by [topic, word] to give P(word|topic).""" ret = numpy.vstack([self.beta]*self.topicWord.shape[0]) ret += self.topicWord ret = (ret.T / ret.sum(axis=1)).T return ret def getBehCount(self): """Returns the number of behaviours, which is the number of abnormalities plus 1, and the entry count for the indexing variable for abnormals in the relevant methods.""" return self.abnormTopicWord.shape[0] def getAbnormWordCount(self, b): """Returns the number of samples assigned to each word for the given abnormal topic. Note that entry 0 equates to normal behaviour and is a dummy that should be ignored.""" return self.abnormTopicWord[b,:] def getAbnormWordCounts(self): """Returns the number of samples assigned to each word in each abnormal behaviour. An integer 2D array indexed with [behaviour, word], noting that behaviour 0 is a dummy for normal behaviour. 
Do not edit the return value - make a copy first.""" return self.abnormTopicWord def getAbnormMultinomial(self, b): """Returns the calculated multinomial for a given abnormal behaviour.""" ret = self.beta.copy() ret += self.abnormTopicWord[b,:] ret /= ret.sum() return ret def getAbnormMultinomials(self): """Returns the multinomials for all abnormalities, in a single array - indexed by [behaviour, word] to give P(word|topic associated with behaviour). Entry 0 is a dummy to fill in for normal behaviour, and should be ignored.""" ret = numpy.vstack([self.beta]*self.abnormTopicWord.shape[0]) ret += self.abnormTopicWord ret = (ret.T / ret.sum(axis=1)).T return ret def getAbnormDict(self): """Returns a dictionary that takes each abnormalities user provided token to the behaviour index used for it. Allows the use of the getAbnorm* methods, amung other things.""" return self.abnorms def getClusterCount(self): """Returns how many clusters there are.""" return len(self.cluster) def getClusterDrawWeight(self, c): """Returns how many times the given cluster has been instanced by a document.""" return self.clusterUse[c] def getClusterDrawWeights(self): """Returns an array, indexed by cluster id, that contains how many times each cluster has been instanciated by a document. 
Do not edit the return value - copy it first.""" return self.clusterUse def getClusterDrawConc(self): """Returns the sampled concentration parameter for drawing cluster instances for documents.""" return self.clusterConc def getClusterInstCount(self, c): """Returns how many instances of topics exist in the given cluster.""" return self.cluster[c][0].shape[0] def getClusterInstWeight(self, c, ti): """Returns how many times the given cluster topic instance has been instanced by a documents DP.""" return self.cluster[c][0][ti,1] def getClusterInstTopic(self, c, ti): """Returns which topic the given cluster topic instance is an instance of.""" return self.cluster[c][0][ti,0] def getClusterInstDual(self, c): """Returns a 2D array, where the first dimension is indexed by the topic instance, and the second contains two columns - the first the topic index, the second the weight. Do not edit return value - copy before use.""" return self.cluster[c][0] def getClusterInstConc(self, c): """Returns the sampled concentration that goes with the DP from which the members of each documents DP are drawn.""" return self.cluster[c][1] def getClusterInstBehMN(self, c): """Returns the multinomial on drawing behaviours for the given cluster.""" return self.cluster[c][2] def getClusterInstPriorBehMN(self, c): """Returns the prior on the behaviour multinomial, as an array of integer counts aligned with the flag set.""" return self.cluster[c][3] def docCount(self): """Returns the number of documents stored within. Should be the same as the corpus from which the sample was drawn.""" return len(self.doc) def getDoc(self,d): """Given a document index this returns the appropriate DocSample object. 
These indices should align up with the document indices in the Corpus from which this Sample was drawn, assuming no documents have been deleted.""" return self.doc[d] def delDoc(self, ident): """Given a document ident this finds the document with the ident and removes it from the model, completly - i.e. all the variables in the sample are also updated. Primarilly used to remove documents for resampling prior to using the model as a prior. Note that this can potentially leave entities with no users - they get culled when the model is loaded into the C++ data structure so as to not cause problems.""" # Find and remove it from the document list... index = None for i in xrange(len(self.doc)): if self.doc[i].getIdent()==ident: index = i break if index==None: return victim = self.doc[index] self.doc = self.doc[:index] + self.doc[index+1:] # Update all the variables left behind by subtracting the relevant terms... cluster = self.cluster[victim.cluster] self.clusterUse[victim.cluster] -= 1 ## First pass through the dp and remove its influence; at the same time note the arrays that need to be updated by each user when looping through... dp_ext = [] for i in xrange(victim.dp.shape[0]): beh = victim.dp[i,0] #count = victim.dp[i,2] if beh==0: # Normal behaviour cluInst = victim.dp[i,1] # Update the instance, and topic use counts if necessary... topic = cluster[0][cluInst,0] cluster[0][cluInst,1] -= 1 if cluster[0][cluInst,1]==0: self.topicUse[topic] -= 1 # Store the entity that needs updating in correspondence with this dp instance in the next step... dp_ext.append((self.topicWord, topic)) else: # Abnormal behaviour. # Store the entity that needs updating in correspondence with the dp... dp_ext.append((self.abnormTopicWord, beh)) ## Go through the samples array and remove their influnce - the hard part was done by the preceding step... 
for si in xrange(victim.samples.shape[0]): inst = victim.samples[si,0] word = victim.samples[si,1] mat, topic = dp_ext[inst] mat[topic,word] -= 1 # Clean up all zeroed items... self.cleanZeros() def cleanZeros(self): """Goes through and removes anything that has a zero reference count, adjusting all indices accordingly.""" # Remove the zeros from this object, noting the changes... ## Topics... newTopicCount = 0 topicMap = dict() for t in xrange(self.topicUse.shape[0]): if self.topicUse[t]!=0: topicMap[t] = newTopicCount newTopicCount += 1 if newTopicCount!=self.topicUse.shape[0]: newTopicWord = numpy.zeros((newTopicCount, self.topicWord.shape[1]), dtype=numpy.int32) newTopicUse = numpy.zeros(newTopicCount,dtype=numpy.int32) for origin, dest in topicMap.iteritems(): newTopicWord[dest,:] = self.topicWord[origin,:] newTopicUse[dest] = self.topicUse[origin] self.topicWord = newTopicWord self.topicUse = newTopicUse ## Clusters... newClusterCount = 0 clusterMap = dict() for c in xrange(self.clusterUse.shape[0]): if self.clusterUse[c]!=0: clusterMap[c] = newClusterCount newClusterCount += 1 if newClusterCount!=self.clusterUse.shape[0]: newCluster = [None]*newClusterCount newClusterUse = numpy.zeros(newClusterCount, dtype=numpy.int32) for origin, dest in clusterMap.iteritems(): newCluster[dest] = self.cluster[origin] newClusterUse[dest] = self.clusterUse[origin] self.cluster = newCluster self.clusterUse = newClusterUse ## Cluster instances... # (Change is noted by a 2-tuple of (new length, dict) where new length is the new length and dict goes from old indices to new indices.) 
cluInstAdj = [] for ci in xrange(len(self.cluster)): newInstCount = 0 instMap = dict() for i in xrange(self.cluster[ci][0].shape[0]): if self.cluster[ci][0][i,1]!=0: instMap[i] = newInstCount newInstCount += 1 cluInstAdj.append((newInstCount, instMap)) if newInstCount!=self.cluster[ci][0].shape[0]: newInst = numpy.zeros((newInstCount,2), dtype=numpy.int32) for origin, dest in instMap.iteritems(): newInst[dest,:] = self.cluster[ci][0][origin,:] self.cluster[ci] = (newInst, self.cluster[ci][1], self.cluster[ci][2], self.cluster[ci][3]) # Iterate and update the topic indices of the cluster instances... for ci in xrange(len(self.cluster)): for i in xrange(self.cluster[ci][0].shape[0]): self.cluster[ci][0][i,0] = topicMap[self.cluster[ci][0][i,0]] # Now iterate the documents and update their cluster and cluster instance indices... for doc in self.doc: doc.cluster = clusterMap[doc.cluster] _, instMap = cluInstAdj[doc.cluster] for di in xrange(doc.dp.shape[0]): if doc.dp[di,0]==0: doc.dp[di,1] = instMap[doc.dp[di,1]] def nllAllDocs(self): """Returns the negative log likelihood of all the documents in the sample - a reasonable value to compare various samples with.""" return sum(map(lambda d: d.getNLL(),self.doc)) def logNegProbWordsGivenClusterAbnorm(self, doc, cluster, particles = 16, cap = -1): """Uses wallach's 'left to right' method to calculate the negative log probability of the words in the document given the rest of the model. Both the cluster (provided as an index) and the documents abnormalities vector are fixed for this calculation. Returns the average of the results for each sample contained within model. particles is the number of particles to use in the left to right estimation algorithm. 
This is implimented using scipy.weave.""" return solvers.leftRightNegLogProbWord(self, doc, cluster, particles, cap) def logNegProbWordsGivenAbnorm(self, doc, particles = 16, cap = -1): """Uses logNegProbWordsGivenClusterAbnorm and simply sums out the cluster variable.""" # Get the probability of each with the dependence with clusters... cluScores = map(lambda c: solvers.leftRightNegLogProbWord(self, doc, c, particles, cap), xrange(self.getClusterCount())) # Multiply each by the probability of the cluster, so it can be summed out... cluNorm = float(self.clusterUse.sum()) + self.clusterConc cluScores = map(lambda c,s: s - math.log(float(self.clusterUse[c])/cluNorm), xrange(len(cluScores)), cluScores) # Also need to include the probability of a new cluster, even though it is likelly to be a neglible contribution... newVal = solvers.leftRightNegLogProbWord(self, doc, -1, particles, cap) newVal -= math.log(self.clusterConc/cluNorm) cluScores.append(newVal) # Sum out the cluster variable, in a numerically stable way given that we are dealing with negative log likelihood values that will map to extremelly low probabilities... minScore = min(cluScores) cluPropProb = map(lambda s: math.exp(minScore-s), cluScores) return minScore - math.log(sum(cluPropProb)) class Model: """Simply contains a list of samples taken from the state during Gibbs iterations. 
Has clonning capability.""" def __init__(self, obj=None, priorsOnly = False): """If provided with a Model will clone it.""" self.sample = [] if isinstance(obj, Model): for sample in obj.sample: self.sample.append(Sample(sample, priorsOnly)) def add(self, sample): """Adds a sample to the model.""" self.sample.append(sample) def sampleState(self, state): """Samples the state, storing the sampled model within.""" self.sample.append(Sample(state)) def absorbModel(self, model): """Given another model this absorbs all its samples, leaving then given model baren.""" self.sample += model.sample model.sample = [] def sampleCount(self): """Returns the number of samples.""" return len(self.sample) def getSample(self, s): """Returns the sample associated with the given index.""" return self.sample[s] def sampleList(self): """Returns a list of samples, for iterating.""" return self.sample def delDoc(self, ident): """Calls the delDoc method for the given ident on all samples contained within.""" for sample in self.sample: sample.delDoc(ident) def bestSampleOnly(self): """Calculates the document nll for each sample and prunes all but the one with the highest - very simple way of 'merging' multiple samples together.""" score = map(lambda s: s.nllAllDocs(),self.sample) best = 0 for i in xrange(1,len(self.sample)): if score[i]>score[best]: best = i self.sample = [self.sample[best]] def fitDoc(self, doc, params = None, callback=None, mp = True): """Given a document this returns a DocModel calculated by Gibbs sampling the document with the samples in the model as priors. Returns a DocModel. Note that it samples using params for *each* sample in the Model, so you typically want to use less than the defaults in Params, typically only a single run and sample, which is the default. 
    mp can be set to False to force it to avoid multi-processing behaviour"""
    # Dispatch to the multiprocess solver only when it exists and there is more than one sample to spread the work over...
    if mp and len(self.sample)>1 and hasattr(solvers,'gibbs_doc_mp'):
      return solvers.gibbs_doc_mp(self, doc, params, callback)
    else:
      return solvers.gibbs_doc(self, doc, params, callback)

  def logNegProbWordsGivenAbnorm(self, doc, particles = 16, cap = -1, mp = True):
    """Calls the function of the same name for each sample and returns the average of the various return values. The average is taken in probability space, i.e. this returns -log(mean_s(exp(-nll_s))) over the per-sample negative log-likelihoods nll_s."""
    # Collect one negative log-likelihood per contained sample, optionally fanning out over processes (repeat(...) feeds the constant arguments to mp_map)...
    if mp and len(self.sample)>1:
      sampNLL = mp_map(lambda s,d,p,c: s.logNegProbWordsGivenAbnorm(d,p,c), self.sample, repeat(doc), repeat(particles), repeat(cap))
    else:
      sampNLL = map(lambda s: s.logNegProbWordsGivenAbnorm(doc,particles,cap), self.sample)

    # Incrementally fold the per-sample values into a running mean taken in probability space - after n terms ret == -log((1/n) * sum_i exp(-nll_i)). Doing it incrementally in log space avoids ever exponentiating a large negative log-likelihood directly.
    # NOTE(review): exp(ret-nll) can still overflow if one sample is vastly less likely than the running mean - presumably not an issue for the data this sees; verify if nll values can differ by >~700.
    ret = 0.0 # Negative log prob
    retCount = 0.0
    for nll in sampNLL:
      retCount += 1.0
      if retCount < 1.5:
        ret = nll # First term - the mean of one value is that value.
      else:
        ret -= math.log(1.0 + (math.exp(ret-nll) - 1.0)/retCount)
    return ret

  def logNegProbAbnormGivenWords(self, doc, epsilon = 0.1, particles = 16, cap = -1):
    """Returns the probability of the documents current abnormality flags - uses Bayes rule on logNegProbWordsGivenAbnorm. Does not attempt to calculate the normalising constant, so everything is with proportionality - you can compare flags for a document, but can't compare between different documents. You actually provide epsilon to the function, as its not calculated anywhere. You can either provide a number, in which case that is the probability of each abnormality, or you can provide a numpy vector of probabilities, noting that the first entry must correspond to normal and be set to 1.0"""
    # Handle the conveniance input of providing a single floating point value for epsilon rather than a numpy array...
    if not isinstance(epsilon,numpy.ndarray):
      # Assume its a floating point number and build epsilon as an array...
      value = epsilon
      epsilon = numpy.ones(self.sample[0].phi.shape[0], dtype=numpy.float32)
      epsilon *= value
      epsilon[0] = 1.0 # Entry 0 is 'normal' and always present.

    # Generate flags for the document... (entry 0 = normal, always set; remaining entries set when the document carries that abnormality)
    flags = numpy.zeros(self.sample[0].phi.shape[0], dtype=numpy.uint8)
    flags[0] = 1
    for abnorm in doc.getAbnorms():
      flags[self.sample[0].abnorms[abnorm]] = 1

    # Apply Bayes - hardly hard!.. (likelihood term plus an independent Bernoulli prior per abnormality, skipping the 'normal' entry)
    ret = self.logNegProbWordsGivenAbnorm(doc, particles, cap)
    for i in xrange(1,epsilon.shape[0]):
      if flags[i]!=0: ret -= math.log(epsilon[i])
      else: ret -= math.log(1.0-epsilon[i])
    return ret

  def mlDocAbnorm(self, doc, lone = False, epsilon = 0.1, particles = 16, cap = -1):
    """Decides which abnormalities most likelly exist in the document, using the logNegProbAbnormGivenWords method. Returns the list of abnormalities that are most likelly to exist. It does a greedy search of the state space - by default it considers all states, but setting the lone flag to true it will only consider states with one abnormality. Note that this mutates doc's abnormality flags via setAbnorms while searching; on return they are left set to the returned best list."""
    # A dictionary that contains True to indicate that the indexed tuple of abnormalities has already been tried (And, effectivly, rejected.)...
    tried = dict()

    # Starting state - no abnormalities at all...
    best = []
    doc.setAbnorms(best)
    bestNLL = self.logNegProbAbnormGivenWords(doc, epsilon, particles, cap)
    tried[tuple(best)] = True

    # Iterate until no change - each pass greedily toggles each single abnormality on/off relative to the current best state and keeps the best scoring neighbour...
    while True:
      newBest = best
      newBestNLL = bestNLL

      for abnorm in self.sample[0].abnorms.iterkeys():
        # Build the neighbour state: remove abnorm if present, otherwise add it...
        test = best[:]
        if abnorm in test: test = filter(lambda a:a!=abnorm, test)
        else: test.append(abnorm)
        if lone and len(test)>1: continue
        doc.setAbnorms(test)
        if tuple(test) not in tried:
          tried[tuple(test)] = True
          # NOTE(review): setAbnorms(test) was already called just above this if - the duplicate call looks redundant; confirm before removing.
          doc.setAbnorms(test)
          testNLL = self.logNegProbAbnormGivenWords(doc, epsilon, particles, cap)
          if testNLL<newBestNLL:
            newBest = test
            newBestNLL = testNLL

      if best==newBest:
        # Converged - restore the winning flags on the document and return...
        doc.setAbnorms(best)
        return best
      else:
        best = newBest
        bestNLL = newBestNLL



class DocModel:
  """A Model that just contains DocSample-s for a single document. Obviously incomplete without a full Model, this is typically used when sampling a document relative to an already trained Model, such that the topic/cluster indices will match up with the original Model.
  Note that if the document has enough data to justify the creation of an extra topic/cluster then that could exist with an index above the indices of the topics/clusters in the source Model."""
  def __init__(self, obj=None):
    """Supports cloning - pass in another DocModel to deep-copy its samples, or nothing for an empty model."""
    self.sample = [] # List of DocSample objects, one per Gibbs sample.

    if isinstance(obj, DocModel):
      for sample in obj.sample:
        self.sample.append(DocSample(sample))

  def addFrom(self, model, index=0):
    """Given a model and a document index number extracts all the relevant DocSample-s, adding them to this DocModel. It does not edit the Model but the DocSample-s transfered over are the same instances."""
    for s in xrange(model.sampleCount()):
      self.sample.append(model.getSample(s).getDoc(index))

  def absorbModel(self, dModel):
    """Absorbs samples from the given DocModel, leaving it baren."""
    self.sample += dModel.sample
    dModel.sample = []

  def sampleCount(self):
    """Returns the number of samples contained within."""
    return len(self.sample)

  def getSample(self, s):
    """Returns the sample with the given index, in the range 0..sampleCount()-1"""
    return self.sample[s]

  def sampleList(self):
    """Returns a list of samples, for iterating."""
    return self.sample

  def hasAbnormal(self, name, abnormDict):
    """Given the key for an abnormality (Typically a string - as provided to the Document object orginally.) returns the probability this document has it, by looking through the samples contained within. Requires an abnorm dictionary, as obtained from the getAbnormDict method of a sample."""
    if name not in abnormDict: return 0.0
    index = abnormDict[name]

    # Fraction of samples whose behaviour flag for this abnormality is set...
    count = 0
    for s in self.sample:
      if s.getBehFlags()[index]!=0: count += 1

    return float(count)/float(len(self.sample))

  def getNLL(self):
    """Returns the average nll of all the contained samples - does a proper mean of the probability of the samples."""
    # Shift by the minimum nll before exponentiating for numerical stability, then undo the shift after taking the log of the mean probability...
    minSam = min(map(lambda s:s.getNLL(),self.sample))
    probMean = sum(map(lambda s:math.exp(minSam-s.getNLL()),self.sample))
    probMean /= float(len(self.sample))
    return minSam - math.log(probMean)
Python
# Copyright (c) 2012, Tom SF Haines
# All rights reserved.

# Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.

# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.



from start_cpp import start_cpp

# Some basic matrix operations that come in use...
# This is a C++ support-code fragment, held as a Python string so it can be pasted into scipy.weave support code; the include guard lets several fragments that pull it in coexist in one translation unit. start_cpp() prefixes a #line directive so compiler errors point back at this file.
# Contents: MemSwap (element-wise swap of two buffers), Determinant (recursive cofactor expansion - exponential cost, only sensible for small matrices), Inverse (Gauss-Jordan elimination with partial pivoting; destroys the input matrix).
matrix_code = start_cpp() + """
#ifndef MATRIX_CODE
#define MATRIX_CODE

template <typename T>
inline void MemSwap(T * lhs, T * rhs, int count = 1)
{
 while(count!=0)
 {
  T t = *lhs;
  *lhs = *rhs;
  *rhs = t;

  ++lhs;
  ++rhs;
  --count;
 }
}

// Calculates the determinant - you give it a pointer to the first elment of the array, and its size (It must be square), plus its stride, which would typically be identical to size, which is the default.
template <typename T>
inline T Determinant(T * pos, int size, int stride = -1)
{
 if (stride==-1) stride = size;

 if (size==1) return pos[0];
 else
 {
  if (size==2) return pos[0]*pos[stride+1] - pos[1]*pos[stride];
  else
  {
   T ret = 0.0;
   for (int i=0; i<size; i++)
   {
    if (i!=0) MemSwap(&pos[0], &pos[stride*i], size-1);

    T sub = Determinant(&pos[stride], size-1, stride) * pos[stride*i + size-1];
    if ((i+size)%2) ret += sub;
    else ret -= sub;
   }

   for (int i=1; i<size; i++)
   {
    MemSwap(&pos[(i-1)*stride], &pos[i*stride], size-1);
   }

   return ret;
  }
 }
}

// Inverts a square matrix, will fail on singular and very occasionally on non-singular matrices, returns true on success. Uses Gauss-Jordan elimination with partial pivoting.
// in is the input matrix, out the output matrix, just be aware that the input matrix is trashed.
// You have to provide its size (Its square, obviously.), and optionally a stride if different from size.
template <typename T>
inline bool Inverse(T * in, T * out, int size, int stride = -1)
{
 if (stride==-1) stride = size;

 for (int r=0; r<size; r++)
 {
  for (int c=0; c<size; c++)
  {
   out[r*stride + c] = (c==r)?1.0:0.0;
  }
 }

 for (int r=0; r<size; r++)
 {
  // Find largest pivot and swap in, fail if best we can get is 0...
  T max = in[r*stride + r];
  int index = r;
  for (int i=r+1; i<size; i++)
  {
   if (fabs(in[i*stride + r])>fabs(max))
   {
    max = in[i*stride + r];
    index = i;
   }
  }

  if (index!=r)
  {
   MemSwap(&in[index*stride], &in[r*stride], size);
   MemSwap(&out[index*stride], &out[r*stride], size);
  }

  if (fabs(max-0.0)<1e-6) return false;

  // Divide through the entire row...
  max = 1.0/max;
  in[r*stride + r] = 1.0;
  for (int i=r+1; i<size; i++) in[r*stride + i] *= max;
  for (int i=0; i<size; i++) out[r*stride + i] *= max;

  // Row subtract to generate 0's in the current column, so it matches an identity matrix...
  for (int i=0; i<size; i++)
  {
   if (i==r) continue;

   T factor = in[i*stride + r];
   in[i*stride + r] = 0.0;
   for (int j=r+1; j<size; j++) in[i*stride + j] -= factor * in[r*stride + j];
   for (int j=0; j<size; j++) out[i*stride + j] -= factor * out[r*stride + j];
  }
 }

 return true;
}

#endif
"""
Python
# -*- coding: utf-8 -*-

# Copyright (c) 2010, Tom SF Haines
# All rights reserved.

# Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.

# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.



import sys
import time



class ProgBar:
  """Simple console progress bar class.
  Note that object creation and destruction matter, as they indicate when processing starts and when it stops - construction prints the bar's outline, destruction completes the bar and prints the elapsed time."""
  def __init__(self, width = 60, onCallback = None):
    # Remember when we started so the destructor can report elapsed time, and how much of the bar has been drawn so far (in characters)...
    self.start = time.time()
    self.fill = 0
    self.width = width
    self.onCallback = onCallback

    # Draw the outline the bar will fill in underneath...
    sys.stdout.write(('_'*self.width)+'\n')
    sys.stdout.flush()

  def __del__(self):
    # Complete the bar regardless of where it got to, then report how long everything took...
    self.end = time.time()
    self.__show(self.width)
    sys.stdout.write('\nDone - '+str(self.end-self.start)+' seconds\n\n')
    sys.stdout.flush()

  def callback(self, nDone, nToDo):
    """Hand this into the callback of methods to get a progress bar - it works by users repeatedly calling it to indicate how many units of work they have done (nDone) out of the total number of units required (nToDo)."""
    if self.onCallback:
      self.onCallback()

    # Convert progress into a character count, clamped to the bar width, and only ever draw forwards...
    target = int(float(self.width)*float(nDone)/float(nToDo))
    target = min(target, self.width)
    if target > self.fill:
      self.__show(target)

  def __show(self, n):
    # Emit just the characters needed to advance the bar from its current fill to n...
    grow = n - self.fill
    sys.stdout.write('|'*grow)
    sys.stdout.flush()
    self.fill = n
Python
# Copyright (c) 2012, Tom SF Haines
# All rights reserved.

# Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.

# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.



import pydoc
import inspect



class DocGen:
  """A helper class that is used to generate documentation for the system. Outputs multiple formats simultaneously, specifically html for local reading with a webbrowser and the markup used by the wiki system on Google code. Writes '<name>.html' and '<name>.wiki' in the current directory; both files are finalised and closed by the destructor, so the object must be garbage collected before the output is complete. Python 2 only (relies on the builtin reduce and inspect.getargspec)."""
  def __init__(self, name, title = None, summary = None):
    """name is the module name - primarilly used for the file names. title is the title used as applicable - if not provide it just uses the name. summary is an optional line to go below the title."""
    if title==None: title = name
    if summary==None: summary = title

    # pydoc's HTMLDoc does the heavy lifting for the html output...
    self.doc = pydoc.HTMLDoc()

    # Open the html output and write its boilerplate header...
    self.html = open('%s.html'%name,'w')
    self.html.write('<html>\n')
    self.html.write('<head>\n')
    self.html.write('<title>%s</title>\n'%title)
    self.html.write('</head>\n')
    self.html.write('<body>\n')

    # Accumulators - sections are buffered here and only written out by the destructor...
    self.html_variables = ''
    self.html_functions = ''
    self.html_classes = ''

    # Open the wiki output and write its header...
    self.wiki = open('%s.wiki'%name,'w')
    self.wiki.write('#summary %s\n\n'%summary)
    self.wiki.write('= %s= \n\n'%title)

    self.wiki_variables = ''
    self.wiki_functions = ''
    self.wiki_classes = ''

  def __del__(self):
    # Flush the buffered sections in a fixed order and close out both files...
    if self.html_variables!='':
      self.html.write(self.doc.bigsection('Synonyms', '#ffffff', '#8d50ff', self.html_variables))
    if self.html_functions!='':
      self.html.write(self.doc.bigsection('Functions', '#ffffff', '#eeaa77', self.html_functions))
    if self.html_classes!='':
      self.html.write(self.doc.bigsection('Classes', '#ffffff', '#ee77aa', self.html_classes))
    self.html.write('</body>\n')
    self.html.write('</html>\n')
    self.html.close()

    if self.wiki_variables!='':
      self.wiki.write('= Variables =\n\n')
      self.wiki.write(self.wiki_variables)
      self.wiki.write('\n')
    if self.wiki_functions!='':
      self.wiki.write('= Functions =\n\n')
      self.wiki.write(self.wiki_functions)
      self.wiki.write('\n')
    if self.wiki_classes!='':
      self.wiki.write('= Classes =\n\n')
      self.wiki.write(self.wiki_classes)
      self.wiki.write('\n')
    self.wiki.close()

  def addFile(self, fn, title, fls = True):
    """Given a filename and section title adds the contents of said file to the output, line by line. fls (first-line-strong) controls whether the first line is emphasised; lines of the form 'something.py - description' or 'something.txt - description' get the filename part styled."""
    html = []
    wiki = []
    for i, line in enumerate(open(fn,'r').readlines()):
      # html version of the line - bold first line, italicise any 'x.py - '/'x.txt - ' filename prefix...
      hl = line.replace('\n', '')
      if i==0 and fls: hl = '<strong>' + hl + '</strong>'
      for ext in ['py','txt']:
        if '.%s - '%ext in hl:
          s = hl.split('.%s - '%ext, 1)
          hl = '<i>' + s[0] + '.%s</i> - '%ext + s[1]
      html.append(hl)

      # wiki version of the same line, using the wiki's *bold* and `code` markup instead...
      wl = line.strip()
      if i==0 and fls: wl = '*%s*'%wl
      for ext in ['py','txt']:
        if '.%s - '%ext in wl:
          s = wl.split('.%s - '%ext, 1)
          wl = '`' + s[0] + '.%s` - '%ext + s[1] + '\n'
      wiki.append(wl)

    # File sections are written immediately, unlike variables/functions/classes which are buffered...
    self.html.write(self.doc.bigsection(title, '#ffffff', '#7799ee', '<br/>'.join(html)))
    self.wiki.write('== %s ==\n'%title)
    self.wiki.write('\n'.join(wiki))
    self.wiki.write('----\n\n')

  def addVariable(self, var, desc):
    """Adds a variable to the documentation. Given the nature of this you provide it as a pair of strings - one referencing the variable, the other some kind of description of its use etc.."""
    self.html_variables += '<strong>%s</strong><br/>'%var
    self.html_variables += '%s<br/><br/>\n'%desc

    self.wiki_variables += '*`%s`*\n'%var
    self.wiki_variables += ' %s\n\n'%desc

  def addFunction(self, func):
    """Adds a function to the documentation. You provide the actual function instance."""
    self.html_functions += self.doc.docroutine(func).replace('&nbsp;',' ')
    self.html_functions += '\n'

    name = func.__name__
    args, varargs, keywords, defaults = inspect.getargspec(func)
    doc = inspect.getdoc(func)

    # Pad defaults on the left with None so it lines up with args (defaults apply to the trailing arguments only)...
    if defaults==None: defaults = list()
    defaults = (len(args)-len(defaults)) * [None] + list(defaults)

    # Rebuild a 'a, b = 1, *args, **kwargs' style signature string...
    arg_str = ''
    if len(args)!=0:
      arg_str += reduce(lambda a, b: '%s, %s'%(a,b), map(lambda arg, d: arg if d==None else '%s = %s'%(arg,d), args, defaults))
    if varargs!=None: arg_str += ', *%s'%varargs if arg_str!='' else '*%s'%varargs
    if keywords!=None: arg_str += ', **%s'%keywords if arg_str!='' else '**%s'%keywords

    self.wiki_functions += '*`%s(%s)`*\n'%(name, arg_str)
    self.wiki_functions += ' %s\n\n'%doc

  def addClass(self, cls):
    """Adds a class to the documentation. You provide the actual class object."""
    self.html_classes += self.doc.docclass(cls).replace('&nbsp;',' ')
    self.html_classes += '\n'

    name = cls.__name__
    parents = filter(lambda a: a!=cls, inspect.getmro(cls))
    doc = inspect.getdoc(cls)

    # Header line - class name plus its inheritance chain...
    par_str = ''
    if len(parents)!=0:
      par_str += reduce(lambda a, b: '%s, %s'%(a,b), map(lambda p: p.__name__, parents))

    self.wiki_classes += '== %s(%s) ==\n'%(name, par_str)
    self.wiki_classes += ' %s\n\n'%doc

    # Collect the methods, sorting so __init__ comes first ('___' sorts before any normal identifier)...
    methods = inspect.getmembers(cls, lambda x: inspect.ismethod(x) or inspect.isbuiltin(x) or inspect.isroutine(x))
    def method_key(pair):
      if pair[0]=='__init__': return '___'
      else: return pair[0]
    methods.sort(key=method_key)

    for name, method in methods:
      # NOTE(review): with this guard the inner 'if inspect.ismethod(method)' branch below appears unreachable (the guard requires not ismethod) - presumably the intent is that only C-extension methods (isbuiltin/isroutine) are documented here, with the '?' placeholder signature; confirm before changing.
      if not name.startswith('_%s'%cls.__name__) and (not inspect.ismethod(method) and name[:2]!='__'):
        if inspect.ismethod(method):
          args, varargs, keywords, defaults = inspect.getargspec(method)
        else:
          args = ['?']
          varargs = None
          keywords = None
          defaults = None

        # Same signature reconstruction as addFunction...
        if defaults==None: defaults = list()
        defaults = (len(args)-len(defaults)) * [None] + list(defaults)

        arg_str = ''
        if len(args)!=0:
          arg_str += reduce(lambda a, b: '%s, %s'%(a,b), map(lambda arg, d: arg if d==None else '%s = %s'%(arg,d), args, defaults))
        if varargs!=None: arg_str += ', *%s'%varargs if arg_str!='' else '*%s'%varargs
        if keywords!=None: arg_str += ', **%s'%keywords if arg_str!='' else '**%s'%keywords

        # Walk up the mro to find a docstring for the method, so inherited documentation is used when the subclass does not override it...
        def fetch_doc(cls, name):
          try:
            method = getattr(cls, name)
            if method.__doc__!=None:
              return inspect.getdoc(method)
          except: pass

          for parent in filter(lambda a: a!=cls, inspect.getmro(cls)):
            ret = fetch_doc(parent, name)
            if ret!=None: return ret

          return None

        doc = fetch_doc(cls, name)

        self.wiki_classes += '*`%s(%s)`*\n'%(name, arg_str)
        self.wiki_classes += ' %s\n\n'%doc

    # Document simple class-level values and member descriptors as 'name = value' entries...
    variables = inspect.getmembers(cls, lambda x: inspect.ismemberdescriptor(x) or isinstance(x, int) or isinstance(x, str) or isinstance(x, float))
    for name, var in variables:
      if not name.startswith('__'):
        if hasattr(var, '__doc__'): d = var.__doc__
        else: d = str(var)
        self.wiki_classes += '*`%s`* = %s\n\n'%(name, d)
Python
# Copyright (c) 2011, Tom SF Haines
# All rights reserved.

# Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.

# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.



import unittest
import random
import math

from scipy.special import gammaln, psi, polygamma
from scipy import weave

from utils.start_cpp import start_cpp



# Provides various gamma-related functions...
# C++ support-code fragment for scipy.weave: lnGamma (Lanczos approximation), digamma and trigamma (asymptotic series plus recurrence to shift small arguments up to where the series is accurate). The Python wrappers below and the unit tests exist to validate these against scipy's own implementations.
gamma_code = start_cpp() + """
#ifndef GAMMA_CODE
#define GAMMA_CODE

#include <cmath>

// Returns the natural logarithm of the Gamma function...
// (Uses Lanczos's approximation.)
double lnGamma(double z)
{
 static const double coeff[9] = {0.99999999999980993, 676.5203681218851, -1259.1392167224028, 771.32342877765313, -176.61502916214059, 12.507343278686905, -0.13857109526572012, 9.9843695780195716e-6, 1.5056327351493116e-7};

 if (z<0.5)
 {
  // Use reflection formula, as approximation doesn't work down here...
  return log(M_PI) - log(sin(M_PI*z)) - lnGamma(1.0-z);
 }
 else
 {
  double x = coeff[0];
  for (int i=1;i<9;i++) x += coeff[i]/(z+i-1);

  double t = z + 6.5;
  return log(sqrt(2.0*M_PI)) + (z-0.5)*log(t) - t + log(x);
 }
}

// Calculates the Digamma function, i.e. the derivative of the log of the Gamma function - uses a partial expansion of an infinite series to 4 terms that is good for high values, and an identity to express lower values in terms of higher values...
double digamma(double z)
{
 static const double highVal = 13.0; // A bit of fiddling shows that the last term with this is of the order 1e-10, so we can expect at least 9 digits of accuracy past the decimal point.

 double ret = 0.0;
 while (z<highVal)
 {
  ret -= 1.0/z;
  z += 1.0;
 }

 double iz1 = 1.0/z;
 double iz2 = iz1*iz1;
 double iz4 = iz2*iz2;
 double iz6 = iz4*iz2;

 ret += log(z) - iz1/2.0 - iz2/12.0 + iz4/120.0 - iz6/252.0;

 return ret;
}

// Calculates the trigamma function - uses a partial expansion of an infinite series that is accurate for large values, and then uses an identity to express lower values in terms of higher values - same approach as for the digamma function basically...
double trigamma(double z)
{
 static const double highVal = 8.0;

 double ret = 0.0;
 while (z<highVal)
 {
  ret += 1.0/(z*z);
  z += 1.0;
 }

 z -= 1.0;
 double iz1 = 1.0/z;
 double iz2 = iz1*iz1;
 double iz3 = iz1*iz2;
 double iz5 = iz3*iz2;
 double iz7 = iz5*iz2;
 double iz9 = iz7*iz2;

 ret += iz1 - 0.5*iz2 + iz3/6.0 - iz5/30.0 + iz7/42.0 - iz9/30.0;

 return ret;
}

#endif
"""



def lnGamma(z):
  """Pointless as scipy, a library this is dependent on, defines this, but useful for testing. Returns the logorithm of the gamma function"""
  code = start_cpp(gamma_code) + """
  return_val = lnGamma(z);
  """
  return weave.inline(code, ['z'], support_code=gamma_code)

def digamma(z):
  """Pointless as scipy, a library this is dependent on, defines this, but useful for testing. Returns an evaluation of the digamma function"""
  code = start_cpp(gamma_code) + """
  return_val = digamma(z);
  """
  return weave.inline(code, ['z'], support_code=gamma_code)

def trigamma(z):
  """Pointless as scipy, a library this is dependent on, defines this, but useful for testing. Returns an evaluation of the trigamma function"""
  code = start_cpp(gamma_code) + """
  return_val = trigamma(z);
  """
  return weave.inline(code, ['z'], support_code=gamma_code)



class TestFuncs(unittest.TestCase):
  """Test code for the assorted gamma-related functions - each compares the C++ implementation against the scipy reference over random inputs."""
  def test_compile(self):
    # Just checks the support code compiles at all...
    code = start_cpp(gamma_code) + """
    """
    weave.inline(code, support_code=gamma_code)

  def test_error_lngamma(self):
    for _ in xrange(1000):
      z = random.uniform(0.01, 100.0)
      own = lnGamma(z)
      good = gammaln(z)
      assert(math.fabs(own-good)<1e-12)

  def test_error_digamma(self):
    for _ in xrange(1000):
      z = random.uniform(0.01, 100.0)
      own = digamma(z)
      good = psi(z)
      assert(math.fabs(own-good)<1e-9)

  def test_error_trigamma(self):
    for _ in xrange(1000):
      z = random.uniform(0.01, 100.0)
      own = trigamma(z)
      good = polygamma(1,z)
      assert(math.fabs(own-good)<1e-9)



# If this file is run do the unit tests...
if __name__ == '__main__':
  unittest.main()
Python
# -*- coding: utf-8 -*-

# Copyright (c) 2010, Tom SF Haines
# All rights reserved.

# Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.

# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.



import inspect
import hashlib



def start_cpp(hash_str = None):
  """This method does two things - firstly it adds the correct line numbers to scipy.weave code (Good for debugging) and secondly it can optionaly inserts a hash code of some other code into the code. This latter feature is useful for working around the fact the scipy.weave only recompiles if the hash of the code changes, but ignores the support_code - passing the support_code into start_cpp avoids this problem by putting its hash into the code and forcing a recompile when that code changes.
  Usage is <code variable> = start_cpp([support_code variable]) + <3 quotations to start big comment with code in, typically going over many lines.>"""
  # Inspect the caller's frame so the emitted #line directive points at the line of the start_cpp() call in the caller's file...
  caller = inspect.getframeinfo(inspect.currentframe().f_back)
  filename = caller[0]
  lineno = caller[1]

  if hash_str is None:
    return '#line %i "%s"\n'%(lineno, filename)

  # Fold an md5 of the supplied code into a trailing C++ comment - changing that code changes this string, which forces scipy.weave to recompile...
  digest = hashlib.md5()
  digest.update(hash_str)
  return '#line %i "%s" // %s\n'%(lineno, filename, digest.hexdigest())
Python
# -*- coding: utf-8 -*-

# Code copied from http://opencv.willowgarage.com/wiki/PythonInterface - license unknown, but presumed to be at least as liberal as bsd (The license for opencv.).



import cv
import numpy as np



def cv2array(im):
  """Converts a cv array to a numpy array.
  im is an OpenCV image (IplImage-style object with depth/width/height/nChannels); returns a numpy array of shape (height, width, nChannels) whose dtype matches the image depth."""
  depth2dtype = {
        cv.IPL_DEPTH_8U: 'uint8',
        cv.IPL_DEPTH_8S: 'int8',
        cv.IPL_DEPTH_16U: 'uint16',
        cv.IPL_DEPTH_16S: 'int16',
        cv.IPL_DEPTH_32S: 'int32',
        cv.IPL_DEPTH_32F: 'float32',
        cv.IPL_DEPTH_64F: 'float64',
    }

  # Copy the raw pixel buffer out and reinterpret it with the matching dtype.
  # (NOTE: np.fromstring is deprecated in modern numpy in favour of np.frombuffer, but frombuffer yields a read-only view, so the copying behaviour here is kept.)
  a = np.fromstring(
        im.tostring(),
        dtype=depth2dtype[im.depth],
        count=im.width*im.height*im.nChannels)
  a.shape = (im.height,im.width,im.nChannels)
  return a


def array2cv(a):
  """Converts a numpy array to a cv array, if possible.
  a is a 2D (greyscale) or 3D (height, width, channels) numpy array with a dtype in the supported set; returns a cv image sharing the same pixel values (the data is copied via tostring)."""
  dtype2depth = {
        'uint8': cv.IPL_DEPTH_8U,
        'int8': cv.IPL_DEPTH_8S,
        'uint16': cv.IPL_DEPTH_16U,
        'int16': cv.IPL_DEPTH_16S,
        'int32': cv.IPL_DEPTH_32S,
        'float32': cv.IPL_DEPTH_32F,
        'float64': cv.IPL_DEPTH_64F,
    }

  # A 2D array has no channel axis - treat it as single channel. Only the
  # missing-axis IndexError is caught; anything else is a real error and
  # should propagate rather than being silently swallowed.
  try:
    nChannels = a.shape[2]
  except IndexError:
    nChannels = 1

  cv_im = cv.CreateImageHeader((a.shape[1],a.shape[0]),
        dtype2depth[str(a.dtype)],
        nChannels)
  cv.SetData(cv_im, a.tostring(),
        a.dtype.itemsize*nChannels*a.shape[1])
  return cv_im
Python
# -*- coding: utf-8 -*-

# Copyright (c) 2011, Tom SF Haines
# All rights reserved.

# Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.

# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.



import multiprocessing as mp
import multiprocessing.synchronize # To make sure we have all the functionality.

import types
import marshal

import unittest



def repeat(x):
  """A generator that repeats the input forever - can be used with the mp_map function to give data to a function that is constant."""
  while True:
    yield x



def run_code(code,args):
  """Internal use function that does the work in each process - rebuilds a function from its marshalled code object and calls it with the given argument tuple. Using marshal+FunctionType sidesteps the fact that lambdas and nested functions cannot be pickled for transmission to worker processes."""
  code = marshal.loads(code)
  func = types.FunctionType(code, globals(), '_')
  return func(*args)



def mp_map(func, *iters, **keywords):
  """A multiprocess version of the map function.
  Note that func must limit itself to the data provided - if it accesses anything else (globals, locals to its definition.) it will fail. There is a repeat generator provided in this module to work around such issues. Note that, unlike map, this iterates the length of the shortest of inputs, rather than the longest - whilst this makes it not a perfect substitute it makes passing constant argumenmts easier as they can just repeat for infinity. An existing multiprocessing Pool can be supplied via the 'pool' keyword. Python 2 only (uses func.func_code)."""
  # Use the caller's pool if given, otherwise spin up a default one...
  # NOTE(review): a pool created here is never close()d/join()ed - relies on garbage collection to clean up worker processes; confirm that is acceptable for the callers.
  if 'pool' in keywords:
    pool = keywords['pool']
  else:
    pool = mp.Pool()

  # Only the raw code object is sent - it is rebuilt in the worker by run_code...
  code = marshal.dumps(func.func_code)

  # Submit one asynchronous job per argument tuple (zip stops at the shortest input, which is what lets repeat() provide constants)...
  jobs = []
  for args in zip(*iters):
    jobs.append(pool.apply_async(run_code,(code,args)))

  # Replace each handle with its result, in submission order, so output order matches input order...
  for i in xrange(len(jobs)):
    jobs[i] = jobs[i].get()

  return jobs



class TestMpMap(unittest.TestCase):
  """Unit tests for mp_map - each checks the output matches the builtin map over the same inputs."""
  def test_simple1(self):
    data = ['a','b','c','d']
    def noop(data):
      return data
    data_noop = mp_map(noop, data)
    self.assertEqual(data, data_noop)

  def test_simple2(self):
    data = [x for x in xrange(1000)]
    data_double = mp_map(lambda a: a*2, data)
    self.assertEqual(map(lambda a: a*2,data), data_double)

  def test_gen(self):
    # Generators are valid input - mp_map consumes them via zip...
    def gen():
      for i in xrange(100):
        yield i
    data_double = mp_map(lambda a: a*2, gen())
    self.assertEqual(map(lambda a: a*2,gen()), data_double)

  def test_repeat(self):
    # The repeat() helper supplies a constant second argument...
    def mult(a,b):
      return a*b
    data = [x for x in xrange(50,5000,5)]
    data_triple = mp_map(mult, data, repeat(3))
    self.assertEqual(map(lambda a: a*3,data),data_triple)

  def test_none(self):
    # Empty input must give empty output...
    data = []
    data_sqr = mp_map(lambda x: x*x, data)
    self.assertEqual([],data_sqr)



if __name__ == '__main__':
  unittest.main()
Python
# Copyright (c) 2012, Tom SF Haines
# All rights reserved.

# Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.

# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.



import sys
import os.path
import tempfile
import shutil

from distutils.core import setup, Extension
import distutils.ccompiler
import distutils.dep_util



# Deliberately broad - on a platform where no compiler can be constructed we
# still want the module to import; make_mod raises later if it is needed.
try:
  __default_compiler = distutils.ccompiler.new_compiler()
except:
  __default_compiler = None



def make_mod(name, base, source, openCL = False):
  """Uses distutils to compile a python module - really just a set of hacks to allow this to be done 'on demand', so it only compiles if the module does not exist or is older than the current source, and after compilation the program can continue on its merry way, and immediatly import the just compiled module. Note that on failure erros can be thrown - its your choice to catch them or not.
  name is the modules name, i.e. what you want to use with the import statement.
  base is the base directory for the module, which contains the source file - often you would want to set this to 'os.path.dirname(__file__)', assuming the .py file that imports the module is in the same directory as the code. It is this directory that the module is output to.
  source is the filename of the source code to compile, or alternativly a list of filenames.
  openCL indicates if OpenCL is used by the module, in which case it does all the necesary setup - done like this so these setting can be kept centralised, so when they need to be different for a new platform they only have to be changed in one place."""
  if __default_compiler==None:
    raise Exception('No compiler!')

  # Work out the various file names - check if we actually need to do anything...
  if not isinstance(source, list):
    source = [source]
  source_path = [os.path.join(base, s) for s in source]
  library_path = os.path.join(base, __default_compiler.shared_object_filename(name))

  # any() rather than reduce-over-or: identical result, but also safe on an
  # empty source list (reduce would raise TypeError).
  if any(distutils.dep_util.newer(s, library_path) for s in source_path):
    # Backup argv and create the temporary work directory BEFORE entering the
    # try block - previously these were assigned inside it, so a failure in
    # mkdtemp would raise a NameError from the finally clause and mask the
    # real error. (A stray debug "print 'b'" has also been removed here.)
    old_argv = sys.argv[:]
    temp_dir = tempfile.mkdtemp()
    try:
      # distutils is driven through argv - fake a 'build_ext' command line...
      sys.argv = ['','build_ext','--build-lib', base, '--build-temp', temp_dir]

      # Headers go in depends (trigger rebuilds) rather than being compiled...
      comp_path = [s for s in source_path if not s.endswith('.h')]
      depends = [s for s in source_path if s.endswith('.h')]

      # Prepare the extension...
      if openCL:
        ext = Extension(name, comp_path, include_dirs=['/usr/local/cuda/include', '/opt/AMDAPP/include'], libraries = ['OpenCL'], library_dirs = ['/usr/lib64/nvidia', '/opt/AMDAPP/lib/x86_64'], depends=depends)
      else:
        ext = Extension(name, comp_path, depends=depends)

      # Compile...
      setup(name=name, version='1.0.0', ext_modules=[ext])

    finally:
      # Cleanup the argv variable and the temporary directory...
      sys.argv = old_argv
      shutil.rmtree(temp_dir, True)
Python
# -*- coding: utf-8 -*-

# Copyright (c) 2011, Tom SF Haines
# All rights reserved.

# Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.

# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.



from start_cpp import start_cpp
from numpy_help_cpp import numpy_util_code



# Provides various functions to assist with manipulating python objects from c++ code.
# C++ support-code string for scipy.weave: attribute getters built on PyObject_GetAttrString (boolean/int/float scalars, plus 1D byte/float numpy arrays copied into new[] buffers - callers own the returned arrays and must delete[] them). The numpy helpers come from numpy_util_code, which is prepended so Byte1D/Float1D are in scope.
python_obj_code = numpy_util_code + start_cpp() + """
#ifndef PYTHON_OBJ_CODE
#define PYTHON_OBJ_CODE

// Extracts a boolean from an object...
bool GetObjectBoolean(PyObject * obj, const char * name)
{
 PyObject * b = PyObject_GetAttrString(obj, name);
 bool ret = b!=Py_False;
 Py_DECREF(b);
 return ret;
}

// Extracts an int from an object...
int GetObjectInt(PyObject * obj, const char * name)
{
 PyObject * i = PyObject_GetAttrString(obj, name);
 int ret = PyInt_AsLong(i);
 Py_DECREF(i);
 return ret;
}

// Extracts a float from an object...
float GetObjectFloat(PyObject * obj, const char * name)
{
 PyObject * f = PyObject_GetAttrString(obj, name);
 float ret = PyFloat_AsDouble(f);
 Py_DECREF(f);
 return ret;
}

// Extracts an array from an object, returning it as a new[] unsigned char array. You can also pass in a pointer to an int to have the size of the array stored...
unsigned char * GetObjectByte1D(PyObject * obj, const char * name, int * size = 0)
{
 PyArrayObject * nao = (PyArrayObject*)PyObject_GetAttrString(obj, name);

 unsigned char * ret = new unsigned char[nao->dimensions[0]];
 if (size) *size = nao->dimensions[0];

 for (int i=0;i<nao->dimensions[0];i++) ret[i] = Byte1D(nao,i);

 Py_DECREF(nao);
 return ret;
}

// Extracts an array from an object, returning it as a new[] float array. You can also pass in a pointer to an int to have the size of the array stored...
float * GetObjectFloat1D(PyObject * obj, const char * name, int * size = 0)
{
 PyArrayObject * nao = (PyArrayObject*)PyObject_GetAttrString(obj, name);

 float * ret = new float[nao->dimensions[0]];
 if (size) *size = nao->dimensions[0];

 for (int i=0;i<nao->dimensions[0];i++) ret[i] = Float1D(nao,i);

 Py_DECREF(nao);
 return ret;
}

#endif
"""
Python
# -*- coding: utf-8 -*- # Copyright (c) 2011, Tom SF Haines # All rights reserved. # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. # * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. from start_cpp import start_cpp # Defines helper functions for accessing numpy arrays... 
numpy_util_code = start_cpp() + """ #ifndef NUMPY_UTIL_CODE #define NUMPY_UTIL_CODE float & Float1D(PyArrayObject * arr, int index = 0) { return *(float*)(arr->data + index*arr->strides[0]); } float & Float2D(PyArrayObject * arr, int index1 = 0, int index2 = 0) { return *(float*)(arr->data + index1*arr->strides[0] + index2*arr->strides[1]); } float & Float3D(PyArrayObject * arr, int index1 = 0, int index2 = 0, int index3 = 0) { return *(float*)(arr->data + index1*arr->strides[0] + index2*arr->strides[1] + index3*arr->strides[2]); } unsigned char & Byte1D(PyArrayObject * arr, int index = 0) { //assert(arr->strides[0]==sizeof(unsigned char)); return *(unsigned char*)(arr->data + index*arr->strides[0]); } unsigned char & Byte2D(PyArrayObject * arr, int index1 = 0, int index2 = 0) { //assert(arr->strides[0]==sizeof(unsigned char)); return *(unsigned char*)(arr->data + index1*arr->strides[0] + index2*arr->strides[1]); } unsigned char & Byte3D(PyArrayObject * arr, int index1 = 0, int index2 = 0, int index3 = 0) { //assert(arr->strides[0]==sizeof(unsigned char)); return *(unsigned char*)(arr->data + index1*arr->strides[0] + index2*arr->strides[1] + index3*arr->strides[2]); } int & Int1D(PyArrayObject * arr, int index = 0) { //assert(arr->strides[0]==sizeof(int)); return *(int*)(arr->data + index*arr->strides[0]); } int & Int2D(PyArrayObject * arr, int index1 = 0, int index2 = 0) { //assert(arr->strides[0]==sizeof(int)); return *(int*)(arr->data + index1*arr->strides[0] + index2*arr->strides[1]); } int & Int3D(PyArrayObject * arr, int index1 = 0, int index2 = 0, int index3 = 0) { //assert(arr->strides[0]==sizeof(int)); return *(int*)(arr->data + index1*arr->strides[0] + index2*arr->strides[1] + index3*arr->strides[2]); } #endif """
Python
#! /usr/bin/env python # -*- coding: utf-8 -*- # Copyright (c) 2011, Tom SF Haines # All rights reserved. # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. # * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. import cvarray import mp_map import prog_bar import numpy_help_cpp import python_obj_cpp import matrix_cpp import gamma_cpp import setProcName import start_cpp import make import doc_gen # Setup... doc = doc_gen.DocGen('utils', 'Utilities/Miscellaneous', 'Library of miscellaneous stuff - most modules depend on this.') doc.addFile('readme.txt', 'Overview') # Variables... 
doc.addVariable('numpy_help_cpp.numpy_util_code', 'Assorted utility functions for accessing numpy arrays within scipy.weave C++ code.') doc.addVariable('python_obj_cpp.python_obj_code', 'Assorted utility functions for interfacing with python objects from scipy.weave C++ code.') doc.addVariable('matrix_cpp.matrix_code', 'Matrix manipulation routines for use in scipy.weave C++') doc.addVariable('gamma_cpp.gamma_code', 'Gamma and related functions for use in scipy.weave C++') # Functions... doc.addFunction(make.make_mod) doc.addFunction(cvarray.cv2array) doc.addFunction(cvarray.array2cv) doc.addFunction(mp_map.repeat) doc.addFunction(mp_map.mp_map) doc.addFunction(setProcName.setProcName) doc.addFunction(start_cpp.start_cpp) doc.addFunction(make.make_mod) # Classes... doc.addClass(prog_bar.ProgBar) doc.addClass(doc_gen.DocGen)
Python
#! /usr/bin/env python # -*- coding: utf-8 -*- # Copyright (c) 2010, Tom SF Haines # All rights reserved. # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. # * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. from ctypes import * def setProcName(name): """Sets the process name, linux only - useful for those programs where you might want to do a killall, but don't want to slaughter all the other python processes. Note that there are multiple mechanisms, and that the given new name can be shortened by differing amounts in differing cases.""" # Call the process control function... libc = cdll.LoadLibrary('libc.so.6') libc.prctl(15, c_char_p(name), 0, 0, 0) # Update argv... charPP = POINTER(POINTER(c_char)) argv = charPP.in_dll(libc,'_dl_argv') size = libc.strlen(argv[0]) libc.strncpy(argv[0],c_char_p(name),size) if __name__=='__main__': # Quick test that it works... 
import os ps1 = 'ps' ps2 = 'ps -f' os.system(ps1) os.system(ps2) setProcName('wibble_wobble') os.system(ps1) os.system(ps2)
Python
#! /usr/bin/env python # Copyright 2011 Tom SF Haines # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. import ddhdp from utils import doc_gen # Setup... doc = doc_gen.DocGen('ddhdp', 'Delta-Dual Hierarchical Dirichlet Processes', 'Semi-supervised topic model, with clustering') doc.addFile('readme.txt', 'Overview') # Functions... doc.addFunction(ddhdp.getAlgorithm) # Classes... doc.addClass(ddhdp.PriorConcDP) doc.addClass(ddhdp.Params) doc.addClass(ddhdp.Document) doc.addClass(ddhdp.Corpus) doc.addClass(ddhdp.DocSample) doc.addClass(ddhdp.Sample) doc.addClass(ddhdp.Model) doc.addClass(ddhdp.DocModel)
Python
# Copyright 2011 Tom SF Haines # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. import numpy class Document: """Representation of a document used by the system. Consists of a list of words - each is referenced by a natural number and is associated with a count of how many of that particular word exist in the document.""" def __init__(self, dic, abnorms = []): """Constructs a document given a dictionary (Or equivalent) dic[ident] = count, where ident is the natural number that indicates which word and count is how many times that word exists in the document. Excluded entries are effectivly assumed to have a count of zero. Note that the solver will construct an array 0..{max word ident} and assume all words in that range exist, going so far as smoothing in words that are never actually seen. abnorms can be optionally provided as a list of comparable python entities, typcially strings. It constitutes a list of the abnormalities that exist in the current document. Has copy constructor capability if the first parameter is a Document.""" if isinstance(dic, Document): self.words = dic.words.copy() self.sampleCount = dic.sampleCount self.ident = dic.ident self.abnorms = dic.abnorms[:] else: # Create data store... self.words = numpy.empty((len(dic),2), dtype=numpy.uint) # Copy in the data... index = 0 self.sampleCount = 0 # Total number of words is sometimes useful - stored to save computation. 
for key, value in dic.iteritems(): self.words[index,0] = key self.words[index,1] = value self.sampleCount += value index += 1 assert(index==self.words.shape[0]) # Sorts the data - experiance shows this is not actually needed as iteritems kicks out integers sorted, but as that is not part of the spec (As I know it.) this can not be assumed, and so this step is required, incase it ever changes (Or indeed another type that pretends to be a dictionary is passed in.)... self.words = self.words[self.words[:,0].argsort(),:] # Ident for the document, stored in here for conveniance. Only assigned when the document is stuffed into a Corpus... self.ident = None # Store the abnormalities that exist in the current document - just a list of comparable python objects, typcially strings... self.abnorms = abnorms[:] def setAbnorms(self, newAbnorms): """Allows you to set the abnormalities within the document post-construction.""" self.abnorms = newAbnorms[:] def getDic(self): """Returns a dictionary object that represents the document, basically a recreated version of the dictionary handed in to the constructor.""" ret = dict() for i in xrange(self.words.shape[0]): ret[self.words[i,0]] = self.words[i,1] return ret def getIdent(self): """Ident - just the offset into the array in the corpus where this document is stored, or None if its yet to be stored anywhere.""" return self.ident def getAbnorms(self): """Returns the list of abnormalities that is in the document, which will often be the empty list.""" return self.abnorms def getSampleCount(self): """Returns the number of samples in the document, which is equivalent to the number of words, counting duplicates.""" return self.sampleCount def getWordCount(self): """Returns the number of unique words in the document, i.e. 
not counting duplicates.""" return self.words.shape[0] def getWord(self, index): """Given an index 0..getWordCount()-1 this returns the tuple (ident,count) for that word.""" return (self.words[index,0],self.words[index,1])
Python
# Copyright 2013 Tom SF Haines # Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at # http://www.apache.org/licenses/LICENSE-2.0 # Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. # Compile the code if need be... try: from utils.make import make_mod import os.path make_mod('ms_c', os.path.dirname(__file__), ['bessel.h', 'bessel.c', 'eigen.h', 'eigen.c', 'kernels.h', 'kernels.c', 'data_matrix.h', 'data_matrix.c', 'spatial.h', 'spatial.c', 'balls.h', 'balls.c', 'mean_shift.h', 'mean_shift.c', 'ms_c.h', 'ms_c.c']) except: pass # Import the compiled module into this space, so we can pretend they are one and the same, just with automatic compilation... from ms_c import MeanShift
Python
#! /usr/bin/env python # Copyright 2013 Tom SF Haines # Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at # http://www.apache.org/licenses/LICENSE-2.0 # Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. from distutils.core import setup, Extension depends = ['bessel.h', 'eigen.h', 'kernels.h', 'data_matrix.h', 'spatial.h', 'balls.h', 'mean_shift.h', 'ms_c.h'] code = ['bessel.c', 'eigen.c', 'kernels.c', 'data_matrix.c', 'spatial.c', 'balls.c', 'mean_shift.c', 'ms_c.c'] ext = Extension('ms_c', code, depends=depends) setup(name='ms', version='1.0.0', description='Mean Shift', author='Tom SF Haines', author_email='thaines@gmail.com', url='http://code.google.com/p/haines/', py_modules=['ms'], ext_modules=[ext], )
Python
#! /usr/bin/env python # Copyright 2013 Tom SF Haines # Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at # http://www.apache.org/licenses/LICENSE-2.0 # Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. # Simple script that prints out all kernels/spatials/balls that the code supports... import ms print 'Kernels:' for kernel in ms.MeanShift.kernels(): print ' >%s' % kernel d = ms.MeanShift.info(kernel) for i in xrange(0, len(d), 60): print ' %s' % d[i:i+60].strip() print print print 'Spatial:' for spatial in ms.MeanShift.spatials(): print ' >%s' % spatial d = ms.MeanShift.info(spatial) for i in xrange(0, len(d), 60): print ' %s' % d[i:i+60].strip() print print print 'Balls:' for balls in ms.MeanShift.balls(): print ' >%s' % balls d = ms.MeanShift.info(balls) for i in xrange(0, len(d), 60): print ' %s' % d[i:i+60].strip() print
Python
# Copyright (c) 2012, Tom SF Haines # All rights reserved. # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. # * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. from start_cpp import start_cpp # Some basic matrix operations that come in use... matrix_code = start_cpp() + """ #ifndef MATRIX_CODE #define MATRIX_CODE template <typename T> inline void MemSwap(T * lhs, T * rhs, int count = 1) { while(count!=0) { T t = *lhs; *lhs = *rhs; *rhs = t; ++lhs; ++rhs; --count; } } // Calculates the determinant - you give it a pointer to the first elment of the array, and its size (It must be square), plus its stride, which would typically be identical to size, which is the default. 
template <typename T> inline T Determinant(T * pos, int size, int stride = -1) { if (stride==-1) stride = size; if (size==1) return pos[0]; else { if (size==2) return pos[0]*pos[stride+1] - pos[1]*pos[stride]; else { T ret = 0.0; for (int i=0; i<size; i++) { if (i!=0) MemSwap(&pos[0], &pos[stride*i], size-1); T sub = Determinant(&pos[stride], size-1, stride) * pos[stride*i + size-1]; if ((i+size)%2) ret += sub; else ret -= sub; } for (int i=1; i<size; i++) { MemSwap(&pos[(i-1)*stride], &pos[i*stride], size-1); } return ret; } } } // Inverts a square matrix, will fail on singular and very occasionally on // non-singular matrices, returns true on success. Uses Gauss-Jordan elimination // with partial pivoting. // in is the input matrix, out the output matrix, just be aware that the input matrix is trashed. // You have to provide its size (Its square, obviously.), and optionally a stride if different from size. template <typename T> inline bool Inverse(T * in, T * out, int size, int stride = -1) { if (stride==-1) stride = size; for (int r=0; r<size; r++) { for (int c=0; c<size; c++) { out[r*stride + c] = (c==r)?1.0:0.0; } } for (int r=0; r<size; r++) { // Find largest pivot and swap in, fail if best we can get is 0... T max = in[r*stride + r]; int index = r; for (int i=r+1; i<size; i++) { if (fabs(in[i*stride + r])>fabs(max)) { max = in[i*stride + r]; index = i; } } if (index!=r) { MemSwap(&in[index*stride], &in[r*stride], size); MemSwap(&out[index*stride], &out[r*stride], size); } if (fabs(max-0.0)<1e-6) return false; // Divide through the entire row... max = 1.0/max; in[r*stride + r] = 1.0; for (int i=r+1; i<size; i++) in[r*stride + i] *= max; for (int i=0; i<size; i++) out[r*stride + i] *= max; // Row subtract to generate 0's in the current column, so it matches an identity matrix... 
for (int i=0; i<size; i++) { if (i==r) continue; T factor = in[i*stride + r]; in[i*stride + r] = 0.0; for (int j=r+1; j<size; j++) in[i*stride + j] -= factor * in[r*stride + j]; for (int j=0; j<size; j++) out[i*stride + j] -= factor * out[r*stride + j]; } } return true; } #endif """
Python
# -*- coding: utf-8 -*- # Copyright (c) 2010, Tom SF Haines # All rights reserved. # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. # * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. import sys import time class ProgBar: """Simple console progress bar class. 
Note that object creation and destruction matter, as they indicate when processing starts and when it stops.""" def __init__(self, width = 60, onCallback = None): self.start = time.time() self.fill = 0 self.width = width self.onCallback = onCallback sys.stdout.write(('_'*self.width)+'\n') sys.stdout.flush() def __del__(self): self.end = time.time() self.__show(self.width) sys.stdout.write('\nDone - '+str(self.end-self.start)+' seconds\n\n') sys.stdout.flush() def callback(self, nDone, nToDo): """Hand this into the callback of methods to get a progress bar - it works by users repeatedly calling it to indicate how many units of work they have done (nDone) out of the total number of units required (nToDo).""" if self.onCallback: self.onCallback() n = int(float(self.width)*float(nDone)/float(nToDo)) n = min((n,self.width)) if n>self.fill: self.__show(n) def __show(self,n): sys.stdout.write('|'*(n-self.fill)) sys.stdout.flush() self.fill = n
Python
# Copyright (c) 2012, Tom SF Haines # All rights reserved. # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. # * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. import pydoc import inspect class DocGen: """A helper class that is used to generate documentation for the system. Outputs multiple formats simultaneously, specifically html for local reading with a webbrowser and the markup used by the wiki system on Google code.""" def __init__(self, name, title = None, summary = None): """name is the module name - primarilly used for the file names. title is the title used as applicable - if not provide it just uses the name. 
summary is an optional line to go below the title.""" if title==None: title = name if summary==None: summary = title self.doc = pydoc.HTMLDoc() self.html = open('%s.html'%name,'w') self.html.write('<html>\n') self.html.write('<head>\n') self.html.write('<title>%s</title>\n'%title) self.html.write('</head>\n') self.html.write('<body>\n') self.html_variables = '' self.html_functions = '' self.html_classes = '' self.wiki = open('%s.wiki'%name,'w') self.wiki.write('#summary %s\n\n'%summary) self.wiki.write('= %s= \n\n'%title) self.wiki_variables = '' self.wiki_functions = '' self.wiki_classes = '' def __del__(self): if self.html_variables!='': self.html.write(self.doc.bigsection('Synonyms', '#ffffff', '#8d50ff', self.html_variables)) if self.html_functions!='': self.html.write(self.doc.bigsection('Functions', '#ffffff', '#eeaa77', self.html_functions)) if self.html_classes!='': self.html.write(self.doc.bigsection('Classes', '#ffffff', '#ee77aa', self.html_classes)) self.html.write('</body>\n') self.html.write('</html>\n') self.html.close() if self.wiki_variables!='': self.wiki.write('= Variables =\n\n') self.wiki.write(self.wiki_variables) self.wiki.write('\n') if self.wiki_functions!='': self.wiki.write('= Functions =\n\n') self.wiki.write(self.wiki_functions) self.wiki.write('\n') if self.wiki_classes!='': self.wiki.write('= Classes =\n\n') self.wiki.write(self.wiki_classes) self.wiki.write('\n') self.wiki.close() def addFile(self, fn, title, fls = True): """Given a filename and section title adds the contents of said file to the output. 
Various flags influence how this works.""" html = [] wiki = [] for i, line in enumerate(open(fn,'r').readlines()): hl = line.replace('\n', '') if i==0 and fls: hl = '<strong>' + hl + '</strong>' for ext in ['py','txt']: if '.%s - '%ext in hl: s = hl.split('.%s - '%ext, 1) hl = '<i>' + s[0] + '.%s</i> - '%ext + s[1] html.append(hl) wl = line.strip() if i==0 and fls: wl = '*%s*'%wl for ext in ['py','txt']: if '.%s - '%ext in wl: s = wl.split('.%s - '%ext, 1) wl = '`' + s[0] + '.%s` - '%ext + s[1] + '\n' wiki.append(wl) self.html.write(self.doc.bigsection(title, '#ffffff', '#7799ee', '<br/>'.join(html))) self.wiki.write('== %s ==\n'%title) self.wiki.write('\n'.join(wiki)) self.wiki.write('----\n\n') def addVariable(self, var, desc): """Adds a variable to the documentation. Given the nature of this you provide it as a pair of strings - one referencing the variable, the other some kind of description of its use etc..""" self.html_variables += '<strong>%s</strong><br/>'%var self.html_variables += '%s<br/><br/>\n'%desc self.wiki_variables += '*`%s`*\n'%var self.wiki_variables += ' %s\n\n'%desc def addFunction(self, func): """Adds a function to the documentation. You provide the actual function instance.""" self.html_functions += self.doc.docroutine(func).replace('&nbsp;',' ') self.html_functions += '\n' name = func.__name__ args, varargs, keywords, defaults = inspect.getargspec(func) doc = inspect.getdoc(func) if defaults==None: defaults = list() defaults = (len(args)-len(defaults)) * [None] + list(defaults) arg_str = '' if len(args)!=0: arg_str += reduce(lambda a, b: '%s, %s'%(a,b), map(lambda arg, d: arg if d==None else '%s = %s'%(arg,d), args, defaults)) if varargs!=None: arg_str += ', *%s'%varargs if arg_str!='' else '*%s'%varargs if keywords!=None: arg_str += ', **%s'%keywords if arg_str!='' else '**%s'%keywords self.wiki_functions += '*`%s(%s)`*\n'%(name, arg_str) self.wiki_functions += ' %s\n\n'%doc def addClass(self, cls): """Adds a class to the documentation. 
You provide the actual class object.""" self.html_classes += self.doc.docclass(cls).replace('&nbsp;',' ') self.html_classes += '\n' name = cls.__name__ parents = filter(lambda a: a!=cls, inspect.getmro(cls)) doc = inspect.getdoc(cls) par_str = '' if len(parents)!=0: par_str += reduce(lambda a, b: '%s, %s'%(a,b), map(lambda p: p.__name__, parents)) self.wiki_classes += '== %s(%s) ==\n'%(name, par_str) self.wiki_classes += ' %s\n\n'%doc methods = inspect.getmembers(cls, lambda x: inspect.ismethod(x) or inspect.isbuiltin(x) or inspect.isroutine(x)) def method_key(pair): if pair[0]=='__init__': return '___' else: return pair[0] methods.sort(key=method_key) for name, method in methods: if not name.startswith('_%s'%cls.__name__) and (not inspect.ismethod(method) and name[:2]!='__'): if inspect.ismethod(method): args, varargs, keywords, defaults = inspect.getargspec(method) else: args = ['?'] varargs = None keywords = None defaults = None if defaults==None: defaults = list() defaults = (len(args)-len(defaults)) * [None] + list(defaults) arg_str = '' if len(args)!=0: arg_str += reduce(lambda a, b: '%s, %s'%(a,b), map(lambda arg, d: arg if d==None else '%s = %s'%(arg,d), args, defaults)) if varargs!=None: arg_str += ', *%s'%varargs if arg_str!='' else '*%s'%varargs if keywords!=None: arg_str += ', **%s'%keywords if arg_str!='' else '**%s'%keywords def fetch_doc(cls, name): try: method = getattr(cls, name) if method.__doc__!=None: return inspect.getdoc(method) except: pass for parent in filter(lambda a: a!=cls, inspect.getmro(cls)): ret = fetch_doc(parent, name) if ret!=None: return ret return None doc = fetch_doc(cls, name) self.wiki_classes += '*`%s(%s)`*\n'%(name, arg_str) self.wiki_classes += ' %s\n\n'%doc variables = inspect.getmembers(cls, lambda x: inspect.ismemberdescriptor(x) or isinstance(x, int) or isinstance(x, str) or isinstance(x, float)) for name, var in variables: if not name.startswith('__'): if hasattr(var, '__doc__'): d = var.__doc__ else: d = str(var) 
self.wiki_classes += '*`%s`* = %s\n\n'%(name, d)
Python
# Copyright (c) 2011, Tom SF Haines # All rights reserved. # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. # * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. import unittest import random import math from scipy.special import gammaln, psi, polygamma from scipy import weave from utils.start_cpp import start_cpp # Provides various gamma-related functions... gamma_code = start_cpp() + """ #ifndef GAMMA_CODE #define GAMMA_CODE #include <cmath> // Returns the natural logarithm of the Gamma function... // (Uses Lanczos's approximation.) double lnGamma(double z) { static const double coeff[9] = {0.99999999999980993, 676.5203681218851, -1259.1392167224028, 771.32342877765313, -176.61502916214059, 12.507343278686905, -0.13857109526572012, 9.9843695780195716e-6, 1.5056327351493116e-7}; if (z<0.5) { // Use reflection formula, as approximation doesn't work down here... 
return log(M_PI) - log(sin(M_PI*z)) - lnGamma(1.0-z); } else { double x = coeff[0]; for (int i=1;i<9;i++) x += coeff[i]/(z+i-1); double t = z + 6.5; return log(sqrt(2.0*M_PI)) + (z-0.5)*log(t) - t + log(x); } } // Calculates the Digamma function, i.e. the derivative of the log of the Gamma function - uses a partial expansion of an infinite series to 4 terms that is good for high values, and an identity to express lower values in terms of higher values... double digamma(double z) { static const double highVal = 13.0; // A bit of fiddling shows that the last term with this is of the order 1e-10, so we can expect at least 9 digits of accuracy past the decimal point. double ret = 0.0; while (z<highVal) { ret -= 1.0/z; z += 1.0; } double iz1 = 1.0/z; double iz2 = iz1*iz1; double iz4 = iz2*iz2; double iz6 = iz4*iz2; ret += log(z) - iz1/2.0 - iz2/12.0 + iz4/120.0 - iz6/252.0; return ret; } // Calculates the trigamma function - uses a partial expansion of an infinite series that is accurate for large values, and then uses an identity to express lower values in terms of higher values - same approach as for the digamma function basically... double trigamma(double z) { static const double highVal = 8.0; double ret = 0.0; while (z<highVal) { ret += 1.0/(z*z); z += 1.0; } z -= 1.0; double iz1 = 1.0/z; double iz2 = iz1*iz1; double iz3 = iz1*iz2; double iz5 = iz3*iz2; double iz7 = iz5*iz2; double iz9 = iz7*iz2; ret += iz1 - 0.5*iz2 + iz3/6.0 - iz5/30.0 + iz7/42.0 - iz9/30.0; return ret; } #endif """ def lnGamma(z): """Pointless as scipy, a library this is dependent on, defines this, but useful for testing. Returns the logorithm of the gamma function""" code = start_cpp(gamma_code) + """ return_val = lnGamma(z); """ return weave.inline(code, ['z'], support_code=gamma_code) def digamma(z): """Pointless as scipy, a library this is dependent on, defines this, but useful for testing. 
Returns an evaluation of the digamma function""" code = start_cpp(gamma_code) + """ return_val = digamma(z); """ return weave.inline(code, ['z'], support_code=gamma_code) def trigamma(z): """Pointless as scipy, a library this is dependent on, defines this, but useful for testing. Returns an evaluation of the trigamma function""" code = start_cpp(gamma_code) + """ return_val = trigamma(z); """ return weave.inline(code, ['z'], support_code=gamma_code) class TestFuncs(unittest.TestCase): """Test code for the assorted gamma-related functions.""" def test_compile(self): code = start_cpp(gamma_code) + """ """ weave.inline(code, support_code=gamma_code) def test_error_lngamma(self): for _ in xrange(1000): z = random.uniform(0.01, 100.0) own = lnGamma(z) good = gammaln(z) assert(math.fabs(own-good)<1e-12) def test_error_digamma(self): for _ in xrange(1000): z = random.uniform(0.01, 100.0) own = digamma(z) good = psi(z) assert(math.fabs(own-good)<1e-9) def test_error_trigamma(self): for _ in xrange(1000): z = random.uniform(0.01, 100.0) own = trigamma(z) good = polygamma(1,z) assert(math.fabs(own-good)<1e-9) # If this file is run do the unit tests... if __name__ == '__main__': unittest.main()
Python
# -*- coding: utf-8 -*- # Copyright (c) 2010, Tom SF Haines # All rights reserved. # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. # * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. import inspect import hashlib def start_cpp(hash_str = None): """This method does two things - firstly it adds the correct line numbers to scipy.weave code (Good for debugging) and secondly it can optionaly inserts a hash code of some other code into the code. This latter feature is useful for working around the fact the scipy.weave only recompiles if the hash of the code changes, but ignores the support_code - passing the support_code into start_cpp avoids this problem by putting its hash into the code and forcing a recompile when that code changes. 
Usage is <code variable> = start_cpp([support_code variable]) + <3 quotations to start big comment with code in, typically going over many lines.>""" frame = inspect.currentframe().f_back info = inspect.getframeinfo(frame) if hash_str==None: return '#line %i "%s"\n'%(info[1],info[0]) else: h = hashlib.md5() h.update(hash_str) hash_val = h.hexdigest() return '#line %i "%s" // %s\n'%(info[1],info[0],hash_val)
Python
# -*- coding: utf-8 -*- # Code copied from http://opencv.willowgarage.com/wiki/PythonInterface - license unknown, but presumed to be at least as liberal as bsd (The license for opencv.). import cv import numpy as np def cv2array(im): """Converts a cv array to a numpy array.""" depth2dtype = { cv.IPL_DEPTH_8U: 'uint8', cv.IPL_DEPTH_8S: 'int8', cv.IPL_DEPTH_16U: 'uint16', cv.IPL_DEPTH_16S: 'int16', cv.IPL_DEPTH_32S: 'int32', cv.IPL_DEPTH_32F: 'float32', cv.IPL_DEPTH_64F: 'float64', } arrdtype=im.depth a = np.fromstring( im.tostring(), dtype=depth2dtype[im.depth], count=im.width*im.height*im.nChannels) a.shape = (im.height,im.width,im.nChannels) return a def array2cv(a): """Converts a numpy array to a cv array, if possible.""" dtype2depth = { 'uint8': cv.IPL_DEPTH_8U, 'int8': cv.IPL_DEPTH_8S, 'uint16': cv.IPL_DEPTH_16U, 'int16': cv.IPL_DEPTH_16S, 'int32': cv.IPL_DEPTH_32S, 'float32': cv.IPL_DEPTH_32F, 'float64': cv.IPL_DEPTH_64F, } try: nChannels = a.shape[2] except: nChannels = 1 cv_im = cv.CreateImageHeader((a.shape[1],a.shape[0]), dtype2depth[str(a.dtype)], nChannels) cv.SetData(cv_im, a.tostring(), a.dtype.itemsize*nChannels*a.shape[1]) return cv_im
Python
# -*- coding: utf-8 -*- # Copyright (c) 2011, Tom SF Haines # All rights reserved. # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. # * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. import multiprocessing as mp import multiprocessing.synchronize # To make sure we have all the functionality. import types import marshal import unittest def repeat(x): """A generator that repeats the input forever - can be used with the mp_map function to give data to a function that is constant.""" while True: yield x def run_code(code,args): """Internal use function that does the work in each process.""" code = marshal.loads(code) func = types.FunctionType(code, globals(), '_') return func(*args) def mp_map(func, *iters, **keywords): """A multiprocess version of the map function. 
Note that func must limit itself to the data provided - if it accesses anything else (globals, locals to its definition.) it will fail. There is a repeat generator provided in this module to work around such issues. Note that, unlike map, this iterates the length of the shortest of inputs, rather than the longest - whilst this makes it not a perfect substitute it makes passing constant argumenmts easier as they can just repeat for infinity.""" if 'pool' in keywords: pool = keywords['pool'] else: pool = mp.Pool() code = marshal.dumps(func.func_code) jobs = [] for args in zip(*iters): jobs.append(pool.apply_async(run_code,(code,args))) for i in xrange(len(jobs)): jobs[i] = jobs[i].get() return jobs class TestMpMap(unittest.TestCase): def test_simple1(self): data = ['a','b','c','d'] def noop(data): return data data_noop = mp_map(noop, data) self.assertEqual(data, data_noop) def test_simple2(self): data = [x for x in xrange(1000)] data_double = mp_map(lambda a: a*2, data) self.assertEqual(map(lambda a: a*2,data), data_double) def test_gen(self): def gen(): for i in xrange(100): yield i data_double = mp_map(lambda a: a*2, gen()) self.assertEqual(map(lambda a: a*2,gen()), data_double) def test_repeat(self): def mult(a,b): return a*b data = [x for x in xrange(50,5000,5)] data_triple = mp_map(mult, data, repeat(3)) self.assertEqual(map(lambda a: a*3,data),data_triple) def test_none(self): data = [] data_sqr = mp_map(lambda x: x*x, data) self.assertEqual([],data_sqr) if __name__ == '__main__': unittest.main()
Python
# Copyright (c) 2012, Tom SF Haines # All rights reserved. # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. # * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. import sys import os.path import tempfile import shutil from distutils.core import setup, Extension import distutils.ccompiler import distutils.dep_util try: __default_compiler = distutils.ccompiler.new_compiler() except: __default_compiler = None def make_mod(name, base, source, openCL = False): """Uses distutils to compile a python module - really just a set of hacks to allow this to be done 'on demand', so it only compiles if the module does not exist or is older than the current source, and after compilation the program can continue on its merry way, and immediatly import the just compiled module. Note that on failure erros can be thrown - its your choice to catch them or not. 
name is the modules name, i.e. what you want to use with the import statement. base is the base directory for the module, which contains the source file - often you would want to set this to 'os.path.dirname(__file__)', assuming the .py file that imports the module is in the same directory as the code. It is this directory that the module is output to. source is the filename of the source code to compile, or alternativly a list of filenames. openCL indicates if OpenCL is used by the module, in which case it does all the necesary setup - done like this so these setting can be kept centralised, so when they need to be different for a new platform they only have to be changed in one place.""" if __default_compiler==None: raise Exception('No compiler!') # Work out the various file names - check if we actually need to do anything... if not isinstance(source, list): source = [source] source_path = map(lambda s: os.path.join(base, s), source) library_path = os.path.join(base, __default_compiler.shared_object_filename(name)) if reduce(lambda a,b: a or b, map(lambda s: distutils.dep_util.newer(s, library_path), source_path)): try: print 'b' # Backup the argv variable and create a temporary directory to do all work in... old_argv = sys.argv[:] temp_dir = tempfile.mkdtemp() # Prepare the extension... sys.argv = ['','build_ext','--build-lib', base, '--build-temp', temp_dir] comp_path = filter(lambda s: not s.endswith('.h'), source_path) depends = filter(lambda s: s.endswith('.h'), source_path) if openCL: ext = Extension(name, comp_path, include_dirs=['/usr/local/cuda/include', '/opt/AMDAPP/include'], libraries = ['OpenCL'], library_dirs = ['/usr/lib64/nvidia', '/opt/AMDAPP/lib/x86_64'], depends=depends) else: ext = Extension(name, comp_path, depends=depends) # Compile... setup(name=name, version='1.0.0', ext_modules=[ext]) finally: # Cleanup the argv variable and the temporary directory... sys.argv = old_argv shutil.rmtree(temp_dir, True)
Python
# -*- coding: utf-8 -*- # Copyright (c) 2011, Tom SF Haines # All rights reserved. # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. # * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. from start_cpp import start_cpp from numpy_help_cpp import numpy_util_code # Provides various functions to assist with manipulating python objects from c++ code. python_obj_code = numpy_util_code + start_cpp() + """ #ifndef PYTHON_OBJ_CODE #define PYTHON_OBJ_CODE // Extracts a boolean from an object... bool GetObjectBoolean(PyObject * obj, const char * name) { PyObject * b = PyObject_GetAttrString(obj, name); bool ret = b!=Py_False; Py_DECREF(b); return ret; } // Extracts an int from an object... 
int GetObjectInt(PyObject * obj, const char * name) { PyObject * i = PyObject_GetAttrString(obj, name); int ret = PyInt_AsLong(i); Py_DECREF(i); return ret; } // Extracts a float from an object... float GetObjectFloat(PyObject * obj, const char * name) { PyObject * f = PyObject_GetAttrString(obj, name); float ret = PyFloat_AsDouble(f); Py_DECREF(f); return ret; } // Extracts an array from an object, returning it as a new[] unsigned char array. You can also pass in a pointer to an int to have the size of the array stored... unsigned char * GetObjectByte1D(PyObject * obj, const char * name, int * size = 0) { PyArrayObject * nao = (PyArrayObject*)PyObject_GetAttrString(obj, name); unsigned char * ret = new unsigned char[nao->dimensions[0]]; if (size) *size = nao->dimensions[0]; for (int i=0;i<nao->dimensions[0];i++) ret[i] = Byte1D(nao,i); Py_DECREF(nao); return ret; } // Extracts an array from an object, returning it as a new[] float array. You can also pass in a pointer to an int to have the size of the array stored... float * GetObjectFloat1D(PyObject * obj, const char * name, int * size = 0) { PyArrayObject * nao = (PyArrayObject*)PyObject_GetAttrString(obj, name); float * ret = new float[nao->dimensions[0]]; if (size) *size = nao->dimensions[0]; for (int i=0;i<nao->dimensions[0];i++) ret[i] = Float1D(nao,i); Py_DECREF(nao); return ret; } #endif """
Python
# -*- coding: utf-8 -*- # Copyright (c) 2011, Tom SF Haines # All rights reserved. # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. # * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. from start_cpp import start_cpp # Defines helper functions for accessing numpy arrays... 
numpy_util_code = start_cpp() + """ #ifndef NUMPY_UTIL_CODE #define NUMPY_UTIL_CODE float & Float1D(PyArrayObject * arr, int index = 0) { return *(float*)(arr->data + index*arr->strides[0]); } float & Float2D(PyArrayObject * arr, int index1 = 0, int index2 = 0) { return *(float*)(arr->data + index1*arr->strides[0] + index2*arr->strides[1]); } float & Float3D(PyArrayObject * arr, int index1 = 0, int index2 = 0, int index3 = 0) { return *(float*)(arr->data + index1*arr->strides[0] + index2*arr->strides[1] + index3*arr->strides[2]); } unsigned char & Byte1D(PyArrayObject * arr, int index = 0) { //assert(arr->strides[0]==sizeof(unsigned char)); return *(unsigned char*)(arr->data + index*arr->strides[0]); } unsigned char & Byte2D(PyArrayObject * arr, int index1 = 0, int index2 = 0) { //assert(arr->strides[0]==sizeof(unsigned char)); return *(unsigned char*)(arr->data + index1*arr->strides[0] + index2*arr->strides[1]); } unsigned char & Byte3D(PyArrayObject * arr, int index1 = 0, int index2 = 0, int index3 = 0) { //assert(arr->strides[0]==sizeof(unsigned char)); return *(unsigned char*)(arr->data + index1*arr->strides[0] + index2*arr->strides[1] + index3*arr->strides[2]); } int & Int1D(PyArrayObject * arr, int index = 0) { //assert(arr->strides[0]==sizeof(int)); return *(int*)(arr->data + index*arr->strides[0]); } int & Int2D(PyArrayObject * arr, int index1 = 0, int index2 = 0) { //assert(arr->strides[0]==sizeof(int)); return *(int*)(arr->data + index1*arr->strides[0] + index2*arr->strides[1]); } int & Int3D(PyArrayObject * arr, int index1 = 0, int index2 = 0, int index3 = 0) { //assert(arr->strides[0]==sizeof(int)); return *(int*)(arr->data + index1*arr->strides[0] + index2*arr->strides[1] + index3*arr->strides[2]); } #endif """
Python
#! /usr/bin/env python # -*- coding: utf-8 -*- # Copyright (c) 2011, Tom SF Haines # All rights reserved. # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. # * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. import cvarray import mp_map import prog_bar import numpy_help_cpp import python_obj_cpp import matrix_cpp import gamma_cpp import setProcName import start_cpp import make import doc_gen # Setup... doc = doc_gen.DocGen('utils', 'Utilities/Miscellaneous', 'Library of miscellaneous stuff - most modules depend on this.') doc.addFile('readme.txt', 'Overview') # Variables... 
doc.addVariable('numpy_help_cpp.numpy_util_code', 'Assorted utility functions for accessing numpy arrays within scipy.weave C++ code.') doc.addVariable('python_obj_cpp.python_obj_code', 'Assorted utility functions for interfacing with python objects from scipy.weave C++ code.') doc.addVariable('matrix_cpp.matrix_code', 'Matrix manipulation routines for use in scipy.weave C++') doc.addVariable('gamma_cpp.gamma_code', 'Gamma and related functions for use in scipy.weave C++') # Functions... doc.addFunction(make.make_mod) doc.addFunction(cvarray.cv2array) doc.addFunction(cvarray.array2cv) doc.addFunction(mp_map.repeat) doc.addFunction(mp_map.mp_map) doc.addFunction(setProcName.setProcName) doc.addFunction(start_cpp.start_cpp) doc.addFunction(make.make_mod) # Classes... doc.addClass(prog_bar.ProgBar) doc.addClass(doc_gen.DocGen)
Python
#! /usr/bin/env python # -*- coding: utf-8 -*- # Copyright (c) 2010, Tom SF Haines # All rights reserved. # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. # * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. from ctypes import * def setProcName(name): """Sets the process name, linux only - useful for those programs where you might want to do a killall, but don't want to slaughter all the other python processes. Note that there are multiple mechanisms, and that the given new name can be shortened by differing amounts in differing cases.""" # Call the process control function... libc = cdll.LoadLibrary('libc.so.6') libc.prctl(15, c_char_p(name), 0, 0, 0) # Update argv... charPP = POINTER(POINTER(c_char)) argv = charPP.in_dll(libc,'_dl_argv') size = libc.strlen(argv[0]) libc.strncpy(argv[0],c_char_p(name),size) if __name__=='__main__': # Quick test that it works... 
import os ps1 = 'ps' ps2 = 'ps -f' os.system(ps1) os.system(ps2) setProcName('wibble_wobble') os.system(ps1) os.system(ps2)
Python
#! /usr/bin/env python # Copyright 2013 Tom SF Haines # Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at # http://www.apache.org/licenses/LICENSE-2.0 # Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. import ms from utils import doc_gen # Setup... doc = doc_gen.DocGen('ms', 'Mean Shift', 'Mean shift, plus kernel density estimation and subspace constrained mean shift.') doc.addFile('readme.txt', 'Overview') # Classes... doc.addClass(ms.MeanShift)
Python
# Copyright 2011 Tom SF Haines # Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at # http://www.apache.org/licenses/LICENSE-2.0 # Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. import math import numpy class PrecisionLOO: """Given a large number of samples this uses leave one out to calculate the optimal symetric precision matrix. Standard griding solution.""" def __init__(self): """Initilises with no samples but a default grid of 10^-2 to 10 in 128 incriments that are linear in log base 10 space.""" self.samples = [] self.grid = [] self.best = 1.0 self.setLogGrid(-4.0, 1.0, 128) def setLogGrid(self, low=-4.0, high = 1.0, step = 128): """Sets the grid of variances to test to contain values going from 10^low to 10^high, with inclusive linear interpolation of the exponents to obtain step values.""" self.grid = [] for i in xrange(step): exponent = low + i*(high-low)/float(step-1) self.grid.append(math.pow(10.0,exponent)) def addSample(self, sample): """Adds one or more samples to the set used for loo optimisation. 
Can either be a single vector or a data matrix, where the first dimension indexes the individual samples.""" self.samples.append(numpy.asarray(sample, dtype=numpy.float32)) def dataMatrix(self): """More for internal use - collates all the samples into a single data matrix, which is put in the internal samples array such that it does not break things - the data matrix is then returned.""" if len(self.samples)==0: return None if len(self.samples)==1 and len(self.samples[0].shape)==2: return self.samples[0] def samSize(sample): if len(sample.shape)==2: return sample.shape[0] else: return 1 count = sum(map(samSize,self.samples)) dm = numpy.empty((count, self.samples[0].shape[-1]), dtype=numpy.float32) offset = 0 for sample in self.samples: if len(sample.shape)==1: dm[offset,:] = sample offset += 1 else: dm[offset:offset+sample.shape[0],:] = sample offset += sample.shape[0] self.samples = [dm] return dm def calcVar(self, var, subset = None): """Internal method really - given a variance calculates its leave one out nll. Has an optional subset parameter, which indexes a subset of data point to be used from the data matrix.""" dm = self.dataMatrix() if subset!=None: dm = dm[subset,:] mask = numpy.empty(dm.shape[0], dtype=numpy.bool) logNorm = -0.5*dm.shape[1]*math.log(2.0*math.pi*var) nll = 0.0 for loi in xrange(dm.shape[0]): mask[:] = True mask[loi] = False delta = numpy.reshape(dm[loi,:], (1,dm.shape[1])) - dm[mask,:] delta = numpy.square(delta).sum(axis=1) delta /= var delta *= -0.5 delta += logNorm # Delta is now the log probability of the target sample in terms of the kernels emitted from all others. maxDelta = delta.max() logProb = maxDelta + math.log(numpy.exp(delta - maxDelta).sum()) # logProb is now the log of the sum of the probabilities of the left-out sample from all other samples, basically the score for leaving this sample out. 
nll -= logProb return nll def solve(self, callback=None): """Trys all the options, and selects the one that provides the best nll.""" self.best = None bestNLL = None for i, var in enumerate(self.grid): if callback!=None: callback(i,len(self.grid)) nll = self.calcVar(var) if numpy.isfinite(nll) and (self.best==None or nll<bestNLL): self.best = var bestNLL = nll def getBest(self): """Returns the best precision matrix.""" return numpy.identity(self.dataMatrix().shape[1], dtype=numpy.float32) / self.best class SubsetPrecisionLOO(PrecisionLOO): """This class performs the same task as PrecisionLOO, except it runs on a subset of data points, and in effect tunes the precision matrix for a kernel density estimate constructed using less samples than are provided to the class. Takes the mean of multiple runs with different subsets.""" def solve(self, runs, size, callback=None): """Trys all the options, and selects the one that provides the best nll. runs is the number of runs to do, with it taking the average score for each run, whilst size is how many samples to have in each run, i.e. the size to tune for.""" # First generate all the subsets of the datamatrix... dm = self.dataMatrix() subset = [] for _ in xrange(runs): subset.append(numpy.random.permutation(dm.shape[0])[:size]) # Now loop and do the work... self.best = None bestNLL = None for i, var in enumerate(self.grid): if callback!=None: callback(i,len(self.grid)) nll = 0.0 for j, ss in enumerate(subset): if callback!=None: callback(i*runs + j, len(self.grid) * runs) nll += self.calcVar(var, ss) nll /= len(subset) if numpy.isfinite(nll) and (self.best==None or nll<bestNLL): self.best = var bestNLL = nll del callback
Python
# Copyright 2011 Tom SF Haines

# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at

# http://www.apache.org/licenses/LICENSE-2.0

# Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.

# NOTE(review): this is Python 2 code ('except Exception, e', 'print', xrange); scipy.weave is Python 2 only as well.

import math
import numpy

from scipy import weave
from utils.start_cpp import start_cpp
from utils.matrix_cpp import matrix_code

from loo_cov import PrecisionLOO, SubsetPrecisionLOO # Not used below, just for conveniance.
from gmm import GMM



class KDE_INC:
    """Provides an incrimental kernel density estimate system that uses Gaussians. A kernel density estimate system with Gaussian kernels that, on reaching a cap, starts merging kernels to limit the number of kernels to a constant - done in such a way as to minimise error whilst capping computation. (Computation is quite high however - this is not a very efficient implimentation.)"""

    def __init__(self, prec, cap = 32):
        """Initialise with the precision matrix to use for the kernels, which implicitly provides the number of dimensions, and the cap on the number of kernels to allow."""
        # prec is the shared kernel precision (inverse covariance); its shape fixes the dimensionality.
        self.prec = numpy.asarray(prec, dtype=numpy.float32)
        self.gmm = GMM(prec.shape[0], cap) # Current mixture model.
        self.count = 0 # Number of samples provided so far.

        self.merge = numpy.empty((cap, cap), dtype=numpy.float32) # [i,j]; cost of merging two entrys, only valid when j<i, other values set high to avoid issues.
        self.merge[:,:] = 1e64

        # For holding the temporary merge costs calculated when adding a sample...
        self.mergeT = numpy.empty(cap, dtype=numpy.float32)

        # For the C code...
        # (Scratch space: two dims x dims matrices, passed into the weave kernels.)
        self.temp = numpy.empty((2, prec.shape[0], prec.shape[0]), dtype=numpy.float32)


    def setPrec(self, prec):
        """Changes the precision matrix - must be called before any samples are added, and must have the same dimensions as the current one."""
        self.prec = numpy.asarray(prec, dtype=numpy.float32)


    def samples(self):
        """Returns how many samples have been added to the object."""
        return self.count


    def prob(self, sample):
        """Returns the probability of the given sample - must not be called until at least one sample has been added, though it will return a positive constant if called with no samples provided."""
        if self.count!=0: return self.gmm.prob(sample)
        else: return 1.0


    def nll(self, sample):
        """Returns the negative log liklihood of the given sample - must not be called until at least one sample has been added, though it will return a positive constant if called with no samples provided."""
        if self.count!=0: return self.gmm.nll(sample)
        else: return 0.0


    def __merge(self, weightA, meanA, precA, weightB, meanB, precB):
        """Merges two Gaussians and returns the merged result, as (weight, mean, prec)"""
        # Moment-match: the merged Gaussian has the weighted-average mean, and a
        # covariance that is the weighted average of the two covariances plus the
        # spread of the two means around the new mean.
        newWeight = weightA + weightB
        newMean = weightA/newWeight * meanA + weightB/newWeight * meanB

        deltaA = meanA - newMean
        covA = numpy.linalg.inv(precA) + numpy.outer(deltaA, deltaA)
        deltaB = meanB - newMean
        covB = numpy.linalg.inv(precB) + numpy.outer(deltaB, deltaB)

        newCov = weightA/newWeight * covA + weightB/newWeight * covB
        newPrec = numpy.linalg.inv(newCov)

        return (newWeight, newMean, newPrec)


    def __calcMergeCost(self, weightA, meanA, precA, weightB, meanB, precB):
        """Calculates and returns the cost of merging two Gaussians."""
        # (For anyone wondering about the fact we are comparing them against each other rather than against the result of merging them that is because this way tends to get better results.)

        # The log determinants and delta...
        logDetA = math.log(numpy.linalg.det(precA))
        logDetB = math.log(numpy.linalg.det(precB))
        delta = meanA - meanB

        # Kullback-Leibler of representing A using B...
        klA = logDetB - logDetA
        klA += numpy.trace(numpy.dot(precB, numpy.linalg.inv(precA)))
        klA += numpy.dot(numpy.dot(delta, precB), delta)
        klA -= precA.shape[0]
        klA *= 0.5

        # Kullback-Leibler of representing B using A...
        klB = logDetA - logDetB
        klB += numpy.trace(numpy.dot(precA, numpy.linalg.inv(precB)))
        klB += numpy.dot(numpy.dot(delta, precA), delta)
        klB -= precB.shape[0]
        klB *= 0.5

        # Return a weighted average...
        return weightA * klA + weightB * klB


    def add(self, sample):
        """Adds a sample, updating the kde accordingly."""
        global weave
        try:
            # NOTE(review): weave is deliberately forced to None here, so the
            # raise always fires and the C path below is dead code - the pure
            # python fallback in the except block is what actually runs.
            weave = None # Below code is actually slowing things down. Am disabling for now.
            if weave==None: raise Exception()

            support = matrix_code + start_cpp() + """
            // Note - designed so that A and Out pointers can be the same.
            void doMerge(int size, float weightA, float * meanA, float * precA, float weightB, float * meanB, float * precB, float & weightOut, float * meanOut, float * precOut, float * tVec, float * tMat1, float * tMat2)
            {
             // Handle the weight, recording the ratios needed next...
             float wOut = weightA + weightB;
             float ratioA = weightA/wOut;
             float ratioB = weightB/wOut;
             weightOut = wOut;

             // Do the mean - simply a weighted average - store in a temporary for now...
             for (int i=0; i<size; i++)
             {
              tVec[i] = ratioA * meanA[i] + ratioB * meanB[i];
             }

             // Put the covariance of precision A into tMat1...
             for (int i=0; i<size*size; i++) tMat2[i] = precA[i];
             Inverse(tMat2, tMat1, size);

             // Add the outer product of the A delta into tMat1...
             for (int r=0; r<size; r++)
             {
              for (int c=0; c<size; c++)
              {
               tMat1[r*size + c] += (meanA[c] - tVec[c]) * (meanA[r] - tVec[r]);
              }
             }

             // Put the covariance of precision B into tMat2...
             for (int i=0; i<size*size; i++) precOut[i] = precB[i];
             Inverse(precOut, tMat2, size);

             // Add the outer product of the B delta into tMat2...
             for (int r=0; r<size; r++)
             {
              for (int c=0; c<size; c++)
              {
               tMat2[r*size + c] += (meanB[c] - tVec[c]) * (meanB[r] - tVec[r]);
              }
             }

             // Get the weighted average of the covariance matrices into tMat1...
             for (int i=0; i<size*size; i++)
             {
              tMat1[i] = ratioA * tMat1[i] + ratioB * tMat2[i];
             }

             // Dump the inverse of tMat1 into the output precision...
             Inverse(tMat1, precOut, size);

             // Copy from the temporary mean into the output mean...
             for (int i=0; i<size; i++) meanOut[i] = tVec[i];
            }

            float mergeCost(int size, float weightA, float * meanA, float * precA, float weightB, float * meanB, float * precB, float * tVec1, float * tVec2, float * tMat1, float * tMat2)
            {
             // Calculate some shared values...
             float logDetA = log(Determinant(precA, size));
             float logDetB = log(Determinant(precB, size));

             for (int i=0; i<size; i++)
             {
              tVec1[i] = meanA[i] - meanB[i];
             }
             // tVec1 now contains the delta.

             // Calculate the Kullback-Leibler divergance of substituting B for A...
             float klA = logDetB - logDetA;

             for (int i=0; i<size*size; i++) tMat1[i] = precA[i];
             if (Inverse(tMat1, tMat2, size)==false) return 0.0;
             for (int i=0; i<size; i++)
             {
              for (int j=0; j<size; j++)
              {
               klA += precB[i*size + j] * tMat2[j*size + i];
              }
             }

             for (int i=0; i<size; i++)
             {
              tVec2[i] = 0.0;
              for (int j=0; j<size; j++)
              {
               tVec2[i] += precB[i*size + j] * tVec1[j];
              }
             }
             for (int i=0; i<size; i++) klA += tVec1[i] * tVec2[i];

             klA -= size;
             klA *= 0.5;

             // Calculate the Kullback-Leibler divergance of substituting A for B...
             float klB = logDetA - logDetB;

             for (int i=0; i<size*size; i++) tMat1[i] = precB[i];
             if (Inverse(tMat1, tMat2, size)==false) return 0.0;
             for (int i=0; i<size; i++)
             {
              for (int j=0; j<size; j++)
              {
               klB += precA[i*size + j] * tMat2[j*size + i];
              }
             }

             for (int i=0; i<size; i++)
             {
              tVec2[i] = 0.0;
              for (int j=0; j<size; j++)
              {
               tVec2[i] += precA[i*size + j] * tVec1[j];
              }
             }
             for (int i=0; i<size; i++) klB += tVec1[i] * tVec2[i];

             klB -= size;
             klB *= 0.5;

             // Return a weighted average of the divergances...
             return weightA * klA + weightB * klB;
            }
            """

            code = start_cpp(support) + """
            if (count < Nweight[0])
            {
             // Pure KDE mode - just add the kernel...
             for (int i=0; i<Nsample[0]; i++)
             {
              MEAN2(count, i) = sample[i];
             }

             for (int i=0; i<Nsample[0]; i++)
             {
              for (int j=0; j<Nsample[0]; j++)
              {
               PREC3(count, i, j) = BASEPREC2(i, j);
              }
             }

             assert(Sprec[0]==sizeof(float));
             assert(Sprec[1]==sizeof(float)*Nsample[0]);

             log_norm[count] = 0.5 * log(Determinant(&PREC3(count, 0, 0), Nsample[0]));
             log_norm[count] -= 0.5 * Nsample[0] * log(2.0*M_PI);

             float w = 1.0 / (count+1);
             for (int i=0; i<=count; i++)
             {
              weight[i] = w;
             }

             // If the next sample will involve merging then we need to fill in the merging costs cache in preperation...
             if (count+1==Nweight[0])
             {
              for (int i=0; i<Nweight[0]; i++)
              {
               for (int j=0; j<i; j++)
               {
                MERGE2(i, j) = mergeCost(Nsample[0], weight[i], &MEAN2(i,0), &PREC3(i,0,0), weight[j], &MEAN2(j,0), &PREC3(j,0,0), &TEMP2(0,0), &TEMP2(1,0), &TEMPPREC3(0,0,0), &TEMPPREC3(1,0,0));
               }
              }
             }
            }
            else
            {
             // We have the maximum number of kernels - need to either merge the new kernel with an existing one, or merge two existing kernels and use the freed up slot for the new kernel...

             // Update the weights, and calculate the weight of the new kernel...
             float adjust = float(count) / float(count+1);
             for (int i=0; i<Nweight[0]; i++) weight[i] *= adjust;
             for (int i=0; i<Nweight[0]; i++)
             {
              for (int j=0; j<i; j++) MERGE2(i, j) *= adjust;
             }

             float w = 1.0 / float(count + 1.0);

             // Calculate the costs of merging the new kernel with each of the old kernels...
             for (int i=0; i<Nweight[0]; i++)
             {
              mergeT[i] = mergeCost(Nsample[0], w, sample, basePrec, weight[i], &MEAN2(i,0), &PREC3(i,0,0), &TEMP2(0,0), &TEMP2(1,0), &TEMPPREC3(0,0,0), &TEMPPREC3(1,0,0));
             }

             // Find the lowest merge cost and act accordingly - either we are merging the new kernel with an old one or merging two existing kernels and putting the new kernel in on its own...
             int lowI = 1;
             int lowJ = 0;
             for (int i=0; i<Nweight[0]; i++)
             {
              for (int j=0; j<i; j++)
              {
               if (MERGE2(i, j) < MERGE2(lowI, lowJ))
               {
                lowI = i;
                lowJ = j;
               }
              }
             }

             int lowN = 0;
             for (int i=1; i<Nweight[0]; i++)
             {
              if (mergeT[i] < mergeT[lowN]) lowN = i;
             }

             if (mergeT[lowN] < MERGE2(lowI, lowJ))
             {
              // We are merging the new kernel with an existing kernel...
              // Do the merge...
              doMerge(Nsample[0], weight[lowN], &MEAN2(lowN,0), &PREC3(lowN,0,0), w, sample, basePrec, weight[lowN], &MEAN2(lowN,0), &PREC3(lowN,0,0), &TEMP2(0,0), &TEMPPREC3(0,0,0), &TEMPPREC3(1,0,0));

              // Update the normalising constant...
              log_norm[lowN] = 0.5 * log(Determinant(&PREC3(lowN, 0, 0), Nsample[0]));
              log_norm[lowN] -= 0.5 * Nsample[0] * log(2.0*M_PI);

              // Update the array of merge costs...
              for (int i=0; i<Nweight[0]; i++)
              {
               if (i!=lowN)
               {
                float mc = mergeCost(Nsample[0], weight[i], &MEAN2(i,0), &PREC3(i,0,0), weight[lowN], &MEAN2(lowN,0), &PREC3(lowN,0,0), &TEMP2(0,0), &TEMP2(1,0), &TEMPPREC3(0,0,0), &TEMPPREC3(1,0,0));
                if (i<lowN) MERGE2(lowN, i) = mc;
                       else MERGE2(i, lowN) = mc;
               }
              }
             }
             else
             {
              // We are merging two existing kernels then putting the new kernel into the freed up spot...
              // Do the merge...
              doMerge(Nsample[0], weight[lowI], &MEAN2(lowI,0), &PREC3(lowI,0,0), weight[lowJ], &MEAN2(lowJ,0), &PREC3(lowJ,0,0), weight[lowI], &MEAN2(lowI,0), &PREC3(lowI,0,0), &TEMP2(0,0), &TEMPPREC3(0,0,0), &TEMPPREC3(1,0,0));

              // Copy in the new kernel...
              weight[lowJ] = w;
              for (int i=0; i<Nsample[0]; i++) MEAN2(lowJ,i) = sample[i];
              for (int i=0; i<Nsample[0];i++)
              {
               for (int j=0; j<Nsample[0]; j++)
               {
                PREC3(lowJ,i,j) = basePrec[i*Nsample[0] + j];
               }
              }

              // Update both normalising constants...
              log_norm[lowI] = 0.5 * log(Determinant(&PREC3(lowI, 0, 0), Nsample[0]));
              log_norm[lowI] -= 0.5 * Nsample[0] * log(2.0*M_PI);

              log_norm[lowJ] = 0.5 * log(Determinant(&PREC3(lowJ, 0, 0), Nsample[0]));
              log_norm[lowJ] -= 0.5 * Nsample[0] * log(2.0*M_PI);

              // Update the array of merge costs...
              for (int i=0; i<Nweight[0]; i++)
              {
               if (i!=lowI)
               {
                float mc = mergeCost(Nsample[0], weight[i], &MEAN2(i,0), &PREC3(i,0,0), weight[lowI], &MEAN2(lowI,0), &PREC3(lowI,0,0), &TEMP2(0,0), &TEMP2(1,0), &TEMPPREC3(0,0,0), &TEMPPREC3(1,0,0));
                if (i<lowI) MERGE2(lowI, i) = mc;
                       else MERGE2(i, lowI) = mc;
               }
              }

              for (int i=0; i<Nweight[0]; i++)
              {
               if ((i!=lowI)&&(i!=lowJ))
               {
                float mc = mergeCost(Nsample[0], weight[i], &MEAN2(i,0), &PREC3(i,0,0), weight[lowJ], &MEAN2(lowJ,0), &PREC3(lowJ,0,0), &TEMP2(0,0), &TEMP2(1,0), &TEMPPREC3(0,0,0), &TEMPPREC3(1,0,0));
                if (i<lowJ) MERGE2(lowJ, i) = mc;
                       else MERGE2(i, lowJ) = mc;
               }
              }
             }
            }
            """

            # Local aliases so weave.inline can see the arrays by name...
            sample = numpy.asarray(sample, dtype=numpy.float32).flatten()
            basePrec = self.prec
            count = self.count
            merge = self.merge
            mergeT = self.mergeT
            tempPrec = self.temp
            weight = self.gmm.weight
            mean = self.gmm.mean
            prec = self.gmm.prec
            log_norm = self.gmm.log_norm
            temp = self.gmm.temp

            weave.inline(code, ['sample', 'basePrec', 'count', 'merge', 'mergeT', 'tempPrec', 'weight', 'mean', 'prec', 'log_norm', 'temp'], support_code = support)

            self.count += 1
        except Exception, e:
            # First genuine weave failure: report it once, then permanently fall
            # back to the python implementation for the rest of the run.
            if weave!=None:
                print e
                weave = None

            if self.count<self.gmm.weight.shape[0]:
                # Pure kde phase - still below the cap, so just install the new
                # kernel and reset all weights to uniform...
                self.gmm.mean[self.count,:] = numpy.asarray(sample, dtype=numpy.float32)
                self.gmm.prec[self.count,:,:] = self.prec
                self.gmm.calcNorm(self.count)

                self.count += 1
                self.gmm.weight[:self.count] = 1.0 / float(self.count)

                if self.count==self.gmm.weight.shape[0]:
                    # Next sample starts merging - need to prepare by filling in the kl array...
                    # (Below is grossly inefficient - calculates the same things more times than is possibly funny. I'll optimise it if I ever decide that I care enough to do so.)
                    for i in xrange(self.merge.shape[0]):
                        for j in xrange(i):
                            self.merge[i,j] = self.__calcMergeCost(self.gmm.weight[i], self.gmm.mean[i,:], self.gmm.prec[i,:,:], self.gmm.weight[j], self.gmm.mean[j,:], self.gmm.prec[j,:,:])
            else:
                # Merging phase...
                sample = numpy.asarray(sample, dtype=numpy.float32)

                # Adjust weights so the new sample's weight fits in...
                adjust = float(self.count) / float(self.count+1)
                self.gmm.weight *= adjust
                for i in xrange(self.merge.shape[0]): self.merge[i,:i] *= adjust

                self.count += 1
                weight = 1.0 / float(self.count)

                # Calculate the merging costs for the new kernel versus the old kernels...
                for i in xrange(self.merge.shape[0]):
                    self.mergeT[i] = self.__calcMergeCost(weight, sample, self.prec, self.gmm.weight[i], self.gmm.mean[i,:], self.gmm.prec[i,:,:])

                # Select the best merge - it either involves the new sample or it does not...
                bestOld = numpy.unravel_index(numpy.argmin(self.merge), self.merge.shape)
                bestNew = numpy.argmin(self.mergeT)

                if self.mergeT[bestNew] < self.merge[bestOld]:
                    # Easy scenario - new kernel is being merged with an existing kernel - not too much fiddling involved...
                    # Do the merge...
                    newWeight, newMean, newPrec = self.__merge(weight, sample, self.prec, self.gmm.weight[bestNew], self.gmm.mean[bestNew,:], self.gmm.prec[bestNew,:,:])

                    # Store the result...
                    self.gmm.weight[bestNew] = newWeight
                    self.gmm.mean[bestNew,:] = newMean
                    self.gmm.prec[bestNew,:,:] = newPrec
                    self.gmm.calcNorm(bestNew)

                    # Update the merge weights...
                    for i in xrange(self.merge.shape[0]):
                        if i!=bestNew:
                            cost = self.__calcMergeCost(self.gmm.weight[i], self.gmm.mean[i,:], self.gmm.prec[i,:,:], self.gmm.weight[bestNew], self.gmm.mean[bestNew,:], self.gmm.prec[bestNew,:,:])
                            # Only the lower triangle (j<i) of self.merge is valid.
                            if i<bestNew: self.merge[bestNew,i] = cost
                            else: self.merge[i,bestNew] = cost
                else:
                    # We are merging two old kernels, and then putting the new kernel into the slot freed up - this is extra fiddly...
                    # Do the merge...
                    newWeight, newMean, newPrec = self.__merge(self.gmm.weight[bestOld[0]], self.gmm.mean[bestOld[0],:], self.gmm.prec[bestOld[0],:,:], self.gmm.weight[bestOld[1]], self.gmm.mean[bestOld[1],:], self.gmm.prec[bestOld[1],:,:])

                    # Store the result, put the new component in the other slot...
                    self.gmm.weight[bestOld[0]] = newWeight
                    self.gmm.mean[bestOld[0],:] = newMean
                    self.gmm.prec[bestOld[0],:,:] = newPrec
                    self.gmm.calcNorm(bestOld[0])

                    self.gmm.weight[bestOld[1]] = weight
                    self.gmm.mean[bestOld[1],:] = sample
                    self.gmm.prec[bestOld[1],:,:] = self.prec
                    self.gmm.calcNorm(bestOld[1])

                    # Update the merge weights for both the merged and new kernels...
                    for i in xrange(self.merge.shape[0]):
                        if i!=bestOld[0]:
                            cost = self.__calcMergeCost(self.gmm.weight[i], self.gmm.mean[i,:], self.gmm.prec[i,:,:], self.gmm.weight[bestOld[0]], self.gmm.mean[bestOld[0],:], self.gmm.prec[bestOld[0],:,:])
                            if i<bestOld[0]: self.merge[bestOld[0],i] = cost
                            else: self.merge[i,bestOld[0]] = cost

                    for i in xrange(self.merge.shape[0]):
                        if i!=bestOld[0] and i!=bestOld[1]:
                            cost = self.__calcMergeCost(self.gmm.weight[i], self.gmm.mean[i,:], self.gmm.prec[i,:,:], self.gmm.weight[bestOld[1]], self.gmm.mean[bestOld[1],:], self.gmm.prec[bestOld[1],:,:])
                            if i<bestOld[1]: self.merge[bestOld[1],i] = cost
                            else: self.merge[i,bestOld[1]] = cost


    def marginalise(self, dims):
        """Returns an object on which you can call prob(), but with only a subset of the dimensions. The set of dimensions is given as something that can be interpreted as a numpy array of integers - it is the dimensions to keep, it marginalises away everything else. The indexing of the returned object will match up with that in dims. Note that you must not have any repetitions in dims - that would create absurdity."""
        ret = self.gmm.clone()
        ret.marginalise(dims)
        return ret
Python
# Copyright 2011 Tom SF Haines

# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at

# http://www.apache.org/licenses/LICENSE-2.0

# Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.

# NOTE(review): Python 2 code ('except Exception, e', 'print', xrange); scipy.weave is Python 2 only.

import math
import numpy

from scipy import weave
from utils.start_cpp import start_cpp



# Precomputed constant used in the Gaussian normalising terms.
log_2_pi = math.log(2.0*math.pi)



class GMM:
    """Contains a Gaussian mixture model - just a list of weights, means and precision matrices. List is of fixed size, and it has functions to determine the probability of a point in space. Components with a weight of zero are often computationally ignored. Initialises empty, which is not good for normalisation of weights - don't do it until data is avaliable! Designed to be used directly by any entity that is filling it in - interface is mostly user only."""

    def __init__(self, dims, count):
        """dims is the dimension of the mixture model, count the number of mixture components it will consider using."""
        self.weight = numpy.zeros(count, dtype=numpy.float32)
        self.mean = numpy.zeros((count, dims), dtype=numpy.float32)
        self.prec = numpy.zeros((count, dims, dims), dtype=numpy.float32) # Precision, i.e. inverse covariance.
        self.log_norm = numpy.zeros(count, dtype=numpy.float32) # Logarithm of the normalising multiplicative constant.

        self.temp = numpy.empty((2, dims), dtype=numpy.float32) # To save memory chugging in the inline code.


    def clone(self):
        """Returns a clone of this object."""
        ret = GMM(self.mean.shape[1], self.mean.shape[0])
        ret.weight[:] = self.weight
        ret.mean[:,:] = self.mean
        ret.prec[:,:,:] = self.prec
        ret.log_norm[:] = self.log_norm
        return ret


    def normWeights(self):
        """Scales the weights so they sum to one, as is required for correctness."""
        self.weight /= self.weight.sum()


    def calcNorm(self, i):
        """Sets the normalising constant for a specific entry."""
        self.log_norm[i] = 0.5 * math.log(numpy.linalg.det(self.prec[i,:,:]))
        self.log_norm[i] -= 0.5 * self.mean.shape[1] * log_2_pi


    def calcNorms(self):
        """Fills in the normalising constants for all components with weight."""
        nzi = numpy.nonzero(self.weight)[0]

        for ii in xrange(nzi.shape[0]):
            self.log_norm[nzi[ii]] = 0.5 * math.log(numpy.linalg.det(self.prec[nzi[ii],:,:]))

        # NOTE(review): reconstructed layout - this vectorised subtraction is
        # taken to run once, after the loop; inside the loop it would subtract
        # from every non-zero entry repeatedly. Confirm against upstream source.
        self.log_norm[nzi] -= 0.5*self.mean.shape[1] * log_2_pi


    def prob(self, sample):
        """Given a sample vector, as something that numpy.asarray can interpret, return the normalised probability of the sample. All values must be correct for this to work. Has inline C, but if that isn't working the implimentation is fully vectorised, so should be quite fast despite being in python."""
        global weave
        try:
            if weave==None: raise Exception()

            code = start_cpp() + """
            float ret = 0.0;
            for (int i=0; i<Nweight[0]; i++)
            {
             if (weight[i]>1e-6)
             {
              // Calculate the delta...
              for (int j=0; j<Nmean[1]; j++)
              {
               TEMP2(0, j) = SAMPLE1(j) - MEAN2(i, j);
               TEMP2(1, j) = 0.0;
              }

              // Multiply the precision with the delta and put it into TEMP2(1, ...)...
              for (int j=0; j<Nmean[1]; j++)
              {
               for (int k=0; k<Nmean[1]; k++)
               {
                TEMP2(1, j) += PREC3(i, j, k) * TEMP2(0, k);
               }
              }

              // Dot product TEMP2(0, ...) and TEMP2(1, ...) to get the core of the distribution...
              float core = 0.0;
              for (int j=0; j<Nmean[1]; j++)
              {
               core += TEMP2(0, j) * TEMP2(1, j);
              }

              // Factor in the rest, add it to the return...
              float val = weight[i] * exp(-0.5 * core + log_norm[i]);
              if (std::isfinite(val)) ret += val;
             }
            }

            return_val = ret;
            """

            # Local aliases for weave.inline...
            sample = numpy.asarray(sample, dtype=numpy.float32)
            weight = self.weight
            mean = self.mean
            prec = self.prec
            log_norm = self.log_norm
            temp = self.temp

            return weave.inline(code, ['sample', 'weight', 'mean', 'prec', 'log_norm', 'temp'])
        except Exception, e:
            # Weave unavailable/broken - report once and use the numpy path from now on.
            if weave!=None:
                print e
                weave = None

            nzi = numpy.nonzero(self.weight)[0]

            sample = numpy.asarray(sample)
            delta = numpy.reshape(sample, (1,self.mean.shape[1])) - self.mean[nzi,:]
            nds = (nzi.shape[0], delta.shape[1], 1)
            core = (numpy.reshape(delta, nds) * self.prec[nzi,:,:]).sum(axis=1)
            core = (core * delta).sum(axis=1)
            core *= -0.5
            core += self.log_norm[nzi]

            core = numpy.exp(core)
            core *= self.weight[nzi]

            return core[numpy.isfinite(core)].sum() # Little bit of safety.


    def nll(self, sample):
        """Given a sample vector, as something that numpy.asarray can interpret, return the negative log liklihood of the sample. All values must be correct for this to work. Has inline C, but if that isn't working the implimentation is fully vectorised, so should be quite fast despite being in python."""
        global weave
        try:
            if weave==None: raise Exception()

            code = start_cpp() + """
            // ret accumulates log(sum_i w_i N(sample)) via pairwise log-add; it
            // starts at a very negative value, i.e. log(~0).
            float ret = -1e64;
            for (int i=0; i<Nweight[0]; i++)
            {
             if (weight[i]>1e-6)
             {
              // Calculate the delta...
              for (int j=0; j<Nmean[1]; j++)
              {
               TEMP2(0, j) = SAMPLE1(j) - MEAN2(i, j);
               TEMP2(1, j) = 0.0;
              }

              // Multiply the precision with the delta and put it into TEMP2(1, ...)...
              for (int j=0; j<Nmean[1]; j++)
              {
               for (int k=0; k<Nmean[1]; k++)
               {
                TEMP2(1, j) += PREC3(i, j, k) * TEMP2(0, k);
               }
              }

              // Dot product TEMP2(0, ...) and TEMP2(1, ...) to get the core of the distribution...
              float core = 0.0;
              for (int j=0; j<Nmean[1]; j++)
              {
               core += TEMP2(0, j) * TEMP2(1, j);
              }

              // Factor in the rest, add it to the return...
              float val = log(weight[i]) + log_norm[i] - 0.5 * core;
              if (std::isfinite(val))
              {
               if (ret>val) { ret = ret + log(1.0 + exp(val - ret)); }
               else { ret = val + log(1.0 + exp(ret - val)); }
              }
             }
            }

            return_val = -ret;
            """

            # Local aliases for weave.inline...
            sample = numpy.asarray(sample, dtype=numpy.float32)
            weight = self.weight
            mean = self.mean
            prec = self.prec
            log_norm = self.log_norm
            temp = self.temp

            return weave.inline(code, ['sample', 'weight', 'mean', 'prec', 'log_norm', 'temp'])
        except Exception, e:
            # Weave unavailable/broken - report once and use the numpy path from now on.
            if weave!=None:
                print e
                weave = None

            nzi = numpy.nonzero(self.weight)[0]

            sample = numpy.asarray(sample)
            delta = numpy.reshape(sample, (1,self.mean.shape[1])) - self.mean[nzi,:]
            nds = (nzi.shape[0], delta.shape[1], 1)
            core = (numpy.reshape(delta, nds) * self.prec[nzi,:,:]).sum(axis=1)
            core = (core * delta).sum(axis=1)
            core *= -0.5
            core += self.log_norm[nzi]
            core += numpy.log(self.weight[nzi])

            # Stable log-sum-exp over the per-component log probabilities...
            high = core.max()
            ret = high + numpy.log(numpy.exp(core-high).sum())
            return -ret


    def marginalise(self, dims):
        """Given a list of dimensions this keeps those dimensions and drops the rest, i.e. marginalises them out. New version of this object will have the old indices remapped as indicated by dims."""
        dims = numpy.asarray(dims)

        self.mean = self.mean[:, dims]

        # Marginalisation is simple on covariances, so invert each active
        # precision, slice it down, then invert back.
        for i in xrange(self.prec.shape[0]):
            if self.weight[i]>1e-6: self.prec[i,:,:] = numpy.linalg.inv(self.prec[i,:,:])
        self.prec = self.prec[numpy.ix_(numpy.arange(self.prec.shape[0]), dims, dims)]
        for i in xrange(self.prec.shape[0]):
            if self.weight[i]>1e-6: self.prec[i,:,:] = numpy.linalg.inv(self.prec[i,:,:])

        self.calcNorms()
Python
# Copyright (c) 2012, Tom SF Haines
# All rights reserved.

# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:

# * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.

# * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.

# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.



from start_cpp import start_cpp

# Some basic matrix operations that come in use...
# This is C++ source held in a Python string, intended to be concatenated into
# scipy.weave support code - the MATRIX_CODE include guard lets several support
# blocks share it without redefinition errors.
matrix_code = start_cpp() + """
#ifndef MATRIX_CODE
#define MATRIX_CODE

template <typename T>
inline void MemSwap(T * lhs, T * rhs, int count = 1)
{
 while(count!=0)
 {
  T t = *lhs;
  *lhs = *rhs;
  *rhs = t;

  ++lhs;
  ++rhs;
  --count;
 }
}

// Calculates the determinant - you give it a pointer to the first elment of the array, and its size (It must be square), plus its stride, which would typically be identical to size, which is the default.
template <typename T>
inline T Determinant(T * pos, int size, int stride = -1)
{
 if (stride==-1) stride = size;

 if (size==1) return pos[0];
 else
 {
  if (size==2) return pos[0]*pos[stride+1] - pos[1]*pos[stride];
  else
  {
   T ret = 0.0;

   for (int i=0; i<size; i++)
   {
    if (i!=0) MemSwap(&pos[0], &pos[stride*i], size-1);
    T sub = Determinant(&pos[stride], size-1, stride) * pos[stride*i + size-1];
    if ((i+size)%2) ret += sub;
               else ret -= sub;
   }

   for (int i=1; i<size; i++)
   {
    MemSwap(&pos[(i-1)*stride], &pos[i*stride], size-1);
   }

   return ret;
  }
 }
}

// Inverts a square matrix, will fail on singular and very occasionally on
// non-singular matrices, returns true on success. Uses Gauss-Jordan elimination
// with partial pivoting.
// in is the input matrix, out the output matrix, just be aware that the input matrix is trashed.
// You have to provide its size (Its square, obviously.), and optionally a stride if different from size.
template <typename T>
inline bool Inverse(T * in, T * out, int size, int stride = -1)
{
 if (stride==-1) stride = size;

 for (int r=0; r<size; r++)
 {
  for (int c=0; c<size; c++)
  {
   out[r*stride + c] = (c==r)?1.0:0.0;
  }
 }

 for (int r=0; r<size; r++)
 {
  // Find largest pivot and swap in, fail if best we can get is 0...
  T max = in[r*stride + r];
  int index = r;
  for (int i=r+1; i<size; i++)
  {
   if (fabs(in[i*stride + r])>fabs(max))
   {
    max = in[i*stride + r];
    index = i;
   }
  }

  if (index!=r)
  {
   MemSwap(&in[index*stride], &in[r*stride], size);
   MemSwap(&out[index*stride], &out[r*stride], size);
  }
  if (fabs(max-0.0)<1e-6) return false;

  // Divide through the entire row...
  max = 1.0/max;
  in[r*stride + r] = 1.0;
  for (int i=r+1; i<size; i++) in[r*stride + i] *= max;
  for (int i=0; i<size; i++) out[r*stride + i] *= max;

  // Row subtract to generate 0's in the current column, so it matches an identity matrix...
  for (int i=0; i<size; i++)
  {
   if (i==r) continue;

   T factor = in[i*stride + r];
   in[i*stride + r] = 0.0;
   for (int j=r+1; j<size; j++) in[i*stride + j] -= factor * in[r*stride + j];
   for (int j=0; j<size; j++) out[i*stride + j] -= factor * out[r*stride + j];
  }
 }

 return true;
}

#endif
"""
Python
# -*- coding: utf-8 -*-

# Copyright (c) 2010, Tom SF Haines
# All rights reserved.

# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:

# * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.

# * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.

# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.



import sys
import time



class ProgBar:
    """Simple console progress bar. Object lifetime is meaningful: construction marks the start of processing (and prints a ruler showing the bar's full width), destruction marks the end (and prints the elapsed time)."""

    def __init__(self, width = 60, onCallback = None):
        self.start = time.time()
        self.width = width
        self.fill = 0                 # How many bar characters have been written so far.
        self.onCallback = onCallback  # Optional hook invoked on every callback() call.

        # Underscore ruler so the user can see how far the bar has to go...
        sys.stdout.write(('_'*self.width)+'\n')
        sys.stdout.flush()

    def __del__(self):
        # Complete the bar, then report how long everything took...
        self.end = time.time()
        self.__fillTo(self.width)
        sys.stdout.write('\nDone - '+str(self.end-self.start)+' seconds\n\n')
        sys.stdout.flush()

    def callback(self, nDone, nToDo):
        """Progress hook - hand this method to long-running functions; they call it repeatedly with the number of work units completed (nDone) out of the total required (nToDo)."""
        if self.onCallback:
            self.onCallback()

        # Convert the completed fraction into a bar length, clamped to the width...
        target = int(float(self.width)*float(nDone)/float(nToDo))
        if target > self.width:
            target = self.width

        # Only ever grow - the bar is append-only...
        if target > self.fill:
            self.__fillTo(target)

    def __fillTo(self, n):
        # Emit exactly enough bar characters to raise the fill level to n...
        sys.stdout.write('|'*(n-self.fill))
        sys.stdout.flush()
        self.fill = n
Python
# Copyright (c) 2012, Tom SF Haines
# All rights reserved.

# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:

# * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.

# * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.

# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

# NOTE(review): Python 2 code (relies on builtin reduce/map/filter semantics and
# inspect.getargspec). Opens '<name>.html' and '<name>.wiki' in the cwd on
# construction and finishes writing them in __del__, so output depends on the
# object actually being destructed.

import pydoc
import inspect



class DocGen:
    """A helper class that is used to generate documentation for the system. Outputs multiple formats simultaneously, specifically html for local reading with a webbrowser and the markup used by the wiki system on Google code."""

    def __init__(self, name, title = None, summary = None):
        """name is the module name - primarilly used for the file names. title is the title used as applicable - if not provide it just uses the name. summary is an optional line to go below the title."""
        if title==None: title = name
        if summary==None: summary = title

        self.doc = pydoc.HTMLDoc()

        self.html = open('%s.html'%name,'w')
        self.html.write('<html>\n')
        self.html.write('<head>\n')
        self.html.write('<title>%s</title>\n'%title)
        self.html.write('</head>\n')
        self.html.write('<body>\n')

        # Accumulators - sections are buffered and only written out in __del__.
        self.html_variables = ''
        self.html_functions = ''
        self.html_classes = ''

        self.wiki = open('%s.wiki'%name,'w')
        self.wiki.write('#summary %s\n\n'%summary)
        self.wiki.write('= %s= \n\n'%title)

        self.wiki_variables = ''
        self.wiki_functions = ''
        self.wiki_classes = ''


    def __del__(self):
        # Flush the buffered sections and close both output files.
        if self.html_variables!='':
            self.html.write(self.doc.bigsection('Synonyms', '#ffffff', '#8d50ff', self.html_variables))
        if self.html_functions!='':
            self.html.write(self.doc.bigsection('Functions', '#ffffff', '#eeaa77', self.html_functions))
        if self.html_classes!='':
            self.html.write(self.doc.bigsection('Classes', '#ffffff', '#ee77aa', self.html_classes))

        self.html.write('</body>\n')
        self.html.write('</html>\n')
        self.html.close()

        if self.wiki_variables!='':
            self.wiki.write('= Variables =\n\n')
            self.wiki.write(self.wiki_variables)
            self.wiki.write('\n')
        if self.wiki_functions!='':
            self.wiki.write('= Functions =\n\n')
            self.wiki.write(self.wiki_functions)
            self.wiki.write('\n')
        if self.wiki_classes!='':
            self.wiki.write('= Classes =\n\n')
            self.wiki.write(self.wiki_classes)
            self.wiki.write('\n')
        self.wiki.close()


    def addFile(self, fn, title, fls = True):
        """Given a filename and section title adds the contents of said file to the output. Various flags influence how this works."""
        # fls: when True the first line of the file is emphasised (bold/starred).
        html = []
        wiki = []
        for i, line in enumerate(open(fn,'r').readlines()):
            hl = line.replace('\n', '')
            if i==0 and fls: hl = '<strong>' + hl + '</strong>'
            # Italicise 'something.py - ' / 'something.txt - ' prefixes...
            for ext in ['py','txt']:
                if '.%s - '%ext in hl:
                    s = hl.split('.%s - '%ext, 1)
                    hl = '<i>' + s[0] + '.%s</i> - '%ext + s[1]
            html.append(hl)

            wl = line.strip()
            if i==0 and fls: wl = '*%s*'%wl
            for ext in ['py','txt']:
                if '.%s - '%ext in wl:
                    s = wl.split('.%s - '%ext, 1)
                    wl = '`' + s[0] + '.%s` - '%ext + s[1] + '\n'
            wiki.append(wl)

        self.html.write(self.doc.bigsection(title, '#ffffff', '#7799ee', '<br/>'.join(html)))

        self.wiki.write('== %s ==\n'%title)
        self.wiki.write('\n'.join(wiki))
        self.wiki.write('----\n\n')


    def addVariable(self, var, desc):
        """Adds a variable to the documentation. Given the nature of this you provide it as a pair of strings - one referencing the variable, the other some kind of description of its use etc.."""
        self.html_variables += '<strong>%s</strong><br/>'%var
        self.html_variables += '%s<br/><br/>\n'%desc

        self.wiki_variables += '*`%s`*\n'%var
        self.wiki_variables += ' %s\n\n'%desc


    def addFunction(self, func):
        """Adds a function to the documentation. You provide the actual function instance."""
        self.html_functions += self.doc.docroutine(func).replace('&nbsp;',' ')
        self.html_functions += '\n'

        name = func.__name__
        args, varargs, keywords, defaults = inspect.getargspec(func)
        doc = inspect.getdoc(func)

        # Left-pad defaults with None so it lines up positionally with args...
        if defaults==None: defaults = list()
        defaults = (len(args)-len(defaults)) * [None] + list(defaults)

        arg_str = ''
        if len(args)!=0:
            arg_str += reduce(lambda a, b: '%s, %s'%(a,b), map(lambda arg, d: arg if d==None else '%s = %s'%(arg,d), args, defaults))
        if varargs!=None: arg_str += ', *%s'%varargs if arg_str!='' else '*%s'%varargs
        if keywords!=None: arg_str += ', **%s'%keywords if arg_str!='' else '**%s'%keywords

        self.wiki_functions += '*`%s(%s)`*\n'%(name, arg_str)
        self.wiki_functions += ' %s\n\n'%doc


    def addClass(self, cls):
        """Adds a class to the documentation. You provide the actual class object."""
        self.html_classes += self.doc.docclass(cls).replace('&nbsp;',' ')
        self.html_classes += '\n'

        name = cls.__name__
        parents = filter(lambda a: a!=cls, inspect.getmro(cls))
        doc = inspect.getdoc(cls)

        par_str = ''
        if len(parents)!=0:
            par_str += reduce(lambda a, b: '%s, %s'%(a,b), map(lambda p: p.__name__, parents))

        self.wiki_classes += '== %s(%s) ==\n'%(name, par_str)
        self.wiki_classes += ' %s\n\n'%doc

        methods = inspect.getmembers(cls, lambda x: inspect.ismethod(x) or inspect.isbuiltin(x) or inspect.isroutine(x))
        # Sort key forces __init__ to the front ('___' sorts before other dunder names)...
        def method_key(pair):
            if pair[0]=='__init__': return '___'
            else: return pair[0]
        methods.sort(key=method_key)

        for name, method in methods:
            # NOTE(review): this filter reads as excluding bound methods AND all
            # dunder names, which makes the ismethod branch below unreachable -
            # possibly an 'or' / parenthesisation slip in the original; verify
            # against upstream before relying on it.
            if not name.startswith('_%s'%cls.__name__) and (not inspect.ismethod(method) and name[:2]!='__'):
                if inspect.ismethod(method):
                    args, varargs, keywords, defaults = inspect.getargspec(method)
                else:
                    # Builtins/descriptors - signature not introspectable...
                    args = ['?']
                    varargs = None
                    keywords = None
                    defaults = None

                if defaults==None: defaults = list()
                defaults = (len(args)-len(defaults)) * [None] + list(defaults)

                arg_str = ''
                if len(args)!=0:
                    arg_str += reduce(lambda a, b: '%s, %s'%(a,b), map(lambda arg, d: arg if d==None else '%s = %s'%(arg,d), args, defaults))
                if varargs!=None: arg_str += ', *%s'%varargs if arg_str!='' else '*%s'%varargs
                if keywords!=None: arg_str += ', **%s'%keywords if arg_str!='' else '**%s'%keywords

                # Walk the mro to find a docstring, so inherited docs are used when the override has none...
                def fetch_doc(cls, name):
                    try:
                        method = getattr(cls, name)
                        if method.__doc__!=None:
                            return inspect.getdoc(method)
                    except: pass

                    for parent in filter(lambda a: a!=cls, inspect.getmro(cls)):
                        ret = fetch_doc(parent, name)
                        if ret!=None: return ret

                    return None

                doc = fetch_doc(cls, name)

                self.wiki_classes += '*`%s(%s)`*\n'%(name, arg_str)
                self.wiki_classes += ' %s\n\n'%doc

        variables = inspect.getmembers(cls, lambda x: inspect.ismemberdescriptor(x) or isinstance(x, int) or isinstance(x, str) or isinstance(x, float))
        for name, var in variables:
            if not name.startswith('__'):
                # NOTE(review): nearly everything has a __doc__ attribute, so the
                # str(var) branch rarely fires - confirm intent against upstream.
                if hasattr(var, '__doc__'): d = var.__doc__
                else: d = str(var)
                self.wiki_classes += '*`%s`* = %s\n\n'%(name, d)
Python
# Copyright (c) 2011, Tom SF Haines # All rights reserved. # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. # * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. import unittest import random import math from scipy.special import gammaln, psi, polygamma from scipy import weave from utils.start_cpp import start_cpp # Provides various gamma-related functions... gamma_code = start_cpp() + """ #ifndef GAMMA_CODE #define GAMMA_CODE #include <cmath> // Returns the natural logarithm of the Gamma function... // (Uses Lanczos's approximation.) double lnGamma(double z) { static const double coeff[9] = {0.99999999999980993, 676.5203681218851, -1259.1392167224028, 771.32342877765313, -176.61502916214059, 12.507343278686905, -0.13857109526572012, 9.9843695780195716e-6, 1.5056327351493116e-7}; if (z<0.5) { // Use reflection formula, as approximation doesn't work down here... 
return log(M_PI) - log(sin(M_PI*z)) - lnGamma(1.0-z); } else { double x = coeff[0]; for (int i=1;i<9;i++) x += coeff[i]/(z+i-1); double t = z + 6.5; return log(sqrt(2.0*M_PI)) + (z-0.5)*log(t) - t + log(x); } } // Calculates the Digamma function, i.e. the derivative of the log of the Gamma function - uses a partial expansion of an infinite series to 4 terms that is good for high values, and an identity to express lower values in terms of higher values... double digamma(double z) { static const double highVal = 13.0; // A bit of fiddling shows that the last term with this is of the order 1e-10, so we can expect at least 9 digits of accuracy past the decimal point. double ret = 0.0; while (z<highVal) { ret -= 1.0/z; z += 1.0; } double iz1 = 1.0/z; double iz2 = iz1*iz1; double iz4 = iz2*iz2; double iz6 = iz4*iz2; ret += log(z) - iz1/2.0 - iz2/12.0 + iz4/120.0 - iz6/252.0; return ret; } // Calculates the trigamma function - uses a partial expansion of an infinite series that is accurate for large values, and then uses an identity to express lower values in terms of higher values - same approach as for the digamma function basically... double trigamma(double z) { static const double highVal = 8.0; double ret = 0.0; while (z<highVal) { ret += 1.0/(z*z); z += 1.0; } z -= 1.0; double iz1 = 1.0/z; double iz2 = iz1*iz1; double iz3 = iz1*iz2; double iz5 = iz3*iz2; double iz7 = iz5*iz2; double iz9 = iz7*iz2; ret += iz1 - 0.5*iz2 + iz3/6.0 - iz5/30.0 + iz7/42.0 - iz9/30.0; return ret; } #endif """ def lnGamma(z): """Pointless as scipy, a library this is dependent on, defines this, but useful for testing. Returns the logorithm of the gamma function""" code = start_cpp(gamma_code) + """ return_val = lnGamma(z); """ return weave.inline(code, ['z'], support_code=gamma_code) def digamma(z): """Pointless as scipy, a library this is dependent on, defines this, but useful for testing. 
Returns an evaluation of the digamma function""" code = start_cpp(gamma_code) + """ return_val = digamma(z); """ return weave.inline(code, ['z'], support_code=gamma_code) def trigamma(z): """Pointless as scipy, a library this is dependent on, defines this, but useful for testing. Returns an evaluation of the trigamma function""" code = start_cpp(gamma_code) + """ return_val = trigamma(z); """ return weave.inline(code, ['z'], support_code=gamma_code) class TestFuncs(unittest.TestCase): """Test code for the assorted gamma-related functions.""" def test_compile(self): code = start_cpp(gamma_code) + """ """ weave.inline(code, support_code=gamma_code) def test_error_lngamma(self): for _ in xrange(1000): z = random.uniform(0.01, 100.0) own = lnGamma(z) good = gammaln(z) assert(math.fabs(own-good)<1e-12) def test_error_digamma(self): for _ in xrange(1000): z = random.uniform(0.01, 100.0) own = digamma(z) good = psi(z) assert(math.fabs(own-good)<1e-9) def test_error_trigamma(self): for _ in xrange(1000): z = random.uniform(0.01, 100.0) own = trigamma(z) good = polygamma(1,z) assert(math.fabs(own-good)<1e-9) # If this file is run do the unit tests... if __name__ == '__main__': unittest.main()
Python
# -*- coding: utf-8 -*- # Copyright (c) 2010, Tom SF Haines # All rights reserved. # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. # * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. import inspect import hashlib def start_cpp(hash_str = None): """This method does two things - firstly it adds the correct line numbers to scipy.weave code (Good for debugging) and secondly it can optionaly inserts a hash code of some other code into the code. This latter feature is useful for working around the fact the scipy.weave only recompiles if the hash of the code changes, but ignores the support_code - passing the support_code into start_cpp avoids this problem by putting its hash into the code and forcing a recompile when that code changes. 
Usage is <code variable> = start_cpp([support_code variable]) + <3 quotations to start big comment with code in, typically going over many lines.>""" frame = inspect.currentframe().f_back info = inspect.getframeinfo(frame) if hash_str==None: return '#line %i "%s"\n'%(info[1],info[0]) else: h = hashlib.md5() h.update(hash_str) hash_val = h.hexdigest() return '#line %i "%s" // %s\n'%(info[1],info[0],hash_val)
Python